code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from gnuradio import gr, gr_unittest, analog, blocks
class test_cpfsk_bc(gr_unittest.TestCase):
    """Unit tests for the analog.cpfsk_bc continuous-phase FSK modulator."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        # Drop the flowgraph so each test starts from a clean state.
        self.tb = None

    def test_cpfsk_bc_001(self):
        # Test set/gets.
        op = analog.cpfsk_bc(2, 1, 2)

        op.set_amplitude(2)
        self.assertEqual(2, op.amplitude())

        # freq() reports the phase increment in radians per sample: k*pi/2.
        freq = 2 * math.pi / 2.0
        self.assertAlmostEqual(freq, op.freq(), 5)

        # The phase accumulator starts at zero.
        self.assertEqual(0, op.phase())

    def test_cpfsk_bc_002(self):
        src_data = 10 * [0, 1]
        # Bits map to +/-1 on the real axis.  Use a list comprehension
        # instead of map() so len() below also works on Python 3, where
        # map() returns a lazy iterator with no len().
        expected_result = [complex(2 * x - 1, 0) for x in src_data]

        src = blocks.vector_source_b(src_data)
        op = analog.cpfsk_bc(2, 1, 2)
        dst = blocks.vector_sink_c()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()

        result_data = dst.data()[0:len(expected_result)]
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 4)
# Entry point: run the suite, emitting an XML result file for CI consumption.
if __name__ == '__main__':
    gr_unittest.run(test_cpfsk_bc, "test_cpfsk_bc.xml")
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscall
import "unsafe"
// setTimespec builds a Timespec from whole seconds and nanoseconds.
// Both struct fields accept int64 directly, so no narrowing is needed.
func setTimespec(sec, nsec int64) Timespec {
	return Timespec{Sec: sec, Nsec: nsec}
}
// setTimeval builds a Timeval from whole seconds and microseconds.
func setTimeval(sec, usec int64) Timeval {
	return Timeval{Sec: sec, Usec: usec}
}
// SetKevent fills in the identifier, filter and flags of a Kevent_t,
// converting the int-typed arguments to the struct's field widths.
func SetKevent(k *Kevent_t, fd, mode, flags int) {
	k.Ident = uint64(fd)
	k.Filter = int16(mode)
	k.Flags = uint16(flags)
}
// SetLen sets the byte length of the buffer described by the Iovec.
func (iov *Iovec) SetLen(length int) {
	iov.Len = uint64(length)
}
// SetControllen sets the length of the ancillary (control) data buffer.
func (msghdr *Msghdr) SetControllen(length int) {
	msghdr.Controllen = uint32(length)
}
// SetLen sets the total length of the control message header.
func (cmsg *Cmsghdr) SetLen(length int) {
	cmsg.Len = uint32(length)
}
// sendfile wraps the sendfile system call: it copies up to count bytes
// from infd (starting at *offset) directly to outfd in the kernel.
// The kernel reports the number of bytes written through the pointer
// passed as the sixth syscall argument (&writtenOut); that value is
// converted and returned even when the call reports an error, so callers
// can observe partial progress.
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
	var writtenOut uint64 = 0
	_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
	written = int(writtenOut)
	// A non-zero Errno indicates failure; zero means success.
	if e1 != 0 {
		err = e1
	}
	return
}
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) | go | github | https://github.com/golang/go | src/syscall/syscall_freebsd_arm64.go |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = ["StorageLevel"]
class StorageLevel(object):
    """
    Flags for controlling the storage of an RDD. Each StorageLevel records whether to use memory,
    whether to drop the RDD to disk if it falls out of memory, whether to keep the data in memory
    in a serialized format, and whether to replicate the RDD partitions on multiple nodes.
    Also contains static constants for some commonly used storage levels, such as MEMORY_ONLY.
    """

    def __init__(self, useDisk, useMemory, useOffHeap, deserialized, replication=1):
        # Plain flag container; each attribute mirrors its constructor argument.
        self.useDisk = useDisk
        self.useMemory = useMemory
        self.useOffHeap = useOffHeap
        self.deserialized = deserialized
        self.replication = replication

    def __repr__(self):
        # Constructor-like rendering of all five flags, for debugging.
        return "StorageLevel({0}, {1}, {2}, {3}, {4})".format(
            self.useDisk, self.useMemory, self.useOffHeap,
            self.deserialized, self.replication)

    def __str__(self):
        # Human-readable summary, e.g. "Disk Memory Deserialized 1x Replicated".
        parts = []
        if self.useDisk:
            parts.append("Disk ")
        if self.useMemory:
            parts.append("Memory ")
        if self.useOffHeap:
            parts.append("Tachyon ")
        parts.append("Deserialized " if self.deserialized else "Serialized ")
        parts.append("%sx Replicated" % self.replication)
        return "".join(parts)
# Commonly used storage levels, exposed as class attributes.  The "_2"
# variants replicate each partition on two nodes; "_SER" variants keep the
# data serialized in memory (deserialized=False).
StorageLevel.DISK_ONLY = StorageLevel(True, False, False, False)
StorageLevel.DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
StorageLevel.MEMORY_ONLY = StorageLevel(False, True, False, True)
StorageLevel.MEMORY_ONLY_2 = StorageLevel(False, True, False, True, 2)
StorageLevel.MEMORY_ONLY_SER = StorageLevel(False, True, False, False)
StorageLevel.MEMORY_ONLY_SER_2 = StorageLevel(False, True, False, False, 2)
StorageLevel.MEMORY_AND_DISK = StorageLevel(True, True, False, True)
StorageLevel.MEMORY_AND_DISK_2 = StorageLevel(True, True, False, True, 2)
StorageLevel.MEMORY_AND_DISK_SER = StorageLevel(True, True, False, False)
StorageLevel.MEMORY_AND_DISK_SER_2 = StorageLevel(True, True, False, False, 2)
# Off-heap (Tachyon-backed) storage, single replica.
StorageLevel.OFF_HEAP = StorageLevel(False, False, True, False, 1)
"""
Provides a sensor to track various status aspects of a UPS.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.nut/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_NAME, CONF_USERNAME, CONF_PASSWORD,
TEMP_CELSIUS, CONF_RESOURCES, CONF_ALIAS, ATTR_STATE, STATE_UNKNOWN)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
# PyPI package required by this platform.
REQUIREMENTS = ['pynut2==2.1.2']

_LOGGER = logging.getLogger(__name__)

DEFAULT_NAME = 'NUT UPS'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 3493  # Standard NUT network port.

# NUT variable holding the raw status flags, and the synthetic sensor key
# that renders those flags in human-readable form.
KEY_STATUS = 'ups.status'
KEY_STATUS_DISPLAY = 'ups.status.display'

# Minimum interval between polls of the NUT server (see @Throttle below).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
# Map of NUT variable name -> [friendly name, unit of measurement, icon].
# Only variables listed here may be configured as sensor resources.
SENSOR_TYPES = {
    'ups.status.display': ['Status', '', 'mdi:information-outline'],
    'ups.status': ['Status Data', '', 'mdi:information-outline'],
    'ups.alarm': ['Alarms', '', 'mdi:alarm'],
    'ups.time': ['Internal Time', '', 'mdi:calendar-clock'],
    'ups.date': ['Internal Date', '', 'mdi:calendar'],
    'ups.model': ['Model', '', 'mdi:information-outline'],
    'ups.mfr': ['Manufacturer', '', 'mdi:information-outline'],
    'ups.mfr.date': ['Manufacture Date', '', 'mdi:calendar'],
    'ups.serial': ['Serial Number', '', 'mdi:information-outline'],
    'ups.vendorid': ['Vendor ID', '', 'mdi:information-outline'],
    'ups.productid': ['Product ID', '', 'mdi:information-outline'],
    'ups.firmware': ['Firmware Version', '', 'mdi:information-outline'],
    'ups.firmware.aux': ['Firmware Version 2', '', 'mdi:information-outline'],
    'ups.temperature': ['UPS Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
    'ups.load': ['Load', '%', 'mdi:gauge'],
    'ups.load.high': ['Overload Setting', '%', 'mdi:gauge'],
    'ups.id': ['System identifier', '', 'mdi:information-outline'],
    'ups.delay.start': ['Load Restart Delay', 's', 'mdi:timer'],
    'ups.delay.reboot': ['UPS Reboot Delay', 's', 'mdi:timer'],
    'ups.delay.shutdown': ['UPS Shutdown Delay', 's', 'mdi:timer'],
    'ups.timer.start': ['Load Start Timer', 's', 'mdi:timer'],
    'ups.timer.reboot': ['Load Reboot Timer', 's', 'mdi:timer'],
    'ups.timer.shutdown': ['Load Shutdown Timer', 's', 'mdi:timer'],
    'ups.test.interval': ['Self-Test Interval', 's', 'mdi:timer'],
    'ups.test.result': ['Self-Test Result', '', 'mdi:information-outline'],
    'ups.test.date': ['Self-Test Date', '', 'mdi:calendar'],
    'ups.display.language': ['Language', '', 'mdi:information-outline'],
    'ups.contacts': ['External Contacts', '', 'mdi:information-outline'],
    'ups.efficiency': ['Efficiency', '%', 'mdi:gauge'],
    'ups.power': ['Current Apparent Power', 'VA', 'mdi:flash'],
    'ups.power.nominal': ['Nominal Power', 'VA', 'mdi:flash'],
    'ups.realpower': ['Current Real Power', 'W', 'mdi:flash'],
    'ups.realpower.nominal': ['Nominal Real Power', 'W', 'mdi:flash'],
    'ups.beeper.status': ['Beeper Status', '', 'mdi:information-outline'],
    'ups.type': ['UPS Type', '', 'mdi:information-outline'],
    'ups.watchdog.status': ['Watchdog Status', '', 'mdi:information-outline'],
    'ups.start.auto': ['Start on AC', '', 'mdi:information-outline'],
    'ups.start.battery': ['Start on Battery', '', 'mdi:information-outline'],
    'ups.start.reboot': ['Reboot on Battery', '', 'mdi:information-outline'],
    'ups.shutdown': ['Shutdown Ability', '', 'mdi:information-outline'],
    'battery.charge': ['Battery Charge', '%', 'mdi:gauge'],
    'battery.charge.low': ['Low Battery Setpoint', '%', 'mdi:gauge'],
    'battery.charge.restart': ['Minimum Battery to Start', '%', 'mdi:gauge'],
    'battery.charge.warning': ['Warning Battery Setpoint', '%', 'mdi:gauge'],
    'battery.charger.status':
        ['Charging Status', '', 'mdi:information-outline'],
    'battery.voltage': ['Battery Voltage', 'V', 'mdi:flash'],
    'battery.voltage.nominal': ['Nominal Battery Voltage', 'V', 'mdi:flash'],
    'battery.voltage.low': ['Low Battery Voltage', 'V', 'mdi:flash'],
    'battery.voltage.high': ['High Battery Voltage', 'V', 'mdi:flash'],
    'battery.capacity': ['Battery Capacity', 'Ah', 'mdi:flash'],
    'battery.current': ['Battery Current', 'A', 'mdi:flash'],
    'battery.current.total': ['Total Battery Current', 'A', 'mdi:flash'],
    'battery.temperature':
        ['Battery Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
    'battery.runtime': ['Battery Runtime', 's', 'mdi:timer'],
    'battery.runtime.low': ['Low Battery Runtime', 's', 'mdi:timer'],
    'battery.runtime.restart':
        ['Minimum Battery Runtime to Start', 's', 'mdi:timer'],
    'battery.alarm.threshold':
        ['Battery Alarm Threshold', '', 'mdi:information-outline'],
    'battery.date': ['Battery Date', '', 'mdi:calendar'],
    'battery.mfr.date': ['Battery Manuf. Date', '', 'mdi:calendar'],
    'battery.packs': ['Number of Batteries', '', 'mdi:information-outline'],
    'battery.packs.bad':
        ['Number of Bad Batteries', '', 'mdi:information-outline'],
    'battery.type': ['Battery Chemistry', '', 'mdi:information-outline'],
    'input.sensitivity':
        ['Input Power Sensitivity', '', 'mdi:information-outline'],
    'input.transfer.low': ['Low Voltage Transfer', 'V', 'mdi:flash'],
    'input.transfer.high': ['High Voltage Transfer', 'V', 'mdi:flash'],
    'input.transfer.reason':
        ['Voltage Transfer Reason', '', 'mdi:information-outline'],
    'input.voltage': ['Input Voltage', 'V', 'mdi:flash'],
    'input.voltage.nominal': ['Nominal Input Voltage', 'V', 'mdi:flash'],
    'input.frequency': ['Input Line Frequency', 'hz', 'mdi:flash'],
    'input.frequency.nominal':
        ['Nominal Input Line Frequency', 'hz', 'mdi:flash'],
    'input.frequency.status':
        ['Input Frequency Status', '', 'mdi:information-outline'],
    'output.current': ['Output Current', 'A', 'mdi:flash'],
    'output.current.nominal':
        ['Nominal Output Current', 'A', 'mdi:flash'],
    'output.voltage': ['Output Voltage', 'V', 'mdi:flash'],
    'output.voltage.nominal':
        ['Nominal Output Voltage', 'V', 'mdi:flash'],
    'output.frequency': ['Output Frequency', 'hz', 'mdi:flash'],
    'output.frequency.nominal':
        ['Nominal Output Frequency', 'hz', 'mdi:flash'],
}
# Human-readable translations of the space-separated flags reported in the
# 'ups.status' NUT variable.
STATE_TYPES = {
    'OL': 'Online',
    'OB': 'On Battery',
    'LB': 'Low Battery',
    'HB': 'High Battery',
    'RB': 'Battery Needs Replaced',
    'CHRG': 'Battery Charging',
    'DISCHRG': 'Battery Discharging',
    'BYPASS': 'Bypass Active',
    'CAL': 'Runtime Calibration',
    'OFF': 'Offline',
    'OVER': 'Overloaded',
    'TRIM': 'Trimming Voltage',
    'BOOST': 'Boosting Voltage',
    'FSD': 'Forced Shutdown',
}
# Configuration schema: connection settings are optional (with defaults);
# the list of NUT variables to expose is required and validated against
# SENSOR_TYPES.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_ALIAS): cv.string,
    vol.Optional(CONF_USERNAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
    vol.Required(CONF_RESOURCES):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the NUT sensors."""
    data = PyNUTData(
        config.get(CONF_HOST), config.get(CONF_PORT), config.get(CONF_ALIAS),
        config.get(CONF_USERNAME), config.get(CONF_PASSWORD))

    # No status at all means the server is unreachable; retry setup later.
    if data.status is None:
        _LOGGER.error("NUT Sensor has no data, unable to set up")
        raise PlatformNotReady

    _LOGGER.debug('NUT Sensors Available: %s', data.status)

    name = config.get(CONF_NAME)
    sensors = []
    for resource in config[CONF_RESOURCES]:
        sensor_type = resource.lower()
        # The display-status sensor is valid whenever the raw status
        # variable is present; every other sensor must appear verbatim.
        known = (sensor_type in data.status
                 or (sensor_type == KEY_STATUS_DISPLAY
                     and KEY_STATUS in data.status))
        if not known:
            _LOGGER.warning(
                "Sensor type: %s does not appear in the NUT status "
                "output, cannot add", sensor_type)
            continue
        sensors.append(NUTSensor(name, data, sensor_type))

    # Force one unthrottled fetch to prove the connection works end to end.
    try:
        data.update(no_throttle=True)
    except data.pynuterror as err:
        _LOGGER.error("Failure while testing NUT status retrieval. "
                      "Cannot continue setup: %s", err)
        raise PlatformNotReady

    add_entities(sensors, True)
class NUTSensor(Entity):
    """Representation of a sensor entity for NUT status values."""

    def __init__(self, name, data, sensor_type):
        """Initialize the sensor."""
        friendly_name, unit, _ = SENSOR_TYPES[sensor_type]
        self._data = data
        self.type = sensor_type
        self._name = "{} {}".format(name, friendly_name)
        self._unit = unit
        self._state = None

    @property
    def name(self):
        """Return the name of the UPS sensor."""
        return self._name

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return SENSOR_TYPES[self.type][2]

    @property
    def state(self):
        """Return entity state from ups."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit

    @property
    def device_state_attributes(self):
        """Return the sensor attributes."""
        return {ATTR_STATE: self.display_state()}

    def display_state(self):
        """Return UPS display state."""
        status = self._data.status
        if status is None:
            return STATE_TYPES['OFF']
        try:
            # Translate each space-separated status flag; a KeyError from
            # a missing status key or an unknown flag falls through below.
            flags = status[KEY_STATUS].split()
            return " ".join([STATE_TYPES[flag] for flag in flags])
        except KeyError:
            return STATE_UNKNOWN

    def update(self):
        """Get the latest status and use it to update our sensor state."""
        if self._data.status is None:
            self._state = None
            return
        # The display sensor keeps a human-readable rendering; all other
        # sensors mirror their raw NUT variable (None when absent).
        if self.type == KEY_STATUS_DISPLAY:
            self._state = self.display_state()
            return
        self._state = self._data.status.get(self.type)
class PyNUTData:
    """Stores the data retrieved from NUT.

    For each entity to use, acts as the single point responsible for fetching
    updates from the server.
    """

    def __init__(self, host, port, alias, username, password):
        """Initialize the data object."""
        # Imported lazily so the dependency is only needed once configured.
        from pynut2.nut2 import PyNUTClient, PyNUTError
        self._host = host
        self._port = port
        self._alias = alias
        self._username = username
        self._password = password
        # Exposed so callers can catch library errors without importing pynut2.
        self.pynuterror = PyNUTError
        # Establish client with persistent=False to open/close connection on
        # each update call. This is more reliable with async.
        self._client = PyNUTClient(self._host, self._port,
                                   self._username, self._password, 5, False)
        # Last fetched variable dict, or None when never fetched / failed.
        self._status = None

    @property
    def status(self):
        """Get latest update if throttle allows. Return status."""
        # update() is throttled, so frequent reads are served from cache.
        self.update()
        return self._status

    def _get_alias(self):
        """Get the ups alias from NUT."""
        try:
            # Fall back to the first UPS the server reports.
            return next(iter(self._client.list_ups()))
        except self.pynuterror as err:
            _LOGGER.error("Failure getting NUT ups alias, %s", err)
            return None

    def _get_status(self):
        """Get the ups status from NUT."""
        if self._alias is None:
            self._alias = self._get_alias()
        try:
            return self._client.list_vars(self._alias)
        except (self.pynuterror, ConnectionResetError) as err:
            # Debug level only: transient network hiccups are expected and
            # simply leave the cached status as None for this cycle.
            _LOGGER.debug(
                "Error getting NUT vars for host %s: %s", self._host, err)
            return None

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self, **kwargs):
        """Fetch the latest status from NUT."""
        # **kwargs absorbs Throttle's no_throttle flag and friends.
        self._status = self._get_status()
// useHook mutates both of its arguments, which React hooks must not do.
// NOTE(review): this appears to be a negative compiler-test fixture that is
// expected to trigger a "mutate hook argument" diagnostic — confirm before
// "fixing" the mutations; the assignment order (b before a) is part of the
// fixture.
function useHook(a, b) {
  b.test = 1;
  a.test = 2;
}
# -*- coding: utf-8 -*-
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Boto translation layer for resumable uploads.
See http://code.google.com/apis/storage/docs/developer-guide.html#resumable
for details.
Resumable uploads will retry interrupted uploads, resuming at the byte
count completed by the last upload attempt. If too many retries happen with
no progress (per configurable num_retries param), the upload will be
aborted in the current process.
Unlike the boto implementation of resumable upload handler, this class does
not directly interact with tracker files.
Originally Google wrote and contributed this code to the boto project,
then copied that code back into gsutil on the release of gsutil 4.0 which
supports both boto and non-boto codepaths for resumable uploads. Any bug
fixes made to this file should also be integrated to resumable_upload_handler.py
in boto, where applicable.
TODO: gsutil-beta: Add a similar comment to the boto code.
"""
from __future__ import absolute_import
import errno
import httplib
import random
import re
import socket
import time
import urlparse
from boto import UserAgent
from boto.connection import AWSAuthConnection
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from gslib.exception import InvalidUrlError
from gslib.util import GetMaxRetryDelay
from gslib.util import GetNumRetries
from gslib.util import XML_PROGRESS_CALLBACKS
class BotoResumableUpload(object):
    """Upload helper class for resumable uploads via boto."""

    # Size of chunks read from the source file per send() call.
    BUFFER_SIZE = 8192
    # Low-level network errors considered worth retrying.
    RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
                            socket.gaierror)

    # (start, end) response indicating service has nothing (upload protocol uses
    # inclusive numbering).
    SERVICE_HAS_NOTHING = (0, -1)
    def __init__(self, tracker_callback, logger,
                 resume_url=None, num_retries=None):
        """Constructor. Instantiate once for each uploaded file.

        Args:
          tracker_callback: Callback function that takes a string argument. Used
                            by caller to track this upload across upload
                            interruption.
          logger: logging.logger instance to use for debug messages.
          resume_url: If present, attempt to resume the upload at this URL.
          num_retries: Number of times to retry the upload making no progress.
                       This count resets every time we make progress, so the
                       upload can span many more than this number of retries.
        """
        if resume_url:
            # Validates the URL and derives host/path state from it.
            self._SetUploadUrl(resume_url)
        else:
            self.upload_url = None
        self.num_retries = num_retries
        self.service_has_bytes = 0  # Byte count at last service check.
        # Save upload_start_point in instance state so caller can find how
        # much was transferred by this ResumableUploadHandler (across retries).
        self.upload_start_point = None
        self.tracker_callback = tracker_callback
        self.logger = logger
    def _SetUploadUrl(self, url):
        """Saves URL and resets upload state.

        Called when we start a new resumable upload or get a new tracker
        URL for the upload.

        Args:
          url: URL string for the upload.

        Raises InvalidUrlError if URL is syntactically invalid.
        """
        parse_result = urlparse.urlparse(url)
        # Only absolute http(s) URLs with a host component are acceptable.
        if (parse_result.scheme.lower() not in ['http', 'https'] or
                not parse_result.netloc):
            raise InvalidUrlError('Invalid upload URL (%s)' % url)
        self.upload_url = url
        self.upload_url_host = parse_result.netloc
        # NOTE(review): the query string is appended unconditionally, so a URL
        # without one yields a trailing '?' in the path — presumably harmless
        # to the service; confirm before changing.
        self.upload_url_path = '%s?%s' % (
            parse_result.path, parse_result.query)
        # A new URL invalidates any previously observed server-side progress.
        self.service_has_bytes = 0
def _BuildContentRangeHeader(self, range_spec='*', length_spec='*'):
return 'bytes %s/%s' % (range_spec, length_spec)
    def _QueryServiceState(self, conn, file_length):
        """Queries service to find out state of given upload.

        Note that this method really just makes special case use of the
        fact that the upload service always returns the current start/end
        state whenever a PUT doesn't complete.

        Args:
          conn: HTTPConnection to use for the query.
          file_length: Total length of the file.

        Returns:
          HTTP response from sending request.

        Raises:
          ResumableUploadException if problem querying service.
        """
        # Send an empty PUT so that service replies with this resumable
        # transfer's state.
        put_headers = {}
        # 'bytes */<length>' tells the service we are only asking, not sending.
        put_headers['Content-Range'] = (
            self._BuildContentRangeHeader('*', file_length))
        put_headers['Content-Length'] = '0'
        return AWSAuthConnection.make_request(
            conn, 'PUT', path=self.upload_url_path, auth_path=self.upload_url_path,
            headers=put_headers, host=self.upload_url_host)
    def _QueryServicePos(self, conn, file_length):
        """Queries service to find out what bytes it currently has.

        Args:
          conn: HTTPConnection to use for the query.
          file_length: Total length of the file.

        Returns:
          (service_start, service_end), where the values are inclusive.
          For example, (0, 2) would mean that the service has bytes 0, 1, *and* 2.

        Raises:
          ResumableUploadException if problem querying service.
        """
        resp = self._QueryServiceState(conn, file_length)
        if resp.status == 200:
            # To handle the boundary condition where the service has the complete
            # file, we return (service_start, file_length-1). That way the
            # calling code can always simply read up through service_end. (If we
            # didn't handle this boundary condition here, the caller would have
            # to check whether service_end == file_length and read one fewer byte
            # in that case.)
            return (0, file_length - 1)  # Completed upload.
        if resp.status != 308:
            # This means the service didn't have any state for the given
            # upload ID, which can happen (for example) if the caller saved
            # the upload URL to a file and then tried to restart the transfer
            # after that upload ID has gone stale. In that case we need to
            # start a new transfer (and the caller will then save the new
            # upload URL to the tracker file).
            raise ResumableUploadException(
                'Got non-308 response (%s) from service state query' %
                resp.status, ResumableTransferDisposition.START_OVER)
        got_valid_response = False
        range_spec = resp.getheader('range')
        if range_spec:
            # Parse 'bytes=<from>-<to>' range_spec.
            # long() is the Python 2 builtin for arbitrary-size integers.
            m = re.search(r'bytes=(\d+)-(\d+)', range_spec)
            if m:
                service_start = long(m.group(1))
                service_end = long(m.group(2))
                got_valid_response = True
        else:
            # No Range header, which means the service does not yet have
            # any bytes. Note that the Range header uses inclusive 'from'
            # and 'to' values. Since Range 0-0 would mean that the service
            # has byte 0, omitting the Range header is used to indicate that
            # the service doesn't have any bytes.
            return self.SERVICE_HAS_NOTHING
        if not got_valid_response:
            raise ResumableUploadException(
                'Couldn\'t parse upload service state query response (%s)' %
                str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
        if conn.debug >= 1:
            self.logger.debug('Service has: Range: %d - %d.', service_start,
                              service_end)
        return (service_start, service_end)
def _StartNewResumableUpload(self, key, headers=None):
"""Starts a new resumable upload.
Args:
key: Boto Key representing the object to upload.
headers: Headers to use in the upload requests.
Raises:
ResumableUploadException if any errors occur.
"""
conn = key.bucket.connection
if conn.debug >= 1:
self.logger.debug('Starting new resumable upload.')
self.service_has_bytes = 0
# Start a new resumable upload by sending a POST request with an
# empty body and the "X-Goog-Resumable: start" header. Include any
# caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
# (and raise an exception if they tried to pass one, since it's
# a semantic error to specify it at this point, and if we were to
# include one now it would cause the service to expect that many
# bytes; the POST doesn't include the actual file bytes We set
# the Content-Length in the subsequent PUT, based on the uploaded
# file size.
post_headers = {}
for k in headers:
if k.lower() == 'content-length':
raise ResumableUploadException(
'Attempt to specify Content-Length header (disallowed)',
ResumableTransferDisposition.ABORT)
post_headers[k] = headers[k]
post_headers[conn.provider.resumable_upload_header] = 'start'
resp = conn.make_request(
'POST', key.bucket.name, key.name, post_headers)
# Get upload URL from response 'Location' header.
body = resp.read()
# Check for various status conditions.
if resp.status in [429, 500, 503]:
# Retry after a delay.
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Will wait/retry' % resp.status,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
elif resp.status != 200 and resp.status != 201:
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Aborting' % resp.status,
ResumableTransferDisposition.ABORT)
# Else we got 200 or 201 response code, indicating the resumable
# upload was created.
upload_url = resp.getheader('Location')
if not upload_url:
raise ResumableUploadException(
'No resumable upload URL found in resumable initiation '
'POST response (%s)' % body,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
self._SetUploadUrl(upload_url)
self.tracker_callback(upload_url)
    def _UploadFileBytes(self, conn, http_conn, fp, file_length,
                         total_bytes_uploaded, cb, num_cb, headers):
        """Attempts to upload file bytes.

        Makes a single attempt using an existing resumable upload connection.

        Args:
          conn: HTTPConnection from the boto Key.
          http_conn: Separate HTTPConnection for the transfer.
          fp: File pointer containing bytes to upload.
          file_length: Total length of the file.
          total_bytes_uploaded: The total number of bytes uploaded.
          cb: Progress callback function that takes (progress, total_size).
          num_cb: Granularity of the callback (maximum number of times the
              callback will be called during the file transfer). If negative,
              perform callback with each buffer read.
          headers: Headers to be used in the upload requests.

        Returns:
          (etag, generation, metageneration) from service upon success.

        Raises:
          ResumableUploadException if any problems occur.
        """
        buf = fp.read(self.BUFFER_SIZE)
        if cb:
            # The cb_count represents the number of full buffers to send between
            # cb executions.
            # NOTE(review): this relies on Python 2 integer division; under
            # Python 3 the expression would yield a float — confirm before port.
            if num_cb > 2:
                cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            i = 0
            cb(total_bytes_uploaded, file_length)
        # Build resumable upload headers for the transfer. Don't send a
        # Content-Range header if the file is 0 bytes long, because the
        # resumable upload protocol uses an *inclusive* end-range (so, sending
        # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
        put_headers = headers.copy() if headers else {}
        if file_length:
            if total_bytes_uploaded == file_length:
                # Nothing left to send; '*' range still finalizes the upload.
                range_header = self._BuildContentRangeHeader(
                    '*', file_length)
            else:
                range_header = self._BuildContentRangeHeader(
                    '%d-%d' % (total_bytes_uploaded, file_length - 1),
                    file_length)
            put_headers['Content-Range'] = range_header
        # Set Content-Length to the total bytes we'll send with this PUT.
        put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
        http_request = AWSAuthConnection.build_base_http_request(
            conn, 'PUT', path=self.upload_url_path, auth_path=None,
            headers=put_headers, host=self.upload_url_host)
        http_conn.putrequest('PUT', http_request.path)
        for k in put_headers:
            http_conn.putheader(k, put_headers[k])
        http_conn.endheaders()
        # Turn off debug on http connection so upload content isn't included
        # in debug stream.
        http_conn.set_debuglevel(0)
        while buf:
            http_conn.send(buf)
            total_bytes_uploaded += len(buf)
            if cb:
                i += 1
                if i == cb_count or cb_count == -1:
                    cb(total_bytes_uploaded, file_length)
                    i = 0
            buf = fp.read(self.BUFFER_SIZE)
        # Restore http connection debug level.
        http_conn.set_debuglevel(conn.debug)
        if cb:
            # Final callback so the caller sees 100% progress.
            cb(total_bytes_uploaded, file_length)
        if total_bytes_uploaded != file_length:
            # Abort (and delete the tracker file) so if the user retries
            # they'll start a new resumable upload rather than potentially
            # attempting to pick back up later where we left off.
            raise ResumableUploadException(
                'File changed during upload: EOF at %d bytes of %d byte file.' %
                (total_bytes_uploaded, file_length),
                ResumableTransferDisposition.ABORT)
        resp = http_conn.getresponse()
        # Restore http connection debug level.
        http_conn.set_debuglevel(conn.debug)
        if resp.status == 200:
            # Success.
            return (resp.getheader('etag'),
                    resp.getheader('x-goog-generation'),
                    resp.getheader('x-goog-metageneration'))
        # Retry timeout (408) and status 429, 500 and 503 errors after a delay.
        elif resp.status in [408, 429, 500, 503]:
            disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
        else:
            # Catch all for any other error codes.
            disposition = ResumableTransferDisposition.ABORT
        raise ResumableUploadException('Got response code %d while attempting '
                                       'upload (%s)' %
                                       (resp.status, resp.reason), disposition)
    def _AttemptResumableUpload(self, key, fp, file_length, headers, cb,
                                num_cb):
        """Attempts a resumable upload.

        Args:
          key: Boto key representing object to upload.
          fp: File pointer containing upload bytes.
          file_length: Total length of the upload.
          headers: Headers to be used in upload requests.
          cb: Progress callback function that takes (progress, total_size).
          num_cb: Granularity of the callback (maximum number of times the
              callback will be called during the file transfer). If negative,
              perform callback with each buffer read.

        Returns:
          (etag, generation, metageneration) from service upon success.

        Raises:
          ResumableUploadException if any problems occur.
        """
        (service_start, service_end) = self.SERVICE_HAS_NOTHING
        conn = key.bucket.connection
        if self.upload_url:
            # Try to resume existing resumable upload.
            try:
                (service_start, service_end) = (
                    self._QueryServicePos(conn, file_length))
                self.service_has_bytes = service_start
                if conn.debug >= 1:
                    self.logger.debug('Resuming transfer.')
            except ResumableUploadException, e:
                # Stale/invalid upload URL: fall back to starting fresh.
                if conn.debug >= 1:
                    self.logger.debug('Unable to resume transfer (%s).', e.message)
                self._StartNewResumableUpload(key, headers)
        else:
            self._StartNewResumableUpload(key, headers)
        # upload_start_point allows the code that instantiated the
        # ResumableUploadHandler to find out the point from which it started
        # uploading (e.g., so it can correctly compute throughput).
        if self.upload_start_point is None:
            self.upload_start_point = service_end
        # service_end is inclusive, so the next byte to send is one past it.
        total_bytes_uploaded = service_end + 1
        # Start reading from the file based upon the number of bytes that the
        # server has so far.
        if total_bytes_uploaded < file_length:
            fp.seek(total_bytes_uploaded)
        conn = key.bucket.connection
        # Get a new HTTP connection (vs conn.get_http_connection(), which reuses
        # pool connections) because httplib requires a new HTTP connection per
        # transaction. (Without this, calling http_conn.getresponse() would get
        # "ResponseNotReady".)
        http_conn = conn.new_http_connection(self.upload_url_host, conn.port,
                                             conn.is_secure)
        http_conn.set_debuglevel(conn.debug)
        # Make sure to close http_conn at end so if a local file read
        # failure occurs partway through service will terminate current upload
        # and can report that progress on next attempt.
        try:
            return self._UploadFileBytes(conn, http_conn, fp, file_length,
                                         total_bytes_uploaded, cb, num_cb,
                                         headers)
        except (ResumableUploadException, socket.error):
            # Ask the service what went wrong before deciding how to proceed.
            resp = self._QueryServiceState(conn, file_length)
            if resp.status == 400:
                raise ResumableUploadException(
                    'Got 400 response from service state query after failed resumable '
                    'upload attempt. This can happen for various reasons, including '
                    'specifying an invalid request (e.g., an invalid canned ACL) or '
                    'if the file size changed between upload attempts',
                    ResumableTransferDisposition.ABORT)
            else:
                raise
        finally:
            http_conn.close()
def HandleResumableUploadException(self, e, debug):
if e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS:
if debug >= 1:
self.logger.debug('Caught non-retryable ResumableUploadException (%s); '
'aborting but retaining tracker file', e.message)
raise
elif e.disposition == ResumableTransferDisposition.ABORT:
if debug >= 1:
self.logger.debug('Caught non-retryable ResumableUploadException (%s); '
'aborting and removing tracker file', e.message)
raise
elif e.disposition == ResumableTransferDisposition.START_OVER:
raise
else:
if debug >= 1:
self.logger.debug(
'Caught ResumableUploadException (%s) - will retry', e.message)
def TrackProgressLessIterations(self, service_had_bytes_before_attempt,
debug=0):
"""Tracks the number of iterations without progress.
Performs randomized exponential backoff.
Args:
service_had_bytes_before_attempt: Number of bytes the service had prior
to this upload attempt.
debug: debug level 0..3
"""
# At this point we had a re-tryable failure; see if made progress.
if self.service_has_bytes > service_had_bytes_before_attempt:
self.progress_less_iterations = 0 # If progress, reset counter.
else:
self.progress_less_iterations += 1
if self.progress_less_iterations > self.num_retries:
# Don't retry any longer in the current process.
raise ResumableUploadException(
'Too many resumable upload attempts failed without '
'progress. You might try this upload again later',
ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Use binary exponential backoff to desynchronize client requests.
sleep_time_secs = min(random.random() * (2**self.progress_less_iterations),
GetMaxRetryDelay())
if debug >= 1:
self.logger.debug('Got retryable failure (%d progress-less in a row).\n'
'Sleeping %3.1f seconds before re-trying',
self.progress_less_iterations, sleep_time_secs)
time.sleep(sleep_time_secs)
  def SendFile(self, key, fp, size, headers, canned_acl=None, cb=None,
               num_cb=XML_PROGRESS_CALLBACKS):
    """Upload a file to a key into a bucket on GS, resumable upload protocol.

    Args:
      key: `boto.s3.key.Key` or subclass representing the upload destination.
      fp: File pointer to upload
      size: Size of the file to upload.
      headers: The headers to pass along with the PUT request
      canned_acl: Optional canned ACL to apply to object.
      cb: Callback function that will be called to report progress on
          the upload.  The callback should accept two integer parameters, the
          first representing the number of bytes that have been successfully
          transmitted to GS, and the second representing the total number of
          bytes that need to be transmitted.
      num_cb: (optional) If a callback is specified with the cb parameter, this
          parameter determines the granularity of the callback by defining
          the maximum number of times the callback will be called during the
          file transfer. Providing a negative integer will cause your
          callback to be called with each buffer read.

    Raises:
      ResumableUploadException if a problem occurs during the transfer.
    """
    # NOTE: this module uses Python 2 `except X, e` syntax throughout.
    if not headers:
      headers = {}
    # If Content-Type header is present and set to None, remove it.
    # This is gsutil's way of asking boto to refrain from auto-generating
    # that header.
    content_type = 'Content-Type'
    if content_type in headers and headers[content_type] is None:
      del headers[content_type]
    if canned_acl:
      headers[key.provider.acl_header] = canned_acl
    headers['User-Agent'] = UserAgent
    file_length = size
    debug = key.bucket.connection.debug
    # Use num-retries from constructor if one was provided; else check
    # for a value specified in the boto config file; else default to 5.
    if self.num_retries is None:
      self.num_retries = GetNumRetries()
    self.progress_less_iterations = 0
    # Each iteration below is one upload attempt; a successful attempt
    # returns, a retryable failure falls through to the randomized
    # exponential backoff in TrackProgressLessIterations().
    while True:  # Retry as long as we're making progress.
      service_had_bytes_before_attempt = self.service_has_bytes
      try:
        # Save generation and metageneration in class state so caller
        # can find these values, for use in preconditions of future
        # operations on the uploaded object.
        (_, self.generation, self.metageneration) = (
            self._AttemptResumableUpload(key, fp, file_length,
                                         headers, cb, num_cb))
        key.generation = self.generation
        if debug >= 1:
          self.logger.debug('Resumable upload complete.')
        return
      except self.RETRYABLE_EXCEPTIONS, e:
        if debug >= 1:
          self.logger.debug('Caught exception (%s)', e.__repr__())
        if isinstance(e, IOError) and e.errno == errno.EPIPE:
          # Broken pipe error causes httplib to immediately
          # close the socket (http://bugs.python.org/issue5542),
          # so we need to close the connection before we resume
          # the upload (which will cause a new connection to be
          # opened the next time an HTTP request is sent).
          key.bucket.connection.connection.close()
      except ResumableUploadException, e:
        # Re-raises for non-retryable dispositions; logs and returns
        # for retryable ones so the loop continues below.
        self.HandleResumableUploadException(e, debug)
      self.TrackProgressLessIterations(service_had_bytes_before_attempt,
                                       debug=debug)
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mtd/loongson,ls1b-nand-controller.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Loongson NAND Controller
maintainers:
- Keguang Zhang <keguang.zhang@gmail.com>
- Binbin Zhou <zhoubinbin@loongson.cn>
description:
The Loongson NAND controller abstracts all supported operations,
meaning it does not support low-level access to raw NAND flash chips.
Moreover, the controller is paired with the DMA engine to perform
READ and PROGRAM functions.
allOf:
- $ref: nand-controller.yaml
properties:
compatible:
oneOf:
- enum:
- loongson,ls1b-nand-controller
- loongson,ls1c-nand-controller
- loongson,ls2k0500-nand-controller
- loongson,ls2k1000-nand-controller
- items:
- enum:
- loongson,ls1a-nand-controller
- const: loongson,ls1b-nand-controller
reg:
minItems: 2
maxItems: 3
reg-names:
minItems: 2
items:
- const: nand
- const: nand-dma
- const: dma-config
dmas:
maxItems: 1
dma-names:
const: rxtx
required:
- compatible
- reg
- reg-names
- dmas
- dma-names
unevaluatedProperties: false
if:
properties:
compatible:
contains:
enum:
- loongson,ls2k1000-nand-controller
then:
properties:
reg:
minItems: 3
reg-names:
minItems: 3
else:
properties:
reg:
maxItems: 2
reg-names:
maxItems: 2
examples:
- |
nand-controller@1fe78000 {
compatible = "loongson,ls1b-nand-controller";
reg = <0x1fe78000 0x24>, <0x1fe78040 0x4>;
reg-names = "nand", "nand-dma";
dmas = <&dma 0>;
dma-names = "rxtx";
#address-cells = <1>;
#size-cells = <0>;
nand@0 {
reg = <0>;
label = "ls1x-nand";
nand-use-soft-ecc-engine;
nand-ecc-algo = "hamming";
};
};
- |
nand-controller@1fe26000 {
compatible = "loongson,ls2k1000-nand-controller";
reg = <0x1fe26000 0x24>,
<0x1fe26040 0x4>,
<0x1fe00438 0x8>;
reg-names = "nand", "nand-dma", "dma-config";
dmas = <&apbdma0 0>;
dma-names = "rxtx";
#address-cells = <1>;
#size-cells = <0>;
nand@0 {
reg = <0>;
label = "ls2k1000-nand";
nand-use-soft-ecc-engine;
nand-ecc-algo = "bch";
nand-ecc-strength = <8>;
nand-ecc-step-size = <512>;
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/mtd/loongson,ls1b-nand-controller.yaml |
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
class Material(object):
    """
    Base class for all materials. The main functionality is unroll() which
    returns a list of objects of type :class:`~burnman.mineral.Mineral` and their molar
    fractions. This class is available as ``burnman.Material``.

    The user needs to call set_method() (once in the beginning) and set_state()
    before querying the material with unroll() or density().

    Attributes
    ----------
    pressure : float
        The current pressure as set by :func:`~burnman.Material.set_state`. [Pa]
    temperature : float
        The current temperature as set by :func:`~burnman.Material.set_state`. [K]
    """

    def __init__(self):
        # No thermodynamic state until set_state() is called.
        self.pressure = None
        self.temperature = None

    def set_method(self, method):
        """
        Set the averaging method. See :doc:`averaging` for details.

        Notes
        -----
        Needs to be implemented in derived classes.
        """
        raise NotImplementedError("need to implement set_method() in derived class!")

    def to_string(self):
        """
        Returns a human-readable name of this material. The default implementation will return the name of the class,
        which is a reasonable default.

        Returns
        -------
        name : string
            Name of this material.
        """
        return "'" + self.__class__.__name__ + "'"

    def debug_print(self, indent=""):
        """
        Print a human-readable representation of this Material.
        """
        raise NotImplementedError("Derived classes need to implement debug_print(). This is '" + self.__class__.__name__ + "'")

    def print_minerals_of_current_state(self):
        """
        Print a human-readable representation of this Material at the current P, T as a list of minerals.
        This requires set_state() has been called before.
        """
        (frs, mins) = self.unroll()
        if len(mins) == 1:
            # Single phase: print it directly without a header.
            print(mins[0].to_string())
        else:
            print("Material %s:" % self.to_string())
            for (fr, mi) in zip(frs, mins):
                print("  %g of phase %s" % (fr, mi.to_string()))

    def set_state(self, pressure, temperature):
        """
        Set the material to the given pressure and temperature.

        Parameters
        ----------
        pressure : float
            The desired pressure in [Pa].
        temperature : float
            The desired temperature in [K].
        """
        self.pressure = pressure
        self.temperature = temperature

    def unroll(self):
        """
        Unroll this material into a list of :class:`burnman.Mineral` and their molar fractions. All averaging schemes
        then operate on this list of minerals. Note that the return value of this function may depend on the current
        state (temperature, pressure).

        Notes
        -----
        Needs to be implemented in derived classes.

        Returns
        -------
        fractions : list of float
            List of molar fractions, should sum to 1.0.
        minerals : list of :class:`burnman.Mineral`
            List of minerals.
        """
        # Dead `return ([], [])` after the raise removed -- it was unreachable.
        raise NotImplementedError("need to implement unroll() in derived class!")

    def density(self):
        """
        Returns the density of this material. Note that the return value of this function may depend on the current
        state (temperature, pressure). [kg/m^3]

        Notes
        -----
        Needs to be implemented in derived classes.

        Returns
        -------
        density : float
            The density of this material in [kg/m^3]
        """
        # Dead `return None` after the raise removed -- it was unreachable.
        raise NotImplementedError("need to implement density() in derived class!")
#! /usr/bin/env python
# encoding: utf-8
# Eclipse CDT 5.0 generator for Waf
# Richard Quirk 2009-1011 (New BSD License)
# Thomas Nagy 2011 (ported to Waf 1.6)
"""
Usage:
def options(opt):
opt.load('eclipse')
$ waf configure eclipse
"""
import sys, os
from waflib import Utils, Logs, Context, Build, TaskGen, Scripting, Errors, Node
from xml.dom.minidom import Document
# Fallback system include directories, used when the compiler cannot be
# queried for its real search path (see create_cproject).
STANDARD_INCLUDES = [ '/usr/local/include', '/usr/include' ]
# Identifier prefixes used throughout the Eclipse CDT project file format.
oe_cdt = 'org.eclipse.cdt'
cdt_mk = oe_cdt + '.make.core'
cdt_core = oe_cdt + '.core'
cdt_bld = oe_cdt + '.build.core'
# Location/name of the external-tool builder definition written for
# Java/Python-only projects (invokes waf instead of CDT's managed builder).
extbuilder_dir = '.externalToolBuilders'
extbuilder_name = 'Waf_Builder.launch'
class eclipse(Build.BuildContext):
	"""Waf command that generates Eclipse project files (.project, .cproject,
	.pydevproject, .classpath) for C/C++, Python and Java task generators."""
	cmd = 'eclipse'
	fun = Scripting.default_cmd

	def execute(self):
		"""
		Entry point: restore the build state, run the wscript functions and
		emit the Eclipse configuration files.
		"""
		self.restore()
		if not self.all_envs:
			self.load_envs()
		self.recurse([self.run_dir])
		appname = getattr(Context.g_module, Context.APPNAME, os.path.basename(self.srcnode.abspath()))
		self.create_cproject(appname, pythonpath=self.env['ECLIPSE_PYTHON_PATH'])

	# Helper to dump the XML document content to XML with UTF-8 encoding
	def write_conf_to_xml(self, filename, document):
		self.srcnode.make_node(filename).write(document.toprettyxml(encoding='UTF-8'), flags='wb')

	def create_cproject(self, appname, workspace_includes=None, pythonpath=None):
		"""
		Create the Eclipse CDT .project and .cproject files

		@param appname The name that will appear in the Project Explorer
		@param workspace_includes Optional project includes to prevent
			"Unresolved Inclusion" errors in the Eclipse editor
		@param pythonpath Optional project specific python paths
		"""
		# BUGFIX: work on copies. Both lists are appended to below; the old
		# mutable default arguments ([]) leaked entries across calls, and
		# passing self.env['ECLIPSE_PYTHON_PATH'] caused the env list to be
		# modified in place.
		workspace_includes = list(workspace_includes) if workspace_includes else []
		pythonpath = list(pythonpath) if pythonpath else []
		hasc = hasjava = haspython = False
		source_dirs = []
		cpppath = self.env['CPPPATH']
		javasrcpath = []
		javalibpath = []
		includes = STANDARD_INCLUDES
		if sys.platform != 'win32':
			# Ask the compiler for its real system include search path;
			# fall back to STANDARD_INCLUDES if that fails.
			cc = self.env.CC or self.env.CXX
			if cc:
				cmd = cc + ['-xc++', '-E', '-Wp,-v', '-']
				try:
					gccout = self.cmd_and_log(cmd, output=Context.STDERR, quiet=Context.BOTH, input='\n'.encode()).splitlines()
				except Errors.WafError:
					pass
				else:
					includes = []
					for ipath in gccout:
						if ipath.startswith(' /'):
							includes.append(ipath[1:])
			# System includes only make sense off-Windows (TODO confirm:
			# original placement of this line is ambiguous in the source).
			cpppath += includes
		Logs.warn('Generating Eclipse CDT project files')

		for g in self.groups:
			for tg in g:
				if not isinstance(tg, TaskGen.task_gen):
					continue

				tg.post()

				# Add local Python modules paths to configuration so object resolving will work in IDE
				# This may also contain generated files (ie. pyqt5 or protoc) that get picked from build
				if 'py' in tg.features:
					pypath = tg.path.relpath()
					py_installfrom = getattr(tg, 'install_from', None)
					if isinstance(py_installfrom, Node.Node):
						pypath = py_installfrom.path_from(self.root.make_node(self.top_dir))
					if pypath not in pythonpath:
						pythonpath.append(pypath)
					haspython = True

				# Add Java source directories so object resolving works in IDE
				# This may also contain generated files (ie. protoc) that get picked from build
				if 'javac' in tg.features:
					java_src = tg.path.relpath()
					java_srcdir = getattr(tg.javac_task, 'srcdir', None)
					if java_srcdir:
						if isinstance(java_srcdir, Node.Node):
							java_srcdir = [java_srcdir]
						for x in Utils.to_list(java_srcdir):
							x = x.path_from(self.root.make_node(self.top_dir))
							if x not in javasrcpath:
								javasrcpath.append(x)
					else:
						if java_src not in javasrcpath:
							javasrcpath.append(java_src)
					hasjava = True

					# Check if there are external dependencies and add them as external jar so they will be resolved by Eclipse
					usedlibs = getattr(tg, 'use', [])
					for x in Utils.to_list(usedlibs):
						for cl in Utils.to_list(tg.env['CLASSPATH_' + x]):
							if cl not in javalibpath:
								javalibpath.append(cl)

				if not getattr(tg, 'link_task', None):
					continue

				features = Utils.to_list(getattr(tg, 'features', ''))
				is_cc = 'c' in features or 'cxx' in features

				incnodes = tg.to_incnodes(tg.to_list(getattr(tg, 'includes', [])) + tg.env['INCLUDES'])
				for p in incnodes:
					path = p.path_from(self.srcnode)
					# Absolute paths go to the compiler include list,
					# relative ones become workspace-relative includes.
					if (path.startswith("/")):
						cpppath.append(path)
					else:
						workspace_includes.append(path)

					if is_cc and path not in source_dirs:
						source_dirs.append(path)

					hasc = True

		waf_executable = os.path.abspath(sys.argv[0])
		project = self.impl_create_project(sys.executable, appname, hasc, hasjava, haspython, waf_executable)
		self.write_conf_to_xml('.project', project)

		if hasc:
			project = self.impl_create_cproject(sys.executable, waf_executable, appname, workspace_includes, cpppath, source_dirs)
			self.write_conf_to_xml('.cproject', project)

		if haspython:
			project = self.impl_create_pydevproject(sys.path, pythonpath)
			self.write_conf_to_xml('.pydevproject', project)

		if hasjava:
			project = self.impl_create_javaproject(javasrcpath, javalibpath)
			self.write_conf_to_xml('.classpath', project)

	def impl_create_project(self, executable, appname, hasc, hasjava, haspython, waf_executable):
		"""Build the .project DOM; for non-C projects also write the external
		tool builder launch file that invokes waf."""
		doc = Document()
		projectDescription = doc.createElement('projectDescription')
		self.add(doc, projectDescription, 'name', appname)
		self.add(doc, projectDescription, 'comment')
		self.add(doc, projectDescription, 'projects')
		buildSpec = self.add(doc, projectDescription, 'buildSpec')
		buildCommand = self.add(doc, buildSpec, 'buildCommand')
		self.add(doc, buildCommand, 'triggers', 'clean,full,incremental,')
		arguments = self.add(doc, buildCommand, 'arguments')
		dictionaries = {}

		# If CDT is present, instruct this one to call waf as it is more flexible (separate build/clean ...)
		if hasc:
			self.add(doc, buildCommand, 'name', oe_cdt + '.managedbuilder.core.genmakebuilder')
			# the default make-style targets are overwritten by the .cproject values
			dictionaries = {
				cdt_mk + '.contents': cdt_mk + '.activeConfigSettings',
				cdt_mk + '.enableAutoBuild': 'false',
				cdt_mk + '.enableCleanBuild': 'true',
				cdt_mk + '.enableFullBuild': 'true',
				}
		else:
			# Otherwise for Java/Python an external builder tool is created that will call waf build
			self.add(doc, buildCommand, 'name', 'org.eclipse.ui.externaltools.ExternalToolBuilder')
			dictionaries = {
				'LaunchConfigHandle': '<project>/%s/%s' % (extbuilder_dir, extbuilder_name),
				}
			# The definition is in a separate directory XML file
			try:
				os.mkdir(extbuilder_dir)
			except OSError:
				pass  # Ignore error if already exists

			# Populate here the external builder XML calling waf
			builder = Document()
			# BUGFIX: these nodes used to be created via `doc` (the .project
			# document) although they are appended to `builder`; minidom
			# tolerated the ownerDocument mismatch but it is incorrect.
			launchConfiguration = builder.createElement('launchConfiguration')
			launchConfiguration.setAttribute('type', 'org.eclipse.ui.externaltools.ProgramBuilderLaunchConfigurationType')
			self.add(builder, launchConfiguration, 'booleanAttribute', {'key': 'org.eclipse.debug.ui.ATTR_LAUNCH_IN_BACKGROUND', 'value': 'false'})
			self.add(builder, launchConfiguration, 'booleanAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED', 'value': 'true'})
			self.add(builder, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_LOCATION', 'value': waf_executable})
			self.add(builder, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_RUN_BUILD_KINDS', 'value': 'full,incremental,'})
			self.add(builder, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS', 'value': 'build'})
			self.add(builder, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_WORKING_DIRECTORY', 'value': '${project_loc}'})
			builder.appendChild(launchConfiguration)
			# And write the XML to the file references before
			self.write_conf_to_xml('%s%s%s' % (extbuilder_dir, os.path.sep, extbuilder_name), builder)

		for k, v in dictionaries.items():
			self.addDictionary(doc, arguments, k, v)

		natures = self.add(doc, projectDescription, 'natures')

		if hasc:
			nature_list = """
				core.ccnature
				managedbuilder.core.ScannerConfigNature
				managedbuilder.core.managedBuildNature
				core.cnature
			""".split()
			for n in nature_list:
				self.add(doc, natures, 'nature', oe_cdt + '.' + n)

		if haspython:
			self.add(doc, natures, 'nature', 'org.python.pydev.pythonNature')
		if hasjava:
			self.add(doc, natures, 'nature', 'org.eclipse.jdt.core.javanature')

		doc.appendChild(projectDescription)
		return doc

	def impl_create_cproject(self, executable, waf_executable, appname, workspace_includes, cpppath, source_dirs=[]):
		"""Build the .cproject DOM: one 'Default' CDT configuration whose
		builder delegates to waf, plus include paths and make targets."""
		doc = Document()
		doc.appendChild(doc.createProcessingInstruction('fileVersion', '4.0.0'))
		cconf_id = cdt_core + '.default.config.1'
		cproject = doc.createElement('cproject')
		storageModule = self.add(doc, cproject, 'storageModule',
				{'moduleId': cdt_core + '.settings'})
		cconf = self.add(doc, storageModule, 'cconfiguration', {'id': cconf_id})

		storageModule = self.add(doc, cconf, 'storageModule',
				{'buildSystemId': oe_cdt + '.managedbuilder.core.configurationDataProvider',
				 'id': cconf_id,
				 'moduleId': cdt_core + '.settings',
				 'name': 'Default'})
		self.add(doc, storageModule, 'externalSettings')

		extensions = self.add(doc, storageModule, 'extensions')
		extension_list = """
			VCErrorParser
			MakeErrorParser
			GCCErrorParser
			GASErrorParser
			GLDErrorParser
		""".split()
		self.add(doc, extensions, 'extension', {'id': cdt_core + '.ELF', 'point': cdt_core + '.BinaryParser'})
		for e in extension_list:
			self.add(doc, extensions, 'extension', {'id': cdt_core + '.' + e, 'point': cdt_core + '.ErrorParser'})

		storageModule = self.add(doc, cconf, 'storageModule',
				{'moduleId': 'cdtBuildSystem', 'version': '4.0.0'})
		config = self.add(doc, storageModule, 'configuration',
				{'artifactName': appname,
				 'id': cconf_id,
				 'name': 'Default',
				 'parent': cdt_bld + '.prefbase.cfg'})
		folderInfo = self.add(doc, config, 'folderInfo',
				{'id': cconf_id + '.', 'name': '/', 'resourcePath': ''})

		toolChain = self.add(doc, folderInfo, 'toolChain',
				{'id': cdt_bld + '.prefbase.toolchain.1',
				 'name': 'No ToolChain',
				 'resourceTypeBasedDiscovery': 'false',
				 'superClass': cdt_bld + '.prefbase.toolchain'})

		self.add(doc, toolChain, 'targetPlatform', {'binaryParser': 'org.eclipse.cdt.core.ELF', 'id': cdt_bld + '.prefbase.toolchain.1', 'name': ''})

		# Both the incremental and the auto build invoke waf; cleaning maps
		# to 'waf clean'.
		waf_build = '"%s" %s' % (waf_executable, eclipse.fun)
		waf_clean = '"%s" clean' % (waf_executable)
		self.add(doc, toolChain, 'builder',
				{'autoBuildTarget': waf_build,
				 'command': executable,
				 'enableAutoBuild': 'false',
				 'cleanBuildTarget': waf_clean,
				 'enableIncrementalBuild': 'true',
				 'id': cdt_bld + '.settings.default.builder.1',
				 'incrementalBuildTarget': waf_build,
				 'managedBuildOn': 'false',
				 'name': 'Gnu Make Builder',
				 'superClass': cdt_bld + '.settings.default.builder'})

		tool_index = 1
		for tool_name in ("Assembly", "GNU C++", "GNU C"):
			tool = self.add(doc, toolChain, 'tool',
					{'id': cdt_bld + '.settings.holder.' + str(tool_index),
					 'name': tool_name,
					 'superClass': cdt_bld + '.settings.holder'})
			if cpppath or workspace_includes:
				incpaths = cdt_bld + '.settings.holder.incpaths'
				option = self.add(doc, tool, 'option',
						{'id': incpaths + '.' + str(tool_index),
						 'name': 'Include Paths',
						 'superClass': incpaths,
						 'valueType': 'includePath'})
				# Workspace-relative includes first, then absolute paths.
				for i in workspace_includes:
					self.add(doc, option, 'listOptionValue',
							{'builtIn': 'false',
							 'value': '"${workspace_loc:/%s/%s}"' % (appname, i)})
				for i in cpppath:
					self.add(doc, option, 'listOptionValue',
							{'builtIn': 'false',
							 'value': '"%s"' % (i)})
			if tool_name == "GNU C++" or tool_name == "GNU C":
				self.add(doc, tool, 'inputType',
						{'id': 'org.eclipse.cdt.build.core.settings.holder.inType.' + str(tool_index),
						 'languageId': 'org.eclipse.cdt.core.gcc' if tool_name == "GNU C" else 'org.eclipse.cdt.core.g++',
						 'languageName': tool_name,
						 'sourceContentType': 'org.eclipse.cdt.core.cSource,org.eclipse.cdt.core.cHeader',
						 'superClass': 'org.eclipse.cdt.build.core.settings.holder.inType'})
			tool_index += 1

		if source_dirs:
			sourceEntries = self.add(doc, config, 'sourceEntries')
			for i in source_dirs:
				self.add(doc, sourceEntries, 'entry',
						{'excluding': i,
						 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED',
						 'kind': 'sourcePath',
						 'name': ''})
				self.add(doc, sourceEntries, 'entry',
						{
						 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED',
						 'kind': 'sourcePath',
						 'name': i})

		storageModule = self.add(doc, cconf, 'storageModule',
				{'moduleId': cdt_mk + '.buildtargets'})
		buildTargets = self.add(doc, storageModule, 'buildTargets')

		def addTargetWrap(name, runAll):
			# Shortcut: each extra make target simply runs '"waf" <name>'.
			return self.addTarget(doc, buildTargets, executable, name,
					'"%s" %s' % (waf_executable, name), runAll)
		addTargetWrap('configure', True)
		addTargetWrap('dist', False)
		addTargetWrap('install', False)
		addTargetWrap('check', False)

		storageModule = self.add(doc, cproject, 'storageModule',
				{'moduleId': 'cdtBuildSystem',
				 'version': '4.0.0'})
		self.add(doc, storageModule, 'project', {'id': '%s.null.1' % appname, 'name': appname})

		doc.appendChild(cproject)
		return doc

	def impl_create_pydevproject(self, system_path, user_path):
		"""Build the PyDev .pydevproject DOM from the interpreter's paths
		(waf's own modules) and the project-specific python paths."""
		doc = Document()
		doc.appendChild(doc.createProcessingInstruction('eclipse-pydev', 'version="1.0"'))
		pydevproject = doc.createElement('pydev_project')
		prop = self.add(doc, pydevproject,
				'pydev_property',
				'python %d.%d' % (sys.version_info[0], sys.version_info[1]))
		prop.setAttribute('name', 'org.python.pydev.PYTHON_PROJECT_VERSION')
		prop = self.add(doc, pydevproject, 'pydev_property', 'Default')
		prop.setAttribute('name', 'org.python.pydev.PYTHON_PROJECT_INTERPRETER')
		# add waf's paths
		wafadmin = [p for p in system_path if p.find('wafadmin') != -1]
		if wafadmin:
			prop = self.add(doc, pydevproject, 'pydev_pathproperty',
					{'name': 'org.python.pydev.PROJECT_EXTERNAL_SOURCE_PATH'})
			for i in wafadmin:
				self.add(doc, prop, 'path', i)
		if user_path:
			prop = self.add(doc, pydevproject, 'pydev_pathproperty',
					{'name': 'org.python.pydev.PROJECT_SOURCE_PATH'})
			for i in user_path:
				self.add(doc, prop, 'path', '/${PROJECT_DIR_NAME}/' + i)

		doc.appendChild(pydevproject)
		return doc

	def impl_create_javaproject(self, javasrcpath, javalibpath):
		"""Build the JDT .classpath DOM: source folders, external jars, the
		default JRE container and the waf build directory as output."""
		doc = Document()
		javaproject = doc.createElement('classpath')
		if javasrcpath:
			for i in javasrcpath:
				self.add(doc, javaproject, 'classpathentry',
					{'kind': 'src', 'path': i})

		if javalibpath:
			for i in javalibpath:
				self.add(doc, javaproject, 'classpathentry',
					{'kind': 'lib', 'path': i})

		self.add(doc, javaproject, 'classpathentry', {'kind': 'con', 'path': 'org.eclipse.jdt.launching.JRE_CONTAINER'})
		self.add(doc, javaproject, 'classpathentry', {'kind': 'output', 'path': self.bldnode.name})
		doc.appendChild(javaproject)
		return doc

	def addDictionary(self, doc, parent, k, v):
		"""Append a CDT <dictionary> element holding one key/value pair."""
		dictionary = self.add(doc, parent, 'dictionary')
		self.add(doc, dictionary, 'key', k)
		self.add(doc, dictionary, 'value', v)
		return dictionary

	def addTarget(self, doc, buildTargets, executable, name, buildTarget, runAllBuilders=True):
		"""Append one CDT make target that runs `executable buildTarget`."""
		target = self.add(doc, buildTargets, 'target',
				{'name': name,
				 'path': '',
				 'targetID': oe_cdt + '.build.MakeTargetBuilder'})
		self.add(doc, target, 'buildCommand', executable)
		self.add(doc, target, 'buildArguments', None)
		self.add(doc, target, 'buildTarget', buildTarget)
		self.add(doc, target, 'stopOnError', 'true')
		self.add(doc, target, 'useDefaultCommand', 'false')
		self.add(doc, target, 'runAllBuilders', str(runAllBuilders).lower())

	def add(self, doc, parent, tag, value=None):
		"""Append a new element *tag* to *parent* and return it.

		A (non-empty) string *value* becomes a text child; a (non-empty)
		dict becomes the element's attributes; None/empty adds nothing.
		"""
		el = doc.createElement(tag)
		if value:
			if isinstance(value, str):
				el.appendChild(doc.createTextNode(value))
			elif isinstance(value, dict):
				self.setAttributes(el, value)
		parent.appendChild(el)
		return el

	def setAttributes(self, node, attrs):
		"""Copy every key/value pair of *attrs* onto *node* as attributes."""
		for k, v in attrs.items():
			node.setAttribute(k, v)
- hosts: testhost
gather_facts: no
tasks:
- name: Test proper bool evaluation of ansible_become (issue #70476)
shell: whoami
register: output
- name: Assert we are NOT the become user specified
assert:
that:
- "output.stdout != 'ansibletest1'" | unknown | github | https://github.com/ansible/ansible | test/integration/targets/inventory_ini/test_ansible_become.yml |
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_TF_GRAPH_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_TF_GRAPH_H_
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/variant.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/compile_only_client.h"
#include "xla/pjrt/proto/compile_options.pb.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tpu/kernels/tpu_compile.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
namespace tensorflow {
namespace tf2xla {
namespace v1 {

// Compiles the given Tensorflow graph into xla::HLO. The result is in
// compilation_result. If the input computation is in MLIR, it will be
// converted to a Tensorflow graph. Otherwise, the graph compiler will be run.
//
// `computation` holds either the MLIR module or the function to compile;
// `metadata`, `use_tuple_args` and `shape_determination_funcs` control how
// arguments are laid out; `arg_shapes` gives the concrete argument shapes for
// `device_type`. On success, `arg_core_mapping` and `per_core_arg_shapes` are
// populated with the per-core sharding, and `compilation_result` receives the
// produced HLO.
absl::Status CompileTensorflowGraphToHlo(
    const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation,
    const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
    XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_funcs,
    const std::vector<tensorflow::TensorShape>& arg_shapes,
    tsl::DeviceType device_type,
    std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
    std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
    xla::CompileOnlyClient* client,
    XlaCompiler::CompilationResult* compilation_result);

}  // namespace v1
}  // namespace tf2xla
}  // namespace tensorflow
#endif // TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_TF_GRAPH_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h |
# -*- coding: UTF-8 -*-
from flask import (jsonify, request, render_template, redirect, session,
current_app)
from flask.ext.wtf import Form
from flask.ext.babel import _
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired
from werkzeug.security import generate_password_hash
from flask.ext.login import (current_user, login_user, login_required,
logout_user)
from flask.ext.principal import Identity, AnonymousIdentity, identity_changed
from cloud_dashing.portal.user import user_ws, user
from cloud_dashing.models import User, Group
from cloud_dashing import utils, apis
from cloud_dashing.exceptions import AuthenticateFailure
@user_ws.route('/register', methods=['POST'])
def register_ws():
    """Web-service endpoint: register a new user.

    Expects ``name`` and ``password`` query arguments.  Returns the created
    user (including an auth token) as JSON with status 201, or a 403 with a
    Chinese error message on missing fields or a duplicate user name.
    """
    name = request.args.get("name", type=str)
    password = request.args.get("password", type=str)
    if not name or not password:
        return u"需要name或者password字段", 403
    # Reject duplicate user names.
    user = User.query.filter(User.name == name).first()
    if user:
        return u'用户名已存在, 请更换注册名', 403
    # NOTE(review): `const` is not imported anywhere in this module, so this
    # line raises NameError at runtime.  Presumably an import along the lines
    # of `from cloud_dashing import const` is missing -- confirm against the
    # rest of the package before relying on this endpoint.
    user = utils.do_commit(User(name=name,
                                password=generate_password_hash(
                                    password, 'pbkdf2:sha256'),
                                group=Group.query.get(const.CUSTOMER_GROUP)))
    user = apis.wraps(user)
    return jsonify(user.as_dict(include_auth_token=True)), 201
@user_ws.route("/login", methods=["POST"])
def login_ws():
name = request.args.get("name", type=str)
password = request.args.get("password", type=str)
if not name or not password:
return u"需要name或者password字段", 403
try:
user = apis.user.authenticate(name, password)
except AuthenticateFailure:
return u'用户名或者密码错误', 403
return jsonify(user.as_dict(include_auth_token=True))
class LoginForm(Form):
    """WTForms form backing the HTML login page: username and password,
    both required (CSRF protection comes from flask-wtf's Form base)."""
    username = TextField('username', validators=[DataRequired()])
    password = PasswordField('password', validators=[DataRequired()])
@user.route('/login.html', methods=['GET', 'POST'])
def login():
    """HTML login view.

    GET: render the login form for anonymous users, otherwise redirect home.
    POST: validate the form, authenticate, establish the Flask-Login session
    and Flask-Principal identity, then redirect to ``next`` (default "/").
    Any failure re-renders the form with a localized error and a 403 status.
    """
    form = LoginForm()
    if request.method == "GET":
        if current_user.is_anonymous():
            return render_template("user/login.html", form=form,
                                   error=request.args.get('error'),
                                   next_url=request.args.get('next'))
        # Already logged in -- nothing to do here.
        return redirect("/")
    else:
        if form.validate_on_submit():
            username = form.username.data
            password = form.password.data
            try:
                user = apis.user.authenticate(username, password)
            except AuthenticateFailure:
                return render_template("user/login.html",
                                       error=_("invalid username or password"),
                                       form=form), 403
            if not login_user(user):
                return render_template("user/login.html",
                                       error=_("failed to login")), 403
            # Tell Flask-Principal the identity changed so role-based
            # permissions are recomputed for this session.
            identity_changed.send(current_app._get_current_object(),
                                  identity=Identity(user.id))
            return redirect(request.args.get('next') or "/")
        return render_template("user/login.html",
                               error=_("please input username or password"),
                               form=form), 403
@user.route("/logout.html")
@login_required
def logout():
try:
logout_user()
except Exception: # in case sesson expire
pass
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
next_url = request.args.get("next", "/")
return redirect(next_url) | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
sanitized_Request,
)
class AzubuIE(InfoExtractor):
    """Extractor for recorded videos on azubu.tv / azubu.uol.com.br."""
    _VALID_URL = r'https?://(?:www\.)?azubu\.(?:tv|uol.com.br)/[^/]+#!/play/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1',
            'md5': 'a88b42fcf844f29ad6035054bd9ecaf4',
            'info_dict': {
                'id': '15575',
                'ext': 'mp4',
                'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1',
                'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01',
                # FIX: raw string -- '\.' in a plain literal is an invalid
                # escape sequence (DeprecationWarning since Python 3.6).
                # The string value is unchanged.
                'thumbnail': r're:^https?://.*\.jpe?g',
                'timestamp': 1417523507.334,
                'upload_date': '20141202',
                'duration': 9988.7,
                'uploader': 'GSL',
                'uploader_id': 414310,
                'view_count': int,
            },
        },
        {
            'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-',
            'md5': 'b72a871fe1d9f70bd7673769cdb3b925',
            'info_dict': {
                'id': '9344',
                'ext': 'mp4',
                'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"',
                'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af',
                'thumbnail': r're:^https?://.*\.jpe?g',
                'timestamp': 1410530893.320,
                'upload_date': '20140912',
                'duration': 172.385,
                'uploader': 'FnaticTV',
                'uploader_id': 272749,
                'view_count': int,
            },
            'skip': 'Channel offline',
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        data = self._download_json(
            'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data']

        title = data['title'].strip()
        description = data.get('description')
        thumbnail = data.get('thumbnail')
        view_count = data.get('view_count')
        user = data.get('user', {})
        uploader = user.get('username')
        uploader_id = user.get('id')

        # Brightcove-style parameters are embedded as a JSON string.
        stream_params = json.loads(data['stream_params'])

        # Timestamps/lengths come in milliseconds.
        timestamp = float_or_none(stream_params.get('creationDate'), 1000)
        duration = float_or_none(stream_params.get('length'), 1000)

        renditions = stream_params.get('renditions') or []
        video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength')
        if video:
            renditions.append(video)

        # No renditions and the channel is not live -> nothing to download.
        if not renditions and not user.get('channel', {}).get('is_live', True):
            raise ExtractorError('%s said: channel is offline.' % self.IE_NAME, expected=True)

        formats = [{
            'url': fmt['url'],
            'width': fmt['frameWidth'],
            'height': fmt['frameHeight'],
            'vbr': float_or_none(fmt['encodingRate'], 1000),
            'filesize': fmt['size'],
            'vcodec': fmt['videoCodec'],
            'container': fmt['videoContainer'],
        } for fmt in renditions if fmt['url']]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'formats': formats,
        }
class AzubuLiveIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?azubu\.(?:tv|uol.com.br)/(?P<id>[^/]+)$'
_TESTS = [{
'url': 'http://www.azubu.tv/MarsTVMDLen',
'only_matching': True,
}, {
'url': 'http://azubu.uol.com.br/adolfz',
'only_matching': True,
}]
def _real_extract(self, url):
user = self._match_id(url)
info = self._download_json(
'http://api.azubu.tv/public/modules/last-video/{0}/info'.format(user),
user)['data']
if info['type'] != 'STREAM':
raise ExtractorError('{0} is not streaming live'.format(user), expected=True)
req = sanitized_Request(
'https://edge-elb.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:' + info['reference_id'])
req.add_header('Accept', 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV')
bc_info = self._download_json(req, user)
m3u8_url = next(source['src'] for source in bc_info['sources'] if source['container'] == 'M2TS')
formats = self._extract_m3u8_formats(m3u8_url, user, ext='mp4')
self._sort_formats(formats)
return {
'id': info['id'],
'title': self._live_title(info['title']),
'uploader_id': user,
'formats': formats,
'is_live': True,
'thumbnail': bc_info['poster'],
} | unknown | codeparrot/codeparrot-clean | ||
from __future__ import print_function
from builtins import str
import os
import unittest
import shutil
import unittest
import yaml
from fundamentals.utKit import utKit
from fundamentals import tools
from os.path import expanduser
home = expanduser("~")
packageDirectory = utKit("").get_project_root()
settingsFile = packageDirectory + "/test_settings.yaml"
# settingsFile = home + "/.config/soxspipe.recipes/soxspipe.recipes.yaml"
su = tools(
arguments={"settingsFile": settingsFile},
docString=__doc__,
logLevel="DEBUG",
options_first=False,
projectName=None,
defaultSettingsFile=False
)
arguments, settings, log, dbConn = su.setup()
# SETUP AND TEARDOWN FIXTURE FUNCTIONS FOR THE ENTIRE MODULE
moduleDirectory = os.path.dirname(__file__)
utKit = utKit(moduleDirectory)
log, dbConn, pathToInputDir, pathToOutputDir = utKit.setupModule()
utKit.tearDownModule()
try:
shutil.rmtree(pathToOutputDir)
except:
pass
# COPY INPUT TO OUTPUT DIR
shutil.copytree(pathToInputDir, pathToOutputDir)
# Recursively create missing directories
if not os.path.exists(pathToOutputDir):
os.makedirs(pathToOutputDir)
class test_recursive_directory_listing(unittest.TestCase):
def test_recursive_directory_listing_function(self):
from fundamentals.files import recursive_directory_listing
theseFiles = recursive_directory_listing(
log,
baseFolderPath="/tmp",
whatToList="all"
)
# print(theseFiles)
from fundamentals.files import recursive_directory_listing
theseFiles = recursive_directory_listing(
log,
baseFolderPath="/tmp",
whatToList="files"
)
# print(theseFiles)
from fundamentals.files import recursive_directory_listing
theseFiles = recursive_directory_listing(
log,
baseFolderPath="/tmp",
whatToList="dirs"
)
# print(theseFiles)
# x-print-testpage-for-pessto-marshall-web-object
# x-class-to-test-named-worker-function | unknown | codeparrot/codeparrot-clean | ||
package overlay
import (
"context"
"encoding/json"
"net"
"net/http"
"sync"
"github.com/Microsoft/hcsshim"
"github.com/containerd/log"
"github.com/moby/moby/v2/daemon/libnetwork/driverapi"
"github.com/moby/moby/v2/daemon/libnetwork/scope"
)
const (
NetworkType = "overlay"
)
var _ driverapi.TableWatcher = (*driver)(nil)
type driver struct {
networks networkTable
sync.Mutex
}
// Register registers a new instance of the overlay driver.
func Register(r driverapi.Registerer) error {
d := &driver{
networks: networkTable{},
}
d.restoreHNSNetworks()
return r.RegisterDriver(NetworkType, d, driverapi.Capability{
DataScope: scope.Global,
ConnectivityScope: scope.Global,
})
}
func (d *driver) restoreHNSNetworks() error {
log.G(context.TODO()).Infof("Restoring existing overlay networks from HNS into docker")
hnsresponse, err := hcsshim.HNSListNetworkRequest(http.MethodGet, "", "")
if err != nil {
return err
}
for _, v := range hnsresponse {
if v.Type != NetworkType {
continue
}
log.G(context.TODO()).Infof("Restoring overlay network: %s", v.Name)
n := d.convertToOverlayNetwork(&v)
d.addNetwork(n)
//
// We assume that any network will be recreated on daemon restart
// and therefore don't restore hns endpoints for now
//
// n.restoreNetworkEndpoints()
}
return nil
}
func (d *driver) convertToOverlayNetwork(v *hcsshim.HNSNetwork) *network {
n := &network{
id: v.Name,
hnsID: v.Id,
driver: d,
endpoints: endpointTable{},
subnets: []*subnet{},
providerAddress: v.ManagementIP,
}
for _, hnsSubnet := range v.Subnets {
vsidPolicy := &hcsshim.VsidPolicy{}
for _, policy := range hnsSubnet.Policies {
if err := json.Unmarshal([]byte(policy), &vsidPolicy); err == nil && vsidPolicy.Type == "VSID" {
break
}
}
gwIP := net.ParseIP(hnsSubnet.GatewayAddress)
localsubnet := &subnet{
vni: uint32(vsidPolicy.VSID),
gwIP: &gwIP,
}
_, subnetIP, err := net.ParseCIDR(hnsSubnet.AddressPrefix)
if err != nil {
log.G(context.TODO()).Errorf("Error parsing subnet address %s ", hnsSubnet.AddressPrefix)
continue
}
localsubnet.subnetIP = subnetIP
n.subnets = append(n.subnets, localsubnet)
}
return n
}
func (d *driver) Type() string {
return NetworkType
}
func (d *driver) IsBuiltIn() bool {
return true
} | go | github | https://github.com/moby/moby | daemon/libnetwork/drivers/windows/overlay/overlay_windows.go |
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import logging
import os
import sys
import textwrap
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import reduce_debugline
class ReduceDebuglineTest(unittest.TestCase):
_DECODED_DEBUGLINE = textwrap.dedent("""\
Decoded dump of debug contents of section .debug_line:
CU: ../../chrome/service/service_main.cc:
File name Line number Starting address
service_main.cc 21 0xa41210
service_main.cc 24 0xa4141f
service_main.cc 30 0xa4142b
service_main.cc 31 0xa4143e
../../base/message_loop.h:
message_loop.h 550 0xa41300
message_loop.h 551 0xa41310
../../base/logging.h:
logging.h 246 0xa41710
logging.h 247 0xa41726
../../base/logging.h:
logging.h 846 0xa3fd90
logging.h 846 0xa3fda0
""")
_EXPECTED_REDUCED_DEBUGLINE = [
(0xa3fd90, '../../base/logging.h'),
(0xa41210, '../../chrome/service/service_main.cc'),
(0xa41300, '../../base/message_loop.h'),
(0xa4141f, '../../chrome/service/service_main.cc'),
(0xa41710, '../../base/logging.h'),
]
def test(self):
ranges_dict = reduce_debugline.reduce_decoded_debugline(
cStringIO.StringIO(self._DECODED_DEBUGLINE))
self.assertEqual(self._EXPECTED_REDUCED_DEBUGLINE, ranges_dict)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# some accessing of the semantic concordance data for wordnet 1.6
# by Des Berry, berry@ais.it
import string, os
from wordnet import binarySearchFile
# Sample entries in the 'taglist' file
# ordinary%1:18:01:: 1 br-a01:78,1;86,1;88,4
# ordered%5:00:00:organized:01 2 br-j23:6,14;13,32;66,12
# where the general form is:
# lemma%ss_type:lex_filenum:lex_id:head_word:head_id sense_number
[location_list]
# location_list: filename:sent_num,word_num[;sent_num,word_num...]
ss_type = ("NOUN", "VERB", "ADJECTIVE", "ADVERB", "ADJECTIVE SATELLITE")
# given a sentence number (and the contents of a semantic concordance file)
# return a string of words as the sentence
def find_sentence(snum, msg):
str = "<s snum=%s>" % snum
s = string.find(msg, str)
if s < 0:
return "<Unknown>"
s = s + len(str)
sentence = ""
tag = ""
while 1:
if msg[s] == '\n':
s = s + 1
n = string.find(msg, '<', s)
if n < 0:
break
if n - s != 0:
if tag == "w" and msg[s] != "'" and len(sentence) > 0: # word form
sentence = sentence + " "
sentence = sentence + msg[s:n]
e = string.find(msg, '>', n)
if e < 0:
break
tag = msg[n+1]
if tag == "/": #check for ending sentence
if msg[n+2] == 's':
#end of sentence
break
s = e + 1
return sentence
# given a taglist sense (one line of the tagfile) and where to find the tagfile (root)
# return a tuple of
# symset type ('1' .. '5')
# sense (numeric character string)
# list of sentences (constructed from the taglist)
def tagsentence(tag, root):
s = string.find(tag, '%')
sentence = []
type = tag[s+1]
c = s
for i in range(0,4):
c = string.find(tag, ':', c + 1)
c = string.find(tag, ' ', c + 1)
sense = tag[c+1]
c = c + 3
while 1:
d = string.find(tag, ' ', c) # file separator
if d < 0:
loclist = tag[c:]
else:
loclist = tag[c:d]
c = d + 1
e = string.find(loclist, ':')
filename = loclist[:e]
fh = open(root + filename, "rb")
msg = fh.read()
fh.close()
while 1:
e = e + 1
f = string.find(loclist, ';', e)
if f < 0:
sent_word = loclist[e:]
else:
sent_word = loclist[e:f]
e = f
g = string.find(sent_word, ',')
sent = sent_word[:g]
sentence.append(find_sentence(sent, msg))
if f < 0:
break
if d < 0:
break
return (type, sense, sentence)
# given a word to search for and where to find the files (root)
# displays the information
# This could be changed to display in different ways!
def sentences(word, root):
cache = {}
file = open(root + "taglist", "rb")
key = word + "%"
keylen = len(key)
binarySearchFile(file, key + " ", cache, 10)
print "Word '%s'" % word
while 1:
line = file.readline()
if line[:keylen] != key:
break
type, sense, sentence = tagsentence(line, root + "tagfiles/")
print ss_type[string.atoi(type) - 1], sense
for sent in sentence:
print sent
def _test(word, corpus, base):
print corpus
sentences("ordinary", base + corpus + "/")
if __name__ == '__main__':
base = "C:/win16/dict/semcor/"
word = "ordinary"
_test(word, "brown1", base)
_test(word, "brown2", base)
_test(word, "brownv", base) | unknown | codeparrot/codeparrot-clean | ||
## Input
```javascript
function Component(props) {
const x = makeOptionalObject(props);
const y = makeObject(props);
const z = x?.optionalMethod?.(y.a, props.a, foo(y.b), bar(props.b));
return z;
}
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
function Component(props) {
const $ = _c(2);
let t0;
if ($[0] !== props) {
const x = makeOptionalObject(props);
const y = makeObject(props);
t0 = x?.optionalMethod?.(y.a, props.a, foo(y.b), bar(props.b));
$[0] = props;
$[1] = t0;
} else {
t0 = $[1];
}
const z = t0;
return z;
}
``` | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/optional-receiver-optional-method.expect.md |
package schemaversion
import "context"
// V41 removes the deprecated time_options property from dashboard timepicker configuration.
//
// This migration addresses technical debt by cleaning up legacy timepicker settings that have
// been obsolete since Grafana version 5. The time_options property was originally designed to
// allow customization of predefined time range options in the time picker dropdown, but this
// functionality was superseded by more flexible time selection mechanisms.
//
// The migration works by:
// 1. Locating dashboard timepicker configuration objects
// 2. Removing the deprecated time_options property if present
// 3. Preserving all other timepicker settings (refresh_intervals, etc.)
//
// This cleanup prevents potential confusion for developers and ensures the dashboard schema
// remains focused on actively used configuration options. The removal is safe because the
// time_options property has had no functional impact for several major Grafana versions.
//
// Example transformation:
//
// Before migration:
//
// timepicker: {
// refresh_intervals: ["5s", "10s", "30s", "1m"],
// time_options: ["5m", "15m", "1h", "6h", "12h", "24h"]
// }
//
// After migration:
//
// timepicker: {
// refresh_intervals: ["5s", "10s", "30s", "1m"]
// }
func V41(_ context.Context, dash map[string]interface{}) error {
dash["schemaVersion"] = int(41)
if timepicker, ok := dash["timepicker"].(map[string]interface{}); ok {
// time_options is a legacy property that was not used since grafana version 5
// therefore deprecating this property from the schema
delete(timepicker, "time_options")
}
return nil
} | go | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/schemaversion/v41.go |
//// [tests/cases/conformance/async/es2017/awaitCallExpression/awaitCallExpression8_es2017.ts] ////
//// [awaitCallExpression8_es2017.ts]
declare var a: boolean;
declare var p: Promise<boolean>;
declare function fn(arg0: boolean, arg1: boolean, arg2: boolean): void;
declare var o: { fn(arg0: boolean, arg1: boolean, arg2: boolean): void; };
declare var pfn: Promise<{ (arg0: boolean, arg1: boolean, arg2: boolean): void; }>;
declare var po: Promise<{ fn(arg0: boolean, arg1: boolean, arg2: boolean): void; }>;
declare function before(): void;
declare function after(): void;
async function func(): Promise<void> {
before();
var b = (await po).fn(a, a, a);
after();
}
//// [awaitCallExpression8_es2017.js]
"use strict";
async function func() {
before();
var b = (await po).fn(a, a, a);
after();
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/awaitCallExpression8_es2017.js |
import datetime
import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.forms.widgets import Widget, Select
from django.utils import six
from django.utils.dates import MONTHS
from django.utils.safestring import mark_safe
from .models import Address, CreditCard
class MonthYearWidget(Widget):
'''
A Widget that splits date input into two <select> boxes for month and year,
with "day" defaulting to the first of the month.
'''
none_value = (0, '---')
month_field = '%s_month'
year_field = '%s_year'
date_re = re.compile(r'(\d{4})-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])$')
def __init__(self, attrs=None, years=None, required=True):
self.attrs = attrs or {}
self.required = required
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
def render(self, name, value, attrs=None):
try:
year_val, month_val = value.year, value.month
except AttributeError:
year_val = month_val = None
if isinstance(value, six.string_types):
match = date_re.match(value)
if match:
year_val, month_val, day_val = [int(v) for v in match.groups()]
output = []
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
month_choices = list(MONTHS.items())
year_choices = [(i, i) for i in self.years]
if not self.required:
month_choices.insert(0, self.none_value)
year_choices.insert(0, self.none_value)
local_attrs = self.build_attrs({'id': self.month_field % id_})
s = Select(choices=month_choices)
select_html = s.render(self.month_field % name, month_val, local_attrs)
output.append(select_html)
local_attrs['id'] = self.year_field % id_
s = Select(choices=year_choices)
select_html = s.render(self.year_field % name, year_val, local_attrs)
output.append(select_html)
return mark_safe('\n'.join(output))
def id_for_label(self, id_):
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
if y == m == '0':
return None
if y and m:
return '%s-%s-%s' % (y, m, 1)
return data.get(name)
class AddressForm(forms.ModelForm):
class Meta:
model = Address
fields = ('street', 'city', 'state', 'zip_code')
class CreditCardForm(forms.ModelForm):
number = forms.CharField(label='Card Number', min_length=13, max_length=19)
class Meta:
model = CreditCard
fields = ('holder_name', 'expiration_date')
widgets = {'expiration_date': MonthYearWidget}
@staticmethod
def is_luhn(number):
if len(number) < 2:
raise ValueError('Card number is too short.')
digits = list(map(int, number))
total = sum(digits[-1::-2])
even_digits = digits[-2::-2]
for digit in even_digits:
digit += digit
total += (digit if digit <= 9 else digit - 9)
return total % 10 == 0
@staticmethod
def get_card_type(number):
if number[0] == '4':
return 'Visa'
elif number[:2] in ('34', '37'):
return 'American Express'
elif number[:2] in ('51', '52', '53', '54', '55'):
return 'MasterCard'
else:
raise forms.ValidationError('Unsupported card entered.')
def clean_number(self):
number = self.cleaned_data['number']
if not number.isdigit():
raise forms.ValidationError('Card number must be numeric.')
if not self.is_luhn(number):
raise forms.ValidationError('Invalid card number entered.')
return number
def clean_expiration_date(self):
exp_date = self.cleaned_data['expiration_date']
today = datetime.date.today()
expired = exp_date.year < today.year or (
exp_date.year == today.year and exp_date.month < today.month
)
if expired:
raise forms.ValidationError('Card is expired.')
return exp_date
def clean(self):
cleaned_data = super().clean()
number = cleaned_data.get('number')
if number:
cleaned_data['card_type'] = self.get_card_type(number)
return cleaned_data
'''def save(self, commit=True):
card = super().save(commit=False)
card.number = self.cleaned_data['number']
if commit:
card.save()
return card'''
class MyUserCreationForm(UserCreationForm):
class Meta:
model = User
fields = (
'username',
'first_name',
'email',
)
email = forms.EmailField(required=True)
first_name = forms.CharField(required=True) | unknown | codeparrot/codeparrot-clean | ||
""" Test suite for the fixer modules """
# Python imports
import os
import unittest
from itertools import chain
from operator import itemgetter
# Local imports
from lib2to3 import pygram, pytree, refactor, fixer_util
from lib2to3.tests import support
class FixerTestCase(support.TestCase):
    """Common harness for testing a single 2to3 fixer.

    Subclasses set the class attribute ``fixer`` to the fixer's short name
    (e.g. "print") and exercise it via check()/unchanged()/warns().
    """

    # Other test cases can subclass this class and replace "fixer_pkg" with
    # their own.
    def setUp(self, fix_list=None, fixer_pkg="lib2to3", options=None):
        # Build a refactoring tool for the requested fixers and point every
        # fixer's log at self.fixer_log so warnings can be asserted on later.
        if fix_list is None:
            fix_list = [self.fixer]
        self.refactor = support.get_refactorer(fixer_pkg, fix_list, options)
        self.fixer_log = []
        self.filename = u"<string>"
        for fixer in chain(self.refactor.pre_order,
                           self.refactor.post_order):
            fixer.log = self.fixer_log
    def _check(self, before, after):
        # Reformat both snippets, refactor `before`, and assert the result
        # renders exactly as `after`.  Returns the refactored tree so callers
        # can make further assertions (e.g. on tree.was_changed).
        before = support.reformat(before)
        after = support.reformat(after)
        tree = self.refactor.refactor_string(before, self.filename)
        self.assertEqual(after, unicode(tree))
        return tree
    def check(self, before, after, ignore_warnings=False):
        # Assert `before` is refactored into `after`, that the tree was
        # actually modified, and (by default) that no warnings were logged.
        tree = self._check(before, after)
        self.assertTrue(tree.was_changed)
        if not ignore_warnings:
            self.assertEqual(self.fixer_log, [])
    def warns(self, before, after, message, unchanged=False):
        # Assert the refactoring produced `after` and logged `message`.
        # With unchanged=True the source is expected to be left alone.
        tree = self._check(before, after)
        self.assertTrue(message in "".join(self.fixer_log))
        if not unchanged:
            self.assertTrue(tree.was_changed)
    def warns_unchanged(self, before, message):
        # Convenience wrapper: a warning is logged but nothing is rewritten.
        self.warns(before, before, message, unchanged=True)
    def unchanged(self, before, ignore_warnings=False):
        # Assert the fixer leaves `before` untouched.
        self._check(before, before)
        if not ignore_warnings:
            self.assertEqual(self.fixer_log, [])
    def assert_runs_after(self, *names):
        # Assert that this fixer is ordered after all the fixers in `names`,
        # i.e. it comes last in the combined pre-order/post-order run.
        fixes = [self.fixer]
        fixes.extend(names)
        r = support.get_refactorer("lib2to3", fixes)
        (pre, post) = r.get_fixers()
        n = "fix_" + self.fixer
        if post and post[-1].__class__.__module__.endswith(n):
            # We're the last fixer to run
            return
        if pre and pre[-1].__class__.__module__.endswith(n) and not post:
            # We're the last in pre and post is empty
            return
        self.fail("Fixer run order (%s) is incorrect; %s should be last."\
                  %(", ".join([x.__class__.__module__ for x in (pre+post)]), n))
class Test_ne(FixerTestCase):
    """Exercise the ``ne`` fixer, which rewrites the obsolete ``<>``
    comparison operator to ``!=``."""

    fixer = "ne"

    def test_basic(self):
        # Operator surrounded by spaces.
        before = """if x <> y:
            pass"""
        after = """if x != y:
            pass"""
        self.check(before, after)

    def test_no_spaces(self):
        # No whitespace around the operator.
        before = """if x<>y:
            pass"""
        after = """if x!=y:
            pass"""
        self.check(before, after)

    def test_chained(self):
        # Chained comparisons are rewritten operator by operator.
        before = """if x<>y<>z:
            pass"""
        after = """if x!=y!=z:
            pass"""
        self.check(before, after)
class Test_has_key(FixerTestCase):
    """Exercise the ``has_key`` fixer: ``d.has_key(k)`` is rewritten as
    ``k in d``, parenthesized wherever operator precedence demands it."""

    fixer = "has_key"

    def test_1(self):
        before = """x = d.has_key("x") or d.has_key("y")"""
        after = """x = "x" in d or "y" in d"""
        self.check(before, after)

    def test_2(self):
        before = """x = a.b.c.d.has_key("x") ** 3"""
        after = """x = ("x" in a.b.c.d) ** 3"""
        self.check(before, after)

    def test_3(self):
        before = """x = a.b.has_key(1 + 2).__repr__()"""
        after = """x = (1 + 2 in a.b).__repr__()"""
        self.check(before, after)

    def test_4(self):
        before = """x = a.b.has_key(1 + 2).__repr__() ** -3 ** 4"""
        after = """x = (1 + 2 in a.b).__repr__() ** -3 ** 4"""
        self.check(before, after)

    def test_5(self):
        before = """x = a.has_key(f or g)"""
        after = """x = (f or g) in a"""
        self.check(before, after)

    def test_6(self):
        before = """x = a + b.has_key(c)"""
        after = """x = a + (c in b)"""
        self.check(before, after)

    def test_7(self):
        before = """x = a.has_key(lambda: 12)"""
        after = """x = (lambda: 12) in a"""
        self.check(before, after)

    def test_8(self):
        before = """x = a.has_key(a for a in b)"""
        after = """x = (a for a in b) in a"""
        self.check(before, after)

    def test_9(self):
        before = """if not a.has_key(b): pass"""
        after = """if b not in a: pass"""
        self.check(before, after)

    def test_10(self):
        before = """if not a.has_key(b).__repr__(): pass"""
        after = """if not (b in a).__repr__(): pass"""
        self.check(before, after)

    def test_11(self):
        before = """if not a.has_key(b) ** 2: pass"""
        after = """if not (b in a) ** 2: pass"""
        self.check(before, after)
class Test_apply(FixerTestCase):
    """Exercise the ``apply`` fixer: ``apply(f, args[, kwds])`` is
    rewritten as ``f(*args[, **kwds])``."""

    fixer = "apply"
    def test_1(self):
        b = """x = apply(f, g + h)"""
        a = """x = f(*g + h)"""
        self.check(b, a)
    def test_2(self):
        b = """y = apply(f, g, h)"""
        a = """y = f(*g, **h)"""
        self.check(b, a)
    def test_3(self):
        b = """z = apply(fs[0], g or h, h or g)"""
        a = """z = fs[0](*g or h, **h or g)"""
        self.check(b, a)
    def test_4(self):
        b = """apply(f, (x, y) + t)"""
        a = """f(*(x, y) + t)"""
        self.check(b, a)
    def test_5(self):
        b = """apply(f, args,)"""
        a = """f(*args)"""
        self.check(b, a)
    def test_6(self):
        b = """apply(f, args, kwds,)"""
        a = """f(*args, **kwds)"""
        self.check(b, a)
    # Test that complex functions are parenthesized
    def test_complex_1(self):
        b = """x = apply(f+g, args)"""
        a = """x = (f+g)(*args)"""
        self.check(b, a)
    def test_complex_2(self):
        b = """x = apply(f*g, args)"""
        a = """x = (f*g)(*args)"""
        self.check(b, a)
    def test_complex_3(self):
        b = """x = apply(f**g, args)"""
        a = """x = (f**g)(*args)"""
        self.check(b, a)
    # But dotted names etc. not
    def test_dotted_name(self):
        b = """x = apply(f.g, args)"""
        a = """x = f.g(*args)"""
        self.check(b, a)
    def test_subscript(self):
        b = """x = apply(f[x], args)"""
        a = """x = f[x](*args)"""
        self.check(b, a)
    def test_call(self):
        b = """x = apply(f(), args)"""
        a = """x = f()(*args)"""
        self.check(b, a)
    # Extreme case
    def test_extreme(self):
        b = """x = apply(a.b.c.d.e.f, args, kwds)"""
        a = """x = a.b.c.d.e.f(*args, **kwds)"""
        self.check(b, a)
    # XXX Comments in weird places still get lost
    def test_weird_comments(self):
        b = """apply(   # foo
          f,           # bar
          args)"""
        a = """f(*args)"""
        self.check(b, a)
    # These should *not* be touched
    def test_unchanged_1(self):
        s = """apply()"""
        self.unchanged(s)
    def test_unchanged_2(self):
        s = """apply(f)"""
        self.unchanged(s)
    def test_unchanged_3(self):
        s = """apply(f,)"""
        self.unchanged(s)
    def test_unchanged_4(self):
        s = """apply(f, args, kwds, extras)"""
        self.unchanged(s)
    def test_unchanged_5(self):
        s = """apply(f, *args, **kwds)"""
        self.unchanged(s)
    def test_unchanged_6(self):
        s = """apply(f, *args)"""
        self.unchanged(s)
    def test_unchanged_7(self):
        s = """apply(func=f, args=args, kwds=kwds)"""
        self.unchanged(s)
    def test_unchanged_8(self):
        s = """apply(f, args=args, kwds=kwds)"""
        self.unchanged(s)
    def test_unchanged_9(self):
        s = """apply(f, args, kwds=kwds)"""
        self.unchanged(s)
    def test_space_1(self):
        a = """apply(  f,  args,   kwds)"""
        b = """f(*args, **kwds)"""
        self.check(a, b)
    def test_space_2(self):
        a = """apply(  f  ,args,kwds   )"""
        b = """f(*args, **kwds)"""
        self.check(a, b)
class Test_intern(FixerTestCase):
    """Exercise the ``intern`` fixer: the builtin ``intern(...)`` becomes
    ``sys.intern(...)`` with an ``import sys`` prepended to the module."""

    fixer = "intern"

    def test_prefix_preservation(self):
        # Whitespace and comments inside the call must survive the rewrite.
        before = """x = intern(  a  )"""
        after = """import sys\nx = sys.intern(  a  )"""
        self.check(before, after)

        before = """y = intern("b" # test
              )"""
        after = """import sys\ny = sys.intern("b" # test
              )"""
        self.check(before, after)

        before = """z = intern(a+b+c.d,   )"""
        after = """import sys\nz = sys.intern(a+b+c.d,   )"""
        self.check(before, after)

    def test(self):
        before = """x = intern(a)"""
        after = """import sys\nx = sys.intern(a)"""
        self.check(before, after)

        before = """z = intern(a+b+c.d,)"""
        after = """import sys\nz = sys.intern(a+b+c.d,)"""
        self.check(before, after)

        before = """intern("y%s" % 5).replace("y", "")"""
        after = """import sys\nsys.intern("y%s" % 5).replace("y", "")"""
        self.check(before, after)

    # These should not be refactored
    def test_unchanged(self):
        # Keyword args, wrong arity, and * / ** unpacking are left alone.
        for src in ("""intern(a=1)""",
                    """intern(f, g)""",
                    """intern(*h)""",
                    """intern(**i)""",
                    """intern()"""):
            self.unchanged(src)
class Test_reduce(FixerTestCase):
    """Exercise the ``reduce`` fixer, which prepends
    ``from functools import reduce`` to modules that use the old builtin."""

    fixer = "reduce"

    def test_simple_call(self):
        before = "reduce(a, b, c)"
        after = "from functools import reduce\nreduce(a, b, c)"
        self.check(before, after)

    def test_bug_7253(self):
        # fix_tuple_params was being bad and orphaning nodes in the tree.
        before = "def x(arg): reduce(sum, [])"
        after = "from functools import reduce\ndef x(arg): reduce(sum, [])"
        self.check(before, after)

    def test_call_with_lambda(self):
        before = "reduce(lambda x, y: x + y, seq)"
        after = "from functools import reduce\nreduce(lambda x, y: x + y, seq)"
        self.check(before, after)

    def test_unchanged(self):
        # Wrong arity, keyword arguments, or ** unpacking: leave alone.
        for src in ("reduce(a)",
                    "reduce(a, b=42)",
                    "reduce(a, b, c, d)",
                    "reduce(**c)",
                    "reduce()"):
            self.unchanged(src)
class Test_print(FixerTestCase):
    """Exercise the ``print`` fixer: the print statement is rewritten as
    a print() call, mapping trailing commas to end=' ' and ``>>stream``
    to file=stream."""

    fixer = "print"
    def test_prefix_preservation(self):
        b = """print 1,   1+1,   1+1+1"""
        a = """print(1,   1+1,   1+1+1)"""
        self.check(b, a)
    def test_idempotency(self):
        s = """print()"""
        self.unchanged(s)
        s = """print('')"""
        self.unchanged(s)
    def test_idempotency_print_as_function(self):
        # Switch to the grammar without the print statement, so print(...)
        # already parses as a call and must be left untouched.
        self.refactor.driver.grammar = pygram.python_grammar_no_print_statement
        s = """print(1, 1+1, 1+1+1)"""
        self.unchanged(s)
        s = """print()"""
        self.unchanged(s)
        s = """print('')"""
        self.unchanged(s)
    def test_1(self):
        b = """print 1, 1+1, 1+1+1"""
        a = """print(1, 1+1, 1+1+1)"""
        self.check(b, a)
    def test_2(self):
        b = """print 1, 2"""
        a = """print(1, 2)"""
        self.check(b, a)
    def test_3(self):
        b = """print"""
        a = """print()"""
        self.check(b, a)
    def test_4(self):
        # from bug 3000
        b = """print whatever; print"""
        a = """print(whatever); print()"""
        self.check(b, a)
    def test_5(self):
        b = """print; print whatever;"""
        a = """print(); print(whatever);"""
        self.check(b, a)
    def test_tuple(self):
        b = """print (a, b, c)"""
        a = """print((a, b, c))"""
        self.check(b, a)
    # trailing commas
    def test_trailing_comma_1(self):
        b = """print 1, 2, 3,"""
        a = """print(1, 2, 3, end=' ')"""
        self.check(b, a)
    def test_trailing_comma_2(self):
        b = """print 1, 2,"""
        a = """print(1, 2, end=' ')"""
        self.check(b, a)
    def test_trailing_comma_3(self):
        b = """print 1,"""
        a = """print(1, end=' ')"""
        self.check(b, a)
    # >> stuff
    def test_vargs_without_trailing_comma(self):
        b = """print >>sys.stderr, 1, 2, 3"""
        a = """print(1, 2, 3, file=sys.stderr)"""
        self.check(b, a)
    def test_with_trailing_comma(self):
        b = """print >>sys.stderr, 1, 2,"""
        a = """print(1, 2, end=' ', file=sys.stderr)"""
        self.check(b, a)
    def test_no_trailing_comma(self):
        b = """print >>sys.stderr, 1+1"""
        a = """print(1+1, file=sys.stderr)"""
        self.check(b, a)
    def test_spaces_before_file(self):
        b = """print >>  sys.stderr"""
        a = """print(file=sys.stderr)"""
        self.check(b, a)
    def test_with_future_print_function(self):
        # With the __future__ import active, print(...) is already a call.
        s = "from __future__ import print_function\n" \
            "print('Hai!', end=' ')"
        self.unchanged(s)
        b = "print 'Hello, world!'"
        a = "print('Hello, world!')"
        self.check(b, a)
class Test_exec(FixerTestCase):
    """Exercise the ``exec`` fixer: the statement form
    ``exec code in globals, locals`` becomes an ``exec(...)`` call."""

    fixer = "exec"

    def test_prefix_preservation(self):
        # Leading whitespace before the statement is kept.
        before = """  exec code in ns1, ns2"""
        after = """  exec(code, ns1, ns2)"""
        self.check(before, after)

    def test_basic(self):
        before = """exec code"""
        after = """exec(code)"""
        self.check(before, after)

    def test_with_globals(self):
        before = """exec code in ns"""
        after = """exec(code, ns)"""
        self.check(before, after)

    def test_with_globals_locals(self):
        before = """exec code in ns1, ns2"""
        after = """exec(code, ns1, ns2)"""
        self.check(before, after)

    def test_complex_1(self):
        before = """exec (a.b()) in ns"""
        after = """exec((a.b()), ns)"""
        self.check(before, after)

    def test_complex_2(self):
        before = """exec a.b() + c in ns"""
        after = """exec(a.b() + c, ns)"""
        self.check(before, after)

    # These should not be touched
    def test_unchanged_1(self):
        self.unchanged("""exec(code)""")

    def test_unchanged_2(self):
        self.unchanged("""exec (code)""")

    def test_unchanged_3(self):
        self.unchanged("""exec(code, ns)""")

    def test_unchanged_4(self):
        self.unchanged("""exec(code, ns1, ns2)""")
class Test_repr(FixerTestCase):
    """Exercise the ``repr`` fixer, which turns backtick expressions
    (`x`) into ``repr(x)`` calls, recursing into nested backticks."""

    fixer = "repr"

    def test_prefix_preservation(self):
        before = """x =   `1 + 2`"""
        after = """x =   repr(1 + 2)"""
        self.check(before, after)

    def test_simple_1(self):
        before = """x = `1 + 2`"""
        after = """x = repr(1 + 2)"""
        self.check(before, after)

    def test_simple_2(self):
        before = """y = `x`"""
        after = """y = repr(x)"""
        self.check(before, after)

    def test_complex(self):
        before = """z = `y`.__repr__()"""
        after = """z = repr(y).__repr__()"""
        self.check(before, after)

    def test_tuple(self):
        before = """x = `1, 2, 3`"""
        after = """x = repr((1, 2, 3))"""
        self.check(before, after)

    def test_nested(self):
        before = """x = `1 + `2``"""
        after = """x = repr(1 + repr(2))"""
        self.check(before, after)

    def test_nested_tuples(self):
        before = """x = `1, 2 + `3, 4``"""
        after = """x = repr((1, 2 + repr((3, 4))))"""
        self.check(before, after)
class Test_except(FixerTestCase):
    """Exercise the ``except`` fixer: ``except E, target`` is rewritten as
    ``except E as target``.  Non-name targets (tuples, lists, subscripts,
    attributes) are bound to a temporary name and unpacked/assigned on the
    first line of the handler body."""

    fixer = "except"
    def test_prefix_preservation(self):
        b = """
            try:
                pass
            except (RuntimeError, ImportError), e:
                pass"""
        a = """
            try:
                pass
            except (RuntimeError, ImportError) as e:
                pass"""
        self.check(b, a)
    def test_simple(self):
        b = """
            try:
                pass
            except Foo, e:
                pass"""
        a = """
            try:
                pass
            except Foo as e:
                pass"""
        self.check(b, a)
    def test_simple_no_space_before_target(self):
        b = """
            try:
                pass
            except Foo,e:
                pass"""
        a = """
            try:
                pass
            except Foo as e:
                pass"""
        self.check(b, a)
    def test_tuple_unpack(self):
        b = """
            def foo():
                try:
                    pass
                except Exception, (f, e):
                    pass
                except ImportError, e:
                    pass"""
        a = """
            def foo():
                try:
                    pass
                except Exception as xxx_todo_changeme:
                    (f, e) = xxx_todo_changeme.args
                    pass
                except ImportError as e:
                    pass"""
        self.check(b, a)
    def test_multi_class(self):
        b = """
            try:
                pass
            except (RuntimeError, ImportError), e:
                pass"""
        a = """
            try:
                pass
            except (RuntimeError, ImportError) as e:
                pass"""
        self.check(b, a)
    def test_list_unpack(self):
        b = """
            try:
                pass
            except Exception, [a, b]:
                pass"""
        a = """
            try:
                pass
            except Exception as xxx_todo_changeme:
                [a, b] = xxx_todo_changeme.args
                pass"""
        self.check(b, a)
    def test_weird_target_1(self):
        b = """
            try:
                pass
            except Exception, d[5]:
                pass"""
        a = """
            try:
                pass
            except Exception as xxx_todo_changeme:
                d[5] = xxx_todo_changeme
                pass"""
        self.check(b, a)
    def test_weird_target_2(self):
        b = """
            try:
                pass
            except Exception, a.foo:
                pass"""
        a = """
            try:
                pass
            except Exception as xxx_todo_changeme:
                a.foo = xxx_todo_changeme
                pass"""
        self.check(b, a)
    def test_weird_target_3(self):
        b = """
            try:
                pass
            except Exception, a().foo:
                pass"""
        a = """
            try:
                pass
            except Exception as xxx_todo_changeme:
                a().foo = xxx_todo_changeme
                pass"""
        self.check(b, a)
    def test_bare_except(self):
        b = """
            try:
                pass
            except Exception, a:
                pass
            except:
                pass"""
        a = """
            try:
                pass
            except Exception as a:
                pass
            except:
                pass"""
        self.check(b, a)
    def test_bare_except_and_else_finally(self):
        b = """
            try:
                pass
            except Exception, a:
                pass
            except:
                pass
            else:
                pass
            finally:
                pass"""
        a = """
            try:
                pass
            except Exception as a:
                pass
            except:
                pass
            else:
                pass
            finally:
                pass"""
        self.check(b, a)
    def test_multi_fixed_excepts_before_bare_except(self):
        b = """
            try:
                pass
            except TypeError, b:
                pass
            except Exception, a:
                pass
            except:
                pass"""
        a = """
            try:
                pass
            except TypeError as b:
                pass
            except Exception as a:
                pass
            except:
                pass"""
        self.check(b, a)
    def test_one_line_suites(self):
        b = """
            try: raise TypeError
            except TypeError, e:
                pass
            """
        a = """
            try: raise TypeError
            except TypeError as e:
                pass
            """
        self.check(b, a)
        b = """
            try:
                raise TypeError
            except TypeError, e: pass
            """
        a = """
            try:
                raise TypeError
            except TypeError as e: pass
            """
        self.check(b, a)
        b = """
            try: raise TypeError
            except TypeError, e: pass
            """
        a = """
            try: raise TypeError
            except TypeError as e: pass
            """
        self.check(b, a)
        b = """
            try: raise TypeError
            except TypeError, e: pass
            else: function()
            finally: done()
            """
        a = """
            try: raise TypeError
            except TypeError as e: pass
            else: function()
            finally: done()
            """
        self.check(b, a)
    # These should not be touched:
    def test_unchanged_1(self):
        s = """
            try:
                pass
            except:
                pass"""
        self.unchanged(s)
    def test_unchanged_2(self):
        s = """
            try:
                pass
            except Exception:
                pass"""
        self.unchanged(s)
    def test_unchanged_3(self):
        s = """
            try:
                pass
            except (Exception, SystemExit):
                pass"""
        self.unchanged(s)
class Test_raise(FixerTestCase):
    """Tests for the "raise" fixer: ``raise E, V[, T]`` becomes
    ``raise E(V)`` or ``raise E(V).with_traceback(T)``; tuple values are
    splatted into the call; nested-tuple exception types collapse to
    their first element; string exceptions only produce a warning.
    """
    fixer = "raise"

    def test_basic(self):
        b = """raise Exception, 5"""
        a = """raise Exception(5)"""
        self.check(b, a)

    def test_prefix_preservation(self):
        b = """raise Exception,5"""
        a = """raise Exception(5)"""
        self.check(b, a)

        b = """raise Exception, 5"""
        a = """raise Exception(5)"""
        self.check(b, a)

    def test_with_comments(self):
        b = """raise Exception, 5 # foo"""
        a = """raise Exception(5) # foo"""
        self.check(b, a)

        b = """raise E, (5, 6) % (a, b) # foo"""
        a = """raise E((5, 6) % (a, b)) # foo"""
        self.check(b, a)

        b = """def foo():
    raise Exception, 5, 6 # foo"""
        a = """def foo():
    raise Exception(5).with_traceback(6) # foo"""
        self.check(b, a)

    def test_None_value(self):
        # A None traceback slot is dropped; with_traceback uses the third item.
        b = """raise Exception(5), None, tb"""
        a = """raise Exception(5).with_traceback(tb)"""
        self.check(b, a)

    def test_tuple_value(self):
        b = """raise Exception, (5, 6, 7)"""
        a = """raise Exception(5, 6, 7)"""
        self.check(b, a)

    def test_tuple_detection(self):
        # A parenthesized expression that is not a tuple literal is NOT splatted.
        b = """raise E, (5, 6) % (a, b)"""
        a = """raise E((5, 6) % (a, b))"""
        self.check(b, a)

    def test_tuple_exc_1(self):
        b = """raise (((E1, E2), E3), E4), V"""
        a = """raise E1(V)"""
        self.check(b, a)

    def test_tuple_exc_2(self):
        b = """raise (E1, (E2, E3), E4), V"""
        a = """raise E1(V)"""
        self.check(b, a)

    # These should produce a warning

    def test_string_exc(self):
        s = """raise 'foo'"""
        self.warns_unchanged(s, "Python 3 does not support string exceptions")

    def test_string_exc_val(self):
        s = """raise "foo", 5"""
        self.warns_unchanged(s, "Python 3 does not support string exceptions")

    def test_string_exc_val_tb(self):
        s = """raise "foo", 5, 6"""
        self.warns_unchanged(s, "Python 3 does not support string exceptions")

    # These should result in traceback-assignment

    def test_tb_1(self):
        b = """def foo():
    raise Exception, 5, 6"""
        a = """def foo():
    raise Exception(5).with_traceback(6)"""
        self.check(b, a)

    def test_tb_2(self):
        b = """def foo():
    a = 5
    raise Exception, 5, 6
    b = 6"""
        a = """def foo():
    a = 5
    raise Exception(5).with_traceback(6)
    b = 6"""
        self.check(b, a)

    def test_tb_3(self):
        b = """def foo():
    raise Exception,5,6"""
        a = """def foo():
    raise Exception(5).with_traceback(6)"""
        self.check(b, a)

    def test_tb_4(self):
        b = """def foo():
    a = 5
    raise Exception,5,6
    b = 6"""
        a = """def foo():
    a = 5
    raise Exception(5).with_traceback(6)
    b = 6"""
        self.check(b, a)

    def test_tb_5(self):
        b = """def foo():
    raise Exception, (5, 6, 7), 6"""
        a = """def foo():
    raise Exception(5, 6, 7).with_traceback(6)"""
        self.check(b, a)

    def test_tb_6(self):
        b = """def foo():
    a = 5
    raise Exception, (5, 6, 7), 6
    b = 6"""
        a = """def foo():
    a = 5
    raise Exception(5, 6, 7).with_traceback(6)
    b = 6"""
        self.check(b, a)
class Test_throw(FixerTestCase):
    """Tests for the "throw" fixer: ``g.throw(E, V[, T])`` becomes
    ``g.throw(E(V))`` or ``g.throw(E(V).with_traceback(T))``, mirroring
    the "raise" fixer's treatment of tuple values and string exceptions.
    """
    fixer = "throw"

    def test_1(self):
        b = """g.throw(Exception, 5)"""
        a = """g.throw(Exception(5))"""
        self.check(b, a)

    def test_2(self):
        b = """g.throw(Exception,5)"""
        a = """g.throw(Exception(5))"""
        self.check(b, a)

    def test_3(self):
        b = """g.throw(Exception, (5, 6, 7))"""
        a = """g.throw(Exception(5, 6, 7))"""
        self.check(b, a)

    def test_4(self):
        b = """5 + g.throw(Exception, 5)"""
        a = """5 + g.throw(Exception(5))"""
        self.check(b, a)

    # These should produce warnings

    def test_warn_1(self):
        s = """g.throw("foo")"""
        self.warns_unchanged(s, "Python 3 does not support string exceptions")

    def test_warn_2(self):
        s = """g.throw("foo", 5)"""
        self.warns_unchanged(s, "Python 3 does not support string exceptions")

    def test_warn_3(self):
        s = """g.throw("foo", 5, 6)"""
        self.warns_unchanged(s, "Python 3 does not support string exceptions")

    # These should not be touched

    def test_untouched_1(self):
        s = """g.throw(Exception)"""
        self.unchanged(s)

    def test_untouched_2(self):
        s = """g.throw(Exception(5, 6))"""
        self.unchanged(s)

    def test_untouched_3(self):
        s = """5 + g.throw(Exception(5, 6))"""
        self.unchanged(s)

    # These should result in traceback-assignment

    def test_tb_1(self):
        b = """def foo():
    g.throw(Exception, 5, 6)"""
        a = """def foo():
    g.throw(Exception(5).with_traceback(6))"""
        self.check(b, a)

    def test_tb_2(self):
        b = """def foo():
    a = 5
    g.throw(Exception, 5, 6)
    b = 6"""
        a = """def foo():
    a = 5
    g.throw(Exception(5).with_traceback(6))
    b = 6"""
        self.check(b, a)

    def test_tb_3(self):
        b = """def foo():
    g.throw(Exception,5,6)"""
        a = """def foo():
    g.throw(Exception(5).with_traceback(6))"""
        self.check(b, a)

    def test_tb_4(self):
        b = """def foo():
    a = 5
    g.throw(Exception,5,6)
    b = 6"""
        a = """def foo():
    a = 5
    g.throw(Exception(5).with_traceback(6))
    b = 6"""
        self.check(b, a)

    def test_tb_5(self):
        b = """def foo():
    g.throw(Exception, (5, 6, 7), 6)"""
        a = """def foo():
    g.throw(Exception(5, 6, 7).with_traceback(6))"""
        self.check(b, a)

    def test_tb_6(self):
        b = """def foo():
    a = 5
    g.throw(Exception, (5, 6, 7), 6)
    b = 6"""
        a = """def foo():
    a = 5
    g.throw(Exception(5, 6, 7).with_traceback(6))
    b = 6"""
        self.check(b, a)

    def test_tb_7(self):
        b = """def foo():
    a + g.throw(Exception, 5, 6)"""
        a = """def foo():
    a + g.throw(Exception(5).with_traceback(6))"""
        self.check(b, a)

    def test_tb_8(self):
        b = """def foo():
    a = 5
    a + g.throw(Exception, 5, 6)
    b = 6"""
        a = """def foo():
    a = 5
    a + g.throw(Exception(5).with_traceback(6))
    b = 6"""
        self.check(b, a)
class Test_long(FixerTestCase):
    """Tests for the "long" fixer, which renames uses of the name
    ``long`` to ``int`` while leaving binding occurrences alone."""

    fixer = "long"

    def test_1(self):
        # Call position.
        self.check("""x = long(x)""", """x = int(x)""")

    def test_2(self):
        # As an argument to isinstance().
        self.check("""y = isinstance(x, long)""", """y = isinstance(x, int)""")

    def test_3(self):
        # Inside a tuple literal.
        self.check("""z = type(x) in (int, long)""",
                   """z = type(x) in (int, int)""")

    def test_unchanged(self):
        # Binding occurrences (assignment targets, def/class names,
        # parameter names) must not be renamed.
        for src in ("""long = True""",
                    """s.long = True""",
                    """def long(): pass""",
                    """class long(): pass""",
                    """def f(long): pass""",
                    """def f(g, long): pass""",
                    """def f(x, long=True): pass"""):
            self.unchanged(src)

    def test_prefix_preservation(self):
        # Whitespace around the argument survives the rename.
        self.check("""x = long( x )""", """x = int( x )""")
class Test_execfile(FixerTestCase):
    """Tests for the "execfile" fixer: ``execfile(fn, ...)`` becomes
    ``exec(compile(open(fn).read(), fn, 'exec'), ...)``, forwarding any
    globals/locals arguments unchanged.
    """
    fixer = "execfile"

    def test_conversion(self):
        b = """execfile("fn")"""
        a = """exec(compile(open("fn").read(), "fn", 'exec'))"""
        self.check(b, a)

        b = """execfile("fn", glob)"""
        a = """exec(compile(open("fn").read(), "fn", 'exec'), glob)"""
        self.check(b, a)

        b = """execfile("fn", glob, loc)"""
        a = """exec(compile(open("fn").read(), "fn", 'exec'), glob, loc)"""
        self.check(b, a)

        b = """execfile("fn", globals=glob)"""
        a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob)"""
        self.check(b, a)

        b = """execfile("fn", locals=loc)"""
        a = """exec(compile(open("fn").read(), "fn", 'exec'), locals=loc)"""
        self.check(b, a)

        b = """execfile("fn", globals=glob, locals=loc)"""
        a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob, locals=loc)"""
        self.check(b, a)

    def test_spacing(self):
        # Whitespace inside the original call is carried into open(...).
        b = """execfile( "fn" )"""
        a = """exec(compile(open( "fn" ).read(), "fn", 'exec'))"""
        self.check(b, a)

        b = """execfile("fn", globals = glob)"""
        a = """exec(compile(open("fn").read(), "fn", 'exec'), globals = glob)"""
        self.check(b, a)
class Test_isinstance(FixerTestCase):
    """Tests for the "isinstance" fixer: removes duplicate entries from
    the type tuple (duplicates typically appear after ``long`` -> ``int``
    rewriting) and unwraps the tuple when one entry remains.
    """
    fixer = "isinstance"

    def test_remove_multiple_items(self):
        b = """isinstance(x, (int, int, int))"""
        a = """isinstance(x, int)"""
        self.check(b, a)

        b = """isinstance(x, (int, float, int, int, float))"""
        a = """isinstance(x, (int, float))"""
        self.check(b, a)

        b = """isinstance(x, (int, float, int, int, float, str))"""
        a = """isinstance(x, (int, float, str))"""
        self.check(b, a)

        # Non-name entries (calls) are kept even if textually repeated.
        b = """isinstance(foo() + bar(), (x(), y(), x(), int, int))"""
        a = """isinstance(foo() + bar(), (x(), y(), x(), int))"""
        self.check(b, a)

    def test_prefix_preservation(self):
        b = """if isinstance(  foo(), (  bar, bar, baz )) : pass"""
        a = """if isinstance(  foo(), (  bar, baz )) : pass"""
        self.check(b, a)

    def test_unchanged(self):
        self.unchanged("isinstance(x, (str, int))")
class Test_dict(FixerTestCase):
    """Tests for the "dict" fixer.

    The fixer wraps ``d.keys()``/``d.items()``/``d.values()`` in
    ``list(...)``, rewrites the ``iter*`` methods to ``iter(d.*())`` and
    the ``view*`` methods to the plain methods — except in contexts that
    already consume an iterable (for-loops, comprehensions, and the
    callables in ``fixer_util.consuming_calls``).

    Bug fix: the view* variants of tests 14/15/17/18/19 were originally
    ALSO named test_14, test_15, test_17, test_18 and test_19, which
    shadowed the iter* variants above so those five tests never ran.
    They are now uniquely numbered test_28..test_32.
    """
    fixer = "dict"

    def test_prefix_preservation(self):
        b = "if d. keys ( ) : pass"
        a = "if list(d. keys ( )) : pass"
        self.check(b, a)

        b = "if d. items ( ) : pass"
        a = "if list(d. items ( )) : pass"
        self.check(b, a)

        b = "if d. iterkeys ( ) : pass"
        a = "if iter(d. keys ( )) : pass"
        self.check(b, a)

        b = "[i for i in d. iterkeys( ) ]"
        a = "[i for i in d. keys( ) ]"
        self.check(b, a)

        b = "if d. viewkeys ( ) : pass"
        a = "if d. keys ( ) : pass"
        self.check(b, a)

        b = "[i for i in d. viewkeys( ) ]"
        a = "[i for i in d. keys( ) ]"
        self.check(b, a)

    def test_trailing_comment(self):
        b = "d.keys() # foo"
        a = "list(d.keys()) # foo"
        self.check(b, a)

        b = "d.items() # foo"
        a = "list(d.items()) # foo"
        self.check(b, a)

        b = "d.iterkeys() # foo"
        a = "iter(d.keys()) # foo"
        self.check(b, a)

        b = """[i for i in d.iterkeys() # foo
               ]"""
        a = """[i for i in d.keys() # foo
               ]"""
        self.check(b, a)

        b = """[i for i in d.iterkeys() # foo
               ]"""
        a = """[i for i in d.keys() # foo
               ]"""
        self.check(b, a)

        b = "d.viewitems() # foo"
        a = "d.items() # foo"
        self.check(b, a)

    def test_unchanged(self):
        # Calls already wrapped in a consuming callable need no list().
        for wrapper in fixer_util.consuming_calls:
            s = "s = %s(d.keys())" % wrapper
            self.unchanged(s)

            s = "s = %s(d.values())" % wrapper
            self.unchanged(s)

            s = "s = %s(d.items())" % wrapper
            self.unchanged(s)

    def test_01(self):
        b = "d.keys()"
        a = "list(d.keys())"
        self.check(b, a)

        b = "a[0].foo().keys()"
        a = "list(a[0].foo().keys())"
        self.check(b, a)

    def test_02(self):
        b = "d.items()"
        a = "list(d.items())"
        self.check(b, a)

    def test_03(self):
        b = "d.values()"
        a = "list(d.values())"
        self.check(b, a)

    def test_04(self):
        b = "d.iterkeys()"
        a = "iter(d.keys())"
        self.check(b, a)

    def test_05(self):
        b = "d.iteritems()"
        a = "iter(d.items())"
        self.check(b, a)

    def test_06(self):
        b = "d.itervalues()"
        a = "iter(d.values())"
        self.check(b, a)

    def test_07(self):
        s = "list(d.keys())"
        self.unchanged(s)

    def test_08(self):
        s = "sorted(d.keys())"
        self.unchanged(s)

    def test_09(self):
        b = "iter(d.keys())"
        a = "iter(list(d.keys()))"
        self.check(b, a)

    def test_10(self):
        b = "foo(d.keys())"
        a = "foo(list(d.keys()))"
        self.check(b, a)

    def test_11(self):
        b = "for i in d.keys(): print i"
        a = "for i in list(d.keys()): print i"
        self.check(b, a)

    def test_12(self):
        b = "for i in d.iterkeys(): print i"
        a = "for i in d.keys(): print i"
        self.check(b, a)

    def test_13(self):
        b = "[i for i in d.keys()]"
        a = "[i for i in list(d.keys())]"
        self.check(b, a)

    def test_14(self):
        b = "[i for i in d.iterkeys()]"
        a = "[i for i in d.keys()]"
        self.check(b, a)

    def test_15(self):
        b = "(i for i in d.keys())"
        a = "(i for i in list(d.keys()))"
        self.check(b, a)

    def test_16(self):
        b = "(i for i in d.iterkeys())"
        a = "(i for i in d.keys())"
        self.check(b, a)

    def test_17(self):
        b = "iter(d.iterkeys())"
        a = "iter(d.keys())"
        self.check(b, a)

    def test_18(self):
        b = "list(d.iterkeys())"
        a = "list(d.keys())"
        self.check(b, a)

    def test_19(self):
        b = "sorted(d.iterkeys())"
        a = "sorted(d.keys())"
        self.check(b, a)

    def test_20(self):
        b = "foo(d.iterkeys())"
        a = "foo(iter(d.keys()))"
        self.check(b, a)

    def test_21(self):
        b = "print h.iterkeys().next()"
        a = "print iter(h.keys()).next()"
        self.check(b, a)

    def test_22(self):
        b = "print h.keys()[0]"
        a = "print list(h.keys())[0]"
        self.check(b, a)

    def test_23(self):
        b = "print list(h.iterkeys().next())"
        a = "print list(iter(h.keys()).next())"
        self.check(b, a)

    def test_24(self):
        b = "for x in h.keys()[0]: print x"
        a = "for x in list(h.keys())[0]: print x"
        self.check(b, a)

    def test_25(self):
        b = "d.viewkeys()"
        a = "d.keys()"
        self.check(b, a)

    def test_26(self):
        b = "d.viewitems()"
        a = "d.items()"
        self.check(b, a)

    def test_27(self):
        b = "d.viewvalues()"
        a = "d.values()"
        self.check(b, a)

    # view* variants of tests 14/15/17/18/19 (renamed from their
    # duplicated names so they no longer shadow the iter* tests above).
    def test_28(self):
        b = "[i for i in d.viewkeys()]"
        a = "[i for i in d.keys()]"
        self.check(b, a)

    def test_29(self):
        b = "(i for i in d.viewkeys())"
        a = "(i for i in d.keys())"
        self.check(b, a)

    def test_30(self):
        b = "iter(d.viewkeys())"
        a = "iter(d.keys())"
        self.check(b, a)

    def test_31(self):
        b = "list(d.viewkeys())"
        a = "list(d.keys())"
        self.check(b, a)

    def test_32(self):
        b = "sorted(d.viewkeys())"
        a = "sorted(d.keys())"
        self.check(b, a)
class Test_xrange(FixerTestCase):
    """Tests for the "xrange" fixer: ``xrange(...)`` becomes
    ``range(...)``, and existing ``range(...)`` calls get wrapped in
    ``list(...)`` except in iterating/consuming contexts.
    """
    fixer = "xrange"

    def test_prefix_preservation(self):
        b = """x =    xrange( 10 )"""
        a = """x =    range( 10 )"""
        self.check(b, a)

        b = """x = xrange( 1 , 10 )"""
        a = """x = range( 1 , 10 )"""
        self.check(b, a)

        b = """x = xrange( 0 , 10 , 2 )"""
        a = """x = range( 0 , 10 , 2 )"""
        self.check(b, a)

    def test_single_arg(self):
        b = """x = xrange(10)"""
        a = """x = range(10)"""
        self.check(b, a)

    def test_two_args(self):
        b = """x = xrange(1, 10)"""
        a = """x = range(1, 10)"""
        self.check(b, a)

    def test_three_args(self):
        b = """x = xrange(0, 10, 2)"""
        a = """x = range(0, 10, 2)"""
        self.check(b, a)

    def test_wrap_in_list(self):
        # Plain range() calls whose result is used as a list get wrapped.
        b = """x = range(10, 3, 9)"""
        a = """x = list(range(10, 3, 9))"""
        self.check(b, a)

        b = """x = foo(range(10, 3, 9))"""
        a = """x = foo(list(range(10, 3, 9)))"""
        self.check(b, a)

        b = """x = range(10, 3, 9) + [4]"""
        a = """x = list(range(10, 3, 9)) + [4]"""
        self.check(b, a)

        b = """x = range(10)[::-1]"""
        a = """x = list(range(10))[::-1]"""
        self.check(b, a)

        b = """x = range(10)  [3]"""
        a = """x = list(range(10))  [3]"""
        self.check(b, a)

    def test_xrange_in_for(self):
        b = """for i in xrange(10):\n    j=i"""
        a = """for i in range(10):\n    j=i"""
        self.check(b, a)

        b = """[i for i in xrange(10)]"""
        a = """[i for i in range(10)]"""
        self.check(b, a)

    def test_range_in_for(self):
        # range() iterated directly needs no list() wrapper.
        self.unchanged("for i in range(10): pass")
        self.unchanged("[i for i in range(10)]")

    def test_in_contains_test(self):
        self.unchanged("x in range(10, 3, 9)")

    def test_in_consuming_context(self):
        for call in fixer_util.consuming_calls:
            self.unchanged("a = %s(range(10))" % call)
class Test_xrange_with_reduce(FixerTestCase):
    """Runs the "xrange" and "reduce" fixers together, checking that the
    import added by fix_reduce coexists with the xrange rewrite."""

    def setUp(self):
        super(Test_xrange_with_reduce, self).setUp(["xrange", "reduce"])

    def test_double_transform(self):
        before = """reduce(x, xrange(5))"""
        after = """from functools import reduce
reduce(x, range(5))"""
        self.check(before, after)
class Test_raw_input(FixerTestCase):
    """Tests for the "raw_input" fixer: ``raw_input(...)`` becomes
    ``input(...)``."""
    fixer = "raw_input"

    def test_prefix_preservation(self):
        b = """x = raw_input( )"""
        a = """x = input( )"""
        self.check(b, a)

        b = """x = raw_input( '' )"""
        a = """x = input( '' )"""
        self.check(b, a)

    def test_1(self):
        b = """x = raw_input()"""
        a = """x = input()"""
        self.check(b, a)

    def test_2(self):
        b = """x = raw_input('')"""
        a = """x = input('')"""
        self.check(b, a)

    def test_3(self):
        b = """x = raw_input('prompt')"""
        a = """x = input('prompt')"""
        self.check(b, a)

    def test_4(self):
        b = """x = raw_input(foo(a) + 6)"""
        a = """x = input(foo(a) + 6)"""
        self.check(b, a)

    def test_5(self):
        b = """x = raw_input(invite).split()"""
        a = """x = input(invite).split()"""
        self.check(b, a)

    def test_6(self):
        b = """x = raw_input(invite) . split ()"""
        a = """x = input(invite) . split ()"""
        self.check(b, a)

    def test_8(self):
        # Nested inside another call; int(input(...)) is NOT re-wrapped.
        b = "x = int(raw_input())"
        a = "x = int(input())"
        self.check(b, a)
class Test_funcattrs(FixerTestCase):
    """Tests for the "funcattrs" fixer: ``f.func_X`` attribute accesses
    become ``f.__X__``."""

    fixer = "funcattrs"

    attrs = ["closure", "doc", "name", "defaults", "code", "globals", "dict"]

    def test(self):
        for name in self.attrs:
            # Simple access and access in the middle of a dotted chain.
            self.check("a.func_%s" % name, "a.__%s__" % name)
            self.check("self.foo.func_%s.foo_bar" % name,
                       "self.foo.__%s__.foo_bar" % name)

    def test_unchanged(self):
        for name in self.attrs:
            # Bare names and already-converted dunder forms are left alone.
            self.unchanged("foo(func_%s + 5)" % name)
            self.unchanged("f(foo.__%s__)" % name)
            self.unchanged("f(foo.__%s__.foo)" % name)
class Test_xreadlines(FixerTestCase):
    """Tests for the "xreadlines" fixer: ``f.xreadlines()`` in a for-loop
    becomes iteration over ``f`` itself, and the bound-method reference
    ``f.xreadlines`` becomes ``f.__iter__``."""
    fixer = "xreadlines"

    def test_call(self):
        b = "for x in f.xreadlines(): pass"
        a = "for x in f: pass"
        self.check(b, a)

        b = "for x in foo().xreadlines(): pass"
        a = "for x in foo(): pass"
        self.check(b, a)

        b = "for x in (5 + foo()).xreadlines(): pass"
        a = "for x in (5 + foo()): pass"
        self.check(b, a)

    def test_attr_ref(self):
        b = "foo(f.xreadlines + 5)"
        a = "foo(f.__iter__ + 5)"
        self.check(b, a)

        b = "foo(f().xreadlines + 5)"
        a = "foo(f().__iter__ + 5)"
        self.check(b, a)

        b = "foo((5 + f()).xreadlines + 5)"
        a = "foo((5 + f()).__iter__ + 5)"
        self.check(b, a)

    def test_unchanged(self):
        # Calls with arguments, and the bare name, are not touched.
        s = "for x in f.xreadlines(5): pass"
        self.unchanged(s)

        s = "for x in f.xreadlines(k=5): pass"
        self.unchanged(s)

        s = "for x in f.xreadlines(*k, **v): pass"
        self.unchanged(s)

        s = "foo(xreadlines)"
        self.unchanged(s)
class ImportsFixerTests:
    """Mixin of shared tests for the module-rename fixers.

    Subclasses combine this with FixerTestCase and must provide a
    ``modules`` mapping of {old module name: new module name}.
    """

    def test_import_module(self):
        for old, new in self.modules.items():
            b = "import %s" % old
            a = "import %s" % new
            self.check(b, a)

            b = "import foo, %s, bar" % old
            a = "import foo, %s, bar" % new
            self.check(b, a)

    def test_import_from(self):
        for old, new in self.modules.items():
            b = "from %s import foo" % old
            a = "from %s import foo" % new
            self.check(b, a)

            b = "from %s import foo, bar" % old
            a = "from %s import foo, bar" % new
            self.check(b, a)

            b = "from %s import (yes, no)" % old
            a = "from %s import (yes, no)" % new
            self.check(b, a)

    def test_import_module_as(self):
        for old, new in self.modules.items():
            b = "import %s as foo_bar" % old
            a = "import %s as foo_bar" % new
            self.check(b, a)

            b = "import %s as foo_bar" % old
            a = "import %s as foo_bar" % new
            self.check(b, a)

    def test_import_from_as(self):
        for old, new in self.modules.items():
            b = "from %s import foo as bar" % old
            a = "from %s import foo as bar" % new
            self.check(b, a)

    def test_star(self):
        for old, new in self.modules.items():
            b = "from %s import *" % old
            a = "from %s import *" % new
            self.check(b, a)

    def test_import_module_usage(self):
        for old, new in self.modules.items():
            b = """
                import %s
                foo(%s.bar)
                """ % (old, old)
            a = """
                import %s
                foo(%s.bar)
                """ % (new, new)
            self.check(b, a)

            # "from x import ..." does not rename later uses of the old name.
            b = """
                from %s import x
                %s = 23
                """ % (old, old)
            a = """
                from %s import x
                %s = 23
                """ % (new, old)
            self.check(b, a)

            # Usage with no import in sight stays untouched.
            s = """
                def f():
                    %s.method()
                """ % (old,)
            self.unchanged(s)

            # test nested usage
            b = """
                import %s
                %s.bar(%s.foo)
                """ % (old, old, old)
            a = """
                import %s
                %s.bar(%s.foo)
                """ % (new, new, new)
            self.check(b, a)

            # Attribute access spelled like the module name is not a use.
            b = """
                import %s
                x.%s
                """ % (old, old)
            a = """
                import %s
                x.%s
                """ % (new, old)
            self.check(b, a)
class Test_imports(FixerTestCase, ImportsFixerTests):
    """Runs the shared ImportsFixerTests against the "imports" fixer with
    its full stdlib-rename MAPPING, plus multi-import cases."""
    fixer = "imports"
    from ..fixes.fix_imports import MAPPING as modules

    def test_multiple_imports(self):
        b = """import urlparse, cStringIO"""
        a = """import urllib.parse, io"""
        self.check(b, a)

    def test_multiple_imports_as(self):
        b = """
            import copy_reg as bar, HTMLParser as foo, urlparse
            s = urlparse.spam(bar.foo())
            """
        a = """
            import copyreg as bar, html.parser as foo, urllib.parse
            s = urllib.parse.spam(bar.foo())
            """
        self.check(b, a)
class Test_imports2(FixerTestCase, ImportsFixerTests):
    """Runs the shared ImportsFixerTests against the "imports2" fixer."""
    fixer = "imports2"
    from ..fixes.fix_imports2 import MAPPING as modules
class Test_imports_fixer_order(FixerTestCase, ImportsFixerTests):
    """Runs both "imports" and "imports2" together; the module map is
    imports2's mapping with the dbm-family names overridden from the
    "imports" mapping to check which fixer wins."""

    def setUp(self):
        super(Test_imports_fixer_order, self).setUp(['imports', 'imports2'])
        from ..fixes.fix_imports2 import MAPPING as mapping2
        self.modules = mapping2.copy()
        from ..fixes.fix_imports import MAPPING as mapping1
        for key in ('dbhash', 'dumbdbm', 'dbm', 'gdbm'):
            self.modules[key] = mapping1[key]

    def test_after_local_imports_refactoring(self):
        # Both module-rename fixers must run after the "import" fixer.
        for fix in ("imports", "imports2"):
            self.fixer = fix
            self.assert_runs_after("import")
class Test_urllib(FixerTestCase):
    """Tests for the "urllib" fixer.  Unlike plain module renames, one
    old module maps to SEVERAL new modules; MAPPING values are lists of
    (new_module, [member, ...]) pairs, and from-imports may be split
    across multiple new import statements."""
    fixer = "urllib"
    from ..fixes.fix_urllib import MAPPING as modules

    def test_import_module(self):
        for old, changes in self.modules.items():
            b = "import %s" % old
            a = "import %s" % ", ".join(map(itemgetter(0), changes))
            self.check(b, a)

    def test_import_from(self):
        for old, changes in self.modules.items():
            all_members = []
            for new, members in changes:
                for member in members:
                    all_members.append(member)
                    b = "from %s import %s" % (old, member)
                    a = "from %s import %s" % (new, member)
                    self.check(b, a)

                    s = "from foo import %s" % member
                    self.unchanged(s)

                b = "from %s import %s" % (old, ", ".join(members))
                a = "from %s import %s" % (new, ", ".join(members))
                self.check(b, a)

                s = "from foo import %s" % ", ".join(members)
                self.unchanged(s)

            # test the breaking of a module into multiple replacements
            b = "from %s import %s" % (old, ", ".join(all_members))
            a = "\n".join(["from %s import %s" % (new, ", ".join(members))
                            for (new, members) in changes])
            self.check(b, a)

    def test_import_module_as(self):
        for old in self.modules:
            s = "import %s as foo" % old
            self.warns_unchanged(s, "This module is now multiple modules")

    def test_import_from_as(self):
        for old, changes in self.modules.items():
            for new, members in changes:
                for member in members:
                    b = "from %s import %s as foo_bar" % (old, member)
                    a = "from %s import %s as foo_bar" % (new, member)
                    self.check(b, a)
                    b = "from %s import %s as blah, %s" % (old, member, member)
                    a = "from %s import %s as blah, %s" % (new, member, member)
                    self.check(b, a)

    def test_star(self):
        for old in self.modules:
            s = "from %s import *" % old
            self.warns_unchanged(s, "Cannot handle star imports")

    def test_indented(self):
        # Split imports keep the indentation of the original statement.
        b = """
def foo():
    from urllib import urlencode, urlopen
"""
        a = """
def foo():
    from urllib.parse import urlencode
    from urllib.request import urlopen
"""
        self.check(b, a)

        b = """
def foo():
    other()
    from urllib import urlencode, urlopen
"""
        a = """
def foo():
    other()
    from urllib.parse import urlencode
    from urllib.request import urlopen
"""
        self.check(b, a)

    def test_import_module_usage(self):
        for old, changes in self.modules.items():
            for new, members in changes:
                for member in members:
                    new_import = ", ".join([n for (n, mems)
                                            in self.modules[old]])
                    b = """
                        import %s
                        foo(%s.%s)
                        """ % (old, old, member)
                    a = """
                        import %s
                        foo(%s.%s)
                        """ % (new_import, new, member)
                    self.check(b, a)
                    b = """
                        import %s
                        %s.%s(%s.%s)
                        """ % (old, old, member, old, member)
                    a = """
                        import %s
                        %s.%s(%s.%s)
                        """ % (new_import, new, member, new, member)
                    self.check(b, a)
class Test_input(FixerTestCase):
    """Tests for the "input" fixer: ``input(...)`` becomes
    ``eval(input(...))``, and is idempotent on already-wrapped calls."""
    fixer = "input"

    def test_prefix_preservation(self):
        b = """x = input( )"""
        a = """x = eval(input( ))"""
        self.check(b, a)

        b = """x = input( '' )"""
        a = """x = eval(input( '' ))"""
        self.check(b, a)

    def test_trailing_comment(self):
        b = """x = input()  #  foo"""
        a = """x = eval(input())  #  foo"""
        self.check(b, a)

    def test_idempotency(self):
        s = """x = eval(input())"""
        self.unchanged(s)

        s = """x = eval(input(''))"""
        self.unchanged(s)

        s = """x = eval(input(foo(5) + 9))"""
        self.unchanged(s)

    def test_1(self):
        b = """x = input()"""
        a = """x = eval(input())"""
        self.check(b, a)

    def test_2(self):
        b = """x = input('')"""
        a = """x = eval(input(''))"""
        self.check(b, a)

    def test_3(self):
        b = """x = input('prompt')"""
        a = """x = eval(input('prompt'))"""
        self.check(b, a)

    def test_4(self):
        b = """x = input(foo(5) + 9)"""
        a = """x = eval(input(foo(5) + 9))"""
        self.check(b, a)
class Test_tuple_params(FixerTestCase):
    """Tests for the "tuple_params" fixer.

    Tuple parameters in ``def`` statements are replaced with generated
    ``xxx_todo_changeme`` names that are unpacked on the first line of
    the body (after any docstring).  Tuple parameters in ``lambda`` are
    replaced by a joined name (e.g. ``x_y``) with subscripted uses.
    """
    fixer = "tuple_params"

    def test_unchanged_1(self):
        s = """def foo(): pass"""
        self.unchanged(s)

    def test_unchanged_2(self):
        s = """def foo(a, b, c): pass"""
        self.unchanged(s)

    def test_unchanged_3(self):
        s = """def foo(a=3, b=4, c=5): pass"""
        self.unchanged(s)

    def test_1(self):
        b = """
            def foo(((a, b), c)):
                x = 5"""
        a = """
            def foo(xxx_todo_changeme):
                ((a, b), c) = xxx_todo_changeme
                x = 5"""
        self.check(b, a)

    def test_2(self):
        b = """
            def foo(((a, b), c), d):
                x = 5"""
        a = """
            def foo(xxx_todo_changeme, d):
                ((a, b), c) = xxx_todo_changeme
                x = 5"""
        self.check(b, a)

    def test_3(self):
        b = """
            def foo(((a, b), c), d) -> e:
                x = 5"""
        a = """
            def foo(xxx_todo_changeme, d) -> e:
                ((a, b), c) = xxx_todo_changeme
                x = 5"""
        self.check(b, a)

    def test_semicolon(self):
        # One-line bodies get the unpack statement spliced in before them.
        b = """
            def foo(((a, b), c)): x = 5; y = 7"""
        a = """
            def foo(xxx_todo_changeme): ((a, b), c) = xxx_todo_changeme; x = 5; y = 7"""
        self.check(b, a)

    def test_keywords(self):
        b = """
            def foo(((a, b), c), d, e=5) -> z:
                x = 5"""
        a = """
            def foo(xxx_todo_changeme, d, e=5) -> z:
                ((a, b), c) = xxx_todo_changeme
                x = 5"""
        self.check(b, a)

    def test_varargs(self):
        b = """
            def foo(((a, b), c), d, *vargs, **kwargs) -> z:
                x = 5"""
        a = """
            def foo(xxx_todo_changeme, d, *vargs, **kwargs) -> z:
                ((a, b), c) = xxx_todo_changeme
                x = 5"""
        self.check(b, a)

    def test_multi_1(self):
        # Multiple tuple params get numbered temporary names.
        b = """
            def foo(((a, b), c), (d, e, f)) -> z:
                x = 5"""
        a = """
            def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
                ((a, b), c) = xxx_todo_changeme
                (d, e, f) = xxx_todo_changeme1
                x = 5"""
        self.check(b, a)

    def test_multi_2(self):
        b = """
            def foo(x, ((a, b), c), d, (e, f, g), y) -> z:
                x = 5"""
        a = """
            def foo(x, xxx_todo_changeme, d, xxx_todo_changeme1, y) -> z:
                ((a, b), c) = xxx_todo_changeme
                (e, f, g) = xxx_todo_changeme1
                x = 5"""
        self.check(b, a)

    def test_docstring(self):
        # The unpack statements go after the docstring, not before it.
        b = """
            def foo(((a, b), c), (d, e, f)) -> z:
                "foo foo foo foo"
                x = 5"""
        a = """
            def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
                "foo foo foo foo"
                ((a, b), c) = xxx_todo_changeme
                (d, e, f) = xxx_todo_changeme1
                x = 5"""
        self.check(b, a)

    def test_lambda_no_change(self):
        s = """lambda x: x + 5"""
        self.unchanged(s)

    def test_lambda_parens_single_arg(self):
        # Redundant parentheses around a single lambda arg are dropped.
        b = """lambda (x): x + 5"""
        a = """lambda x: x + 5"""
        self.check(b, a)

        b = """lambda(x): x + 5"""
        a = """lambda x: x + 5"""
        self.check(b, a)

        b = """lambda ((((x)))): x + 5"""
        a = """lambda x: x + 5"""
        self.check(b, a)

        b = """lambda((((x)))): x + 5"""
        a = """lambda x: x + 5"""
        self.check(b, a)

    def test_lambda_simple(self):
        b = """lambda (x, y): x + f(y)"""
        a = """lambda x_y: x_y[0] + f(x_y[1])"""
        self.check(b, a)

        b = """lambda(x, y): x + f(y)"""
        a = """lambda x_y: x_y[0] + f(x_y[1])"""
        self.check(b, a)

        b = """lambda (((x, y))): x + f(y)"""
        a = """lambda x_y: x_y[0] + f(x_y[1])"""
        self.check(b, a)

        b = """lambda(((x, y))): x + f(y)"""
        a = """lambda x_y: x_y[0] + f(x_y[1])"""
        self.check(b, a)

    def test_lambda_one_tuple(self):
        # A one-element tuple param gets a numeric suffix (x1), not x_.
        b = """lambda (x,): x + f(x)"""
        a = """lambda x1: x1[0] + f(x1[0])"""
        self.check(b, a)

        b = """lambda (((x,))): x + f(x)"""
        a = """lambda x1: x1[0] + f(x1[0])"""
        self.check(b, a)

    def test_lambda_simple_multi_use(self):
        b = """lambda (x, y): x + x + f(x) + x"""
        a = """lambda x_y: x_y[0] + x_y[0] + f(x_y[0]) + x_y[0]"""
        self.check(b, a)

    def test_lambda_simple_reverse(self):
        b = """lambda (x, y): y + x"""
        a = """lambda x_y: x_y[1] + x_y[0]"""
        self.check(b, a)

    def test_lambda_nested(self):
        b = """lambda (x, (y, z)): x + y + z"""
        a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
        self.check(b, a)

        b = """lambda (((x, (y, z)))): x + y + z"""
        a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
        self.check(b, a)

    def test_lambda_nested_multi_use(self):
        b = """lambda (x, (y, z)): x + y + f(y)"""
        a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + f(x_y_z[1][0])"""
        self.check(b, a)
class Test_methodattrs(FixerTestCase):
    """Tests for the "methodattrs" fixer: ``m.im_func``/``m.im_self``
    become ``m.__func__``/``m.__self__``, and ``m.im_class`` becomes
    ``m.__self__.__class__``."""

    fixer = "methodattrs"

    attrs = ["func", "self", "class"]

    def test(self):
        for name in self.attrs:
            # im_class has no direct dunder; it expands to two attributes.
            if name == "class":
                self.check("a.im_%s" % name, "a.__self__.__class__")
                self.check("self.foo.im_%s.foo_bar" % name,
                           "self.foo.__self__.__class__.foo_bar")
            else:
                self.check("a.im_%s" % name, "a.__%s__" % name)
                self.check("self.foo.im_%s.foo_bar" % name,
                           "self.foo.__%s__.foo_bar" % name)

    def test_unchanged(self):
        for name in self.attrs:
            # Bare names and already-converted dunder forms are left alone.
            self.unchanged("foo(im_%s + 5)" % name)
            self.unchanged("f(foo.__%s__)" % name)
            self.unchanged("f(foo.__%s__.foo)" % name)
class Test_next(FixerTestCase):
fixer = "next"
def test_1(self):
b = """it.next()"""
a = """next(it)"""
self.check(b, a)
def test_2(self):
b = """a.b.c.d.next()"""
a = """next(a.b.c.d)"""
self.check(b, a)
def test_3(self):
b = """(a + b).next()"""
a = """next((a + b))"""
self.check(b, a)
def test_4(self):
b = """a().next()"""
a = """next(a())"""
self.check(b, a)
def test_5(self):
b = """a().next() + b"""
a = """next(a()) + b"""
self.check(b, a)
def test_6(self):
b = """c( a().next() + b)"""
a = """c( next(a()) + b)"""
self.check(b, a)
def test_prefix_preservation_1(self):
b = """
for a in b:
foo(a)
a.next()
"""
a = """
for a in b:
foo(a)
next(a)
"""
self.check(b, a)
def test_prefix_preservation_2(self):
b = """
for a in b:
foo(a) # abc
# def
a.next()
"""
a = """
for a in b:
foo(a) # abc
# def
next(a)
"""
self.check(b, a)
def test_prefix_preservation_3(self):
b = """
next = 5
for a in b:
foo(a)
a.next()
"""
a = """
next = 5
for a in b:
foo(a)
a.__next__()
"""
self.check(b, a, ignore_warnings=True)
def test_prefix_preservation_4(self):
b = """
next = 5
for a in b:
foo(a) # abc
# def
a.next()
"""
a = """
next = 5
for a in b:
foo(a) # abc
# def
a.__next__()
"""
self.check(b, a, ignore_warnings=True)
def test_prefix_preservation_5(self):
b = """
next = 5
for a in b:
foo(foo(a), # abc
a.next())
"""
a = """
next = 5
for a in b:
foo(foo(a), # abc
a.__next__())
"""
self.check(b, a, ignore_warnings=True)
def test_prefix_preservation_6(self):
b = """
for a in b:
foo(foo(a), # abc
a.next())
"""
a = """
for a in b:
foo(foo(a), # abc
next(a))
"""
self.check(b, a)
def test_method_1(self):
b = """
class A:
def next(self):
pass
"""
a = """
class A:
def __next__(self):
pass
"""
self.check(b, a)
def test_method_2(self):
b = """
class A(object):
def next(self):
pass
"""
a = """
class A(object):
def __next__(self):
pass
"""
self.check(b, a)
def test_method_3(self):
b = """
class A:
def next(x):
pass
"""
a = """
class A:
def __next__(x):
pass
"""
self.check(b, a)
def test_method_4(self):
b = """
class A:
def __init__(self, foo):
self.foo = foo
def next(self):
pass
def __iter__(self):
return self
"""
a = """
class A:
def __init__(self, foo):
self.foo = foo
def __next__(self):
pass
def __iter__(self):
return self
"""
self.check(b, a)
def test_method_unchanged(self):
s = """
class A:
def next(self, a, b):
pass
"""
self.unchanged(s)
def test_shadowing_assign_simple(self):
s = """
next = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_assign_tuple_1(self):
s = """
(next, a) = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_assign_tuple_2(self):
s = """
(a, (b, (next, c)), a) = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_assign_list_1(self):
s = """
[next, a] = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_assign_list_2(self):
s = """
[a, [b, [next, c]], a] = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_builtin_assign(self):
        """Assigning to ``__builtin__.next`` counts as shadowing the builtin."""
        s = """
            def foo():
                __builtin__.next = foo
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_builtin_assign_in_tuple(self):
        """``__builtin__.next`` assigned via tuple unpacking also warns."""
        s = """
            def foo():
                (a, __builtin__.next) = foo
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_builtin_assign_in_list(self):
        """``__builtin__.next`` assigned via list unpacking also warns."""
        s = """
            def foo():
                [a, __builtin__.next] = foo
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_assign_to_next(self):
        """Assigning to an *attribute* named next (A.next) is not shadowing; no warning."""
        s = """
            def foo():
                A.next = foo
            class A:
                def next(self, a, b):
                    pass
            """
        self.unchanged(s)
    def test_assign_to_next_in_tuple(self):
        """Attribute target A.next inside tuple unpacking: no warning."""
        s = """
            def foo():
                (a, A.next) = foo
            class A:
                def next(self, a, b):
                    pass
            """
        self.unchanged(s)
    def test_assign_to_next_in_list(self):
        """Attribute target A.next inside list unpacking: no warning."""
        s = """
            def foo():
                [a, A.next] = foo
            class A:
                def next(self, a, b):
                    pass
            """
        self.unchanged(s)
    def test_shadowing_import_1(self):
        """``import ... as next`` shadows the builtin and warns."""
        s = """
            import foo.bar as next
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_import_2(self):
        """``as next`` in a multi-name import is detected."""
        s = """
            import bar, bar.foo as next
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_import_3(self):
        """``as next`` in the middle of a multi-name import is detected."""
        s = """
            import bar, bar.foo as next, baz
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_import_from_1(self):
        """``from x import next`` shadows the builtin and warns."""
        s = """
            from x import next
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_import_from_2(self):
        """``from x.a import next`` (dotted module) also warns."""
        s = """
            from x.a import next
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_import_from_3(self):
        """``next`` among several imported names is detected."""
        s = """
            from x import a, next, b
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_import_from_4(self):
        """``next`` among several names from a dotted module is detected."""
        s = """
            from x.a import a, next, b
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_funcdef_1(self):
        """A top-level ``def next(...)`` shadows the builtin and warns."""
        s = """
            def next(a):
                pass
            class A:
                def next(self, a, b):
                    pass
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_funcdef_2(self):
        """Even with shadowing present, the method rename still happens — with a warning."""
        b = """
            def next(a):
                pass
            class A:
                def next(self):
                    pass
            it.next()
            """
        a = """
            def next(a):
                pass
            class A:
                def __next__(self):
                    pass
            it.__next__()
            """
        self.warns(b, a, "Calls to builtin next() possibly shadowed")
    def test_shadowing_global_1(self):
        """``global next`` followed by an assignment warns."""
        s = """
            def f():
                global next
                next = 5
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_global_2(self):
        """``next`` among several names in a global statement warns."""
        s = """
            def f():
                global a, next, b
                next = 5
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_for_simple(self):
        """Using ``next`` as a for-loop target warns."""
        s = """
            for next in it():
                pass
            b = 5
            c = 6
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_for_tuple_1(self):
        """``next`` inside a tuple for-loop target warns."""
        s = """
            for next, b in it():
                pass
            b = 5
            c = 6
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_shadowing_for_tuple_2(self):
        """``next`` nested inside a compound for-loop target warns."""
        s = """
            for a, (next, c), b in it():
                pass
            b = 5
            c = 6
            """
        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
    def test_noncall_access_1(self):
        """Bare attribute access g.next (no call) is renamed to g.__next__."""
        b = """gnext = g.next"""
        a = """gnext = g.__next__"""
        self.check(b, a)
    def test_noncall_access_2(self):
        """Attribute access inside a larger expression is renamed."""
        b = """f(g.next + 5)"""
        a = """f(g.__next__ + 5)"""
        self.check(b, a)
    def test_noncall_access_3(self):
        """Attribute access on a call result, g().next, is renamed."""
        b = """f(g().next + 5)"""
        a = """f(g().__next__ + 5)"""
        self.check(b, a)
class Test_nonzero(FixerTestCase):
    """Tests for the ``nonzero`` fixer: __nonzero__ methods become __bool__."""
    fixer = "nonzero"
    def test_1(self):
        """__nonzero__ on an old-style class is renamed."""
        b = """
            class A:
                def __nonzero__(self):
                    pass
            """
        a = """
            class A:
                def __bool__(self):
                    pass
            """
        self.check(b, a)
    def test_2(self):
        """__nonzero__ on a new-style class is renamed."""
        b = """
            class A(object):
                def __nonzero__(self):
                    pass
            """
        a = """
            class A(object):
                def __bool__(self):
                    pass
            """
        self.check(b, a)
    def test_unchanged_1(self):
        """An existing __bool__ method is left alone."""
        s = """
            class A(object):
                def __bool__(self):
                    pass
            """
        self.unchanged(s)
    def test_unchanged_2(self):
        """A __nonzero__ with extra parameters is not the protocol; left alone."""
        s = """
            class A(object):
                def __nonzero__(self, a):
                    pass
            """
        self.unchanged(s)
    def test_unchanged_func(self):
        """A module-level function named __nonzero__ is not a method; left alone."""
        s = """
            def __nonzero__(self):
                pass
            """
        self.unchanged(s)
class Test_numliterals(FixerTestCase):
    """Tests for the ``numliterals`` fixer.

    Old-style octal literals gain a ``0o`` prefix and the ``L``/``l``
    long-integer suffix is dropped; everything else stays untouched.
    """

    fixer = "numliterals"

    def test_octal_1(self):
        """A bare octal literal is rewritten with the 0o prefix."""
        self.check("0755", "0o755")

    def test_long_int_1(self):
        """An uppercase L suffix is stripped."""
        self.check("a = 12L", "a = 12")

    def test_long_int_2(self):
        """A lowercase l suffix is stripped."""
        self.check("a = 12l", "a = 12")

    def test_long_hex(self):
        """The suffix is stripped from hex literals too."""
        self.check("b = 0x12l", "b = 0x12")

    def test_comments_and_spacing(self):
        """Trailing comments and spacing survive the rewrite."""
        self.check("b = 0x12L", "b = 0x12")
        self.check("b = 0755 # spam", "b = 0o755 # spam")

    def test_unchanged_int(self):
        self.unchanged("5")

    def test_unchanged_float(self):
        self.unchanged("5.0")

    def test_unchanged_octal(self):
        # Already in the new 0o form.
        self.unchanged("0o755")

    def test_unchanged_hex(self):
        self.unchanged("0xABC")

    def test_unchanged_exp(self):
        self.unchanged("5.0e10")

    def test_unchanged_complex_int(self):
        self.unchanged("5 + 4j")

    def test_unchanged_complex_float(self):
        self.unchanged("5.4 + 4.9j")

    def test_unchanged_complex_bare(self):
        self.unchanged("4j")
        self.unchanged("4.4j")
class Test_renames(FixerTestCase):
    """Tests for the ``renames`` fixer: renamed module attributes (e.g. sys.maxint)."""
    fixer = "renames"
    # Mapping of module -> (old attribute name, new attribute name) to test.
    modules = {"sys": ("maxint", "maxsize"),
               }
    def test_import_from(self):
        """``from mod import old`` becomes ``from mod import new``; other modules untouched."""
        for mod, (old, new) in self.modules.items():
            b = "from %s import %s" % (mod, old)
            a = "from %s import %s" % (mod, new)
            self.check(b, a)
            s = "from foo import %s" % old
            self.unchanged(s)
    def test_import_from_as(self):
        """``import old as alias`` renames the source name, keeping the alias."""
        for mod, (old, new) in self.modules.items():
            b = "from %s import %s as foo_bar" % (mod, old)
            a = "from %s import %s as foo_bar" % (mod, new)
            self.check(b, a)
    def test_import_module_usage(self):
        """Attribute access mod.old is rewritten to mod.new."""
        for mod, (old, new) in self.modules.items():
            b = """
                import %s
                foo(%s, %s.%s)
                """ % (mod, mod, mod, old)
            a = """
                import %s
                foo(%s, %s.%s)
                """ % (mod, mod, mod, new)
            self.check(b, a)
    def XXX_test_from_import_usage(self):
        # not implemented yet
        for mod, (old, new) in self.modules.items():
            b = """
                from %s import %s
                foo(%s, %s)
                """ % (mod, old, mod, old)
            a = """
                from %s import %s
                foo(%s, %s)
                """ % (mod, new, mod, new)
            self.check(b, a)
class Test_unicode(FixerTestCase):
    """Tests for the ``unicode`` fixer: unicode()/unichr() calls and u-prefixed literals."""
    fixer = "unicode"
    def test_whitespace(self):
        """Spacing inside and around the call/literal is preserved."""
        b = """unicode( x)"""
        a = """str( x)"""
        self.check(b, a)
        b = """ unicode(x )"""
        a = """ str(x )"""
        self.check(b, a)
        b = """ u'h'"""
        a = """ 'h'"""
        self.check(b, a)
    def test_unicode_call(self):
        """unicode(...) with multiple arguments becomes str(...)."""
        b = """unicode(x, y, z)"""
        a = """str(x, y, z)"""
        self.check(b, a)
    def test_unichr(self):
        """unichr() becomes chr(), and the u prefix inside is dropped too."""
        b = """unichr(u'h')"""
        a = """chr('h')"""
        self.check(b, a)
    def test_unicode_literal_1(self):
        """The u prefix is dropped from string literals."""
        b = '''u"x"'''
        a = '''"x"'''
        self.check(b, a)
    def test_unicode_literal_2(self):
        """A ur prefix keeps the raw-string r."""
        b = """ur'x'"""
        a = """r'x'"""
        self.check(b, a)
    def test_unicode_literal_3(self):
        """Uppercase UR prefix keeps the uppercase R."""
        b = """UR'''x''' """
        a = """R'''x''' """
        self.check(b, a)
class Test_callable(FixerTestCase):
    """Tests for the ``callable`` fixer.

    callable(x) becomes isinstance(x, collections.Callable), inserting an
    ``import collections`` at the top of the module when needed.
    """
    fixer = "callable"
    def test_prefix_preservation(self):
        """Whitespace around the argument survives the rewrite."""
        b = """callable(    x)"""
        a = """import collections\nisinstance(    x, collections.Callable)"""
        self.check(b, a)
        b = """if     callable(x): pass"""
        a = """import collections
if     isinstance(x, collections.Callable): pass"""
        self.check(b, a)
    def test_callable_call(self):
        """The basic one-argument form is rewritten."""
        b = """callable(x)"""
        a = """import collections\nisinstance(x, collections.Callable)"""
        self.check(b, a)
    def test_global_import(self):
        """The collections import is added once, at module scope, and not duplicated."""
        b = """
def spam(foo):
    callable(foo)"""[1:]
        a = """
import collections
def spam(foo):
    isinstance(foo, collections.Callable)"""[1:]
        self.check(b, a)
        b = """
import collections
def spam(foo):
    callable(foo)"""[1:]
        # same output if it was already imported
        self.check(b, a)
        b = """
from collections import *
def spam(foo):
    callable(foo)"""[1:]
        # a star import does not count; an explicit import is still added
        a = """
from collections import *
import collections
def spam(foo):
    isinstance(foo, collections.Callable)"""[1:]
        self.check(b, a)
        b = """
do_stuff()
do_some_other_stuff()
assert callable(do_stuff)"""[1:]
        a = """
import collections
do_stuff()
do_some_other_stuff()
assert isinstance(do_stuff, collections.Callable)"""[1:]
        self.check(b, a)
        b = """
if isinstance(do_stuff, Callable):
    assert callable(do_stuff)
    do_stuff(do_stuff)
    if not callable(do_stuff):
        exit(1)
    else:
        assert callable(do_stuff)
else:
    assert not callable(do_stuff)"""[1:]
        a = """
import collections
if isinstance(do_stuff, Callable):
    assert isinstance(do_stuff, collections.Callable)
    do_stuff(do_stuff)
    if not isinstance(do_stuff, collections.Callable):
        exit(1)
    else:
        assert isinstance(do_stuff, collections.Callable)
else:
    assert not isinstance(do_stuff, collections.Callable)"""[1:]
        self.check(b, a)
    def test_callable_should_not_change(self):
        """Calls with the wrong arity or star/keyword args are left alone."""
        a = """callable(*x)"""
        self.unchanged(a)
        a = """callable(x, y)"""
        self.unchanged(a)
        a = """callable(x, kw=y)"""
        self.unchanged(a)
        a = """callable()"""
        self.unchanged(a)
class Test_filter(FixerTestCase):
    """Tests for the ``filter`` fixer.

    filter(...) results are wrapped in list(...) (or turned into a list
    comprehension for filter(None, ...)/lambda), except in contexts that
    already consume an iterator.
    """
    fixer = "filter"
    def test_prefix_preservation(self):
        """Whitespace inside the call survives the rewrite."""
        b = """x = filter(    foo, 'abc'   )"""
        a = """x = list(filter(    foo, 'abc'   ))"""
        self.check(b, a)
        b = """x = filter(  None , 'abc'  )"""
        a = """x = [_f for _f in 'abc' if _f]"""
        self.check(b, a)
    def test_filter_basic(self):
        """Core rewrites: None predicate, list() wrap, lambda -> comprehension."""
        b = """x = filter(None, 'abc')"""
        a = """x = [_f for _f in 'abc' if _f]"""
        self.check(b, a)
        b = """x = len(filter(f, 'abc'))"""
        a = """x = len(list(filter(f, 'abc')))"""
        self.check(b, a)
        b = """x = filter(lambda x: x%2 == 0, range(10))"""
        a = """x = [x for x in range(10) if x%2 == 0]"""
        self.check(b, a)
        # Note the parens around x
        b = """x = filter(lambda (x): x%2 == 0, range(10))"""
        a = """x = [x for x in range(10) if x%2 == 0]"""
        self.check(b, a)
        # XXX This (rare) case is not supported
##         b = """x = filter(f, 'abc')[0]"""
##         a = """x = list(filter(f, 'abc'))[0]"""
##         self.check(b, a)
    def test_filter_nochange(self):
        """Contexts that already consume an iterator are left alone."""
        a = """b.join(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """(a + foo(5)).join(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """iter(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """list(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """list(filter(f, 'abc'))[0]"""
        self.unchanged(a)
        a = """set(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """set(filter(f, 'abc')).pop()"""
        self.unchanged(a)
        a = """tuple(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """any(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """all(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """sum(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """sorted(filter(f, 'abc'))"""
        self.unchanged(a)
        a = """sorted(filter(f, 'abc'), key=blah)"""
        self.unchanged(a)
        a = """sorted(filter(f, 'abc'), key=blah)[0]"""
        self.unchanged(a)
        a = """for i in filter(f, 'abc'): pass"""
        self.unchanged(a)
        a = """[x for x in filter(f, 'abc')]"""
        self.unchanged(a)
        a = """(x for x in filter(f, 'abc'))"""
        self.unchanged(a)
    def test_future_builtins(self):
        """Importing filter from future_builtins disables the rewrite for it."""
        a = "from future_builtins import spam, filter; filter(f, 'ham')"
        self.unchanged(a)
        b = """from future_builtins import spam; x = filter(f, 'abc')"""
        a = """from future_builtins import spam; x = list(filter(f, 'abc'))"""
        self.check(b, a)
        a = "from future_builtins import *; filter(f, 'ham')"
        self.unchanged(a)
class Test_map(FixerTestCase):
    """Tests for the ``map`` fixer: map(...) wrapped in list(...) where needed."""
    fixer = "map"
    def check(self, b, a):
        # Every pair is additionally checked with a future_builtins import,
        # which must suppress the rewrite.
        # NOTE(review): ``a`` here lands in unchanged()'s second parameter
        # (ignore_warnings); confirm that is intentional and not an
        # accidentally passed extra argument.
        self.unchanged("from future_builtins import map; " + b, a)
        super(Test_map, self).check(b, a)
    def test_prefix_preservation(self):
        """Whitespace inside the call survives the rewrite."""
        b = """x =    map(   f,    'abc'   )"""
        a = """x =    list(map(   f,    'abc'   ))"""
        self.check(b, a)
    def test_trailing_comment(self):
        """A trailing comment stays after the wrapped call."""
        b = """x = map(f, 'abc')   #   foo"""
        a = """x = list(map(f, 'abc'))   #   foo"""
        self.check(b, a)
    def test_None_with_multiple_arguments(self):
        """map(None, a, b, ...) has no simple equivalent; warn and leave it."""
        s = """x = map(None, a, b, c)"""
        self.warns_unchanged(s, "cannot convert map(None, ...) with "
                             "multiple arguments")
    def test_map_basic(self):
        """Core rewrites: list() wrap, map(None, x) -> list(x), lambda -> comprehension."""
        b = """x = map(f, 'abc')"""
        a = """x = list(map(f, 'abc'))"""
        self.check(b, a)
        b = """x = len(map(f, 'abc', 'def'))"""
        a = """x = len(list(map(f, 'abc', 'def')))"""
        self.check(b, a)
        b = """x = map(None, 'abc')"""
        a = """x = list('abc')"""
        self.check(b, a)
        b = """x = map(lambda x: x+1, range(4))"""
        a = """x = [x+1 for x in range(4)]"""
        self.check(b, a)
        # Note the parens around x
        b = """x = map(lambda (x): x+1, range(4))"""
        a = """x = [x+1 for x in range(4)]"""
        self.check(b, a)
        b = """
            foo()
            # foo
            map(f, x)
            """
        a = """
            foo()
            # foo
            list(map(f, x))
            """
        self.warns(b, a, "You should use a for loop here")
        # XXX This (rare) case is not supported
##         b = """x = map(f, 'abc')[0]"""
##         a = """x = list(map(f, 'abc'))[0]"""
##         self.check(b, a)
    def test_map_nochange(self):
        """Contexts that already consume an iterator are left alone."""
        a = """b.join(map(f, 'abc'))"""
        self.unchanged(a)
        a = """(a + foo(5)).join(map(f, 'abc'))"""
        self.unchanged(a)
        a = """iter(map(f, 'abc'))"""
        self.unchanged(a)
        a = """list(map(f, 'abc'))"""
        self.unchanged(a)
        a = """list(map(f, 'abc'))[0]"""
        self.unchanged(a)
        a = """set(map(f, 'abc'))"""
        self.unchanged(a)
        a = """set(map(f, 'abc')).pop()"""
        self.unchanged(a)
        a = """tuple(map(f, 'abc'))"""
        self.unchanged(a)
        a = """any(map(f, 'abc'))"""
        self.unchanged(a)
        a = """all(map(f, 'abc'))"""
        self.unchanged(a)
        a = """sum(map(f, 'abc'))"""
        self.unchanged(a)
        a = """sorted(map(f, 'abc'))"""
        self.unchanged(a)
        a = """sorted(map(f, 'abc'), key=blah)"""
        self.unchanged(a)
        a = """sorted(map(f, 'abc'), key=blah)[0]"""
        self.unchanged(a)
        a = """for i in map(f, 'abc'): pass"""
        self.unchanged(a)
        a = """[x for x in map(f, 'abc')]"""
        self.unchanged(a)
        a = """(x for x in map(f, 'abc'))"""
        self.unchanged(a)
    def test_future_builtins(self):
        """Importing map from future_builtins disables the rewrite for it."""
        a = "from future_builtins import spam, map, eggs; map(f, 'ham')"
        self.unchanged(a)
        b = """from future_builtins import spam, eggs; x = map(f, 'abc')"""
        a = """from future_builtins import spam, eggs; x = list(map(f, 'abc'))"""
        self.check(b, a)
        a = "from future_builtins import *; map(f, 'ham')"
        self.unchanged(a)
class Test_zip(FixerTestCase):
    """Tests for the ``zip`` fixer: zip(...) wrapped in list(...) where needed."""
    fixer = "zip"
    def check(self, b, a):
        # Every pair is additionally checked with a future_builtins import,
        # which must suppress the rewrite.
        # NOTE(review): ``a`` here lands in unchanged()'s second parameter
        # (ignore_warnings); confirm that is intentional and not an
        # accidentally passed extra argument.
        self.unchanged("from future_builtins import zip; " + b, a)
        super(Test_zip, self).check(b, a)
    def test_zip_basic(self):
        """zip() results are wrapped in list()."""
        b = """x = zip(a, b, c)"""
        a = """x = list(zip(a, b, c))"""
        self.check(b, a)
        b = """x = len(zip(a, b))"""
        a = """x = len(list(zip(a, b)))"""
        self.check(b, a)
    def test_zip_nochange(self):
        """Contexts that already consume an iterator are left alone."""
        a = """b.join(zip(a, b))"""
        self.unchanged(a)
        a = """(a + foo(5)).join(zip(a, b))"""
        self.unchanged(a)
        a = """iter(zip(a, b))"""
        self.unchanged(a)
        a = """list(zip(a, b))"""
        self.unchanged(a)
        a = """list(zip(a, b))[0]"""
        self.unchanged(a)
        a = """set(zip(a, b))"""
        self.unchanged(a)
        a = """set(zip(a, b)).pop()"""
        self.unchanged(a)
        a = """tuple(zip(a, b))"""
        self.unchanged(a)
        a = """any(zip(a, b))"""
        self.unchanged(a)
        a = """all(zip(a, b))"""
        self.unchanged(a)
        a = """sum(zip(a, b))"""
        self.unchanged(a)
        a = """sorted(zip(a, b))"""
        self.unchanged(a)
        a = """sorted(zip(a, b), key=blah)"""
        self.unchanged(a)
        a = """sorted(zip(a, b), key=blah)[0]"""
        self.unchanged(a)
        a = """for i in zip(a, b): pass"""
        self.unchanged(a)
        a = """[x for x in zip(a, b)]"""
        self.unchanged(a)
        a = """(x for x in zip(a, b))"""
        self.unchanged(a)
    def test_future_builtins(self):
        """Importing zip from future_builtins disables the rewrite for it."""
        a = "from future_builtins import spam, zip, eggs; zip(a, b)"
        self.unchanged(a)
        b = """from future_builtins import spam, eggs; x = zip(a, b)"""
        a = """from future_builtins import spam, eggs; x = list(zip(a, b))"""
        self.check(b, a)
        a = "from future_builtins import *; zip(a, b)"
        self.unchanged(a)
class Test_standarderror(FixerTestCase):
    """Tests for the ``standarderror`` fixer: StandardError becomes Exception."""

    fixer = "standarderror"

    def test(self):
        """StandardError is renamed in assignments, calls, and sub-expressions."""
        pairs = [
            ("x = StandardError()", "x = Exception()"),
            ("x = StandardError(a, b, c)", "x = Exception(a, b, c)"),
            ("f(2 + StandardError(a, b, c))", "f(2 + Exception(a, b, c))"),
        ]
        for before, after in pairs:
            self.check(before, after)
class Test_types(FixerTestCase):
    """Tests for the ``types`` fixer: types.XxxType aliases become builtins."""

    fixer = "types"

    def test_basic_types_convert(self):
        """Each deprecated types alias maps to its builtin replacement."""
        pairs = [
            ("types.StringType", "bytes"),
            ("types.DictType", "dict"),
            ("types . IntType", "int"),  # spacing around the dot is tolerated
            ("types.ListType", "list"),
            ("types.LongType", "int"),
            ("types.NoneType", "type(None)"),
        ]
        for before, after in pairs:
            self.check(before, after)
class Test_idioms(FixerTestCase):
    """Tests for the optional ``idioms`` fixer.

    Covers: ``while 1`` -> ``while True``, type-comparison idioms ->
    isinstance(), and the list()-then-sort() pattern -> sorted().
    """
    fixer = "idioms"
    def test_while(self):
        """``while 1`` becomes ``while True`` in simple and block statements."""
        b = """while 1: foo()"""
        a = """while True: foo()"""
        self.check(b, a)
        # NOTE(review): this pair duplicates the previous check.
        b = """while   1: foo()"""
        a = """while   True: foo()"""
        self.check(b, a)
        b = """
            while 1:
                foo()
            """
        a = """
            while True:
                foo()
            """
        self.check(b, a)
    def test_while_unchanged(self):
        """Only the literal 1 is rewritten; other conditions are left alone."""
        s = """while 11: foo()"""
        self.unchanged(s)
        s = """while 0: foo()"""
        self.unchanged(s)
        s = """while foo(): foo()"""
        self.unchanged(s)
        s = """while []: foo()"""
        self.unchanged(s)
    def test_eq_simple(self):
        """``type(x) == T`` becomes isinstance(x, T)."""
        b = """type(x) == T"""
        a = """isinstance(x, T)"""
        self.check(b, a)
        b = """if   type(x) == T: pass"""
        a = """if   isinstance(x, T): pass"""
        self.check(b, a)
    def test_eq_reverse(self):
        """``T == type(x)`` is also recognized."""
        b = """T == type(x)"""
        a = """isinstance(x, T)"""
        self.check(b, a)
        b = """if   T == type(x): pass"""
        a = """if   isinstance(x, T): pass"""
        self.check(b, a)
    def test_eq_expression(self):
        """Both operands may be arbitrary expressions."""
        b = """type(x+y) == d.get('T')"""
        a = """isinstance(x+y, d.get('T'))"""
        self.check(b, a)
        b = """type(   x  +  y) == d.get('T')"""
        a = """isinstance(x  +  y, d.get('T'))"""
        self.check(b, a)
    def test_is_simple(self):
        """``type(x) is T`` becomes isinstance(x, T)."""
        b = """type(x) is T"""
        a = """isinstance(x, T)"""
        self.check(b, a)
        b = """if   type(x) is T: pass"""
        a = """if   isinstance(x, T): pass"""
        self.check(b, a)
    def test_is_reverse(self):
        """``T is type(x)`` is also recognized."""
        b = """T is type(x)"""
        a = """isinstance(x, T)"""
        self.check(b, a)
        b = """if   T is type(x): pass"""
        a = """if   isinstance(x, T): pass"""
        self.check(b, a)
    def test_is_expression(self):
        """Both operands may be arbitrary expressions."""
        b = """type(x+y) is d.get('T')"""
        a = """isinstance(x+y, d.get('T'))"""
        self.check(b, a)
        b = """type(   x  +  y) is d.get('T')"""
        a = """isinstance(x  +  y, d.get('T'))"""
        self.check(b, a)
    def test_is_not_simple(self):
        """``type(x) is not T`` becomes ``not isinstance(x, T)``."""
        b = """type(x) is not T"""
        a = """not isinstance(x, T)"""
        self.check(b, a)
        b = """if   type(x) is not T: pass"""
        a = """if   not isinstance(x, T): pass"""
        self.check(b, a)
    def test_is_not_reverse(self):
        """``T is not type(x)`` is also recognized."""
        b = """T is not type(x)"""
        a = """not isinstance(x, T)"""
        self.check(b, a)
        b = """if   T is not type(x): pass"""
        a = """if   not isinstance(x, T): pass"""
        self.check(b, a)
    def test_is_not_expression(self):
        """Both operands may be arbitrary expressions."""
        b = """type(x+y) is not d.get('T')"""
        a = """not isinstance(x+y, d.get('T'))"""
        self.check(b, a)
        b = """type(   x  +  y) is not d.get('T')"""
        a = """not isinstance(x  +  y, d.get('T'))"""
        self.check(b, a)
    def test_ne_simple(self):
        """``type(x) != T`` becomes ``not isinstance(x, T)``."""
        b = """type(x) != T"""
        a = """not isinstance(x, T)"""
        self.check(b, a)
        b = """if   type(x) != T: pass"""
        a = """if   not isinstance(x, T): pass"""
        self.check(b, a)
    def test_ne_reverse(self):
        """``T != type(x)`` is also recognized."""
        b = """T != type(x)"""
        a = """not isinstance(x, T)"""
        self.check(b, a)
        b = """if   T != type(x): pass"""
        a = """if   not isinstance(x, T): pass"""
        self.check(b, a)
    def test_ne_expression(self):
        """Both operands may be arbitrary expressions."""
        b = """type(x+y) != d.get('T')"""
        a = """not isinstance(x+y, d.get('T'))"""
        self.check(b, a)
        b = """type(   x  +  y) != d.get('T')"""
        a = """not isinstance(x  +  y, d.get('T'))"""
        self.check(b, a)
    def test_type_unchanged(self):
        """type() used for anything but comparison is left alone."""
        a = """type(x).__name__"""
        self.unchanged(a)
    def test_sort_list_call(self):
        """``v = list(t); v.sort()`` collapses into ``v = sorted(t)``."""
        b = """
            v = list(t)
            v.sort()
            foo(v)
            """
        a = """
            v = sorted(t)
            foo(v)
            """
        self.check(b, a)
        b = """
            v = list(foo(b) + d)
            v.sort()
            foo(v)
            """
        a = """
            v = sorted(foo(b) + d)
            foo(v)
            """
        self.check(b, a)
        b = """
            while x:
                v = list(t)
                v.sort()
                foo(v)
            """
        a = """
            while x:
                v = sorted(t)
                foo(v)
            """
        self.check(b, a)
        b = """
            v = list(t)
            # foo
            v.sort()
            foo(v)
            """
        a = """
            v = sorted(t)
            # foo
            foo(v)
            """
        self.check(b, a)
        b = r"""
            v = list(   t)
            v.sort()
            foo(v)
            """
        a = r"""
            v = sorted(   t)
            foo(v)
            """
        self.check(b, a)
        b = r"""
            try:
                m = list(s)
                m.sort()
            except: pass
            """
        a = r"""
            try:
                m = sorted(s)
            except: pass
            """
        self.check(b, a)
        b = r"""
            try:
                m = list(s)
                # foo
                m.sort()
            except: pass
            """
        a = r"""
            try:
                m = sorted(s)
                # foo
            except: pass
            """
        self.check(b, a)
        b = r"""
            m = list(s)
            # more comments
            m.sort()"""
        a = r"""
            m = sorted(s)
            # more comments"""
        self.check(b, a)
    def test_sort_simple_expr(self):
        """``v = expr; v.sort()`` collapses into ``v = sorted(expr)``."""
        b = """
            v = t
            v.sort()
            foo(v)
            """
        a = """
            v = sorted(t)
            foo(v)
            """
        self.check(b, a)
        b = """
            v = foo(b)
            v.sort()
            foo(v)
            """
        a = """
            v = sorted(foo(b))
            foo(v)
            """
        self.check(b, a)
        b = """
            v = b.keys()
            v.sort()
            foo(v)
            """
        a = """
            v = sorted(b.keys())
            foo(v)
            """
        self.check(b, a)
        b = """
            v = foo(b) + d
            v.sort()
            foo(v)
            """
        a = """
            v = sorted(foo(b) + d)
            foo(v)
            """
        self.check(b, a)
        b = """
            while x:
                v = t
                v.sort()
                foo(v)
            """
        a = """
            while x:
                v = sorted(t)
                foo(v)
            """
        self.check(b, a)
        b = """
            v = t
            # foo
            v.sort()
            foo(v)
            """
        a = """
            v = sorted(t)
            # foo
            foo(v)
            """
        self.check(b, a)
        b = r"""
            v =   t
            v.sort()
            foo(v)
            """
        a = r"""
            v =   sorted(t)
            foo(v)
            """
        self.check(b, a)
    def test_sort_unchanged(self):
        """Different variables or sort() arguments prevent the rewrite."""
        s = """
            v = list(t)
            w.sort()
            foo(w)
            """
        self.unchanged(s)
        s = """
            v = list(t)
            v.sort(u)
            foo(v)
            """
        self.unchanged(s)
class Test_basestring(FixerTestCase):
    """Tests for the ``basestring`` fixer: basestring becomes str."""

    fixer = "basestring"

    def test_basestring(self):
        """basestring is renamed wherever it appears as a name."""
        self.check("isinstance(x, basestring)", "isinstance(x, str)")
class Test_buffer(FixerTestCase):
    """Tests for the ``buffer`` fixer: buffer() calls become memoryview()."""

    fixer = "buffer"

    def test_buffer(self):
        """A plain buffer() call is renamed."""
        self.check("x = buffer(y)", "x = memoryview(y)")

    def test_slicing(self):
        """The rename also applies when the result is sliced."""
        self.check("buffer(y)[4:5]", "memoryview(y)[4:5]")
class Test_future(FixerTestCase):
    """Tests for the ``future`` fixer, which deletes __future__ imports."""

    fixer = "future"

    def test_future(self):
        """The import statement is removed; neighbouring comments survive."""
        cases = [
            ("from __future__ import braces", ""),
            ("# comment\nfrom __future__ import braces", "# comment\n"),
            ("from __future__ import braces\n# comment", "\n# comment"),
        ]
        for before, after in cases:
            self.check(before, after)

    def test_run_order(self):
        """This fixer must run after the ``print`` fixer."""
        self.assert_runs_after('print')
class Test_itertools(FixerTestCase):
    """Tests for the ``itertools`` fixer: imap/ifilter/izip lose their i prefix."""
    fixer = "itertools"
    def checkall(self, before, after):
        # Because we need to check with and without the itertools prefix
        # and on each of the three functions, these loops make it all
        # much easier
        for i in ('itertools.', ''):
            for f in ('map', 'filter', 'zip'):
                b = before %(i+'i'+f)
                a = after %(f)
                self.check(b, a)
    def test_0(self):
        # A simple example -- test_1 covers exactly the same thing,
        # but it's not quite as clear.
        b = "itertools.izip(a, b)"
        a = "zip(a, b)"
        self.check(b, a)
    def test_1(self):
        """i-prefixed calls become the bare builtin, dropping any itertools prefix."""
        b = """%s(f, a)"""
        a = """%s(f, a)"""
        self.checkall(b, a)
    def test_qualified(self):
        """ifilterfalse/izip_longest keep the itertools prefix but lose the i."""
        b = """itertools.ifilterfalse(a, b)"""
        a = """itertools.filterfalse(a, b)"""
        self.check(b, a)
        b = """itertools.izip_longest(a, b)"""
        a = """itertools.zip_longest(a, b)"""
        self.check(b, a)
    def test_2(self):
        """Unqualified ifilterfalse/izip_longest also lose the i."""
        b = """ifilterfalse(a, b)"""
        a = """filterfalse(a, b)"""
        self.check(b, a)
        b = """izip_longest(a, b)"""
        a = """zip_longest(a, b)"""
        self.check(b, a)
    def test_space_1(self):
        """Leading whitespace before the call is preserved."""
        b = """    %s(f, a)"""
        a = """    %s(f, a)"""
        self.checkall(b, a)
    def test_space_2(self):
        """Leading whitespace is preserved for the qualified forms too."""
        b = """    itertools.ifilterfalse(a, b)"""
        a = """    itertools.filterfalse(a, b)"""
        self.check(b, a)
        b = """    itertools.izip_longest(a, b)"""
        a = """    itertools.zip_longest(a, b)"""
        self.check(b, a)
    def test_run_order(self):
        """This fixer must run after the map, zip, and filter fixers."""
        self.assert_runs_after('map', 'zip', 'filter')
class Test_itertools_imports(FixerTestCase):
    """Tests for the ``itertools_imports`` fixer: drop imap/izip from imports."""
    fixer = 'itertools_imports'
    def test_reduced(self):
        """Removed names are dropped; remaining names keep their order."""
        b = "from itertools import imap, izip, foo"
        a = "from itertools import foo"
        self.check(b, a)
        b = "from itertools import bar, imap, izip, foo"
        a = "from itertools import bar, foo"
        self.check(b, a)
        b = "from itertools import chain, imap, izip"
        a = "from itertools import chain"
        self.check(b, a)
    def test_comments(self):
        """A preceding comment survives even when the whole import is removed."""
        b = "#foo\nfrom itertools import imap, izip"
        a = "#foo\n"
        self.check(b, a)
    def test_none(self):
        """An import consisting only of removed names disappears entirely."""
        b = "from itertools import imap, izip"
        a = ""
        self.check(b, a)
        b = "from itertools import izip"
        a = ""
        self.check(b, a)
    def test_import_as(self):
        """Aliased removed names are dropped; other aliases are kept."""
        b = "from itertools import izip, bar as bang, imap"
        a = "from itertools import bar as bang"
        self.check(b, a)
        b = "from itertools import izip as _zip, imap, bar"
        a = "from itertools import bar"
        self.check(b, a)
        b = "from itertools import imap as _map"
        a = ""
        self.check(b, a)
        b = "from itertools import imap as _map, izip as _zip"
        a = ""
        self.check(b, a)
        s = "from itertools import bar as bang"
        self.unchanged(s)
    def test_ifilter_and_zip_longest(self):
        """ifilterfalse/izip_longest are renamed in the import list, not removed."""
        for name in "filterfalse", "zip_longest":
            b = "from itertools import i%s" % (name,)
            a = "from itertools import %s" % (name,)
            self.check(b, a)
            b = "from itertools import imap, i%s, foo" % (name,)
            a = "from itertools import %s, foo" % (name,)
            self.check(b, a)
            b = "from itertools import bar, i%s, foo" % (name,)
            a = "from itertools import bar, %s, foo" % (name,)
            self.check(b, a)
    def test_import_star(self):
        """Star imports are left alone."""
        s = "from itertools import *"
        self.unchanged(s)
    def test_unchanged(self):
        """Imports without any removed/renamed names are left alone."""
        s = "from itertools import foo"
        self.unchanged(s)
class Test_import(FixerTestCase):
    """Tests for the ``import`` fixer, which turns implicit relative imports
    into explicit ones (``import bar`` -> ``from . import bar``).

    fix_import probes the filesystem to decide whether a name is a sibling
    module; these tests stub out its ``exists`` hook so no real filesystem
    access happens.
    """
    fixer = "import"
    def setUp(self):
        super(Test_import, self).setUp()
        # Need to replace fix_import's exists method
        # so we can check that it's doing the right thing
        self.files_checked = []    # every path the fixer probed
        self.present_files = set() # paths fake_exists reports as present
        self.always_exists = True  # short-circuit: pretend everything exists
        def fake_exists(name):
            self.files_checked.append(name)
            return self.always_exists or (name in self.present_files)
        from lib2to3.fixes import fix_import
        fix_import.exists = fake_exists
    def tearDown(self):
        # Restore the real os.path.exists hook for other tests.
        from lib2to3.fixes import fix_import
        fix_import.exists = os.path.exists
    def check_both(self, b, a):
        # The rewrite must happen when the sibling module "exists" and must
        # be suppressed when it does not.
        self.always_exists = True
        super(Test_import, self).check(b, a)
        self.always_exists = False
        super(Test_import, self).unchanged(b)
    def test_files_checked(self):
        """The fixer probes __init__.py plus every candidate extension."""
        def p(path):
            # Takes a unix path and returns a path with correct separators
            # NOTE(review): os.path.pathsep is the PATH-*list* separator
            # (':' or ';'), not the path-component separator (os.path.sep).
            # This mangles the directory part, so os.path.dirname() below
            # always returns '' and the dirname branch is never exercised.
            # Fixing it would also require adjusting present_files and the
            # expected_checks construction — flagged rather than changed here.
            return os.path.pathsep.join(path.split("/"))
        self.always_exists = False
        self.present_files = set(['__init__.py'])
        expected_extensions = ('.py', os.path.sep, '.pyc', '.so', '.sl', '.pyd')
        names_to_test = (p("/spam/eggs.py"), "ni.py", p("../../shrubbery.py"))
        for name in names_to_test:
            self.files_checked = []
            self.filename = name
            self.unchanged("import jam")
            if os.path.dirname(name):
                name = os.path.dirname(name) + '/jam'
            else:
                name = 'jam'
            expected_checks = set(name + ext for ext in expected_extensions)
            expected_checks.add("__init__.py")
            self.assertEqual(set(self.files_checked), expected_checks)
    def test_not_in_package(self):
        """No __init__.py next to the file means no package — no rewrite."""
        s = "import bar"
        self.always_exists = False
        self.present_files = set(["bar.py"])
        self.unchanged(s)
    def test_with_absolute_import_enabled(self):
        """``from __future__ import absolute_import`` disables the rewrite."""
        s = "from __future__ import absolute_import\nimport bar"
        self.always_exists = False
        self.present_files = set(["__init__.py", "bar.py"])
        self.unchanged(s)
    def test_in_package(self):
        """A sibling module inside a package is rewritten to a relative import."""
        b = "import bar"
        a = "from . import bar"
        self.always_exists = False
        self.present_files = set(["__init__.py", "bar.py"])
        self.check(b, a)
    def test_import_from_package(self):
        """A sibling *package* (directory) is also rewritten."""
        b = "import bar"
        a = "from . import bar"
        self.always_exists = False
        self.present_files = set(["__init__.py", "bar" + os.path.sep])
        self.check(b, a)
    def test_already_relative_import(self):
        """Already-relative imports are left alone."""
        s = "from . import bar"
        self.unchanged(s)
    def test_comments_and_indent(self):
        """Trailing comments survive the rewrite."""
        b = "import bar # Foo"
        a = "from . import bar # Foo"
        self.check(b, a)
    def test_from(self):
        """``from foo import ...`` gains a leading dot."""
        b = "from foo import bar, baz"
        a = "from .foo import bar, baz"
        self.check_both(b, a)
        b = "from foo import bar"
        a = "from .foo import bar"
        self.check_both(b, a)
        b = "from foo import (bar, baz)"
        a = "from .foo import (bar, baz)"
        self.check_both(b, a)
    def test_dotted_from(self):
        """Dotted source modules gain a leading dot too."""
        b = "from green.eggs import ham"
        a = "from .green.eggs import ham"
        self.check_both(b, a)
    def test_from_as(self):
        """Aliased from-imports are rewritten the same way."""
        b = "from green.eggs import ham as spam"
        a = "from .green.eggs import ham as spam"
        self.check_both(b, a)
    def test_import(self):
        """Plain imports become ``from . import ...``, including multi-name."""
        b = "import foo"
        a = "from . import foo"
        self.check_both(b, a)
        b = "import foo, bar"
        a = "from . import foo, bar"
        self.check_both(b, a)
        b = "import foo, bar, x"
        a = "from . import foo, bar, x"
        self.check_both(b, a)
        b = "import x, y, z"
        a = "from . import x, y, z"
        self.check_both(b, a)
    def test_import_as(self):
        """Aliased plain imports are rewritten, keeping the aliases."""
        b = "import foo as x"
        a = "from . import foo as x"
        self.check_both(b, a)
        b = "import a as b, b as c, c as d"
        a = "from . import a as b, b as c, c as d"
        self.check_both(b, a)
    def test_local_and_absolute(self):
        """A mix of local and absolute names in one import warns and is left alone."""
        self.always_exists = False
        self.present_files = set(["foo.py", "__init__.py"])
        s = "import foo, bar"
        self.warns_unchanged(s, "absolute and local imports together")
    def test_dotted_import(self):
        """Dotted plain imports are rewritten."""
        b = "import foo.bar"
        a = "from . import foo.bar"
        self.check_both(b, a)
    def test_dotted_import_as(self):
        """Aliased dotted imports are rewritten."""
        b = "import foo.bar as bang"
        a = "from . import foo.bar as bang"
        self.check_both(b, a)
    def test_prefix(self):
        """A comment before the import stays in place."""
        b = """
        # prefix
        import foo.bar
        """
        a = """
        # prefix
        from . import foo.bar
        """
        self.check_both(b, a)
class Test_set_literal(FixerTestCase):
    """Tests for the optional ``set_literal`` fixer: set([...])/set((...)) -> {...}."""
    fixer = "set_literal"
    def test_basic(self):
        """List and tuple arguments to set() become set literals."""
        b = """set([1, 2, 3])"""
        a = """{1, 2, 3}"""
        self.check(b, a)
        b = """set((1, 2, 3))"""
        a = """{1, 2, 3}"""
        self.check(b, a)
        b = """set((1,))"""
        a = """{1}"""
        self.check(b, a)
        b = """set([1])"""
        # reuses a = """{1}""" from the previous pair
        self.check(b, a)
        b = """set((a, b))"""
        a = """{a, b}"""
        self.check(b, a)
        b = """set([a, b])"""
        # reuses a = """{a, b}""" from the previous pair
        self.check(b, a)
        b = """set((a*234, f(args=23)))"""
        a = """{a*234, f(args=23)}"""
        self.check(b, a)
        b = """set([a*23, f(23)])"""
        a = """{a*23, f(23)}"""
        self.check(b, a)
        b = """set([a-234**23])"""
        a = """{a-234**23}"""
        self.check(b, a)
    def test_listcomps(self):
        """set() of a list comprehension becomes a set comprehension."""
        b = """set([x for x in y])"""
        a = """{x for x in y}"""
        self.check(b, a)
        b = """set([x for x in y if x == m])"""
        a = """{x for x in y if x == m}"""
        self.check(b, a)
        b = """set([x for x in y for a in b])"""
        a = """{x for x in y for a in b}"""
        self.check(b, a)
        b = """set([f(x) - 23 for x in y])"""
        a = """{f(x) - 23 for x in y}"""
        self.check(b, a)
    def test_whitespace(self):
        """Interior whitespace of the argument is preserved in the literal."""
        b = """set( [1, 2])"""
        a = """{1, 2}"""
        self.check(b, a)
        b = """set([1 ,  2])"""
        a = """{1 ,  2}"""
        self.check(b, a)
        b = """set([ 1 ])"""
        a = """{ 1 }"""
        self.check(b, a)
        b = """set( [1] )"""
        a = """{1}"""
        self.check(b, a)
        b = """set([  1, 2  ])"""
        a = """{  1, 2  }"""
        self.check(b, a)
        b = """set([x for x in y ])"""
        a = """{x for x in y }"""
        self.check(b, a)
        b = """set(
                   [1, 2]
               )
               """
        a = """{1, 2}\n"""
        self.check(b, a)
    def test_comments(self):
        """Trailing comments survive; interior comments may be dropped."""
        b = """set((1, 2)) # Hi"""
        a = """{1, 2} # Hi"""
        self.check(b, a)
        # This isn't optimal behavior, but the fixer is optional.
        b = """
            # Foo
            set( # Bar
               (1, 2)
            )
            """
        a = """
            # Foo
            {1, 2}
            """
        self.check(b, a)
    def test_unchanged(self):
        """Non-literal arguments and generator expressions are left alone."""
        s = """set()"""
        self.unchanged(s)
        s = """set(a)"""
        self.unchanged(s)
        s = """set(a, b, c)"""
        self.unchanged(s)
        # Don't transform generators because they might have to be lazy.
        s = """set(x for x in y)"""
        self.unchanged(s)
        s = """set(x for x in y if z)"""
        self.unchanged(s)
        s = """set(a*823-23**2 + f(23))"""
        self.unchanged(s)
class Test_sys_exc(FixerTestCase):
    """Tests for the ``sys_exc`` fixer.

    The deprecated sys.exc_type / sys.exc_value / sys.exc_traceback
    attributes become the matching sys.exc_info() subscripts.
    """

    fixer = "sys_exc"

    def test_0(self):
        self.check("sys.exc_type", "sys.exc_info()[0]")

    def test_1(self):
        self.check("sys.exc_value", "sys.exc_info()[1]")

    def test_2(self):
        self.check("sys.exc_traceback", "sys.exc_info()[2]")

    def test_3(self):
        # A trailing comment is kept in place.
        self.check("sys.exc_type # Foo", "sys.exc_info()[0] # Foo")

    def test_4(self):
        # Whitespace after the dot is preserved.
        self.check("sys.  exc_type", "sys.  exc_info()[0]")

    def test_5(self):
        # Whitespace before the dot is preserved.
        self.check("sys  .exc_type", "sys  .exc_info()[0]")
class Test_paren(FixerTestCase):
    """Tests for the ``paren`` fixer: bare tuples in comprehension iterables
    get parenthesized (``for i in 1, 2`` -> ``for i in (1, 2)``)."""
    fixer = "paren"
    def test_0(self):
        """A bare tuple in a list comprehension is parenthesized."""
        b = """[i for i in 1, 2 ]"""
        a = """[i for i in (1, 2) ]"""
        self.check(b, a)
    def test_1(self):
        """A trailing comma is kept inside the new parentheses."""
        b = """[i for i in 1, 2, ]"""
        a = """[i for i in (1, 2,) ]"""
        self.check(b, a)
    def test_2(self):
        """Extra whitespace variant of test_0."""
        b = """[i for i  in 1, 2 ]"""
        a = """[i for i  in (1, 2) ]"""
        self.check(b, a)
    def test_3(self):
        """Parenthesization also happens when an ``if`` clause follows."""
        b = """[i for i in 1, 2 if i]"""
        a = """[i for i in (1, 2) if i]"""
        self.check(b, a)
    def test_4(self):
        """Another whitespace variant of test_0."""
        b = """[i  for i in 1,    2 ]"""
        a = """[i  for i in (1,    2) ]"""
        self.check(b, a)
    def test_5(self):
        """Generator expressions get the same treatment."""
        b = """(i for i in 1, 2)"""
        a = """(i for i in (1, 2))"""
        self.check(b, a)
    def test_6(self):
        """Generator expression with an ``if`` clause."""
        b = """(i for i in 1   ,2 if i)"""
        a = """(i for i in (1   ,2) if i)"""
        self.check(b, a)
    def test_unchanged_0(self):
        """An already-parenthesized tuple is left alone."""
        s = """[i for i in (1, 2)]"""
        self.unchanged(s)
    def test_unchanged_1(self):
        """A call expression iterable is left alone."""
        s = """[i for i in foo()]"""
        self.unchanged(s)
    def test_unchanged_2(self):
        """Parenthesized tuple with an ``if`` clause is left alone."""
        s = """[i for i in (1, 2) if nothing]"""
        self.unchanged(s)
    def test_unchanged_3(self):
        """A parenthesized generator iterable is left alone."""
        s = """(i for i in (1, 2))"""
        self.unchanged(s)
    def test_unchanged_4(self):
        """A simple name iterable is left alone."""
        s = """[i for i in m]"""
        self.unchanged(s)
class Test_metaclass(FixerTestCase):
    """Tests for the ``metaclass`` fixer, which moves a class-body
    ``__metaclass__ = M`` assignment into a ``metaclass=M`` keyword
    argument on the class statement (the Python 3 spelling).

    NOTE(review): the multi-line string fixtures appear to have lost
    their internal indentation in extraction — kept byte-identical here.
    """

    fixer = 'metaclass'

    def test_unchanged(self):
        # No __metaclass__ assignment, or already the keyword form:
        # the fixer must not touch these.
        self.unchanged("class X(): pass")
        self.unchanged("class X(object): pass")
        self.unchanged("class X(object1, object2): pass")
        self.unchanged("class X(object1, object2, object3): pass")
        self.unchanged("class X(metaclass=Meta): pass")
        self.unchanged("class X(b, arg=23, metclass=Meta): pass")
        self.unchanged("class X(b, arg=23, metaclass=Meta, other=42): pass")

        # A __metaclass__ *method* is not the assignment form.
        s = """
class X:
def __metaclass__(self): pass
"""
        self.unchanged(s)

        # An unrelated subscript assignment is left alone.
        s = """
class X:
a[23] = 74
"""
        self.unchanged(s)

    def test_comments(self):
        b = """
class X:
# hi
__metaclass__ = AppleMeta
"""
        a = """
class X(metaclass=AppleMeta):
# hi
pass
"""
        self.check(b, a)

        b = """
class X:
__metaclass__ = Meta
# Bedtime!
"""
        a = """
class X(metaclass=Meta):
pass
# Bedtime!
"""
        self.check(b, a)

    def test_meta(self):
        # no-parent class, odd body
        b = """
class X():
__metaclass__ = Q
pass
"""
        a = """
class X(metaclass=Q):
pass
"""
        self.check(b, a)

        # one parent class, no body
        b = """class X(object): __metaclass__ = Q"""
        a = """class X(object, metaclass=Q): pass"""
        self.check(b, a)

        # one parent, simple body
        b = """
class X(object):
__metaclass__ = Meta
bar = 7
"""
        a = """
class X(object, metaclass=Meta):
bar = 7
"""
        self.check(b, a)

        b = """
class X:
__metaclass__ = Meta; x = 4; g = 23
"""
        a = """
class X(metaclass=Meta):
x = 4; g = 23
"""
        self.check(b, a)

        # one parent, simple body, __metaclass__ last
        b = """
class X(object):
bar = 7
__metaclass__ = Meta
"""
        a = """
class X(object, metaclass=Meta):
bar = 7
"""
        self.check(b, a)

        # redefining __metaclass__
        b = """
class X():
__metaclass__ = A
__metaclass__ = B
bar = 7
"""
        a = """
class X(metaclass=B):
bar = 7
"""
        self.check(b, a)

        # multiple inheritance, simple body
        b = """
class X(clsA, clsB):
__metaclass__ = Meta
bar = 7
"""
        a = """
class X(clsA, clsB, metaclass=Meta):
bar = 7
"""
        self.check(b, a)

        # keywords in the class statement
        b = """class m(a, arg=23): __metaclass__ = Meta"""
        a = """class m(a, arg=23, metaclass=Meta): pass"""
        self.check(b, a)

        b = """
class X(expression(2 + 4)):
__metaclass__ = Meta
"""
        a = """
class X(expression(2 + 4), metaclass=Meta):
pass
"""
        self.check(b, a)

        b = """
class X(expression(2 + 4), x**4):
__metaclass__ = Meta
"""
        a = """
class X(expression(2 + 4), x**4, metaclass=Meta):
pass
"""
        self.check(b, a)

        b = """
class X:
__metaclass__ = Meta
save.py = 23
"""
        a = """
class X(metaclass=Meta):
save.py = 23
"""
        self.check(b, a)
class Test_getcwdu(FixerTestCase):
    """Tests for the ``getcwdu`` fixer: every ``os.getcwdu`` reference
    is renamed to ``os.getcwd``."""

    fixer = 'getcwdu'

    def test_basic(self):
        # Plain references, calls, aliasing, and calls with args all rename.
        cases = [
            ("""os.getcwdu""", """os.getcwd"""),
            ("""os.getcwdu()""", """os.getcwd()"""),
            ("""meth = os.getcwdu""", """meth = os.getcwd"""),
            ("""os.getcwdu(args)""", """os.getcwd(args)"""),
        ]
        for before, after in cases:
            self.check(before, after)

    def test_comment(self):
        # Trailing comments survive the rename.
        self.check("""os.getcwdu() # Foo""", """os.getcwd() # Foo""")

    def test_unchanged(self):
        # Already-correct, bare, and bytes variants are left alone.
        for source in ("""os.getcwd()""", """getcwdu()""", """os.getcwdb()"""):
            self.unchanged(source)

    def test_indentation(self):
        # Call sites inside a suite keep their surrounding layout.
        before = """
if 1:
os.getcwdu()
"""
        after = """
if 1:
os.getcwd()
"""
        self.check(before, after)

    def test_multilation(self):
        # Odd whitespace around the attribute access is preserved.
        for before, after in [
            ("""os .getcwdu()""", """os .getcwd()"""),
            ("""os. getcwdu""", """os. getcwd"""),
            ("""os.getcwdu ( )""", """os.getcwd ( )"""),
        ]:
            self.check(before, after)
class Test_operator(FixerTestCase):
    """Tests for the ``operator`` fixer, which rewrites removed
    ``operator`` helpers (isCallable, sequenceIncludes, is*Type,
    repeat/irepeat) to modern equivalents, and only *warns* for the
    bare (unqualified) names since their origin cannot be proven."""

    fixer = "operator"

    def test_operator_isCallable(self):
        b = "operator.isCallable(x)"
        a = "hasattr(x, '__call__')"
        self.check(b, a)

    def test_operator_sequenceIncludes(self):
        b = "operator.sequenceIncludes(x, y)"
        a = "operator.contains(x, y)"
        self.check(b, a)

        # Whitespace around the attribute dot must be preserved.
        b = "operator .sequenceIncludes(x, y)"
        a = "operator .contains(x, y)"
        self.check(b, a)

        b = "operator. sequenceIncludes(x, y)"
        a = "operator. contains(x, y)"
        self.check(b, a)

    def test_operator_isSequenceType(self):
        # Rewrites to isinstance() and inserts the needed import.
        b = "operator.isSequenceType(x)"
        a = "import collections\nisinstance(x, collections.Sequence)"
        self.check(b, a)

    def test_operator_isMappingType(self):
        b = "operator.isMappingType(x)"
        a = "import collections\nisinstance(x, collections.Mapping)"
        self.check(b, a)

    def test_operator_isNumberType(self):
        b = "operator.isNumberType(x)"
        a = "import numbers\nisinstance(x, numbers.Number)"
        self.check(b, a)

    def test_operator_repeat(self):
        b = "operator.repeat(x, n)"
        a = "operator.mul(x, n)"
        self.check(b, a)

        b = "operator .repeat(x, n)"
        a = "operator .mul(x, n)"
        self.check(b, a)

        b = "operator. repeat(x, n)"
        a = "operator. mul(x, n)"
        self.check(b, a)

    def test_operator_irepeat(self):
        b = "operator.irepeat(x, n)"
        a = "operator.imul(x, n)"
        self.check(b, a)

        b = "operator .irepeat(x, n)"
        a = "operator .imul(x, n)"
        self.check(b, a)

        b = "operator. irepeat(x, n)"
        a = "operator. imul(x, n)"
        self.check(b, a)

    def test_bare_isCallable(self):
        # Bare names only trigger a warning; the code is left unchanged.
        s = "isCallable(x)"
        t = "You should use 'hasattr(x, '__call__')' here."
        self.warns_unchanged(s, t)

    def test_bare_sequenceIncludes(self):
        s = "sequenceIncludes(x, y)"
        t = "You should use 'operator.contains(x, y)' here."
        self.warns_unchanged(s, t)

    def test_bare_operator_isSequenceType(self):
        s = "isSequenceType(z)"
        t = "You should use 'isinstance(z, collections.Sequence)' here."
        self.warns_unchanged(s, t)

    def test_bare_operator_isMappingType(self):
        s = "isMappingType(x)"
        t = "You should use 'isinstance(x, collections.Mapping)' here."
        self.warns_unchanged(s, t)

    def test_bare_operator_isNumberType(self):
        s = "isNumberType(y)"
        t = "You should use 'isinstance(y, numbers.Number)' here."
        self.warns_unchanged(s, t)

    def test_bare_operator_repeat(self):
        s = "repeat(x, n)"
        t = "You should use 'operator.mul(x, n)' here."
        self.warns_unchanged(s, t)

    def test_bare_operator_irepeat(self):
        s = "irepeat(y, 187)"
        t = "You should use 'operator.imul(y, 187)' here."
        self.warns_unchanged(s, t)
class Test_exitfunc(FixerTestCase):
    """Tests for the ``exitfunc`` fixer, which rewrites assignments to
    ``sys.exitfunc`` as ``atexit.register(...)`` calls, inserting an
    ``atexit`` import next to the existing ``sys`` import when found."""

    fixer = "exitfunc"

    def test_simple(self):
        b = """
import sys
sys.exitfunc = my_atexit
"""
        a = """
import sys
import atexit
atexit.register(my_atexit)
"""
        self.check(b, a)

    def test_names_import(self):
        # atexit is appended to an existing multi-name import.
        b = """
import sys, crumbs
sys.exitfunc = my_func
"""
        a = """
import sys, crumbs, atexit
atexit.register(my_func)
"""
        self.check(b, a)

    def test_complex_expression(self):
        # The assigned expression is carried into register() verbatim.
        b = """
import sys
sys.exitfunc = do(d)/a()+complex(f=23, g=23)*expression
"""
        a = """
import sys
import atexit
atexit.register(do(d)/a()+complex(f=23, g=23)*expression)
"""
        self.check(b, a)

    def test_comments(self):
        b = """
import sys # Foo
sys.exitfunc = f # Blah
"""
        a = """
import sys
import atexit # Foo
atexit.register(f) # Blah
"""
        self.check(b, a)

        b = """
import apples, sys, crumbs, larry # Pleasant comments
sys.exitfunc = func
"""
        a = """
import apples, sys, crumbs, larry, atexit # Pleasant comments
atexit.register(func)
"""
        self.check(b, a)

    def test_in_a_function(self):
        # Assignments inside a function body are rewritten too; the
        # atexit import still lands at module level.
        b = """
import sys
def f():
sys.exitfunc = func
"""
        a = """
import sys
import atexit
def f():
atexit.register(func)
"""
        self.check(b, a)

    def test_no_sys_import(self):
        # Without a sys import there is nowhere to anchor the atexit
        # import, so the fixer rewrites but also warns.
        b = """sys.exitfunc = f"""
        a = """atexit.register(f)"""
        msg = ("Can't find sys import; Please add an atexit import at the "
               "top of your file.")
        self.warns(b, a, msg)

    def test_unchanged(self):
        # Reading sys.exitfunc (rather than assigning it) is left alone.
        s = """f(sys.exitfunc)"""
        self.unchanged(s)
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import datetime
class PullRequestComment(Framework.TestCase):
    """Replay-based tests for a single pull-request review comment
    (comment 886298 on pull request #31 of the PyGithub repository)."""

    def setUp(self):
        Framework.TestCase.setUp(self)
        # The comment under test, fetched through the recorded session.
        self.comment = self.g.get_user().get_repo("PyGithub").get_pull(31).get_comment(886298)

    def testAttributes(self):
        # Expected values mirror the recorded API replay data.
        self.assertEqual(self.comment.body, "Comment created by PyGithub")
        self.assertEqual(self.comment.commit_id, "8a4f306d4b223682dd19410d4a9150636ebe4206")
        self.assertEqual(self.comment.created_at, datetime.datetime(2012, 5, 27, 9, 40, 12))
        self.assertEqual(self.comment.id, 886298)
        self.assertEqual(self.comment.original_commit_id, "8a4f306d4b223682dd19410d4a9150636ebe4206")
        self.assertEqual(self.comment.original_position, 5)
        self.assertEqual(self.comment.path, "src/github/Issue.py")
        self.assertEqual(self.comment.position, 5)
        self.assertEqual(self.comment.updated_at, datetime.datetime(2012, 5, 27, 9, 40, 12))
        self.assertEqual(self.comment.url, "https://api.github.com/repos/jacquev6/PyGithub/pulls/comments/886298")
        self.assertEqual(self.comment.user.login, "jacquev6")
        self.assertEqual(self.comment.html_url, "https://github.com/jacquev6/PyGithub/pull/170#issuecomment-18637907")

    def testEdit(self):
        self.comment.edit("Comment edited by PyGithub")
        # The local object reflects the edited body after the call.
        self.assertEqual(self.comment.body, "Comment edited by PyGithub")

    def testDelete(self):
        # Success is the absence of an exception during replay.
        self.comment.delete()
"""Build and install the colormaps package."""
# Copyright (c) 2012 Andrew Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from distutils.core import setup
# Read the package version from lib/colormaps/__init__.py so it is defined
# in exactly one place.  Only the ``__version__ = ...`` line is executed.
# FIX: the original leaked the file handle (open() without close), read the
# whole file via readlines(), and kept scanning after the version was found.
with open('lib/colormaps/__init__.py') as init_file:
    for line in init_file:
        if line.startswith('__version__'):
            exec(line.strip())  # binds __version__ in this module
            break

# Non-Python palette data files that must ship inside the package.
package_data = {'colormaps': ['palette/*.txt', 'palette/ncl/*.txt',
                              'palette/brewer/diverging/*.txt',
                              'palette/brewer/qualitative/*.txt',
                              'palette/brewer/sequential/*.txt']}

if __name__ == '__main__':
    setup(
        name='colormaps',
        version=__version__,
        description='Easily generate colormaps for matplotlib',
        author='Andrew Dawson',
        author_email='dawson@atm.ox.ac.uk',
        url='http://github.com/ajdawson/colormaps',
        long_description="""
colormaps can generate colormaps of varying lengths from sets of
base colors. It is designed to allow total control of colormaps
in matplotlib.
""",
        packages=['colormaps'],
        package_dir={'': 'lib'},
        package_data=package_data,)
<?php declare(strict_types=1);
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer\Test\Util;
use Composer\MetadataMinifier\MetadataMinifier;
use Composer\Package\CompletePackage;
use Composer\Package\Dumper\ArrayDumper;
use PHPUnit\Framework\TestCase;
/**
 * Verifies that MetadataMinifier::minify() produces the expected
 * delta-encoded representation and that expand() restores the original
 * dumped package arrays exactly (round-trip property).
 */
class MetadataMinifierTest extends TestCase
{
    public function testMinifyExpand(): void
    {
        // Three versions of the same package; later versions share or drop
        // fields so the minifier can delta-encode against the previous entry.
        $package1 = new CompletePackage('foo/bar', '2.0.0.0', '2.0.0');
        $package1->setScripts(['foo' => ['bar']]);
        $package1->setLicense(['MIT']);
        $package2 = new CompletePackage('foo/bar', '1.2.0.0', '1.2.0');
        $package2->setLicense(['GPL']);
        $package2->setHomepage('https://example.org');
        $package3 = new CompletePackage('foo/bar', '1.0.0.0', '1.0.0');
        $package3->setLicense(['GPL']);
        $dumper = new ArrayDumper();

        // First entry is complete; the rest carry only changed fields, with
        // '__unset' marking fields dropped relative to the previous entry.
        $minified = [
            ['name' => 'foo/bar', 'version' => '2.0.0', 'version_normalized' => '2.0.0.0', 'type' => 'library', 'scripts' => ['foo' => ['bar']], 'license' => ['MIT']],
            ['version' => '1.2.0', 'version_normalized' => '1.2.0.0', 'license' => ['GPL'], 'homepage' => 'https://example.org', 'scripts' => '__unset'],
            ['version' => '1.0.0', 'version_normalized' => '1.0.0.0', 'homepage' => '__unset'],
        ];

        $source = [$dumper->dump($package1), $dumper->dump($package2), $dumper->dump($package3)];

        self::assertSame($minified, MetadataMinifier::minify($source));
        self::assertSame($source, MetadataMinifier::expand($minified));
    }
}
from insights.parsers.mysql_log import MysqlLog
from insights.tests import context_wrap
# Sample mysqld log content exercised by test_mysql_log below.  Two of the
# [Warning] lines are wrapped mid-word ("fo" / "r more details") exactly as
# captured, and the log mixes timestamped entries with raw OpenSSL/crash
# output that carries no timestamp.
MYSQL_LOG = """
2018-03-13T06:37:37.268209Z 0 [Warning] Changed limits: max_open_files: 1024 (requested 5000)
2018-03-13T06:37:37.268417Z 0 [Warning] Changed limits: table_open_cache: 431 (requested 2000)
2018-03-13T06:37:37.268549Z 0 [Warning] TIMESTAMP with implicit DEFAULT value is deprecated. Please use --explicit_defaults_for_timestamp server option (see documentation fo
r more details).
2018-03-13T06:37:39.651387Z 0 [Warning] InnoDB: New log files created, LSN=45790
2018-03-13T06:37:39.719166Z 0 [Warning] InnoDB: Creating foreign key constraint system tables.
2018-03-13T06:37:39.784406Z 0 [Warning] No existing UUID has been found, so we assume that this is the first time that this server has been started. Generating a new UUID: 0
698a7d6-2689-11e8-8944-0800274ac5ef.
2018-03-13T06:37:39.789636Z 0 [Warning] Gtid table is not ready to be used. Table 'mysql.gtid_executed' cannot be opened.
2018-03-13T06:37:40.498084Z 0 [Warning] CA certificate ca.pem is self signed.
2018-03-13T06:37:41.080591Z 1 [Warning] root@localhost is created with an empty password ! Please consider switching off the --initialize-insecure option.
md5_dgst.c(80): OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!
06:37:41 UTC - mysqld got signal 6 ;
2018-03-13T07:43:31.450772Z 0 [Note] Event Scheduler: Loaded 0 events
2018-03-13T07:43:31.450988Z 0 [Note] /opt/rh/rh-mysql57/root/usr/libexec/mysqld: ready for connections.
Version: '5.7.16' socket: '/var/lib/mysql/mysql.sock' port: 3306 MySQL Community Server (GPL)
md5_dgst.c(80): OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!
07:46:19 UTC - mysqld got signal 6 ;
"""
def test_mysql_log():
    """Parse the sample log and verify tag counts and substring lookup."""
    parsed = MysqlLog(context_wrap(MYSQL_LOG))
    warnings = parsed.get("[Warning]")
    notes = parsed.get("[Note]")
    assert len(warnings) == 9
    assert len(notes) == 2
    assert 'ready for connections' in parsed
###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import hashlib
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol, \
listenWS
class FrameBasedHashServerProtocol(WebSocketServerProtocol):
    """
    Frame-based WebSocket server protocol that maintains a running
    SHA-256 over all message data received.  After every frame it
    replies with the digest accumulated so far, so messages may span
    any number of frames; the digest is reset when a new message begins.
    """

    def onMessageBegin(self, isBinary):
        # Start a fresh digest for the incoming message.
        WebSocketServerProtocol.onMessageBegin(self, isBinary)
        self.sha256 = hashlib.sha256()

    def onMessageFrame(self, payload):
        # Fold every chunk of this frame into the running digest while
        # tallying the frame's total payload length.
        frame_length = 0
        for chunk in payload:
            frame_length += len(chunk)
            self.sha256.update(chunk)
        digest = self.sha256.hexdigest()
        print("Received frame with payload length {}, compute digest: {}".format(frame_length, digest))
        self.sendMessage(digest.encode('utf8'))

    def onMessageEnd(self):
        # Drop the digest; a new one is created on the next message.
        self.sha256 = None
if __name__ == '__main__':
    # Serve the frame-hashing protocol on localhost:9000.
    factory = WebSocketServerFactory("ws://localhost:9000")
    factory.protocol = FrameBasedHashServerProtocol

    # Optionally negotiate permessage-deflate compression with clients.
    enableCompression = False
    if enableCompression:
        from autobahn.websocket.compress import PerMessageDeflateOffer, \
            PerMessageDeflateOfferAccept

        ## Function to accept offers from the client ..
        def accept(offers):
            # Accept the first permessage-deflate offer, if any.
            for offer in offers:
                if isinstance(offer, PerMessageDeflateOffer):
                    return PerMessageDeflateOfferAccept(offer)

        factory.setProtocolOptions(perMessageCompressionAccept = accept)

    listenWS(factory)
    reactor.run()
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import unittest
sys.path.insert(0, os.path.abspath(".."))
from cStringIO import StringIO
from .. import parser
from ..parser import token_types
class TokenizerTest(unittest.TestCase):
    """Tests for the expectation-manifest tokenizer.

    Each test feeds a manifest snippet to ``parser.Tokenizer`` and checks
    the exact sequence of (token type, value) pairs produced, or that a
    ``parser.ParseError`` is raised for malformed input.

    NOTE(review): the multi-line input fixtures appear to have lost their
    internal indentation in extraction — kept byte-identical here.
    """

    def setUp(self):
        self.tokenizer = parser.Tokenizer()

    def tokenize(self, input_str):
        """Tokenize *input_str*, collecting tokens up to and including eof."""
        rv = []
        for item in self.tokenizer.tokenize(StringIO(input_str)):
            rv.append(item)
            if item[0] == token_types.eof:
                break
        return rv

    def compare(self, input_text, expected):
        """Assert that tokenizing *input_text* yields *expected* plus eof."""
        expected = expected + [(token_types.eof, None)]
        actual = self.tokenize(input_text)
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual.
        self.assertEqual(actual, expected)

    def test_heading_0(self):
        self.compare("""[Heading text]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading text"),
                      (token_types.paren, "]")])

    def test_heading_1(self):
        # Escaped "]" inside a heading is part of the heading text.
        self.compare("""[Heading [text\]]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading [text]"),
                      (token_types.paren, "]")])

    def test_heading_2(self):
        # "#" inside a heading is literal, not a comment.
        self.compare("""[Heading #text]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading #text"),
                      (token_types.paren, "]")])

    def test_heading_3(self):
        self.compare("""[Heading [\]text]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading []text"),
                      (token_types.paren, "]")])

    def test_heading_4(self):
        # An unterminated heading is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("[Heading")

    def test_heading_5(self):
        # A comment after the heading is discarded.
        self.compare("""[Heading [\]text] #comment""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading []text"),
                      (token_types.paren, "]")])

    def test_heading_6(self):
        # Escape sequences inside headings are decoded.
        self.compare(r"""[Heading \ttext]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading \ttext"),
                      (token_types.paren, "]")])

    def test_key_0(self):
        self.compare("""key:value""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_1(self):
        self.compare("""key : value""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_2(self):
        # Internal whitespace in an unquoted value is kept.
        self.compare("""key : val ue""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "val ue")])

    def test_key_3(self):
        # A trailing comment is stripped from the value.
        self.compare("""key: value#comment""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_4(self):
        # Whitespace inside a key is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""ke y: value""")

    def test_key_5(self):
        # A key with no separator is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key""")

    def test_key_6(self):
        # Double-quoted values lose their quotes.
        self.compare("""key: "value\"""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_7(self):
        # Single-quoted values behave the same way.
        self.compare("""key: 'value'""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_8(self):
        # "#" inside a quoted value is literal, not a comment.
        self.compare("""key: "#value\"""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "#value")])

    def test_key_9(self):
        self.compare("""key: '#value\'""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "#value")])

    def test_key_10(self):
        # Unterminated quoted values are parse errors.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: "value""")

    def test_key_11(self):
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: 'value""")

    def test_key_12(self):
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: 'value""")

    def test_key_13(self):
        # Trailing junk after a quoted value is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: 'value' abc""")

    def test_key_14(self):
        # An escaped backslash yields a single backslash in the value.
        self.compare(r"""key: \\nb""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, r"\nb")])

    def test_list_0(self):
        self.compare(
            """
key: []""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.list_end, "]")])

    def test_list_1(self):
        # Quoted and unquoted items are both plain strings.
        self.compare(
            """
key: [a, "b"]""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.string, "a"),
             (token_types.string, "b"),
             (token_types.list_end, "]")])

    def test_list_2(self):
        # Lists may span multiple lines.
        self.compare(
            """
key: [a,
b]""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.string, "a"),
             (token_types.string, "b"),
             (token_types.list_end, "]")])

    def test_list_3(self):
        # A comment after the comma swallows the rest of that line.
        self.compare(
            """
key: [a, #b]
c]""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.string, "a"),
             (token_types.string, "c"),
             (token_types.list_end, "]")])

    def test_list_4(self):
        # A comment directly after an item (no comma) is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: [a #b]
c]""")

    def test_list_5(self):
        # A line continuation inside a list is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: [a \\
c]""")

    def test_list_6(self):
        self.compare(
            """key: [a , b]""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.string, "a"),
             (token_types.string, "b"),
             (token_types.list_end, "]")])

    def test_expr_0(self):
        self.compare(
            """
key:
if cond == 1: value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_1(self):
        # A default value may follow the conditional branch.
        self.compare(
            """
key:
if cond == 1: value1
value2""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1"),
             (token_types.separator, ":"),
             (token_types.string, "value1"),
             (token_types.string, "value2")])

    def test_expr_2(self):
        # Quoted operands tokenize as strings, not numbers.
        self.compare(
            """
key:
if cond=="1": value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.string, "1"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_3(self):
        self.compare(
            """
key:
if cond==1.1: value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1.1"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_4(self):
        self.compare(
            """
key:
if cond==1.1 and cond2 == "a": value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1.1"),
             (token_types.ident, "and"),
             (token_types.ident, "cond2"),
             (token_types.ident, "=="),
             (token_types.string, "a"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_5(self):
        # Parentheses produce paren tokens.
        self.compare(
            """
key:
if (cond==1.1 ): value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.paren, "("),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1.1"),
             (token_types.paren, ")"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_6(self):
        # Escape sequences in quoted operands are decoded.
        self.compare(
            """
key:
if "\\ttest": value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.string, "\ttest"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_7(self):
        # A digit followed by letters is not a valid token.
        with self.assertRaises(parser.ParseError):
            self.tokenize(
                """
key:
if 1A: value""")

    def test_expr_8(self):
        with self.assertRaises(parser.ParseError):
            self.tokenize(
                """
key:
if 1a: value""")

    def test_expr_9(self):
        # Two decimal points in a number is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize(
                """
key:
if 1.1.1: value""")

    def test_expr_10(self):
        # A trailing decimal point is still a valid number token.
        self.compare(
            """
key:
if 1.: value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.number, "1."),
             (token_types.separator, ":"),
             (token_types.string, "value")])
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package oci
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/terraform/internal/states/remote"
"github.com/hashicorp/terraform/internal/states/statemgr"
"github.com/hashicorp/terraform/internal/tfdiags"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
)
// RemoteClient manages a single Terraform state object (and its companion
// lock file) stored in an OCI Object Storage bucket.
type RemoteClient struct {
	objectStorageClient *objectstorage.ObjectStorageClient // OCI SDK client used for all object operations

	namespace  string // Object Storage namespace
	bucketName string // bucket holding the state object
	path       string // object name of the state file
	lockFilePath string // object name of the lock file

	// Server-side encryption settings; kmsKeyID takes precedence over the
	// customer-supplied (SSE-C) key fields when both are configured.
	kmsKeyID             string
	SSECustomerKey       string // SSE-C key material (presumably base64 — confirm against config layer)
	SSECustomerKeySHA256 string // SHA-256 digest of the SSE-C key
	SSECustomerAlgorithm string // SSE-C algorithm name
}
// Get downloads the remote state object and returns it as a remote.Payload.
// A missing or empty object yields a nil payload (plus any error from the
// fetch).  The payload MD5 is recomputed locally over the full contents.
func (c *RemoteClient) Get() (*remote.Payload, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	logger := logWithOperation("download-state-file").Named(c.path)
	logger.Info("Downloading remote state")
	// The logger rides along on the context for getObject to pick up.
	ctx := context.WithValue(context.Background(), "logger", logger)
	payload, err := c.getObject(ctx)
	// err != nil short-circuits, so payload is never dereferenced when nil.
	if err != nil || len(payload.Data) == 0 {
		return nil, diags.Append(err)
	}
	// md5 hash of whole state
	sum := md5.Sum(payload.Data)
	payload.MD5 = sum[:]
	return payload, diags
}
// getObject downloads the state object from OCI Object Storage.
//
// A HEAD request is issued first; a 404 there means the state file does not
// yet exist, which is not an error — an empty payload is returned so the
// caller can initialize fresh state.  The subsequent GET is pinned to the
// ETag observed by the HEAD request (IfMatch), so an overwrite between the
// two calls fails instead of returning mixed content.
func (c *RemoteClient) getObject(ctx context.Context) (*remote.Payload, error) {
	logger := ctx.Value("logger").(hclog.Logger)

	headRequest := objectstorage.HeadObjectRequest{
		NamespaceName: common.String(c.namespace),
		ObjectName:    common.String(c.path),
		BucketName:    common.String(c.bucketName),
		RequestMetadata: common.RequestMetadata{
			RetryPolicy: getDefaultRetryPolicy(),
		},
	}
	// Propagate customer-supplied encryption headers when configured.
	if c.SSECustomerKey != "" && c.SSECustomerKeySHA256 != "" {
		headRequest.OpcSseCustomerKey = common.String(c.SSECustomerKey)
		headRequest.OpcSseCustomerKeySha256 = common.String(c.SSECustomerKeySHA256)
		headRequest.OpcSseCustomerAlgorithm = common.String(c.SSECustomerAlgorithm)
	}

	headResponse, headErr := c.objectStorageClient.HeadObject(ctx, headRequest)
	if headErr != nil {
		var ociHeadErr common.ServiceError
		if errors.As(headErr, &ociHeadErr) && ociHeadErr.GetHTTPStatusCode() == 404 {
			// FIX: hclog does not apply printf-style formatting — trailing
			// arguments are key/value pairs, so the original '%s' was never
			// substituted and c.path dangled as an unpaired log arg.
			logger.Debug(fmt.Sprintf("State file '%s' not found. Initializing Terraform state...", c.path))
			return &remote.Payload{}, nil
		}
		return nil, fmt.Errorf("failed to access object '%s' in bucket '%s': %w", c.path, c.bucketName, headErr)
	}

	getRequest := objectstorage.GetObjectRequest{
		NamespaceName: common.String(c.namespace),
		ObjectName:    common.String(c.path),
		BucketName:    common.String(c.bucketName),
		IfMatch:       headResponse.ETag,
		RequestMetadata: common.RequestMetadata{
			RetryPolicy: getDefaultRetryPolicy(),
		},
	}
	if c.SSECustomerKey != "" && c.SSECustomerKeySHA256 != "" {
		getRequest.OpcSseCustomerKey = common.String(c.SSECustomerKey)
		getRequest.OpcSseCustomerKeySha256 = common.String(c.SSECustomerKeySHA256)
		getRequest.OpcSseCustomerAlgorithm = common.String(c.SSECustomerAlgorithm)
	}

	// Get object from OCI
	getResponse, err := c.objectStorageClient.GetObject(ctx, getRequest)
	if err != nil {
		var ociErr common.ServiceError
		if errors.As(err, &ociErr) {
			return nil, fmt.Errorf("failed to access object HttpStatusCode: %d\nOpcRequestId: %s\n message: %s\n ErrorCode: %s", ociErr.GetHTTPStatusCode(), ociErr.GetOpcRequestID(), ociErr.GetMessage(), ociErr.GetCode())
		}
		return nil, fmt.Errorf("failed to access object '%s' in bucket '%s': %w", c.path, c.bucketName, err)
	}
	defer getResponse.Content.Close()

	// Read object content
	contentArray, err := io.ReadAll(getResponse.Content)
	if err != nil {
		return nil, fmt.Errorf("unable to read 'content' from response: %w", err)
	}

	// Single-part uploads report ContentMd5; multipart uploads report
	// OpcMultipartMd5 instead.
	md5Hash := getResponse.ContentMd5
	if md5Hash == nil || len(*md5Hash) == 0 {
		md5Hash = getResponse.OpcMultipartMd5
	}
	// FIX: guard against both checksum headers being absent — the original
	// dereferenced md5Hash unconditionally below and would panic.
	if md5Hash == nil {
		return nil, fmt.Errorf("object '%s' in bucket '%s' returned no MD5 checksum", c.path, c.bucketName)
	}

	// Construct payload
	payload := &remote.Payload{
		Data: contentArray,
		MD5:  []byte(*md5Hash),
	}

	// Return an error instead of `nil, nil` if the object is empty
	if len(payload.Data) == 0 {
		return nil, fmt.Errorf("object %q is empty", c.path)
	}

	return payload, nil
}
// Put uploads the serialized state.  Payloads larger than
// DefaultFilePartSize go through the multipart-upload path, falling back to
// a single-part upload when multipart fails and the payload still fits in
// one part (<= MaxFilePartSize); smaller payloads are uploaded directly.
func (c *RemoteClient) Put(data []byte) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics
	logger := logWithOperation("upload-state-file").Named(c.path)
	ctx := context.WithValue(context.Background(), "logger", logger)

	dataSize := int64(len(data))
	// MD5 over the whole payload; used as an integrity header on upload.
	sum := md5.Sum(data)

	var err error
	if dataSize > DefaultFilePartSize {
		logger.Info("Using Multipart Feature")
		var multipartUploadData = MultipartUploadData{
			client: c,
			Data:   data,
			RequestMetadata: common.RequestMetadata{
				RetryPolicy: getDefaultRetryPolicy(),
			},
		}
		err = multipartUploadData.multiPartUploadImpl(ctx)
		// Fall back to single-part only when the payload fits in one part.
		if err != nil && dataSize <= MaxFilePartSize {
			logger.Error(fmt.Sprintf("Multipart upload failed, falling back to single part upload: %v", err))
			err = c.uploadSinglePartObject(ctx, data, sum[:])
		}
	} else {
		err = c.uploadSinglePartObject(ctx, data, sum[:])
	}
	if err != nil {
		return diags.Append(err)
	}
	return diags
}
// uploadSinglePartObject uploads the complete state in one PutObject call,
// attaching the caller-computed MD5 (base64-encoded) as an integrity header.
// Encryption headers are set from the client configuration, with a KMS key
// taking precedence over a customer-supplied (SSE-C) key.
func (c *RemoteClient) uploadSinglePartObject(ctx context.Context, data, sum []byte) error {
	logger := ctx.Value("logger").(hclog.Logger).Named("singlePartUpload")
	logger.Info("Uploading single part object")

	if len(data) == 0 {
		return fmt.Errorf("uploadSinglePartObject: data is empty")
	}

	contentType := "application/json"
	putRequest := objectstorage.PutObjectRequest{
		ContentType:   common.String(contentType),
		NamespaceName: common.String(c.namespace),
		ObjectName:    common.String(c.path),
		BucketName:    common.String(c.bucketName),
		PutObjectBody: io.NopCloser(bytes.NewReader(data)),
		ContentMD5:    common.String(base64.StdEncoding.EncodeToString(sum)),
		RequestMetadata: common.RequestMetadata{
			RetryPolicy: getDefaultRetryPolicy(),
		},
	}

	// Handle encryption settings
	if c.kmsKeyID != "" {
		putRequest.OpcSseKmsKeyId = common.String(c.kmsKeyID)
	} else if c.SSECustomerKey != "" && c.SSECustomerKeySHA256 != "" {
		putRequest.OpcSseCustomerKey = common.String(c.SSECustomerKey)
		putRequest.OpcSseCustomerKeySha256 = common.String(c.SSECustomerKeySHA256)
		putRequest.OpcSseCustomerAlgorithm = common.String(c.SSECustomerAlgorithm)
	}

	logger.Info(fmt.Sprintf("Uploading remote state: %s", c.path))
	putResponse, err := c.objectStorageClient.PutObject(ctx, putRequest)
	if err != nil {
		return fmt.Errorf("failed to upload object: %w", err)
	}
	// FIX: hclog.Info is not printf-style — the original passed putResponse
	// as a dangling key/value argument, so '%+v' was never substituted.
	logger.Info(fmt.Sprintf("Uploaded state file response: %+v", putResponse))
	return nil
}
// Delete removes the remote state object, including every stored version,
// and reports any failures as diagnostics.
func (c *RemoteClient) Delete() tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics
	diags = diags.Append(c.DeleteAllObjectVersions())
	return diags
}
// DeleteAllObjectVersions deletes every stored version of the object at
// c.path: it first pages through ListObjectVersions to collect all versions,
// then issues one DeleteObject per version. Individual delete failures are
// accumulated into diagnostics and returned together instead of aborting
// the loop, so a partial failure still deletes as much as possible.
func (c *RemoteClient) DeleteAllObjectVersions() error {
	request := objectstorage.ListObjectVersionsRequest{
		BucketName:    common.String(c.bucketName),
		NamespaceName: common.String(c.namespace),
		Prefix:        common.String(c.path),
		RequestMetadata: common.RequestMetadata{
			RetryPolicy: getDefaultRetryPolicy(),
		},
	}
	response, err := c.objectStorageClient.ListObjectVersions(context.Background(), request)
	if err != nil {
		return err
	}
	// Follow opc-next-page tokens until the listing is exhausted,
	// accumulating all versions into response.Items.
	request.Page = response.OpcNextPage
	for request.Page != nil {
		request.RequestMetadata.RetryPolicy = getDefaultRetryPolicy()
		listResponse, err := c.objectStorageClient.ListObjectVersions(context.Background(), request)
		if err != nil {
			return err
		}
		response.Items = append(response.Items, listResponse.Items...)
		request.Page = listResponse.OpcNextPage
	}
	var diagErr tfdiags.Diagnostics
	for _, objectVersion := range response.Items {
		deleteObjectVersionRequest := objectstorage.DeleteObjectRequest{
			BucketName:    common.String(c.bucketName),
			NamespaceName: common.String(c.namespace),
			ObjectName:    objectVersion.Name,
			VersionId:     objectVersion.VersionId,
			RequestMetadata: common.RequestMetadata{
				RetryPolicy: getDefaultRetryPolicy(),
			},
		}
		_, err := c.objectStorageClient.DeleteObject(context.Background(), deleteObjectVersionRequest)
		if err != nil {
			// Record and continue; remaining versions are still deleted.
			diagErr = diagErr.Append(err)
		}
	}
	if diagErr != nil {
		return diagErr.Err()
	}
	return nil
}
// Lock acquires the state lock by writing the serialized LockInfo to the
// bucket with If-None-Match: "*", so the PUT succeeds only when no lock
// object exists yet — an atomic acquire. On contention the current holder's
// lock info is fetched and returned inside a statemgr.LockError. Returns
// the lock ID on success.
func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
	logger := logWithOperation("lock-state-file").Named(c.lockFilePath)
	logger.Info("Locking remote state")
	ctx := context.TODO()
	info.Path = c.path
	infoBytes, err := json.Marshal(info)
	if err != nil {
		return "", err
	}
	putObjReq := objectstorage.PutObjectRequest{
		BucketName:    common.String(c.bucketName),
		NamespaceName: common.String(c.namespace),
		ObjectName:    common.String(c.lockFilePath),
		// "*" makes the PUT conditional on the object not existing,
		// which is what turns this write into a lock acquisition.
		IfNoneMatch:   common.String("*"),
		PutObjectBody: io.NopCloser(bytes.NewReader(infoBytes)),
		RequestMetadata: common.RequestMetadata{
			RetryPolicy: getDefaultRetryPolicy(),
		},
	}
	putResponse, putErr := c.objectStorageClient.PutObject(ctx, putObjReq)
	if putErr != nil {
		// Lock already held (or the PUT failed) — surface the holder's info.
		lockInfo, _, err := c.getLockInfo(ctx)
		if err != nil {
			putErr = errors.Join(putErr, err)
		}
		return "", &statemgr.LockError{
			Err:  putErr,
			Info: lockInfo,
		}
	}
	// hclog takes key/value pairs rather than printf verbs, so format the
	// message first (the original "%+d\n" arg would log as a stray pair).
	logger.Info(fmt.Sprintf("state lock response code: %d", putResponse.RawResponse.StatusCode))
	return info.ID, nil
}
// getLockInfo retrieves and parses a lock file from an oci bucket.
func (c *RemoteClient) getLockInfo(ctx context.Context) (*statemgr.LockInfo, string, error) {
// Attempt to retrieve the lock file from
getRequest := objectstorage.GetObjectRequest{
NamespaceName: common.String(c.namespace),
ObjectName: common.String(c.lockFilePath),
BucketName: common.String(c.bucketName),
RequestMetadata: common.RequestMetadata{
RetryPolicy: getDefaultRetryPolicy(),
},
}
getResponse, err := c.objectStorageClient.GetObject(ctx, getRequest)
if err != nil {
return nil, "", fmt.Errorf("failed to get existing lock file: %w", err)
}
lockByteData, err := io.ReadAll(getResponse.Content)
if err != nil {
return nil, *getResponse.ETag, fmt.Errorf("failed to read existing lock file content: %w", err)
}
lockInfo := &statemgr.LockInfo{}
if err := json.Unmarshal(lockByteData, lockInfo); err != nil {
return lockInfo, "", fmt.Errorf("failed to unmarshal JSON data into LockInfo struct: %w", err)
}
return lockInfo, *getResponse.ETag, nil
}
func (c *RemoteClient) Unlock(id string) error {
ctx := context.TODO()
logger := logWithOperation("unlock-state-file").Named(c.lockFilePath)
logger.Info("unlocking remote state")
lockInfo, etag, err := c.getLockInfo(ctx)
if err != nil {
return fmt.Errorf("Failed to retrieve lock information from OCI Object Storage: %w", err)
}
// Verify that the provided lock ID matches the lock ID of the retrieved lock file.
if lockInfo.ID != id {
return &statemgr.LockError{
Info: lockInfo,
Err: fmt.Errorf("lock ID '%s' does not match the existing lock ID '%s'", id, lockInfo.ID),
}
}
deleteRequest := objectstorage.DeleteObjectRequest{
NamespaceName: common.String(c.namespace),
ObjectName: common.String(c.lockFilePath),
BucketName: common.String(c.bucketName),
IfMatch: common.String(etag),
RequestMetadata: common.RequestMetadata{
RetryPolicy: getDefaultRetryPolicy(),
},
}
deleteResponse, err := c.objectStorageClient.DeleteObject(ctx, deleteRequest)
if err != nil {
return &statemgr.LockError{
Info: lockInfo,
Err: err,
}
}
logger.Info("Unlock response: %v\n", deleteResponse.RawResponse.StatusCode)
return nil
} | go | github | https://github.com/hashicorp/terraform | internal/backend/remote-state/oci/client.go |
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package cli
import (
"os"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvtenant"
"github.com/cockroachdb/cockroach/pkg/security/securityassets"
"github.com/cockroachdb/cockroach/pkg/security/securitytest"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
)
// init installs the embedded test security assets before any test runs.
func init() {
	ResetTest()
}
// ResetTest points the security asset loader at the embedded test assets
// (certificates/keys baked into the test binary).
func ResetTest() {
	securityassets.SetLoader(securitytest.EmbeddedAssets)
}
// TestMain wires up the shared test factories (server, cluster, tenant
// connector) for the package before delegating to the standard test runner.
func TestMain(m *testing.M) {
	// CLI tests are sensitive to the server version, but test binaries don't have
	// a version injected. Pretend to be a very up-to-date version.
	defer build.TestingOverrideVersion("v999.0.0")()
	serverutils.InitTestServerFactory(server.TestServerFactory,
		serverutils.WithDRPCOption(base.TestDRPCEnabledRandomly))
	serverutils.InitTestClusterFactory(testcluster.TestClusterFactory)
	kvtenant.InitTestConnectorFactory()
	os.Exit(m.Run())
}
//go:generate ../util/leaktest/add-leaktest.sh *_test.go | go | github | https://github.com/cockroachdb/cockroach | pkg/cli/main_test.go |
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import os
import os.path
import sys
import warnings
from collections import defaultdict, namedtuple
from ansible import constants as C
from ansible.errors import AnsibleError, AnsiblePluginCircularRedirect, AnsiblePluginRemovedError, AnsibleCollectionUnsupportedVersionError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.compat.importlib import import_module
from ansible.module_utils.six import string_types
from ansible.parsing.utils.yaml import from_yaml
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.plugins import get_plugin_class, MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE
from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder, _get_collection_metadata
from ansible.utils.display import Display
from ansible.utils.plugin_docs import add_fragments
from ansible import __version__ as ansible_version
# TODO: take the packaging dep, or vendor SpecifierSet?
try:
from packaging.specifiers import SpecifierSet
from packaging.version import Version
except ImportError:
SpecifierSet = None
Version = None
try:
import importlib.util
imp = None
except ImportError:
import imp
# Shared display instance for warnings/deprecations emitted during plugin loading.
display = Display()

# Pair returned by PluginLoader.get_with_context(): the loaded plugin object
# plus the PluginLoadContext describing how it was resolved.
get_with_context_result = namedtuple('get_with_context_result', ['object', 'plugin_load_context'])
def get_all_plugin_loaders():
    """Return (name, loader) pairs for every PluginLoader defined at module level."""
    loaders = []
    for global_name, global_obj in globals().items():
        if isinstance(global_obj, PluginLoader):
            loaders.append((global_name, global_obj))
    return loaders
def add_all_plugin_dirs(path):
    ''' add any existing plugin dirs in the path provided '''
    # Work on bytes to tolerate non-UTF8 filesystem paths.
    b_path = os.path.expanduser(to_bytes(path, errors='surrogate_or_strict'))
    if os.path.isdir(b_path):
        # For each known loader, register <path>/<loader subdir> if it exists,
        # e.g. <path>/action_plugins for the action loader.
        for name, obj in get_all_plugin_loaders():
            if obj.subdir:
                plugin_path = os.path.join(b_path, to_bytes(obj.subdir))
                if os.path.isdir(plugin_path):
                    obj.add_directory(to_text(plugin_path))
    else:
        display.warning("Ignoring invalid path provided to plugin path: '%s' is not a directory" % to_text(path))
def get_shell_plugin(shell_type=None, executable=None):
    """Return a shell plugin instance for ``shell_type``.

    When ``shell_type`` is not given it is derived from ``executable``:
    first by loading a plugin named after the executable's basename, then by
    scanning all shell plugins' COMPATIBLE_SHELLS lists. With neither input,
    AnsibleError is raised. ``executable`` (when given) is stored on the
    returned plugin instance.
    """
    if not shell_type:
        # default to sh
        shell_type = 'sh'

        # mostly for backwards compat
        if executable:
            if isinstance(executable, string_types):
                shell_filename = os.path.basename(executable)
                try:
                    shell = shell_loader.get(shell_filename)
                except Exception:
                    shell = None

                if shell is None:
                    # Fall back to matching against each plugin's declared
                    # compatible shell names (e.g. 'bash' matching 'sh' family).
                    for shell in shell_loader.all():
                        if shell_filename in shell.COMPATIBLE_SHELLS:
                            shell_type = shell.SHELL_FAMILY
                            break
        else:
            raise AnsibleError("Either a shell type or a shell executable must be provided ")

    shell = shell_loader.get(shell_type)
    if not shell:
        raise AnsibleError("Could not find the shell plugin required (%s)." % shell_type)

    if executable:
        setattr(shell, 'executable', executable)

    return shell
def add_dirs_to_loader(which_loader, paths):
    """Add each path in ``paths`` (plus the loader's subdir) to the named loader.

    ``which_loader`` is the prefix of a module-level loader, e.g. 'action'
    resolves to the global ``action_loader``.
    """
    loader = getattr(sys.modules[__name__], '%s_loader' % which_loader)
    for path in paths:
        loader.add_directory(path, with_subdir=True)
class PluginPathContext(object):
    """A plugin search-path entry.

    ``path`` is the directory/file path; ``internal`` is True when the path
    lives inside the ansible package itself (builtin plugins) rather than a
    user-supplied location.
    """
    def __init__(self, path, internal):
        self.path = path
        self.internal = internal
class PluginLoadContext(object):
    """Mutable record of a single plugin resolution attempt.

    Tracks the redirect chain, load attempts, errors and deprecation
    metadata, plus the resolved name/path/collection once resolution
    succeeds.
    """
    def __init__(self):
        self.original_name = None
        self.redirect_list = []
        self.error_list = []
        self.import_error_list = []
        self.load_attempts = []
        self.pending_redirect = None
        self.exit_reason = None
        self.plugin_resolved_path = None
        self.plugin_resolved_name = None
        self.plugin_resolved_collection = None  # empty string for resolved plugins from user-supplied paths
        self.deprecated = False
        self.removal_date = None
        self.removal_version = None
        self.deprecation_warnings = []
        self.resolved = False
        self._resolved_fqcn = None

    @property
    def resolved_fqcn(self):
        # Lazily compute and cache the fully-qualified name of the final
        # resolution target; returns None until resolution has succeeded.
        if not self.resolved:
            return
        if not self._resolved_fqcn:
            final_plugin = self.redirect_list[-1]
            # 'ansible.legacy.<x>' presents as plain '<x>'
            if AnsibleCollectionRef.is_valid_fqcr(final_plugin) and final_plugin.startswith('ansible.legacy.'):
                final_plugin = final_plugin.split('ansible.legacy.')[-1]
            # Qualify a bare name with the collection it resolved from.
            if self.plugin_resolved_collection and not AnsibleCollectionRef.is_valid_fqcr(final_plugin):
                final_plugin = self.plugin_resolved_collection + '.' + final_plugin
            self._resolved_fqcn = final_plugin
        return self._resolved_fqcn

    def record_deprecation(self, name, deprecation, collection_name):
        # Emit a deprecation warning based on routing metadata and remember
        # it on this context; no-op when there is no deprecation entry.
        if not deprecation:
            return self
        # The `or ''` instead of using `.get(..., '')` makes sure that even if the user explicitly
        # sets `warning_text` to `~` (None) or `false`, we still get an empty string.
        warning_text = deprecation.get('warning_text', None) or ''
        removal_date = deprecation.get('removal_date', None)
        removal_version = deprecation.get('removal_version', None)
        # If both removal_date and removal_version are specified, use removal_date
        if removal_date is not None:
            removal_version = None
        warning_text = '{0} has been deprecated.{1}{2}'.format(name, ' ' if warning_text else '', warning_text)
        display.deprecated(warning_text, date=removal_date, version=removal_version, collection_name=collection_name)
        self.deprecated = True
        if removal_date:
            self.removal_date = removal_date
        if removal_version:
            self.removal_version = removal_version
        self.deprecation_warnings.append(warning_text)
        return self

    def resolve(self, resolved_name, resolved_path, resolved_collection, exit_reason):
        # Mark this context as successfully resolved to a concrete plugin.
        self.pending_redirect = None
        self.plugin_resolved_name = resolved_name
        self.plugin_resolved_path = resolved_path
        self.plugin_resolved_collection = resolved_collection
        self.exit_reason = exit_reason
        self.resolved = True
        return self

    def redirect(self, redirect_name):
        # Request another resolution pass targeting redirect_name.
        self.pending_redirect = redirect_name
        self.exit_reason = 'pending redirect resolution from {0} to {1}'.format(self.original_name, redirect_name)
        self.resolved = False
        return self

    def nope(self, exit_reason):
        # Terminal failure: no match found and no redirect to chase.
        self.pending_redirect = None
        self.exit_reason = exit_reason
        self.resolved = False
        return self
class PluginLoader:
'''
PluginLoader loads plugins from the configured plugin directories.
It searches for plugins by iterating through the combined list of play basedirs, configured
paths, and the python path. The first match is used.
'''
def __init__(self, class_name, package, config, subdir, aliases=None, required_base_class=None):
    """Create a loader for one plugin type.

    :arg class_name: expected class name inside each plugin file (None for modules)
    :arg package: python package the builtin plugins of this type live in
    :arg config: configured search path(s); a single string is normalized to a list
    :arg subdir: per-type subdirectory name (e.g. 'action_plugins')
    :arg aliases: optional mapping of alternate plugin names to canonical names
    :arg required_base_class: base class name a loaded plugin must derive from
    """
    aliases = {} if aliases is None else aliases

    self.class_name = class_name
    self.base_class = required_base_class
    self.package = package
    self.subdir = subdir

    # FIXME: remove alias dict in favor of alias by symlink?
    self.aliases = aliases

    if config and not isinstance(config, list):
        config = [config]
    elif not config:
        config = []

    self.config = config

    # Global caches are keyed by class_name and shared across loader instances.
    if class_name not in MODULE_CACHE:
        MODULE_CACHE[class_name] = {}
    if class_name not in PATH_CACHE:
        PATH_CACHE[class_name] = None
    if class_name not in PLUGIN_PATH_CACHE:
        PLUGIN_PATH_CACHE[class_name] = defaultdict(dict)

    # hold dirs added at runtime outside of config
    self._extra_dirs = []

    # caches
    self._module_cache = MODULE_CACHE[class_name]
    self._paths = PATH_CACHE[class_name]
    self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]

    self._searched_paths = set()
def __repr__(self):
    """Debug representation naming the plugin type this loader serves."""
    plugin_type = AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir)
    return 'PluginLoader(type={0})'.format(plugin_type)
def _clear_caches(self):
    """Invalidate path/module caches after the search path changes.

    Legacy behavior (OLD_PLUGIN_CACHE_CLEARING) only drops the path list;
    otherwise the shared global caches for this class_name are reset too.
    """
    if C.OLD_PLUGIN_CACHE_CLEARING:
        self._paths = None
    else:
        # reset global caches
        MODULE_CACHE[self.class_name] = {}
        PATH_CACHE[self.class_name] = None
        PLUGIN_PATH_CACHE[self.class_name] = defaultdict(dict)

        # reset internal caches
        self._module_cache = MODULE_CACHE[self.class_name]
        self._paths = PATH_CACHE[self.class_name]
        self._plugin_path_cache = PLUGIN_PATH_CACHE[self.class_name]
        self._searched_paths = set()
def __setstate__(self, data):
    '''
    Deserializer: rebuild the loader from the dict produced by __getstate__,
    restoring the shared global caches for this class_name as well.
    '''

    class_name = data.get('class_name')
    package = data.get('package')
    config = data.get('config')
    subdir = data.get('subdir')
    aliases = data.get('aliases')
    base_class = data.get('base_class')

    # Restore the globally shared caches before __init__ re-reads them.
    PATH_CACHE[class_name] = data.get('PATH_CACHE')
    PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')

    self.__init__(class_name, package, config, subdir, aliases, base_class)
    self._extra_dirs = data.get('_extra_dirs', [])
    self._searched_paths = data.get('_searched_paths', set())
def __getstate__(self):
    '''
    Serializer: capture constructor arguments, runtime-added dirs, and the
    shared caches so a worker process can reconstruct an equivalent loader.
    '''

    return dict(
        class_name=self.class_name,
        base_class=self.base_class,
        package=self.package,
        config=self.config,
        subdir=self.subdir,
        aliases=self.aliases,
        _extra_dirs=self._extra_dirs,
        _searched_paths=self._searched_paths,
        PATH_CACHE=PATH_CACHE[self.class_name],
        PLUGIN_PATH_CACHE=PLUGIN_PATH_CACHE[self.class_name],
    )
def format_paths(self, paths):
    ''' Returns a string suitable for printing of the search path '''
    # Deduplicate while preserving first-seen order.
    seen = set()
    unique_paths = []
    for candidate in paths:
        if candidate not in seen:
            seen.add(candidate)
            unique_paths.append(candidate)
    return os.pathsep.join(unique_paths)
def print_paths(self):
    """Return the top-level (no subdirs) search path as a printable string."""
    return self.format_paths(self._get_paths(subdirs=False))
def _all_directories(self, dir):
    """Return *dir* plus every subdirectory under any package directory
    (one containing __init__.py) beneath it, following symlinks."""
    results = [dir]
    for root, subdirs, files in os.walk(dir, followlinks=True):
        if '__init__.py' not in files:
            continue
        for subdir in subdirs:
            results.append(os.path.join(root, subdir))
    return results
def _get_package_paths(self, subdirs=True):
    ''' Gets the path of a Python package '''
    if not self.package:
        return []
    if not hasattr(self, 'package_path'):
        # Import the package and walk attribute access down to the leaf
        # module to find its directory on disk; cached on the instance.
        m = __import__(self.package)
        parts = self.package.split('.')[1:]
        for parent_mod in parts:
            m = getattr(m, parent_mod)
        self.package_path = to_text(os.path.dirname(m.__file__), errors='surrogate_or_strict')
    if subdirs:
        return self._all_directories(self.package_path)
    return [self.package_path]
def _get_paths_with_context(self, subdirs=True):
    ''' Return a list of PluginPathContext objects to search for plugins in.

    Order: runtime-added extra dirs, configured paths (with one level of
    subdirectories when ``subdirs`` is True), then the builtin package
    paths, with any '.../windows' paths moved to the very end. The result
    is cached until add_directory() invalidates it.
    '''

    # FIXME: This is potentially buggy if subdirs is sometimes True and sometimes False.
    # In current usage, everything calls this with subdirs=True except for module_utils_loader and ansible-doc
    # which always calls it with subdirs=False. So there currently isn't a problem with this caching.
    if self._paths is not None:
        return self._paths

    ret = [PluginPathContext(p, False) for p in self._extra_dirs]

    # look in any configured plugin paths, allow one level deep for subcategories
    if self.config is not None:
        for path in self.config:
            path = os.path.abspath(os.path.expanduser(path))
            if subdirs:
                contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
                for c in contents:
                    c = to_text(c, errors='surrogate_or_strict')
                    if os.path.isdir(c) and c not in ret:
                        ret.append(PluginPathContext(c, False))

            path = to_text(path, errors='surrogate_or_strict')
            if path not in ret:
                ret.append(PluginPathContext(path, False))

    # look for any plugins installed in the package subtree
    # Note package path always gets added last so that every other type of
    # path is searched before it.
    ret.extend([PluginPathContext(p, True) for p in self._get_package_paths(subdirs=subdirs)])

    # HACK: because powershell modules are in the same directory
    # hierarchy as other modules we have to process them last. This is
    # because powershell only works on windows but the other modules work
    # anywhere (possibly including windows if the correct language
    # interpreter is installed). the non-powershell modules can have any
    # file extension and thus powershell modules are picked up in that.
    # The non-hack way to fix this is to have powershell modules be
    # a different PluginLoader/ModuleLoader. But that requires changing
    # other things too (known thing to change would be PATHS_CACHE,
    # PLUGIN_PATHS_CACHE, and MODULE_CACHE. Since those three dicts key
    # on the class_name and neither regular modules nor powershell modules
    # would have class_names, they would not work as written.
    #
    # The expected sort order is paths in the order in 'ret' with paths ending in '/windows' at the end,
    # also in the original order they were found in 'ret'.
    # The .sort() method is guaranteed to be stable, so original order is preserved.
    ret.sort(key=lambda p: p.path.endswith('/windows'))

    # cache and return the result
    self._paths = ret
    return ret
def _get_paths(self, subdirs=True):
    ''' Return a list of paths to search for plugins in '''
    # Strip the context objects down to their bare path strings.
    return [ctx.path for ctx in self._get_paths_with_context(subdirs=subdirs)]
def _load_config_defs(self, name, module, path):
    ''' Reads plugin docs to find configuration setting definitions, to push to config manager for later use '''

    # plugins w/o class name don't support config
    if self.class_name:
        type_name = get_plugin_class(self.class_name)

        # if type name != 'module_doc_fragment':
        if type_name in C.CONFIGURABLE_PLUGINS:
            # Parse the plugin's DOCUMENTATION string and expand any
            # doc fragments before extracting the 'options' section.
            dstring = AnsibleLoader(getattr(module, 'DOCUMENTATION', ''), file_name=path).get_single_data()
            if dstring:
                add_fragments(dstring, path, fragment_loader=fragment_loader, is_module=(type_name == 'module'))

            if dstring and 'options' in dstring and isinstance(dstring['options'], dict):
                C.config.initialize_plugin_configuration_definitions(type_name, name, dstring['options'])
                display.debug('Loaded config def from plugin (%s/%s)' % (type_name, name))
def add_directory(self, directory, with_subdir=False):
    ''' Adds an additional directory to the search path '''
    directory = os.path.realpath(directory)

    if directory is not None:
        if with_subdir:
            # e.g. <directory>/action_plugins for the action loader
            directory = os.path.join(directory, self.subdir)
        if directory not in self._extra_dirs:
            # append the directory and invalidate the path cache
            self._extra_dirs.append(directory)
            self._clear_caches()
            display.debug('Added %s to loader search path' % (directory))
def _query_collection_routing_meta(self, acr, plugin_type, extension=None):
    """Return the plugin_routing entry for this plugin from the collection's
    runtime metadata, or None when there is no collection/metadata/entry.

    NOTE(review): callers pass extension='' or a real suffix; an explicit
    None here would TypeError on the string concatenation below — confirm
    no caller relies on the default.
    """
    collection_pkg = import_module(acr.n_python_collection_package_name)
    if not collection_pkg:
        return None

    # FIXME: shouldn't need this...
    try:
        # force any type-specific metadata postprocessing to occur
        import_module(acr.n_python_collection_package_name + '.plugins.{0}'.format(plugin_type))
    except ImportError:
        pass

    # this will be created by the collection PEP302 loader
    collection_meta = getattr(collection_pkg, '_collection_meta', None)

    if not collection_meta:
        return None

    # TODO: add subdirs support
    # check for extension-specific entry first (eg 'setup.ps1')
    # TODO: str/bytes on extension/name munging
    if acr.subdirs:
        subdir_qualified_resource = '.'.join([acr.subdirs, acr.resource])
    else:
        subdir_qualified_resource = acr.resource

    entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource + extension, None)
    if not entry:
        # try for extension-agnostic entry
        entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource, None)
    return entry
def _find_fq_plugin(self, fq_name, extension, plugin_load_context):
    """Search builtin paths to find a plugin. No external paths are searched,
    meaning plugins inside roles inside collections will be ignored.

    Consults collection routing metadata first (deprecation/tombstone/redirect),
    then looks for an exact filename match, then a fuzzy any-extension match.
    """

    plugin_load_context.resolved = False

    plugin_type = AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir)

    acr = AnsibleCollectionRef.from_fqcr(fq_name, plugin_type)

    # check collection metadata to see if any special handling is required for this plugin
    routing_metadata = self._query_collection_routing_meta(acr, plugin_type, extension=extension)

    # TODO: factor this into a wrapper method
    if routing_metadata:
        deprecation = routing_metadata.get('deprecation', None)

        # this will no-op if there's no deprecation metadata for this plugin
        plugin_load_context.record_deprecation(fq_name, deprecation, acr.collection)

        tombstone = routing_metadata.get('tombstone', None)

        # FIXME: clean up text gen
        if tombstone:
            # Plugin was removed outright; resolution "succeeds" at the
            # tombstone but loading raises so the user gets the removal text.
            removal_date = tombstone.get('removal_date')
            removal_version = tombstone.get('removal_version')
            warning_text = tombstone.get('warning_text') or ''
            warning_text = '{0} has been removed.{1}{2}'.format(fq_name, ' ' if warning_text else '', warning_text)
            removed_msg = display.get_deprecation_message(msg=warning_text, version=removal_version,
                                                         date=removal_date, removed=True,
                                                         collection_name=acr.collection)
            plugin_load_context.removal_date = removal_date
            plugin_load_context.removal_version = removal_version
            plugin_load_context.resolved = True
            plugin_load_context.exit_reason = removed_msg
            raise AnsiblePluginRemovedError(removed_msg, plugin_load_context=plugin_load_context)

        redirect = routing_metadata.get('redirect', None)

        if redirect:
            # FIXME: remove once this is covered in debug or whatever
            display.vv("redirecting (type: {0}) {1} to {2}".format(plugin_type, fq_name, redirect))
            # The name doing the redirection is added at the beginning of _resolve_plugin_step,
            # but if the unqualified name is used in conjunction with the collections keyword, only
            # the unqualified name is in the redirect list.
            if fq_name not in plugin_load_context.redirect_list:
                plugin_load_context.redirect_list.append(fq_name)
            return plugin_load_context.redirect(redirect)
        # TODO: non-FQCN case, do we support `.` prefix for current collection, assume it with no dots, require it for subdirs in current, or ?

    n_resource = to_native(acr.resource, errors='strict')
    # we want this before the extension is added
    full_name = '{0}.{1}'.format(acr.n_python_package_name, n_resource)

    if extension:
        n_resource += extension

    pkg = sys.modules.get(acr.n_python_package_name)
    if not pkg:
        # FIXME: there must be cheaper/safer way to do this
        try:
            pkg = import_module(acr.n_python_package_name)
        except ImportError:
            return plugin_load_context.nope('Python package {0} not found'.format(acr.n_python_package_name))

    pkg_path = os.path.dirname(pkg.__file__)

    n_resource_path = os.path.join(pkg_path, n_resource)

    # FIXME: and is file or file link or ...
    if os.path.exists(n_resource_path):
        return plugin_load_context.resolve(
            full_name, to_text(n_resource_path), acr.collection, 'found exact match for {0} in {1}'.format(full_name, acr.collection))

    if extension:
        # the request was extension-specific, don't try for an extensionless match
        return plugin_load_context.nope('no match for {0} in {1}'.format(to_text(n_resource), acr.collection))

    # look for any matching extension in the package location (sans filter)
    found_files = [f
                   for f in glob.iglob(os.path.join(pkg_path, n_resource) + '.*')
                   if os.path.isfile(f) and not f.endswith(C.MODULE_IGNORE_EXTS)]

    if not found_files:
        return plugin_load_context.nope('failed fuzzy extension match for {0} in {1}'.format(full_name, acr.collection))

    if len(found_files) > 1:
        # TODO: warn?
        pass

    return plugin_load_context.resolve(
        full_name, to_text(found_files[0]), acr.collection, 'found fuzzy extension match for {0} in {1}'.format(full_name, acr.collection))
def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
    ''' Find a plugin named name '''
    # Thin wrapper over find_plugin_with_context that only surfaces the path.
    context = self.find_plugin_with_context(name, mod_type, ignore_deprecated, check_aliases, collection_list)
    if context.resolved and context.plugin_resolved_path:
        return context.plugin_resolved_path
    return None
def find_plugin_with_context(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
    ''' Find a plugin named name, returning contextual info about the load, recursively resolving redirection '''
    plugin_load_context = PluginLoadContext()
    plugin_load_context.original_name = name
    # Loop until resolution terminates (resolved or failed); each pending
    # redirect triggers another _resolve_plugin_step pass under the new name.
    while True:
        result = self._resolve_plugin_step(name, mod_type, ignore_deprecated, check_aliases, collection_list, plugin_load_context=plugin_load_context)
        if result.pending_redirect:
            # A name redirecting (directly or transitively) to itself is fatal.
            if result.pending_redirect in result.redirect_list:
                raise AnsiblePluginCircularRedirect('plugin redirect loop resolving {0} (path: {1})'.format(result.original_name, result.redirect_list))
            name = result.pending_redirect
            result.pending_redirect = None
            plugin_load_context = result
        else:
            break

    # TODO: smuggle these to the controller when we're in a worker, reduce noise from normal things like missing plugin packages during collection search
    if plugin_load_context.error_list:
        display.warning("errors were encountered during the plugin load for {0}:\n{1}".format(name, plugin_load_context.error_list))

    # TODO: display/return import_error_list? Only useful for forensics...

    # FIXME: store structured deprecation data in PluginLoadContext and use display.deprecate
    # if plugin_load_context.deprecated and C.config.get_config_value('DEPRECATION_WARNINGS'):
    #     for dw in plugin_load_context.deprecation_warnings:
    #         # TODO: need to smuggle these to the controller if we're in a worker context
    #         display.warning('[DEPRECATION WARNING] ' + dw)

    return plugin_load_context
# FIXME: name bikeshed
def _resolve_plugin_step(self, name, mod_type='', ignore_deprecated=False,
                         check_aliases=False, collection_list=None, plugin_load_context=None):
    """One resolution pass for ``name``: apply plugin filters, pick the
    filename suffix to search for, and dispatch to collection (FQCN) or
    legacy path-based lookup. Returns the (possibly redirecting) context.
    """
    # BUG FIX: the default used to be a module-level PluginLoadContext()
    # instance, a shared mutable default that accumulated state across
    # calls; create a fresh context per call instead.
    if plugin_load_context is None:
        plugin_load_context = PluginLoadContext()
    if not plugin_load_context:
        raise ValueError('A PluginLoadContext is required')

    plugin_load_context.redirect_list.append(name)
    plugin_load_context.resolved = False

    global _PLUGIN_FILTERS
    if name in _PLUGIN_FILTERS[self.package]:
        plugin_load_context.exit_reason = '{0} matched a defined plugin filter'.format(name)
        return plugin_load_context

    if mod_type:
        suffix = mod_type
    elif self.class_name:
        # Ansible plugins that run in the controller process (most plugins)
        suffix = '.py'
    else:
        # Only Ansible Modules.  Ansible modules can be any executable so
        # they can have any suffix
        suffix = ''

    # FIXME: need this right now so we can still load shipped PS module_utils- come up with a more robust solution
    if (AnsibleCollectionRef.is_valid_fqcr(name) or collection_list) and not name.startswith('Ansible'):
        if '.' in name or not collection_list:
            candidates = [name]
        else:
            candidates = ['{0}.{1}'.format(c, name) for c in collection_list]

        for candidate_name in candidates:
            try:
                plugin_load_context.load_attempts.append(candidate_name)
                # HACK: refactor this properly
                if candidate_name.startswith('ansible.legacy'):
                    # 'ansible.legacy' refers to the plugin finding behavior used before collections existed.
                    # They need to search 'library' and the various '*_plugins' directories in order to find the file.
                    plugin_load_context = self._find_plugin_legacy(name.replace('ansible.legacy.', '', 1),
                                                                   plugin_load_context, ignore_deprecated, check_aliases, suffix)
                else:
                    # 'ansible.builtin' should be handled here. This means only internal, or builtin, paths are searched.
                    plugin_load_context = self._find_fq_plugin(candidate_name, suffix, plugin_load_context=plugin_load_context)

                    # Pending redirects are added to the redirect_list at the beginning of _resolve_plugin_step.
                    # Once redirects are resolved, ensure the final FQCN is added here.
                    # e.g. 'ns.coll.module' is included rather than only 'module' if a collections list is provided:
                    # - module:
                    #   collections: ['ns.coll']
                    if plugin_load_context.resolved and candidate_name not in plugin_load_context.redirect_list:
                        plugin_load_context.redirect_list.append(candidate_name)

                if plugin_load_context.resolved or plugin_load_context.pending_redirect:  # if we got an answer or need to chase down a redirect, return
                    return plugin_load_context
            except (AnsiblePluginRemovedError, AnsiblePluginCircularRedirect, AnsibleCollectionUnsupportedVersionError):
                # these are generally fatal, let them fly
                raise
            except ImportError as ie:
                plugin_load_context.import_error_list.append(ie)
            except Exception as ex:
                # FIXME: keep actual errors, not just assembled messages
                plugin_load_context.error_list.append(to_native(ex))

        if plugin_load_context.error_list:
            display.debug(msg='plugin lookup for {0} failed; errors: {1}'.format(name, '; '.join(plugin_load_context.error_list)))

        plugin_load_context.exit_reason = 'no matches found for {0}'.format(name)

        return plugin_load_context

    # if we got here, there's no collection list and it's not an FQ name, so do legacy lookup

    return self._find_plugin_legacy(name, plugin_load_context, ignore_deprecated, check_aliases, suffix)
def _find_plugin_legacy(self, name, plugin_load_context, ignore_deprecated=False, check_aliases=False, suffix=None):
    """Search library and various *_plugins paths in order to find the file.
    This was behavior prior to the existence of collections.

    Populates the per-suffix plugin path cache while scanning; falls back to
    the '_'-prefixed deprecated/alias name and finally an ansible.builtin
    redirect before giving up.
    """
    plugin_load_context.resolved = False

    if check_aliases:
        name = self.aliases.get(name, name)

    # The particular cache to look for modules within.  This matches the
    # requested mod_type
    pull_cache = self._plugin_path_cache[suffix]
    try:
        path_with_context = pull_cache[name]
        plugin_load_context.plugin_resolved_path = path_with_context.path
        plugin_load_context.plugin_resolved_name = name
        plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
        plugin_load_context.resolved = True
        return plugin_load_context
    except KeyError:
        # Cache miss.  Now let's find the plugin
        pass

    # TODO: Instead of using the self._paths cache (PATH_CACHE) and
    #       self._searched_paths we could use an iterator.  Before enabling that
    #       we need to make sure we don't want to add additional directories
    #       (add_directory()) once we start using the iterator.
    #       We can use _get_paths_with_context() since add_directory() forces a cache refresh.
    for path_with_context in (p for p in self._get_paths_with_context() if p.path not in self._searched_paths and os.path.isdir(to_bytes(p.path))):
        path = path_with_context.path
        b_path = to_bytes(path)
        display.debug('trying %s' % path)
        plugin_load_context.load_attempts.append(path)
        internal = path_with_context.internal
        try:
            full_paths = (os.path.join(b_path, f) for f in os.listdir(b_path))
        except OSError as e:
            display.warning("Error accessing plugin paths: %s" % to_text(e))
            # BUG FIX: without this continue, full_paths is unbound (or a
            # stale, exhausted generator from a previous iteration) and the
            # loop below raises UnboundLocalError on the first failing dir.
            continue

        for full_path in (to_native(f) for f in full_paths if os.path.isfile(f) and not f.endswith(b'__init__.py')):
            full_name = os.path.basename(full_path)

            # HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
            # FIXME: I believe this is only correct for modules and module_utils.
            # For all other plugins we want .pyc and .pyo should be valid
            if any(full_path.endswith(x) for x in C.MODULE_IGNORE_EXTS):
                continue
            splitname = os.path.splitext(full_name)
            base_name = splitname[0]
            try:
                extension = splitname[1]
            except IndexError:
                extension = ''

            # everything downstream expects unicode
            full_path = to_text(full_path, errors='surrogate_or_strict')
            # Module found, now enter it into the caches that match this file
            if base_name not in self._plugin_path_cache['']:
                self._plugin_path_cache[''][base_name] = PluginPathContext(full_path, internal)

            if full_name not in self._plugin_path_cache['']:
                self._plugin_path_cache[''][full_name] = PluginPathContext(full_path, internal)

            if base_name not in self._plugin_path_cache[extension]:
                self._plugin_path_cache[extension][base_name] = PluginPathContext(full_path, internal)

            if full_name not in self._plugin_path_cache[extension]:
                self._plugin_path_cache[extension][full_name] = PluginPathContext(full_path, internal)

        self._searched_paths.add(path)
        try:
            path_with_context = pull_cache[name]
            plugin_load_context.plugin_resolved_path = path_with_context.path
            plugin_load_context.plugin_resolved_name = name
            plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
            plugin_load_context.resolved = True
            return plugin_load_context
        except KeyError:
            # Didn't find the plugin in this directory. Load modules from the next one
            pass

    # if nothing is found, try finding alias/deprecated
    if not name.startswith('_'):
        alias_name = '_' + name
        # We've already cached all the paths at this point
        if alias_name in pull_cache:
            path_with_context = pull_cache[alias_name]
            if not ignore_deprecated and not os.path.islink(path_with_context.path):
                # FIXME: this is not always the case, some are just aliases
                display.deprecated('%s is kept for backwards compatibility but usage is discouraged. '  # pylint: disable=ansible-deprecated-no-version
                                   'The module documentation details page may explain more about this rationale.' % name.lstrip('_'))
            plugin_load_context.plugin_resolved_path = path_with_context.path
            plugin_load_context.plugin_resolved_name = alias_name
            plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
            plugin_load_context.resolved = True
            return plugin_load_context

    # last ditch, if it's something that can be redirected, look for a builtin redirect before giving up
    candidate_fqcr = 'ansible.builtin.{0}'.format(name)
    if '.' not in name and AnsibleCollectionRef.is_valid_fqcr(candidate_fqcr):
        return self._find_fq_plugin(fq_name=candidate_fqcr, extension=suffix, plugin_load_context=plugin_load_context)

    return plugin_load_context.nope('{0} is not eligible for last-chance resolution'.format(name))
def has_plugin(self, name, collection_list=None):
    """Return True if a plugin named ``name`` exists, False otherwise.

    Non-Ansible exceptions raised while probing (typically innocuous
    type/package loading failures during collection imports) are logged
    and treated as "not found".
    """
    try:
        return self.find_plugin(name, collection_list=collection_list) is not None
    except Exception as ex:
        if isinstance(ex, AnsibleError):
            raise
        # log and continue, likely an innocuous type/package loading failure in collections import
        display.debug('has_plugin error: {0}'.format(to_text(ex)))
        # Previously this path fell off the end and returned None; a
        # predicate (also used as __contains__) should return a bool.
        return False

__contains__ = has_plugin
def _load_module_source(self, name, path):
    """Import the Python source file at ``path`` and return the module object.

    The module is registered in ``sys.modules`` under a namespaced name
    (prefixed with ``self.package`` unless it is already a collection name)
    so plugins of different types cannot shadow each other, and repeated
    calls return the already-imported module.
    """
    # avoid collisions across plugins
    if name.startswith('ansible_collections.'):
        full_name = name
    else:
        full_name = '.'.join([self.package, name])

    if full_name in sys.modules:
        # Avoids double loading, See https://github.com/ansible/ansible/issues/13110
        return sys.modules[full_name]

    # Plugin files may emit RuntimeWarnings on import; suppress them so
    # loading stays quiet.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        if imp is None:
            # Modern path: standard importlib machinery.
            spec = importlib.util.spec_from_file_location(to_native(full_name), to_native(path))
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            sys.modules[full_name] = module
        else:
            # Legacy path via the deprecated imp module (`imp` is presumably
            # set to None elsewhere in this file when unavailable — confirm).
            with open(to_bytes(path), 'rb') as module_file:
                # to_native is used here because imp.load_source's path is for tracebacks and python's traceback formatting uses native strings
                module = imp.load_source(to_native(full_name), to_native(path), module_file)
    return module
def _update_object(self, obj, name, path, redirected_names=None):
# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
setattr(obj, '_load_name', name)
setattr(obj, '_redirected_names', redirected_names or [])
def get(self, name, *args, **kwargs):
    """Resolve and return the plugin object itself, discarding load context."""
    context_result = self.get_with_context(name, *args, **kwargs)
    return context_result.object
def get_with_context(self, name, *args, **kwargs):
    """Resolve, load and (unless class_only) instantiate the named plugin.

    :arg name: plugin name; may be an alias or a collection-qualified name.
    :kwarg class_only: if True, return the plugin class, not an instance.
    :kwarg collection_list: collections to search for unqualified names.
    :returns: ``get_with_context_result(obj_or_None, plugin_load_context)``.
        ``obj`` is None when resolution fails, the base-class check fails,
        or the plugin is an unimplemented abstract base class.
    """
    found_in_cache = True
    class_only = kwargs.pop('class_only', False)
    collection_list = kwargs.pop('collection_list', None)
    if name in self.aliases:
        name = self.aliases[name]
    plugin_load_context = self.find_plugin_with_context(name, collection_list=collection_list)
    if not plugin_load_context.resolved or not plugin_load_context.plugin_resolved_path:
        # FIXME: this is probably an error (eg removed plugin)
        return get_with_context_result(None, plugin_load_context)

    name = plugin_load_context.plugin_resolved_name
    path = plugin_load_context.plugin_resolved_path
    redirected_names = plugin_load_context.redirect_list or []

    # Load-and-cache the module on first use of this path.
    if path not in self._module_cache:
        self._module_cache[path] = self._load_module_source(name, path)
        self._load_config_defs(name, self._module_cache[path], path)
        found_in_cache = False

    obj = getattr(self._module_cache[path], self.class_name)
    if self.base_class:
        # The import path is hardcoded and should be the right place,
        # so we are not expecting an ImportError.
        module = __import__(self.package, fromlist=[self.base_class])
        # Check whether this obj has the required base class.
        try:
            plugin_class = getattr(module, self.base_class)
        except AttributeError:
            return get_with_context_result(None, plugin_load_context)
        if not issubclass(obj, plugin_class):
            return get_with_context_result(None, plugin_load_context)

    # FIXME: update this to use the load context
    self._display_plugin_load(self.class_name, name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)

    if not class_only:
        try:
            # A plugin may need to use its _load_name in __init__ (for example, to set
            # or get options from config), so update the object before using the constructor
            instance = object.__new__(obj)
            self._update_object(instance, name, path, redirected_names)
            obj.__init__(instance, *args, **kwargs)
            obj = instance
        except TypeError as e:
            if "abstract" in e.args[0]:
                # Abstract Base Class.  The found plugin file does not
                # fully implement the defined interface.
                return get_with_context_result(None, plugin_load_context)
            raise

    self._update_object(obj, name, path, redirected_names)
    return get_with_context_result(obj, plugin_load_context)
def _display_plugin_load(self, class_name, name, searched_paths, path, found_in_cache=None, class_only=None):
    """Emit a debug line describing how a plugin was loaded.

    Skips all string formatting unless debug output is enabled, so the
    common (non-debug) case pays nothing.
    """
    if not C.DEFAULT_DEBUG:
        return
    parts = ["Loading %s '%s' from %s" % (class_name, os.path.basename(name), path)]
    if len(searched_paths) > 1:
        parts.append('(searched paths: %s)' % self.format_paths(searched_paths))
    if found_in_cache or class_only:
        parts.append('(found_in_cache=%s, class_only=%s)' % (found_in_cache, class_only))
    display.debug(' '.join(parts))
def all(self, *args, **kwargs):
    '''
    Iterate through all plugins of this type

    A plugin loader is initialized with a specific type. This function is an iterator returning
    all of the plugins of that type to the caller.

    :kwarg path_only: If this is set to True, then we return the paths to where the plugins reside
        instead of an instance of the plugin. This conflicts with class_only and both should
        not be set.
    :kwarg class_only: If this is set to True then we return the python class which implements
        a plugin rather than an instance of the plugin. This conflicts with path_only and both
        should not be set.
    :kwarg _dedupe: By default, we only return one plugin per plugin name. Deduplication happens
        in the same way as the :meth:`get` and :meth:`find_plugin` methods resolve which plugin
        should take precedence. If this is set to False, then we return all of the plugins
        found, including those with duplicate names. In the case of duplicates, the order in
        which they are returned is the one that would take precedence first, followed by the
        others in decreasing precedence order. This should only be used by subclasses which
        want to manage their own deduplication of the plugins.
    :*args: Any extra arguments are passed to each plugin when it is instantiated.
    :**kwargs: Any extra keyword arguments are passed to each plugin when it is instantiated.
    '''
    # TODO: Change the signature of this method to:
    # def all(return_type='instance', args=None, kwargs=None):
    #     if args is None: args = []
    #     if kwargs is None: kwargs = {}
    #   return_type can be instance, class, or path.
    #   These changes will mean that plugin parameters won't conflict with our params and
    #   will also make it impossible to request both a path and a class at the same time.
    #
    #   Move _dedupe to be a class attribute, CUSTOM_DEDUPE, with subclasses for filters and
    #   tests setting it to True

    global _PLUGIN_FILTERS

    dedupe = kwargs.pop('_dedupe', True)
    path_only = kwargs.pop('path_only', False)
    class_only = kwargs.pop('class_only', False)
    # Having both path_only and class_only is a coding bug
    if path_only and class_only:
        raise AnsibleError('Do not set both path_only and class_only when calling PluginLoader.all()')

    all_matches = []
    found_in_cache = True

    # Gather every candidate .py file from every configured search path.
    for i in self._get_paths():
        all_matches.extend(glob.glob(to_native(os.path.join(i, "*.py"))))

    loaded_modules = set()
    for path in sorted(all_matches, key=os.path.basename):
        name = os.path.splitext(path)[0]
        basename = os.path.basename(name)

        if basename == '__init__' or basename in _PLUGIN_FILTERS[self.package]:
            # either empty or ignored by the module blocklist
            continue

        if basename == 'base' and self.package == 'ansible.plugins.cache':
            # cache has legacy 'base.py' file, which is wrapper for __init__.py
            continue

        if dedupe and basename in loaded_modules:
            continue
        loaded_modules.add(basename)

        if path_only:
            yield path
            continue

        if path not in self._module_cache:
            try:
                if self.subdir in ('filter_plugins', 'test_plugins'):
                    # filter and test plugin files can contain multiple plugins
                    # they must have a unique python module name to prevent them from shadowing each other
                    full_name = '{0}_{1}'.format(abs(hash(path)), basename)
                else:
                    full_name = basename
                module = self._load_module_source(full_name, path)
                self._load_config_defs(basename, module, path)
            except Exception as e:
                display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
                continue
            self._module_cache[path] = module
            found_in_cache = False

        try:
            obj = getattr(self._module_cache[path], self.class_name)
        except AttributeError as e:
            display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
            continue

        if self.base_class:
            # The import path is hardcoded and should be the right place,
            # so we are not expecting an ImportError.
            module = __import__(self.package, fromlist=[self.base_class])
            # Check whether this obj has the required base class.
            try:
                plugin_class = getattr(module, self.base_class)
            except AttributeError:
                continue
            if not issubclass(obj, plugin_class):
                continue

        self._display_plugin_load(self.class_name, basename, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)

        if not class_only:
            try:
                obj = obj(*args, **kwargs)
            except TypeError as e:
                display.warning("Skipping plugin (%s) as it seems to be incomplete: %s" % (path, to_text(e)))
                # NOTE(review): despite the "Skipping" warning, execution falls
                # through here, so the plugin *class* is still updated and
                # yielded below — confirm this is intended.

        self._update_object(obj, basename, path)
        yield obj
class Jinja2Loader(PluginLoader):
    """PluginLoader specialised for Jinja2 filter/test plugins.

    Unlike other plugin types, one Ansible plugin file can contain many
    Jinja2 plugins of the same type, so the base class's file == plugin
    assumptions and dedupe logic do not apply here; all() behaves
    differently as a result.
    """

    def find_plugin(self, name, collection_list=None):
        """Only supported for collection-qualified (dotted) names."""
        # NOTE: a dot is a weak heuristic for "collection reference";
        # AnsibleCollectionRef.is_valid_fqcr(name) or collection_list would
        # be the accurate check.
        if '.' in name:
            return super(Jinja2Loader, self).find_plugin(name, collection_list=collection_list)

        # Nothing currently calls this with a bare name.
        raise AnsibleError('No code should call "find_plugin" for Jinja2Loaders (Not implemented)')

    def get(self, name, *args, **kwargs):
        """Only supported for collection-qualified (dotted) names."""
        # NOTE: same weak collection-detection heuristic as find_plugin above.
        if '.' in name:
            return super(Jinja2Loader, self).get(name, *args, **kwargs)

        # Nothing currently calls this with a bare name.
        raise AnsibleError('No code should call "get" for Jinja2Loaders (Not implemented)')

    def all(self, *args, **kwargs):
        """Return every legacy Jinja2 plugin file, lowest precedence first.

        Differences from :meth:`PluginLoader.all`:

        * No deduplication of file names here: one file can hold many Jinja2
          plugins, and callers dedupe by the inner jinja2 plugin names as
          they merge each file's plugins into a dict.
        * The file list is reversed so that, during that merge, plugins from
          higher-precedence files overwrite same-named plugins added earlier.
          (This leaks knowledge of the caller into the loader; folding more
          of that common code in here would fix the encapsulation.)
        * A list is returned rather than a generator — callers only need an
          iterable, and reversing requires materializing anyway.
        * Collection plugins are NOT returned, only those expected under
          'ansible.legacy'.
        """
        # Calling code deduplicates jinja2 plugin names per file, not us.
        kwargs['_dedupe'] = False

        # TODO: move this to initialization and extract/dedupe plugin names in
        # the loader itself, offsetting the work from the caller; it would have
        # to cache/refresh on add_directory. Alternatively always prepend
        # 'ansible.legacy' and force the collection path to load/find plugins
        # (compatibility to be checked) — that would also enable get/find_plugin
        # for these plugin types.
        return list(reversed(list(super(Jinja2Loader, self).all(*args, **kwargs))))
def _load_plugin_filter():
    """Load the user-configured plugin blacklist file, if any.

    Returns a ``defaultdict(frozenset)`` mapping package names (e.g.
    'ansible.modules') to frozensets of plugin basenames to ignore.  Any
    parse problem results in a warning and an empty mapping, except for a
    blacklisted 'stat' module, which is a hard error.
    """
    filters = defaultdict(frozenset)
    user_set = False
    if C.PLUGIN_FILTERS_CFG is None:
        filter_cfg = '/etc/ansible/plugin_filters.yml'
    else:
        filter_cfg = C.PLUGIN_FILTERS_CFG
        user_set = True

    if os.path.exists(filter_cfg):
        with open(filter_cfg, 'rb') as f:
            try:
                filter_data = from_yaml(f.read())
            except Exception as e:
                display.warning(u'The plugin filter file, {0} was not parsable.'
                                u' Skipping: {1}'.format(filter_cfg, to_text(e)))
                return filters

        try:
            version = filter_data['filter_version']
        except KeyError:
            display.warning(u'The plugin filter file, {0} was invalid.'
                            u' Skipping.'.format(filter_cfg))
            return filters

        # Try to convert for people specifying version as a float instead of string
        version = to_text(version)
        version = version.strip()

        if version == u'1.0':
            # Modules and action plugins share the same blacklist since the difference between the
            # two isn't visible to the users
            try:
                filters['ansible.modules'] = frozenset(filter_data['module_blacklist'])
            except TypeError:
                display.warning(u'Unable to parse the plugin filter file {0} as'
                                u' module_blacklist is not a list.'
                                u' Skipping.'.format(filter_cfg))
                return filters
            filters['ansible.plugins.action'] = filters['ansible.modules']
        else:
            display.warning(u'The plugin filter file, {0} was a version not recognized by this'
                            u' version of Ansible. Skipping.'.format(filter_cfg))
    else:
        # Only warn about a missing file when the user explicitly pointed at one.
        if user_set:
            display.warning(u'The plugin filter file, {0} does not exist.'
                            u' Skipping.'.format(filter_cfg))

    # Specialcase the stat module as Ansible can run very few things if stat is blacklisted.
    if 'stat' in filters['ansible.modules']:
        raise AnsibleError('The stat module was specified in the module blacklist file, {0}, but'
                           ' Ansible will not function without the stat module. Please remove stat'
                           ' from the blacklist.'.format(to_native(filter_cfg)))

    return filters
# since we don't want the actual collection loader understanding metadata, we'll do it in an event handler
def _on_collection_load_handler(collection_name, collection_path):
    """Validate a collection's requires_ansible constraint as it is loaded."""
    display.vvvv(to_text('Loading collection {0} from {1}'.format(collection_name, collection_path)))

    collection_meta = _get_collection_metadata(collection_name)

    try:
        requirement = collection_meta.get('requires_ansible', '')
        if not _does_collection_support_ansible_version(requirement, ansible_version):
            # The operator decides whether a mismatch warns or is fatal.
            on_mismatch = C.config.get_config_value('COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH')
            msg = 'Collection {0} does not support Ansible version {1}'.format(collection_name, ansible_version)
            if on_mismatch == 'warning':
                display.warning(msg)
            elif on_mismatch == 'error':
                raise AnsibleCollectionUnsupportedVersionError(msg)
    except AnsibleError:
        raise
    except Exception as ex:
        # Malformed metadata should never stop the load; just tell the user.
        display.warning('Error parsing collection metadata requires_ansible value from collection {0}: {1}'.format(collection_name, ex))
def _does_collection_support_ansible_version(requirement_string, ansible_version):
    """Return True when ``ansible_version`` satisfies a requires_ansible spec.

    Both an empty spec and a missing ``packaging`` module are treated
    permissively (True), the latter with a warning.
    """
    if not requirement_string:
        return True

    if not SpecifierSet:
        display.warning('packaging Python module unavailable; unable to validate collection Ansible version requirements')
        return True

    spec = SpecifierSet(requirement_string)

    # ignore prerelease/postrelease/beta/dev flags for simplicity
    return spec.contains(Version(ansible_version).base_version)
def _configure_collection_loader():
    """Install the Ansible collection finder into the import machinery.

    Idempotent: warns and returns if a finder is already configured.
    """
    if AnsibleCollectionConfig.collection_finder:
        display.warning('AnsibleCollectionFinder has already been configured')
        return

    collection_paths = C.config.get_config_value('COLLECTIONS_PATHS')
    scan_sys_path = C.config.get_config_value('COLLECTIONS_SCAN_SYS_PATH')
    _AnsibleCollectionFinder(collection_paths, scan_sys_path)._install()

    # this should succeed now
    AnsibleCollectionConfig.on_collection_load += _on_collection_load_handler
# TODO: All of the following is initialization code  It should be moved inside of an initialization
# function which is called at some point early in the ansible and ansible-playbook CLI startup.

_PLUGIN_FILTERS = _load_plugin_filter()

_configure_collection_loader()

# Each loader below is parameterized with: the class name each plugin file
# must define, the package the plugins live under, the config setting that
# supplies extra search paths, and the subdirectory searched within roles
# and configured paths.

# doc fragments first
fragment_loader = PluginLoader(
    'ModuleDocFragment',
    'ansible.plugins.doc_fragments',
    C.DOC_FRAGMENT_PLUGIN_PATH,
    'doc_fragments',
)

action_loader = PluginLoader(
    'ActionModule',
    'ansible.plugins.action',
    C.DEFAULT_ACTION_PLUGIN_PATH,
    'action_plugins',
    required_base_class='ActionBase',
)

cache_loader = PluginLoader(
    'CacheModule',
    'ansible.plugins.cache',
    C.DEFAULT_CACHE_PLUGIN_PATH,
    'cache_plugins',
)

callback_loader = PluginLoader(
    'CallbackModule',
    'ansible.plugins.callback',
    C.DEFAULT_CALLBACK_PLUGIN_PATH,
    'callback_plugins',
)

connection_loader = PluginLoader(
    'Connection',
    'ansible.plugins.connection',
    C.DEFAULT_CONNECTION_PLUGIN_PATH,
    'connection_plugins',
    aliases={'paramiko': 'paramiko_ssh'},
    required_base_class='ConnectionBase',
)

shell_loader = PluginLoader(
    'ShellModule',
    'ansible.plugins.shell',
    # NOTE(review): this argument is the literal string 'shell_plugins'
    # rather than a C.* config constant like the other loaders — confirm
    # this is intended (no dedicated config setting for shell plugins).
    'shell_plugins',
    'shell_plugins',
)

module_loader = PluginLoader(
    '',
    'ansible.modules',
    C.DEFAULT_MODULE_PATH,
    'library',
)

module_utils_loader = PluginLoader(
    '',
    'ansible.module_utils',
    C.DEFAULT_MODULE_UTILS_PATH,
    'module_utils',
)

# NB: dedicated loader is currently necessary because PS module_utils expects "with subdir" lookup where
# regular module_utils doesn't. This can be revisited once we have more granular loaders.
ps_module_utils_loader = PluginLoader(
    '',
    'ansible.module_utils',
    C.DEFAULT_MODULE_UTILS_PATH,
    'module_utils',
)

lookup_loader = PluginLoader(
    'LookupModule',
    'ansible.plugins.lookup',
    C.DEFAULT_LOOKUP_PLUGIN_PATH,
    'lookup_plugins',
    required_base_class='LookupBase',
)

# filter and test plugins use the Jinja2-aware loader (multiple plugins per file)
filter_loader = Jinja2Loader(
    'FilterModule',
    'ansible.plugins.filter',
    C.DEFAULT_FILTER_PLUGIN_PATH,
    'filter_plugins',
)

test_loader = Jinja2Loader(
    'TestModule',
    'ansible.plugins.test',
    C.DEFAULT_TEST_PLUGIN_PATH,
    'test_plugins'
)

strategy_loader = PluginLoader(
    'StrategyModule',
    'ansible.plugins.strategy',
    C.DEFAULT_STRATEGY_PLUGIN_PATH,
    'strategy_plugins',
    required_base_class='StrategyBase',
)

terminal_loader = PluginLoader(
    'TerminalModule',
    'ansible.plugins.terminal',
    C.DEFAULT_TERMINAL_PLUGIN_PATH,
    'terminal_plugins',
    required_base_class='TerminalBase'
)

vars_loader = PluginLoader(
    'VarsModule',
    'ansible.plugins.vars',
    C.DEFAULT_VARS_PLUGIN_PATH,
    'vars_plugins',
)

cliconf_loader = PluginLoader(
    'Cliconf',
    'ansible.plugins.cliconf',
    C.DEFAULT_CLICONF_PLUGIN_PATH,
    'cliconf_plugins',
    required_base_class='CliconfBase'
)

netconf_loader = PluginLoader(
    'Netconf',
    'ansible.plugins.netconf',
    C.DEFAULT_NETCONF_PLUGIN_PATH,
    'netconf_plugins',
    required_base_class='NetconfBase'
)

inventory_loader = PluginLoader(
    'InventoryModule',
    'ansible.plugins.inventory',
    C.DEFAULT_INVENTORY_PLUGIN_PATH,
    'inventory_plugins'
)

httpapi_loader = PluginLoader(
    'HttpApi',
    'ansible.plugins.httpapi',
    C.DEFAULT_HTTPAPI_PLUGIN_PATH,
    'httpapi_plugins',
    required_base_class='HttpApiBase',
)

become_loader = PluginLoader(
    'BecomeModule',
    'ansible.plugins.become',
    C.BECOME_PLUGIN_PATH,
    'become_plugins'
)
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socket
import sys
import unittest
def run_tests(test_case, driver, webserver):
    """Run a webdriver test case against the local test webserver.

    :param test_case: name of the test case module under
        ``selenium.test.selenium.webdriver.common``; if a command-line
        argument is given, only that test method of the case is run.
    :param driver: a webdriver instance; always quit on exit.
    :param webserver: test HTTP server; started before and stopped after.
    """
    logging.basicConfig(level=logging.WARN)
    webserver.start()
    try:
        testLoader = unittest.TestLoader()
        testRunner = unittest.TextTestRunner()
        test_case_name = "selenium.test.selenium.webdriver.common.%s" % test_case
        if len(sys.argv) > 1:
            # An extra CLI argument selects a single test method.
            testMethod = sys.argv[1]
            testRunner.run(
                testLoader.loadTestsFromName(
                    "%s.%s" % (test_case_name, testMethod)))
        else:
            testRunner.run(testLoader.loadTestsFromName(test_case_name))
    finally:
        # Quit the browser and stop the server even if loading/running the
        # tests raised; previously a failure before driver.quit() leaked the
        # browser process.
        try:
            driver.quit()
        finally:
            webserver.stop()
def require_online(func):
    """Decorator: only execute the test method if the internet is accessible.

    The wrapped method probes www.google.com:80 with a 1-second timeout;
    if the probe fails the test body is skipped (returns None, which
    unittest treats as a pass).
    """
    def testMethod(self):
        probe = socket.socket()
        try:
            probe.settimeout(1)
            probe.connect(("www.google.com", 80))
        except socket.error:
            # Offline: skip the wrapped test.  (The old code returned a
            # never-called ``lambda x: None`` here, which was equivalent to
            # passing but clearly unintended.)
            return None
        finally:
            # The probe socket was previously leaked.
            probe.close()
        return func(self)

    # ``func.func_name`` was Python-2-only; ``__name__`` works on both 2 and 3
    # and keeps unittest reporting the real test name.
    testMethod.__name__ = func.__name__
    return testMethod
def convert_cookie_to_json(cookie):
    """Translate a cookie mapping into webdriver-JSON form.

    The "expires" entry (seconds, as int-parsable value) is renamed to
    "expiry" and scaled to milliseconds; all other entries are copied
    through unchanged.
    """
    def _entry(key, value):
        if key == "expires":
            return "expiry", int(value) * 1000
        return key, value

    return dict(_entry(k, v) for k, v in cookie.items())
{
"RENAMENX": {
"summary": "Renames a key only when the target key name doesn't exist.",
"complexity": "O(1)",
"group": "generic",
"since": "1.0.0",
"arity": 3,
"function": "renamenxCommand",
"history": [
[
"3.2.0",
"The command no longer returns an error when source and destination names are the same."
]
],
"command_flags": [
"WRITE",
"FAST"
],
"acl_categories": [
"KEYSPACE"
],
"key_specs": [
{
"flags": [
"RW",
"ACCESS",
"DELETE"
],
"begin_search": {
"index": {
"pos": 1
}
},
"find_keys": {
"range": {
"lastkey": 0,
"step": 1,
"limit": 0
}
}
},
{
"flags": [
"OW",
"INSERT"
],
"begin_search": {
"index": {
"pos": 2
}
},
"find_keys": {
"range": {
"lastkey": 0,
"step": 1,
"limit": 0
}
}
}
],
"arguments": [
{
"name": "key",
"type": "key",
"key_spec_index": 0
},
{
"name": "newkey",
"type": "key",
"key_spec_index": 1
}
],
"reply_schema": {
"oneOf": [
{
"description": "key was renamed to newkey",
"const": 1
},
{
"description": "new key already exists",
"const": 0
}
]
}
}
} | json | github | https://github.com/redis/redis | src/commands/renamenx.json |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" ArduPilot BiquadFilter
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Guglielmo Cassinelli"
__contact__ = "gdguglie@gmail.com"
import numpy as np
class DigitalLPF:
    """First-order digital low-pass filter (exponential smoothing).

    The smoothing coefficient ``alpha`` is derived from the cutoff and
    sample frequencies via the RC time constant; a non-positive frequency
    disables filtering (``alpha == 1`` passes samples straight through).
    """

    def __init__(self, cutoff_freq, sample_freq):
        self._cutoff_freq = cutoff_freq
        self._sample_freq = sample_freq
        self._output = 0
        self.compute_alpha()

    def compute_alpha(self):
        """Recompute the smoothing factor from the current frequencies."""
        if self._cutoff_freq > 0 and self._sample_freq > 0:
            dt = 1. / self._sample_freq
            rc = 1. / (np.pi * 2 * self._cutoff_freq)
            # Clamp for safety; dt/(dt+rc) is already in (0, 1) for valid input.
            self.alpha = np.clip(dt / (dt + rc), 0, 1)
        else:
            # Degenerate configuration: no filtering.
            self.alpha = 1.

    def apply(self, sample):
        """Feed one sample through the filter and return the new output."""
        self._output = self._output + (sample - self._output) * self.alpha
        return self._output
class BiquadFilterType:
    """Enumeration of supported biquad filter responses."""
    LPF = 0    # second-order low-pass
    PEAK = 1   # peaking EQ around the center frequency
    NOTCH = 2  # narrow band-stop
class BiquadFilter:
    """Second-order IIR (biquad) filter in direct form I.

    Coefficients follow the standard biquad formulations (cf. the RBJ
    "Audio EQ Cookbook"); ``attenuation`` and ``bandwidth`` are used only
    by the PEAK and NOTCH types.  A non-positive center frequency makes
    :meth:`apply` a pass-through.
    """

    def __init__(self, center_freq, sample_freq, type=BiquadFilterType.LPF, attenuation=10, bandwidth=15):
        self._center_freq = int(center_freq)
        self._attenuation_db = int(attenuation)  # used only by notch, use setter
        self._bandwidth_hz = int(bandwidth)  # used only by notch, use setter
        self._sample_freq = sample_freq
        self._type = type
        # Direct-form-I delay line: last two inputs and last two outputs.
        self._delayed_sample1 = 0
        self._delayed_sample2 = 0
        self._delayed_output1 = 0
        self._delayed_output2 = 0
        # Transfer-function coefficients (numerator b*, denominator a*),
        # normalized by a0 in compute_params().
        self.b0 = 0.
        self.b1 = 0.
        self.b2 = 0.
        self.a0 = 1
        self.a1 = 0.
        self.a2 = 0.
        self.compute_params()

    def get_sample_freq(self):
        return self._sample_freq

    def reset(self):
        """Clear the delay line (does not touch the coefficients)."""
        self._delayed_sample1 = 0
        self._delayed_sample2 = 0
        self._delayed_output1 = 0
        self._delayed_output2 = 0

    def get_type(self):
        return self._type

    def set_attenuation(self, attenuation_db):
        """Set peak/notch attenuation in dB and recompute coefficients."""
        self._attenuation_db = int(attenuation_db)
        self.compute_params()

    def set_bandwidth(self, bandwidth_hz):
        """Set peak/notch bandwidth in Hz and recompute coefficients."""
        self._bandwidth_hz = int(bandwidth_hz)
        self.compute_params()

    def set_center_freq(self, cutoff_freq):
        """Set center/cutoff frequency in Hz and recompute coefficients."""
        self._center_freq = int(cutoff_freq)
        self.compute_params()

    def compute_params(self):
        """Recompute the normalized biquad coefficients for the current type."""
        # Normalized angular frequency of the center/cutoff.
        omega = 2 * np.pi * self._center_freq / self._sample_freq
        sin_om = np.sin(omega)
        cos_om = np.cos(omega)

        if self._type == BiquadFilterType.LPF:
            if self._center_freq > 0:
                # Butterworth-style Q for a maximally flat low-pass.
                Q = 1 / np.sqrt(2)
                alpha = sin_om / (2 * Q)
                self.b0 = (1 - cos_om) / 2
                self.b1 = 1 - cos_om
                self.b2 = self.b0
                self.a0 = 1 + alpha
                self.a1 = -2 * cos_om
                self.a2 = 1 - alpha
        elif self._type == BiquadFilterType.PEAK:
            A = 10 ** (-self._attenuation_db / 40)

            # why not the formula below? It prevents a division by 0 when bandwidth = 2*frequency
            # NOTE(review): this still divides by zero when
            # bandwidth >= 2 * center_freq (log2 argument <= 0) — confirm
            # callers guarantee bandwidth < 2 * center_freq.
            octaves = np.log2(self._center_freq / (self._center_freq - self._bandwidth_hz / 2)) * 2
            Q = np.sqrt(2 ** octaves) / (2 ** octaves - 1)
            # Q = self._center_freq / self._bandwidth_hz

            alpha = sin_om / (2 * Q / A)
            self.b0 = 1.0 + alpha * A
            self.b1 = -2.0 * cos_om
            self.b2 = 1.0 - alpha * A
            self.a0 = 1.0 + alpha / A
            self.a1 = -2.0 * cos_om
            self.a2 = 1.0 - alpha / A
        elif self._type == BiquadFilterType.NOTCH:
            # NOTE(review): the cookbook notch alpha uses
            # sinh(ln2/2 * BW * omega / sin(omega)); here omega is
            # *multiplied* by sin_om instead — confirm intended.
            alpha = sin_om * np.sinh(np.log(2) / 2 * self._bandwidth_hz * omega * sin_om)
            self.b0 = 1
            self.b1 = -2 * cos_om
            self.b2 = self.b0
            self.a0 = 1 + alpha
            self.a1 = -2 * cos_om
            self.a2 = 1 - alpha

        # Normalize all coefficients by a0.
        self.b0 /= self.a0
        self.b1 /= self.a0
        self.b2 /= self.a0
        self.a1 /= self.a0
        self.a2 /= self.a0

    def apply(self, sample):
        """Filter one sample (direct form I); pass-through if center_freq <= 0."""
        if self._center_freq <= 0:
            return sample

        output = (self.b0 * sample + self.b1 * self._delayed_sample1 + self.b2 * self._delayed_sample2 - self.a1
                  * self._delayed_output1 - self.a2 * self._delayed_output2)

        self._delayed_sample2 = self._delayed_sample1
        self._delayed_sample1 = sample

        self._delayed_output2 = self._delayed_output1
        self._delayed_output1 = output

        return output

    def get_params(self):
        """Return the normalized coefficients (a0 is implicitly 1)."""
        return {
            "a1": self.a1,
            "a2": self.a2,
            "b0": self.b0,
            "b1": self.b1,
            "b2": self.b2,
        }

    def get_center_freq(self):
        return self._center_freq

    def get_attenuation(self):
        return self._attenuation_db

    def get_bandwidth(self):
        return self._bandwidth_hz

    def freq_response(self, f):
        """Return the magnitude response at frequency ``f`` (1 if disabled)."""
        if self._center_freq <= 0:
            return 1

        phi = (np.sin(np.pi * f * 2 / (2 * self._sample_freq))) ** 2

        r = (((self.b0 + self.b1 + self.b2) ** 2 - 4 * (self.b0 * self.b1 + 4 * self.b0 * self.b2 + self.b1 * self.b2)
              * phi + 16 * self.b0 * self.b2 * phi * phi)
             / ((1 + self.a1 + self.a2) ** 2 - 4 * (self.a1 + 4 * self.a2 + self.a1 * self.a2) * phi + 16
                * self.a2 * phi * phi))

        # if r < 0:
        #    r = 0
        return r ** .5
# Copyright (c) 2004-2009 Sergey Lyubka
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# $Id: mongoose.py 471 2009-08-30 14:30:21Z valenok $
"""
This module provides python binding for the Mongoose web server.
There are two classes defined:
Connection: - wraps all functions that accept struct mg_connection pointer
as first argument.
Mongoose: wraps all functions that accept struct mg_context pointer as
first argument.
Creating Mongoose object automatically starts server, deleting object
automatically stops it. There is no need to call mg_start() or mg_stop().
"""
import ctypes
import os
# Event codes delivered to the user callback; presumably these mirror the
# mg_event enum in mongoose.h — confirm against the matching library version.
NEW_REQUEST = 0   # a new HTTP request has arrived
HTTP_ERROR = 1    # an HTTP error is about to be sent to the client
EVENT_LOG = 2     # the server produced a log message
INIT_SSL = 3      # SSL context initialization
class mg_header(ctypes.Structure):
    """A wrapper for struct mg_header (one HTTP header name/value pair)."""
    _fields_ = [
        ('name', ctypes.c_char_p),   # header name, e.g. "Content-Type"
        ('value', ctypes.c_char_p),  # header value
    ]
class mg_request_info(ctypes.Structure):
    """A wrapper for struct mg_request_info (per-request metadata)."""
    _fields_ = [
        ('user_data', ctypes.c_char_p),
        ('request_method', ctypes.c_char_p),  # e.g. "GET", "POST"
        ('uri', ctypes.c_char_p),             # request URI
        ('http_version', ctypes.c_char_p),    # e.g. "1.1"
        ('query_string', ctypes.c_char_p),    # portion after '?', may be NULL
        ('remote_user', ctypes.c_char_p),     # authenticated user, may be NULL
        ('log_message', ctypes.c_char_p),     # populated for EVENT_LOG events
        ('remote_ip', ctypes.c_long),         # client IP address
        ('remote_port', ctypes.c_int),        # client port
        ('status_code', ctypes.c_int),        # HTTP status of the reply
        ('is_ssl', ctypes.c_int),             # nonzero for SSL connections
        ('num_headers', ctypes.c_int),        # valid entries in http_headers
        ('http_headers', mg_header * 64),     # request headers (fixed array)
    ]
mg_callback_t = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p)
class Connection(object):
    """A wrapper class for all functions that take
    struct mg_connection * as the first argument."""

    def __init__(self, mongoose, connection):
        """Wrap a raw mg_connection pointer.

        :param mongoose: owning Mongoose wrapper (provides the loaded dll).
        :param connection: integer address of the struct mg_connection.
        """
        self.m = mongoose
        self.conn = ctypes.c_void_p(connection)
        self.info = self.m.dll.mg_get_request_info(self.conn).contents

    def get_header(self, name):
        """Return the value of request header ``name`` (None if absent)."""
        val = self.m.dll.mg_get_header(self.conn, name)
        return ctypes.c_char_p(val).value

    def get_var(self, data, name):
        """Extract form variable ``name`` from ``data``; None if not found."""
        size = len(data) if data else 0
        buf = ctypes.create_string_buffer(size)
        n = self.m.dll.mg_get_var(data, size, name, buf, size)
        return buf if n >= 0 else None

    def printf(self, fmt, *args):
        """printf-style write to the client; returns bytes written."""
        val = self.m.dll.mg_printf(self.conn, fmt, *args)
        return ctypes.c_int(val).value

    def write(self, data):
        """Send raw ``data`` to the client; returns bytes written."""
        val = self.m.dll.mg_write(self.conn, data, len(data))
        return ctypes.c_int(val).value

    def read(self, size):
        """Read up to ``size`` bytes of the request body; None on EOF/error.

        Bug fix: the previous ``n <= 0 and None or buf[:n]`` could never
        evaluate to None (None is falsy, so the ``or`` arm always won) and
        returned an empty string on EOF/error instead.
        """
        buf = ctypes.create_string_buffer(size)
        n = self.m.dll.mg_read(self.conn, buf, size)
        return buf[:n] if n > 0 else None

    def send_file(self, path):
        """Stream the file at ``path`` to the client."""
        self.m.dll.mg_send_file(self.conn, path)
class Mongoose(object):
    """A wrapper class for the Mongoose shared library.

    Creating an instance starts the server (mg_start); deleting it stops
    the server (mg_stop).  Keyword arguments are passed to mg_start as
    option name/value pairs.
    """

    def __init__(self, callback, **kwargs):
        # Load the platform-appropriate shared library from the loader path.
        if os.name == 'nt':
            self.dll = ctypes.WinDLL('_mongoose.dll')
        else:
            self.dll = ctypes.CDLL('_mongoose.so')

        # Declare C return types so ctypes converts results correctly.
        self.dll.mg_start.restype = ctypes.c_void_p
        self.dll.mg_modify_passwords_file.restype = ctypes.c_int
        self.dll.mg_read.restype = ctypes.c_int
        self.dll.mg_write.restype = ctypes.c_int
        self.dll.mg_printf.restype = ctypes.c_int
        self.dll.mg_get_header.restype = ctypes.c_char_p
        self.dll.mg_get_var.restype = ctypes.c_int
        self.dll.mg_get_cookie.restype = ctypes.c_int
        self.dll.mg_get_option.restype = ctypes.c_char_p
        self.dll.mg_get_request_info.restype = ctypes.POINTER(mg_request_info)

        if callback:
            # Create a closure that will be called by the shared library.
            def func(event, connection):
                # Wrap connection pointer into the connection
                # object and call Python callback
                conn = Connection(self, connection)
                return callback(event, conn) and 1 or 0

            # Convert the closure into C callable object.  Keeping it on
            # self also prevents it from being garbage-collected while the
            # server may still invoke it.
            self.callback = mg_callback_t(func)
            # NOTE(review): setting .restype on a CFUNCTYPE *instance* is
            # unusual — confirm this has the intended effect.
            self.callback.restype = ctypes.c_char_p
        else:
            self.callback = ctypes.c_void_p(0)

        # Flatten {name: value} into [name1, value1, ..., NULL], the
        # NULL-terminated options array mg_start expects.
        args = [y for x in kwargs.items() for y in x] + [None]
        options = (ctypes.c_char_p * len(args))(*args)

        ret = self.dll.mg_start(self.callback, 0, options)
        self.ctx = ctypes.c_void_p(ret)

    def __del__(self):
        """Destructor, stop Mongoose instance."""
        self.dll.mg_stop(self.ctx)

    def get_option(self, name):
        # Return the current value of a Mongoose option by name.
        return self.dll.mg_get_option(self.ctx, name)
# -*- coding: utf-8 -*-
"""
oreos.monkeys
~~~~~~~~~~~~~
Monkeypatches.
"""
#!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create an Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it fallsback to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibilty with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceeding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~[]_"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
def _quote(str, LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in doublequotes and precede quote (with a \)
# special characters.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( int(str[j+1:j+4], 8) ) )
i = j+4
return _nulljoin(res)
# end _unquote
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
class Morsel(dict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for K in self._reserved:
dict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return K.lower() in self._reserved
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % ( self.OutputString(attrs).replace('"',r'\"'), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = self.items()
items.sort()
for K,V in items:
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d" % (self._reserved[K], V))
elif K == "secure":
RA(str(self._reserved[K]))
elif K == "httponly":
RA(str(self._reserved[K]))
else:
RA("%s=%s" % (self._reserved[K], V))
# Return the result
return _semispacejoin(result)
# end OutputString
# end Morsel class
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=\[\]\_]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(dict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
# end value_encode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.output(attrs, header) )
return sep.join(result)
# end output
__str__ = output
def __repr__(self):
L = []
items = self.items()
items.sort()
for K,V in items:
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.js_output(attrs) )
return _nulljoin(result)
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for k, v in rawdata.items():
self[k] = v
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif K.lower() in Morsel._reserved:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("SerialCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie
class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie
###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################
def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end: | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Docker.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
docker_registry.drivers.file
~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a simple filesystem based driver.
"""
import os
import shutil
from ..core import driver
from ..core import exceptions
from ..core import lru
class Storage(driver.Base):
supports_bytes_range = True
def __init__(self, path=None, config=None):
self._root_path = path or './tmp'
def _init_path(self, path=None, create=False):
path = os.path.join(self._root_path, path) if path else self._root_path
if create is True:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
return path
@lru.get
def get_content(self, path):
path = self._init_path(path)
try:
with open(path, mode='rb') as f:
d = f.read()
except Exception:
raise exceptions.FileNotFoundError('%s is not there' % path)
return d
@lru.set
def put_content(self, path, content):
path = self._init_path(path, create=True)
with open(path, mode='wb') as f:
f.write(content)
return path
def stream_read(self, path, bytes_range=None):
path = self._init_path(path)
nb_bytes = 0
total_size = 0
try:
with open(path, mode='rb') as f:
if bytes_range:
f.seek(bytes_range[0])
total_size = bytes_range[1] - bytes_range[0] + 1
while True:
buf = None
if bytes_range:
# Bytes Range is enabled
buf_size = self.buffer_size
if nb_bytes + buf_size > total_size:
# We make sure we don't read out of the range
buf_size = total_size - nb_bytes
if buf_size > 0:
buf = f.read(buf_size)
nb_bytes += len(buf)
else:
# We're at the end of the range
buf = ''
else:
buf = f.read(self.buffer_size)
if not buf:
break
yield buf
except IOError:
raise exceptions.FileNotFoundError('%s is not there' % path)
def stream_write(self, path, fp):
# Size is mandatory
path = self._init_path(path, create=True)
with open(path, mode='wb') as f:
try:
while True:
buf = fp.read(self.buffer_size)
if not buf:
break
f.write(buf)
except IOError:
pass
def list_directory(self, path=None):
prefix = ''
if path:
prefix = '%s/' % path
path = self._init_path(path)
exists = False
try:
for d in os.listdir(path):
exists = True
yield prefix + d
except Exception:
pass
if not exists:
raise exceptions.FileNotFoundError('%s is not there' % path)
def exists(self, path):
path = self._init_path(path)
return os.path.exists(path)
@lru.remove
def remove(self, path):
path = self._init_path(path)
if os.path.isdir(path):
shutil.rmtree(path)
return
try:
os.remove(path)
except OSError:
raise exceptions.FileNotFoundError('%s is not there' % path)
def get_size(self, path):
path = self._init_path(path)
try:
return os.path.getsize(path)
except OSError:
raise exceptions.FileNotFoundError('%s is not there' % path) | unknown | codeparrot/codeparrot-clean | ||
# Because division is different in Python 2 and 3
from __future__ import division
from typing import Tuple
import numpy as np
from . import _ops as math
from . import extrapolation as extrapolation
from ._config import GLOBAL_AXIS_ORDER
from ._ops import stack
from ._shape import Shape, channel, batch, spatial
from ._tensors import Tensor, TensorLike, variable_values
from ._tensors import wrap
from .extrapolation import Extrapolation
def spatial_sum(value: Tensor):
    """Sum `value` over all of its spatial dimensions, keeping batch and channel dimensions."""
    spatial_dims = value.shape.spatial.names
    return math.sum_(value, dim=spatial_dims)
def vec_abs(vec: Tensor):
    """Euclidean norm of `vec`: square root of the sum of squared components over all channel dimensions."""
    squared_sum = math.sum_(vec ** 2, dim=vec.shape.channel.names)
    return math.sqrt(squared_sum)
def vec_squared(vec: Tensor):
    """Squared length of `vec`: sum of squared components over the 'vector' channel dimension."""
    components_squared = vec ** 2
    return math.sum_(components_squared, dim=channel('vector'))
def cross_product(vec1: Tensor, vec2: Tensor):
    """
    Vector (cross) product of `vec1` and `vec2`.

    In 2D, returns the scalar z-component `v1_x * v2_y - v1_y * v2_x`
    (sign flipped when `GLOBAL_AXIS_ORDER` is not x-first).
    If `vec1` has no 'vector' dimension, it is treated as a scalar and multiplied
    with the 90°-rotated `vec2` instead.

    Raises:
        NotImplementedError: for 3D inputs (not yet implemented).
        AssertionError: for spatial ranks other than 2 or 3.
    """
    vec1 = math.tensor(vec1)
    vec2 = math.tensor(vec2)
    # Infer the spatial rank from whichever argument carries a 'vector' dimension.
    spatial_rank = vec1.vector.size if 'vector' in vec1.shape else vec2.vector.size
    if spatial_rank == 2:  # 2D cross product -> scalar (z-component)
        assert vec2.vector.exists
        if vec1.vector.exists:
            # Both arguments are vectors: standard 2D cross product.
            v1_x, v1_y = vec1.vector.unstack()
            v2_x, v2_y = vec2.vector.unstack()
            if GLOBAL_AXIS_ORDER.is_x_first:
                return v1_x * v2_y - v1_y * v2_x
            else:
                # NOTE(review): presumably unstack() yields components in storage
                # order, i.e. (y, x) here, hence the flipped sign — confirm.
                return - v1_x * v2_y + v1_y * v2_x
        else:
            # vec1 is a scalar: multiply with vec2 rotated by 90 degrees.
            v2_x, v2_y = vec2.vector.unstack()
            if GLOBAL_AXIS_ORDER.is_x_first:
                return vec1 * math.stack([-v2_y, v2_x], channel('vector'))
            else:
                return vec1 * math.stack([v2_y, -v2_x], channel('vector'))
    elif spatial_rank == 3:  # 3D cross product not yet supported
        raise NotImplementedError(f'spatial_rank={spatial_rank} not yet implemented')
    else:
        raise AssertionError(f'dims = {spatial_rank}. Vector product not available in > 3 dimensions')
def normalize_to(target: Tensor, source: Tensor, epsilon=1e-5):
    """
    Scales `target` so that its total content (summed over all non-batch dimensions)
    matches that of `source`.

    Args:
        target: tensor to rescale
        source: tensor or number providing the desired total
        epsilon: lower clamp on the target total to prevent division by zero;
            pass `None` to disable the clamp. (Default value = 1e-5)

    Returns:
        Rescaled tensor of the same shape as `target`.
    """
    current_total = math.sum_(target, dim=target.shape.non_batch.names)
    desired_total = math.sum_(source, dim=source.shape.non_batch.names)
    if epsilon is None:
        safe_total = current_total
    else:
        safe_total = math.maximum(current_total, epsilon)
    return target * (desired_total / safe_total)
def l1_loss(x) -> Tensor:
    """
    Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>1</sub>*, summing over all non-batch dimensions.

    Args:
        x: `Tensor` or `TensorLike`.
            For `TensorLike` objects, the losses of all value attributes are summed.

    Returns:
        loss: `Tensor`

    Raises:
        ValueError: if `x` is neither a `Tensor` nor a `TensorLike`.
    """
    if isinstance(x, Tensor):
        return math.sum_(abs(x), x.shape.non_batch)
    if isinstance(x, TensorLike):
        attribute_losses = [l1_loss(getattr(x, attr)) for attr in variable_values(x)]
        return sum(attribute_losses)
    raise ValueError(x)
def l2_loss(x) -> Tensor:
    """
    Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>2</sub><sup>2</sup> / 2*, summing over all non-batch dimensions.

    Args:
        x: `Tensor` or `TensorLike`.
            For `TensorLike` objects, the losses of all value attributes are summed.
            Complex tensors are reduced to their magnitude first.

    Returns:
        loss: `Tensor`

    Raises:
        ValueError: if `x` is neither a `Tensor` nor a `TensorLike`.
    """
    if isinstance(x, Tensor):
        # Use the magnitude for complex values so the square is real.
        values = abs(x) if x.dtype.kind == complex else x
        return math.sum_(values ** 2, x.shape.non_batch) * 0.5
    if isinstance(x, TensorLike):
        attribute_losses = [l2_loss(getattr(x, attr)) for attr in variable_values(x)]
        return sum(attribute_losses)
    raise ValueError(x)
def frequency_loss(x,
                   frequency_falloff: float = 100,
                   threshold=1e-5,
                   ignore_mean=False) -> Tensor:
    """
    Penalizes the squared `values` in frequency (Fourier) space.
    Lower frequencies are weighted more strongly then higher frequencies, depending on `frequency_falloff`.

    Args:
        x: `Tensor` or `TensorLike` Values to penalize, typically `actual - target`.
            For `TensorLike` objects, the losses of all value attributes are summed.
        frequency_falloff: Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally.
            *Note*: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
        threshold: Frequency amplitudes below this value are ignored.
            Setting this to zero may cause infinities or NaN values during backpropagation.
        ignore_mean: If `True`, does not penalize the mean value (frequency=0 component).

    Returns:
        Scalar loss value
    """
    if isinstance(x, Tensor):
        if ignore_mean:
            # Subtracting the mean removes the frequency-0 (DC) component.
            x -= math.mean(x, x.shape.non_batch)
        # Gaussian weighting over frequency magnitude: larger falloff -> low frequencies dominate.
        k_squared = vec_squared(math.fftfreq(x.shape.spatial))
        weights = math.exp(-0.5 * k_squared * frequency_falloff ** 2)
        diff_fft = abs_square(math.fft(x) * weights)
        # Clamp before sqrt so the gradient of sqrt at 0 does not produce inf/NaN.
        diff_fft = math.sqrt(math.maximum(diff_fft, threshold))
        return l2_loss(diff_fft)
    elif isinstance(x, TensorLike):
        return sum([frequency_loss(getattr(x, a), frequency_falloff, threshold, ignore_mean) for a in variable_values(x)])
    else:
        raise ValueError(x)
def abs_square(complex_values: Tensor) -> Tensor:
    """
    Squared magnitude |z|² = Re(z)² + Im(z)² of complex values.

    Args:
        complex_values: complex `Tensor`

    Returns:
        Tensor: real valued magnitude squared
    """
    imaginary_part = math.imag(complex_values)
    real_part = math.real(complex_values)
    return imaginary_part ** 2 + real_part ** 2
# Divergence
# def divergence(tensor, dx=1, difference='central', padding='constant', dimensions=None):
# """
# Computes the spatial divergence of a vector channel from finite differences.
#
# :param tensor: vector field; tensor of shape (batch size, spatial dimensions..., spatial rank)
# :param dx: distance between adjacent grid points (default 1)
# :param difference: type of difference, one of ('forward', 'central') (default 'forward')
# :return: tensor of shape (batch size, spatial dimensions..., 1)
# """
# assert difference in ('central', 'forward', 'backward'), difference
# rank = spatial_rank(tensor)
# if difference == 'forward':
# return _divergence_nd(tensor, padding, (0, 1), dims) / dx ** rank # TODO why dx^rank?
# elif difference == 'backward':
# return _divergence_nd(tensor, padding, (-1, 0), dims) / dx ** rank
# else:
# return _divergence_nd(tensor, padding, (-1, 1), dims) / (2 * dx) ** rank
#
#
# def _divergence_nd(x_, padding, relative_shifts, dims=None):
# x = tensor(x_)
# assert x.shape.channel.rank == 1
# dims = dims if dims is not None else x.shape.spatial.names
# x = math.pad(x, {axis: (-relative_shifts[0], relative_shifts[1]) for axis in dims}, mode=padding)
# components = []
# for dimension in dims:
# dim_index_in_spatial = x.shape.spatial.reset_indices().index(dimension)
# lower, upper = _multi_roll(x, dimension, relative_shifts, diminish_others=(-relative_shifts[0], relative_shifts[1]), names=dims, base_selection={0: rank - dimension - 1})
# components.append(upper - lower)
# return math.sum_(components, 0)
def shift(x: Tensor,
          offsets: tuple,
          dims: tuple or None = None,
          padding: Extrapolation or None = extrapolation.BOUNDARY,
          stack_dim: Shape or None = channel('shift')) -> list:
    """
    Shift a Tensor by fixed offsets along spatial dimensions, abiding by `padding`
    for boundary values.

    Args:
      x: Input data
      offsets: tuple of integer shift distances, e.g. (-1, 1) for lower/upper neighbors
      dims: Dimensions along which to shift; defaults to all spatial dims of `x`
      padding: extrapolation applied at the boundary; if None, the result is cropped
        instead of padded, so the returned tensors are smaller than `x`
      stack_dim: dimension along which the per-dimension shifts are stacked;
        if None, exactly one entry in `dims` is required and no stacking occurs
    Returns:
      list: one shifted tensor per entry of `offsets`
    """
    if stack_dim is None:
        assert len(dims) == 1
    x = wrap(x)
    dims = dims if dims is not None else x.shape.spatial.names
    # Pad once by the extreme offsets so that every shifted slice below stays in bounds.
    pad_lower = max(0, -min(offsets))
    pad_upper = max(0, max(offsets))
    if padding:
        x = math.pad(x, {axis: (pad_lower, pad_upper) for axis in dims}, mode=padding)
    offset_tensors = []
    for offset in offsets:
        components = []
        for dimension in dims:
            # Slice the shifted window along `dimension`; other dims keep the
            # un-shifted window. `or None` maps a stop index of 0 to "until the end".
            if padding:
                slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(pad_lower, -pad_upper or None) for dim in dims}
            else:
                slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(None, None) for dim in dims}
            components.append(x[slices])
        offset_tensors.append(stack(components, stack_dim) if stack_dim is not None else components[0])
    return offset_tensors
def extrapolate_valid_values(values: Tensor, valid: Tensor, distance_cells: int = 1) -> Tuple[Tensor, Tensor]:
    """
    Extrapolates the values of `values` which are marked by the nonzero values of `valid` for `distance_cells` steps in all spatial directions.
    Overlapping extrapolated values get averaged. Extrapolation also includes diagonals.

    Examples (1-step extrapolation), x marks the values for extrapolation:
        200   000    111        004   00x    044        102   000    144
        010 + 0x0 => 111        000 + 000 => 234        004 + 00x => 234
        040   000    111        200   x00    220        200   x00    234

    Args:
        values: Tensor which holds the values for extrapolation
        valid: Tensor with same size as `x` marking the values for extrapolation with nonzero values
        distance_cells: Number of extrapolation steps

    Returns:
        values: Extrapolation result
        valid: mask marking all valid values after extrapolation
    """
    def binarize(x):
        # 1 where x != 0, else 0 (relies on divide_no_nan mapping 0/0 -> 0).
        return math.divide_no_nan(x, x)
    # No point extrapolating further than the largest grid extent.
    distance_cells = min(distance_cells, max(values.shape.sizes))
    for _ in range(distance_cells):
        valid = binarize(valid)
        valid_values = valid * values
        overlap = valid  # after the loop below: number of valid contributors per cell
        for dim in values.shape.spatial.names:
            # Accumulate the two axis-aligned neighbors; applying this per axis
            # to the running sums also reaches the diagonal neighbors.
            values_l, values_r = shift(valid_values, (-1, 1), dims=dim, padding=extrapolation.ZERO)
            valid_values = math.sum_(values_l + values_r + valid_values, dim='shift')
            mask_l, mask_r = shift(overlap, (-1, 1), dims=dim, padding=extrapolation.ZERO)
            overlap = math.sum_(mask_l + mask_r + overlap, dim='shift')
        extp = math.divide_no_nan(valid_values, overlap)  # take mean where extrapolated values overlap
        # Keep originally-valid cells unchanged; fill only newly reached cells.
        values = math.where(valid, values, math.where(binarize(overlap), extp, values))
        valid = overlap
    return values, binarize(valid)
# Gradient
def spatial_gradient(grid: Tensor,
                     dx: float or int = 1,
                     difference: str = 'central',
                     padding: Extrapolation or None = extrapolation.BOUNDARY,
                     dims: tuple or None = None,
                     stack_dim: Shape = channel('gradient')):
    """
    Calculates the spatial_gradient of a scalar channel from finite differences.
    The spatial_gradient vectors are in reverse order, lowest dimension first.

    Args:
        grid: grid values
        dx: physical distance between grid points (default 1)
        difference: type of difference, one of ('forward', 'backward', 'central')
        padding: tensor padding mode
        dims: optional sequence of dimension names
        stack_dim: name of the new vector dimension listing the spatial_gradient w.r.t. the various axes

    Returns:
        tensor of shape (batch_size, spatial_dimensions..., spatial rank)
    """
    grid = wrap(grid)
    scheme = difference.lower()
    # Sample offsets of the two stencil points for each finite-difference scheme.
    offsets_by_scheme = {'central': (-1, 1), 'forward': (0, 1), 'backward': (-1, 0)}
    if scheme not in offsets_by_scheme:
        raise ValueError('Invalid difference type: {}. Can be CENTRAL or FORWARD'.format(difference))
    lower, upper = shift(grid, offsets_by_scheme[scheme], dims, padding, stack_dim=stack_dim)
    spacing = dx * 2 if scheme == 'central' else dx
    return (upper - lower) / spacing
# Laplace
def laplace(x: Tensor,
            dx: Tensor or float = 1,
            padding: Extrapolation = extrapolation.BOUNDARY,
            dims: tuple or None = None):
    """
    Spatial Laplace operator as defined for scalar fields.
    If a vector field is passed, the laplace is computed component-wise.

    Args:
      x: n-dimensional field of shape (batch, spacial dimensions..., components)
      dx: scalar or 1d tensor
      padding: extrapolation
      dims: The second derivative along these dimensions is summed over

    Returns:
      `phi.math.Tensor` of same shape as `x`
    """
    if not isinstance(dx, (int, float)):
        dx = wrap(dx, batch('_laplace'))
    if isinstance(x, Extrapolation):
        return x.spatial_gradient()
    # Three-point stencil per dimension, stacked along a temporary batch dim.
    lower, center, upper = shift(wrap(x), (-1, 0, 1), dims, padding, stack_dim=batch('_laplace'))
    second_derivative = (lower + upper - 2 * center) / dx
    return math.sum_(second_derivative, '_laplace')
def fourier_laplace(grid: Tensor,
                    dx: Tensor or Shape or float or list or tuple,
                    times: int = 1):
    """
    Applies the spatial laplace operator to the given tensor with periodic boundary conditions.

    *Note:* The results of `fourier_laplace` and `laplace` are close but not identical.

    This implementation computes the laplace operator in Fourier space.
    The result for periodic fields is exact, i.e. no numerical instabilities can occur, even for higher-order derivatives.

    Args:
      grid: tensor, assumed to have periodic boundary conditions
      dx: distance between grid points, tensor-like, scalar or vector
      times: number of times the laplace operator is applied. The computational cost is independent of this parameter.

    Returns:
      tensor of same shape as `grid`
    """
    spectrum = math.fft(math.to_complex(grid))
    squared_wave_numbers = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector')
    # In Fourier space the second derivative is multiplication by -(2*pi*k)^2.
    laplace_kernel = -(2 * np.pi) ** 2 * squared_wave_numbers
    filtered = math.real(math.ifft(spectrum * laplace_kernel ** times))
    return math.cast(filtered / wrap(dx) ** 2, grid.dtype)
def fourier_poisson(grid: Tensor,
                    dx: Tensor or Shape or float or list or tuple,
                    times: int = 1):
    """
    Inverse operation to `fourier_laplace`.

    Args:
      grid: right-hand-side tensor, assumed to have periodic boundary conditions
      dx: distance between grid points, tensor-like, scalar or vector
      times: number of times the inverse operator is applied

    Returns:
      tensor of same shape as `grid`
    """
    spectrum = math.fft(math.to_complex(grid))
    squared_wave_numbers = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector')
    laplace_kernel = -(2 * np.pi) ** 2 * squared_wave_numbers
    # The zero-frequency mode has a kernel of 0; divide_no_nan leaves it at 0
    # (the mean of the Poisson solution is unconstrained).
    inverted = math.divide_no_nan(spectrum, math.to_complex(laplace_kernel ** times))
    result = math.real(math.ifft(inverted))
    return math.cast(result * wrap(dx) ** 2, grid.dtype)
# Downsample / Upsample
def downsample2x(grid: Tensor,
                 padding: Extrapolation = extrapolation.BOUNDARY,
                 dims: tuple or None = None) -> Tensor:
    """
    Resamples a regular grid to half the number of spatial sample points per dimension.
    Each output value is the mean (linear interpolation) of the two source values it covers.

    Args:
      grid: full size grid
      padding: grid extrapolation. Used to insert an additional value for odd spatial dims
      dims: dims along which down-sampling is applied. If None, down-sample along all spatial dims.

    Returns:
      half-size grid
    """
    selected_dims = grid.shape.spatial.only(dims).names
    # Odd-sized dims receive one extra padded value so cells can be paired up.
    odd_pad_widths = {dim: (0, 1) for dim in selected_dims if grid.shape.get_size(dim) % 2 != 0}
    grid = math.pad(grid, odd_pad_widths, padding)
    for dim in selected_dims:
        odd_samples = grid[{dim: slice(1, None, 2)}]
        even_samples = grid[{dim: slice(0, None, 2)}]
        grid = (odd_samples + even_samples) / 2
    return grid
def upsample2x(grid: Tensor,
               padding: Extrapolation = extrapolation.BOUNDARY,
               dims: tuple or None = None) -> Tensor:
    """
    Resamples a regular grid to double the number of spatial sample points per dimension.
    The grid values at the new points are determined via linear interpolation.

    Args:
      grid: half-size grid
      padding: grid extrapolation
      dims: dims along which up-sampling is applied. If None, up-sample along all spatial dims.

    Returns:
      double-size grid
    """
    for i, dim in enumerate(grid.shape.spatial.only(dims)):
        left, center, right = shift(grid, (-1, 0, 1), dim.names, padding, None)
        # Linear interpolation: each source cell yields two samples, weighted
        # 1/4 toward the respective neighbor and 3/4 toward the cell itself.
        interp_left = 0.25 * left + 0.75 * center
        interp_right = 0.75 * center + 0.25 * right
        # Interleave the two interpolants along a helper dim, then fold that
        # dim back into `dim`, doubling its size.
        stacked = math.stack([interp_left, interp_right], spatial('_interleave'))
        grid = math.join_dimensions(stacked, (dim.name, '_interleave'), dim)
    return grid
def sample_subgrid(grid: Tensor, start: Tensor, size: Shape) -> Tensor:
    """
    Samples a sub-grid from `grid` with equal distance between sampling points.
    The values at the new sample points are determined via linear interpolation.

    Args:
        grid: `Tensor` to be resampled. Values are assumed to be sampled at cell centers.
        start: Origin point of sub-grid within `grid`, measured in number of cells.
            Must have a single dimension called `vector`.
            Example: `start=(1, 0.5)` would slice off the first grid point in dim 1 and take the mean of neighbouring points in dim 2.
            The order of dims must be equal to `size` and `grid.shape.spatial`.
        size: Resolution of the sub-grid. Must not be larger than the resolution of `grid`.
            The order of dims must be equal to `start` and `grid.shape.spatial`.

    Returns:
        Sub-grid as `Tensor`
    """
    assert start.shape.names == ('vector',)
    assert grid.shape.spatial.names == size.names
    assert math.all_available(start), "Cannot perform sample_subgrid() during tracing, 'start' must be known."
    # Crop `grid` to the integer bounding box of the requested window, keeping
    # one extra upper cell so the interpolation below has both neighbors.
    # NOTE(review): the extra cell is kept whenever d_start != 0, even for
    # integer (non-fractional) starts — confirm this is intended.
    discard = {}
    for dim, d_start, d_size in zip(grid.shape.spatial.names, start, size.sizes):
        discard[dim] = slice(int(d_start), int(d_start) + d_size + (1 if d_start != 0 else 0))
    grid = grid[discard]
    upper_weight = start % 1
    lower_weight = 1 - upper_weight
    for i, dim in enumerate(grid.shape.spatial.names):
        if upper_weight[i].native() not in (0, 1):
            # Fractional offset along `dim`: blend each cell with its upper neighbor.
            lower, upper = shift(grid, (0, 1), [dim], padding=None, stack_dim=None)
            grid = upper * upper_weight[i] + lower * lower_weight[i]
    return grid
# Poisson Brackets
def poisson_bracket(grid1, grid2):
    """
    Compute the Poisson bracket [grid1, grid2] of two grids.

    Only the 2D Arakawa scheme on equally spaced, periodic grids is
    implemented; any other configuration raises NotImplementedError.

    Args:
        grid1: first field in the poisson bracket
        grid2: second field in the poisson bracket

    Returns:
        Tensor holding the bracket values.

    Raises:
        NotImplementedError: if rank != 2, boundaries are not periodic,
            or grid spacings are not uniform and equal.
    """
    if all([grid1.rank == grid2.rank == 2,
            grid1.boundary == grid2.boundary == extrapolation.PERIODIC,
            len(set(list(grid1.dx) + list(grid2.dx))) == 1]):
        return _periodic_2d_arakawa_poisson_bracket(grid1.values, grid2.values, grid1.dx)
    else:
        raise NotImplementedError("\n".join([
            # BUG FIX: a missing comma after this string caused implicit
            # concatenation with the next line, mangling the error message.
            "Not implemented for:",
            f"ranks ({grid1.rank}, {grid2.rank}) != 2",
            f"boundary ({grid1.boundary}, {grid2.boundary}) != {extrapolation.PERIODIC}",
            f"dx uniform ({grid1.dx}, {grid2.dx})"
        ]))
def _periodic_2d_arakawa_poisson_bracket(tensor1: Tensor, tensor2: Tensor, dx: float):
    """
    Solves the poisson bracket using the Arakawa Scheme [tensor1, tensor2]

    Only works in 2D, with equal spaced grids, and periodic boundary conditions

    Args:
      tensor1(Tensor): first field in the poisson bracket
      tensor2(Tensor): second field in the poisson bracket
      dx(float): Grid size (equal in x-y)

    Returns:
      Tensor of bracket values on the original (un-padded) grid.
    """
    # Pad by one ghost cell on each side so the 9-point stencil below can be
    # evaluated everywhere; periodic padding implements the boundary condition.
    zeta = math.pad(value=tensor1, widths={'x': (1, 1), 'y': (1, 1)}, mode=extrapolation.PERIODIC)
    psi = math.pad(value=tensor2, widths={'x': (1, 1), 'y': (1, 1)}, mode=extrapolation.PERIODIC)
    # 9-point Arakawa stencil; slices [1:-1] select interior cells, [2:]/[0:-2]
    # their upper/lower neighbors in the padded arrays.
    return (zeta.x[2:].y[1:-1] * (psi.x[1:-1].y[2:] - psi.x[1:-1].y[0:-2] + psi.x[2:].y[2:] - psi.x[2:].y[0:-2])
            - zeta.x[0:-2].y[1:-1] * (psi.x[1:-1].y[2:] - psi.x[1:-1].y[0:-2] + psi.x[0:-2].y[2:] - psi.x[0:-2].y[0:-2])
            - zeta.x[1:-1].y[2:] * (psi.x[2:].y[1:-1] - psi.x[0:-2].y[1:-1] + psi.x[2:].y[2:] - psi.x[0:-2].y[2:])
            + zeta.x[1:-1].y[0:-2] * (psi.x[2:].y[1:-1] - psi.x[0:-2].y[1:-1] + psi.x[2:].y[0:-2] - psi.x[0:-2].y[0:-2])
            + zeta.x[2:].y[0:-2] * (psi.x[2:].y[1:-1] - psi.x[1:-1].y[0:-2])
            + zeta.x[2:].y[2:] * (psi.x[1:-1].y[2:] - psi.x[2:].y[1:-1])
            - zeta.x[0:-2].y[2:] * (psi.x[1:-1].y[2:] - psi.x[0:-2].y[1:-1])
            - zeta.x[0:-2].y[0:-2] * (psi.x[0:-2].y[1:-1] - psi.x[1:-1].y[0:-2])) / (12 * dx ** 2)
use crate::spec::{
Cc, LinkerFlavor, Lld, Os, PanicStrategy, RelroLevel, TargetOptions, add_link_args,
};
pub(crate) fn opts() -> TargetOptions {
let lld_args = &["-zmax-page-size=4096", "-znow", "-ztext", "--execute-only"];
let cc_args = &["-Wl,-zmax-page-size=4096", "-Wl,-znow", "-Wl,-ztext", "-mexecute-only"];
let mut pre_link_args = TargetOptions::link_args(LinkerFlavor::Gnu(Cc::No, Lld::No), lld_args);
add_link_args(&mut pre_link_args, LinkerFlavor::Gnu(Cc::Yes, Lld::No), cc_args);
TargetOptions {
os: Os::TeeOs,
dynamic_linking: true,
linker_flavor: LinkerFlavor::Gnu(Cc::Yes, Lld::No),
// rpath hardcodes -Wl, so it can't be used together with ld.lld.
// C TAs also don't support rpath, so this is fine.
has_rpath: false,
// Note: Setting has_thread_local to true causes an error when
// loading / dyn-linking the TA
has_thread_local: false,
position_independent_executables: true,
relro_level: RelroLevel::Full,
crt_static_respected: true,
pre_link_args,
panic_strategy: PanicStrategy::Abort,
..Default::default()
}
} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_target/src/spec/base/teeos.rs |
# -*- coding: utf-8 -*-
import nixops.deployment
import os
import os.path
import sqlite3
import sys
import threading
class Connection(sqlite3.Connection):
    """SQLite connection with a nesting-aware ``with`` protocol.

    Unlike the parent class, nested ``with conn:`` blocks are counted and a
    commit (or rollback, if any level raised) happens only when the outermost
    block exits.  A per-connection re-entrant lock serialises use across threads.
    """

    def __init__(self, db_file, **kwargs):
        sqlite3.Connection.__init__(self, db_file, **kwargs)
        self.db_file = db_file          # path of the backing database file
        self.nesting = 0                # current ``with`` nesting depth
        self.lock = threading.RLock()   # re-entrant so one thread may nest freely

    # Implement Python's context management protocol so that "with db"
    # automatically commits or rolls back.  The difference with the
    # parent's "with" implementation is that we nest, i.e. a commit or
    # rollback is only done at the outer "with".
    def __enter__(self):
        self.lock.acquire()
        if self.nesting == 0:
            # Fresh outermost block: no failure recorded yet.
            self.must_rollback = False
        self.nesting = self.nesting + 1

    def __exit__(self, exception_type, exception_value, exception_traceback):
        # Any exception at any nesting level poisons the whole transaction.
        if exception_type != None: self.must_rollback = True
        self.nesting = self.nesting - 1
        assert self.nesting >= 0
        if self.nesting == 0:
            if self.must_rollback:
                try:
                    self.rollback()
                except sqlite3.ProgrammingError:
                    # e.g. connection already closed — nothing to undo
                    pass
            else:
                self.commit()
        self.lock.release()
def get_default_state_file():
    """Return the path of the default NixOps state file.

    Precedence: $NIXOPS_STATE, then the legacy $CHARON_STATE, then
    ~/.nixops/deployments.nixops.  On first use this migrates a legacy
    ~/.charon directory (and its deployments file) to the new location.
    """
    home = os.environ.get("HOME", "") + "/.nixops"
    if not os.path.exists(home):
        old_home = os.environ.get("HOME", "") + "/.charon"
        if os.path.exists(old_home):
            sys.stderr.write("renaming ‘{0}’ to ‘{1}’...\n".format(old_home, home))
            os.rename(old_home, home)
            if os.path.exists(home + "/deployments.charon"):
                os.rename(home + "/deployments.charon", home + "/deployments.nixops")
        else:
            # Owner-only permissions: the state file may contain secrets.
            # FIX: was the Python-2-only octal literal ``0700``; ``0o700`` is
            # the same value and is valid on both Python 2.6+ and Python 3.
            os.makedirs(home, 0o700)
    return os.environ.get("NIXOPS_STATE", os.environ.get("CHARON_STATE", home + "/deployments.nixops"))
class StateFile(object):
    """NixOps state file.

    Wraps a SQLite database that stores deployments, their attributes,
    resources and resource attributes.  On open, creates the schema for a
    fresh database or upgrades an older one up to `current_schema`.
    """

    # Newest schema version understood by this code.
    current_schema = 3

    def __init__(self, db_file):
        self.db_file = db_file
        if os.path.splitext(db_file)[1] not in ['.nixops', '.charon']:
            raise Exception("state file ‘{0}’ should have extension ‘.nixops’".format(db_file))
        db = sqlite3.connect(db_file, timeout=60, check_same_thread=False, factory=Connection) # FIXME
        db.db_file = db_file
        # WAL allows concurrent readers; foreign keys enable cascade deletes.
        db.execute("pragma journal_mode = wal")
        db.execute("pragma foreign_keys = 1")
        # FIXME: this is not actually transactional, because pysqlite (not
        # sqlite) does an implicit commit before "create table".
        with db:
            c = db.cursor()
            # Get the schema version.
            version = 0 # new database
            if self._table_exists(c, 'SchemaVersion'):
                c.execute("select version from SchemaVersion")
                version = c.fetchone()[0]
            elif self._table_exists(c, 'Deployments'):
                # Pre-versioning databases only had the Deployments table.
                version = 1
            if version == self.current_schema:
                pass
            elif version == 0:
                self._create_schema(c)
            elif version < self.current_schema:
                # Apply upgrades incrementally, then record the new version.
                if version <= 1: self._upgrade_1_to_2(c)
                if version <= 2: self._upgrade_2_to_3(c)
                c.execute("update SchemaVersion set version = ?", (self.current_schema,))
            else:
                raise Exception("this NixOps version is too old to deal with schema version {0}".format(version))
        self._db = db

    def close(self):
        """Close the underlying SQLite connection."""
        self._db.close()

    def query_deployments(self):
        """Return the UUIDs of all deployments in the database."""
        c = self._db.cursor()
        c.execute("select uuid from Deployments")
        res = c.fetchall()
        return [x[0] for x in res]

    def get_all_deployments(self):
        """Return Deployment objects for every deployment in the database."""
        uuids = self.query_deployments()
        res = []
        for uuid in uuids:
            try:
                res.append(self.open_deployment(uuid=uuid))
            except nixops.deployment.UnknownBackend as e:
                # Skip deployments whose backend is not available; warn only.
                sys.stderr.write("skipping deployment ‘{0}’: {1}\n".format(uuid, str(e)))
        return res

    def _find_deployment(self, uuid=None):
        # Resolve `uuid` (an exact UUID, a deployment name, or a UUID prefix)
        # to a single Deployment; return None when nothing matches.
        c = self._db.cursor()
        if not uuid:
            c.execute("select uuid from Deployments")
        else:
            c.execute("select uuid from Deployments d where uuid = ? or exists (select 1 from DeploymentAttrs where deployment = d.uuid and name = 'name' and value = ?)", (uuid, uuid))
        res = c.fetchall()
        if len(res) == 0:
            if uuid:
                # try the prefix match
                c.execute("select uuid from Deployments where uuid glob ?", (uuid + '*', ))
                res = c.fetchall()
                if len(res) == 0:
                    return None
            else:
                return None
        if len(res) > 1:
            if uuid:
                raise Exception("state file contains multiple deployments with the same name, so you should specify one using its UUID")
            else:
                raise Exception("state file contains multiple deployments, so you should specify which one to use using ‘-d’, or set the environment variable NIXOPS_DEPLOYMENT")
        return nixops.deployment.Deployment(self, res[0][0], sys.stderr)

    def open_deployment(self, uuid=None):
        """Open an existing deployment."""
        deployment = self._find_deployment(uuid=uuid)
        if deployment: return deployment
        raise Exception("could not find specified deployment in state file ‘{0}’".format(self.db_file))

    def create_deployment(self, uuid=None):
        """Create a new deployment."""
        if not uuid:
            # Generate a fresh UUID when none was supplied.
            import uuid
            uuid = str(uuid.uuid1())
        with self._db:
            self._db.execute("insert into Deployments(uuid) values (?)", (uuid,))
        return nixops.deployment.Deployment(self, uuid, sys.stderr)

    def _table_exists(self, c, table):
        # True iff `table` exists in the SQLite schema.
        c.execute("select 1 from sqlite_master where name = ? and type='table'", (table,));
        return c.fetchone() != None

    def _create_schemaversion(self, c):
        # Create the SchemaVersion table and record the current version.
        c.execute(
            '''create table if not exists SchemaVersion(
                 version integer not null
               );''')
        c.execute("insert into SchemaVersion(version) values (?)", (self.current_schema,))

    def _create_schema(self, c):
        # Create all tables for a fresh (version 0) database.
        self._create_schemaversion(c)
        c.execute(
            '''create table if not exists Deployments(
                 uuid text primary key
               );''')
        c.execute(
            '''create table if not exists DeploymentAttrs(
                 deployment text not null,
                 name text not null,
                 value text not null,
                 primary key(deployment, name),
                 foreign key(deployment) references Deployments(uuid) on delete cascade
               );''')
        c.execute(
            '''create table if not exists Resources(
                 id integer primary key autoincrement,
                 deployment text not null,
                 name text not null,
                 type text not null,
                 foreign key(deployment) references Deployments(uuid) on delete cascade
               );''')
        c.execute(
            '''create table if not exists ResourceAttrs(
                 machine integer not null,
                 name text not null,
                 value text not null,
                 primary key(machine, name),
                 foreign key(machine) references Resources(id) on delete cascade
               );''')

    def _upgrade_1_to_2(self, c):
        # v1 databases predate schema versioning; just add the version table.
        sys.stderr.write("updating database schema from version 1 to 2...\n")
        self._create_schemaversion(c)

    def _upgrade_2_to_3(self, c):
        # v3 renamed Machines/MachineAttrs to the generic Resources tables.
        sys.stderr.write("updating database schema from version 2 to 3...\n")
        c.execute("alter table Machines rename to Resources")
        c.execute("alter table MachineAttrs rename to ResourceAttrs")
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsTaskManager.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '26/04/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import os
from time import sleep
import qgis # NOQA
from qgis.core import QgsTask, QgsApplication
from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
start_app()
def run(task, result):
    """Task body for tests: return *result*, or raise when it is falsy
    (simulating a canceled task)."""
    if result:
        return result
    raise Exception('canceled')
def run_with_kwargs(task, password, result):
    """Task body for tests: return *result* when the password check passes."""
    if password == 1:
        return result
    raise Exception('bad password value')
def cancelable(task):
    # Busy-wait until the task is canceled, then raise to mark it terminated.
    # Used by the tests to verify that cancel() reaches a running task.
    while not task.isCanceled():
        pass
    if task.isCanceled():
        raise Exception('canceled')
def progress_function(task):
    # Report 50% progress, then busy-wait until canceled; raising marks the
    # task as terminated.  Lets tests observe setProgress() on a live task.
    task.setProgress(50)
    while not task.isCanceled():
        pass
    if task.isCanceled():
        raise Exception('canceled')
def run_no_result(task):
    """Task body for tests that completes successfully without a result."""
    return None
def run_fail(task):
    """Task body for tests that always fails."""
    raise Exception('fail')
def run_single_val_result(task):
    """Task body for tests returning a single value."""
    return 5
def run_multiple_val_result(task):
    """Task body for tests returning a tuple of values."""
    return 5, 'whoo'
class TestQgsTaskManager(unittest.TestCase):
    """Tests for QgsTask.fromFunction() and the global task manager.

    These tests pump the Qt event loop via QCoreApplication.processEvents()
    to wait for task state transitions; they assume each task eventually
    reaches Complete or Terminated.
    """

    def testTaskFromFunction(self):
        """ test creating task from function """
        task = QgsTask.fromFunction('test task', run, 20)
        QgsApplication.taskManager().addTask(task)
        while task.status() not in [QgsTask.Complete, QgsTask.Terminated]:
            QCoreApplication.processEvents()
        self.assertEqual(task.returned_values, 20)
        self.assertFalse(task.exception)
        self.assertEqual(task.status(), QgsTask.Complete)
        # try a task which cancels itself
        bad_task = QgsTask.fromFunction('test task2', run, None)
        QgsApplication.taskManager().addTask(bad_task)
        while bad_task.status() not in [QgsTask.Complete, QgsTask.Terminated]:
            QCoreApplication.processEvents()
        self.assertFalse(bad_task.returned_values)
        self.assertTrue(bad_task.exception)
        self.assertEqual(bad_task.status(), QgsTask.Terminated)

    def testTaskFromFunctionWithFlags(self):
        """ test creating task from function with flags"""
        task = QgsTask.fromFunction('test task', run, 20, flags=QgsTask.Flags())
        self.assertFalse(task.canCancel())
        task2 = QgsTask.fromFunction('test task', run, 20, flags=QgsTask.CanCancel)
        self.assertTrue(task2.canCancel())

    def testTaskFromFunctionWithKwargs(self):
        """ test creating task from function using kwargs """
        task = QgsTask.fromFunction('test task3', run_with_kwargs, result=5, password=1)
        QgsApplication.taskManager().addTask(task)
        while task.status() not in [QgsTask.Complete, QgsTask.Terminated]:
            QCoreApplication.processEvents()
        self.assertEqual(task.returned_values, 5)
        self.assertFalse(task.exception)
        self.assertEqual(task.status(), QgsTask.Complete)

    def testTaskFromFunctionIsCancelable(self):
        """ test that task from function can check canceled status """
        bad_task = QgsTask.fromFunction('test task4', cancelable)
        QgsApplication.taskManager().addTask(bad_task)
        while bad_task.status() != QgsTask.Running:
            QCoreApplication.processEvents()
        bad_task.cancel()
        while bad_task.status() == QgsTask.Running:
            QCoreApplication.processEvents()
        while QgsApplication.taskManager().countActiveTasks() > 0:
            QCoreApplication.processEvents()
        self.assertEqual(bad_task.status(), QgsTask.Terminated)
        self.assertTrue(bad_task.exception)

    def testTaskFromFunctionCanSetProgress(self):
        """ test that task from function can set progress """
        task = QgsTask.fromFunction('test task5', progress_function)
        QgsApplication.taskManager().addTask(task)
        while task.status() != QgsTask.Running:
            QCoreApplication.processEvents()
        # wait a fraction so that setProgress gets a chance to be called
        sleep(0.001)
        self.assertEqual(task.progress(), 50)
        self.assertFalse(task.exception)
        task.cancel()
        while task.status() == QgsTask.Running:
            QCoreApplication.processEvents()
        while QgsApplication.taskManager().countActiveTasks() > 0:
            QCoreApplication.processEvents()

    def testTaskFromFunctionFinished(self):
        """ test that task from function can have callback finished function"""
        called = False

        def finished_no_val(e):
            nonlocal called
            assert e is None
            called = True
            return

        task = QgsTask.fromFunction('test task', run_no_result, on_finished=finished_no_val)
        QgsApplication.taskManager().addTask(task)
        while task.status() not in [QgsTask.Complete, QgsTask.Terminated]:
            QCoreApplication.processEvents()
        while QgsApplication.taskManager().countActiveTasks() > 0:
            QCoreApplication.processEvents()
        # check that the finished function was called
        self.assertFalse(task.returned_values)
        self.assertFalse(task.exception)
        self.assertTrue(called)

    def testTaskFromFunctionFinishedFail(self):
        """ test that task from function which fails calls finished with exception"""
        finished_exception = None

        def finished_fail(e):
            nonlocal finished_exception
            assert e
            finished_exception = e

        task = QgsTask.fromFunction('test task', run_fail, on_finished=finished_fail)
        QgsApplication.taskManager().addTask(task)
        while task.status() not in [QgsTask.Complete, QgsTask.Terminated]:
            QCoreApplication.processEvents()
        while QgsApplication.taskManager().countActiveTasks() > 0:
            QCoreApplication.processEvents()
        # check that the finished function was called
        self.assertTrue(task.exception)
        self.assertTrue(finished_exception)
        self.assertEqual(task.exception, finished_exception)

    def testTaskFromFunctionCanceledWhileQueued(self):
        """ test that task from finished is called with exception when task is terminated while queued"""
        finished_exception = None

        def finished_fail(e):
            nonlocal finished_exception
            assert e
            finished_exception = e

        task = QgsTask.fromFunction('test task', run_no_result, on_finished=finished_fail)
        task.hold()
        QgsApplication.taskManager().addTask(task)
        task.cancel()
        while task.status() not in [QgsTask.Complete, QgsTask.Terminated]:
            QCoreApplication.processEvents()
        while QgsApplication.taskManager().countActiveTasks() > 0:
            QCoreApplication.processEvents()
        # check that the finished function was called
        self.assertTrue(task.exception)
        self.assertTrue(finished_exception)
        self.assertEqual(task.exception, finished_exception)

    def testTaskFromFunctionFinishedWithVal(self):
        """ test that task from function can have callback finished function and is passed result values"""
        result_value = None

        def finished_single_value_result(e, value):
            nonlocal result_value
            assert e is None
            result_value = value
            return

        task = QgsTask.fromFunction('test task', run_single_val_result, on_finished=finished_single_value_result)
        QgsApplication.taskManager().addTask(task)
        while task.status() not in [QgsTask.Complete, QgsTask.Terminated]:
            QCoreApplication.processEvents()
        while QgsApplication.taskManager().countActiveTasks() > 0:
            QCoreApplication.processEvents()
        # check that the finished function was called
        self.assertEqual(task.returned_values, (5))
        self.assertFalse(task.exception)
        self.assertEqual(result_value, 5)

    def testTaskFromFunctionFinishedWithMultipleValues(self):
        """ test that task from function can have callback finished function and is passed multiple result values"""
        result_value = None
        result_statement = None

        def finished_multiple_value_result(e, results):
            nonlocal result_value
            nonlocal result_statement
            assert e is None
            result_value = results[0]
            result_statement = results[1]

        task = QgsTask.fromFunction('test task', run_multiple_val_result, on_finished=finished_multiple_value_result)
        QgsApplication.taskManager().addTask(task)
        while task.status() not in [QgsTask.Complete, QgsTask.Terminated]:
            QCoreApplication.processEvents()
        while QgsApplication.taskManager().countActiveTasks() > 0:
            QCoreApplication.processEvents()
        # check that the finished function was called
        self.assertEqual(task.returned_values, (5, 'whoo'))
        self.assertFalse(task.exception)
        self.assertEqual(result_value, 5)
        self.assertEqual(result_statement, 'whoo')

    @unittest.skipIf(os.environ.get('TRAVIS', '') == 'true', 'Test is unstable on Travis')
    def testTaskFromFunctionWithSubTaskCompletedIsCalledOnce(self):  # spellok
        """ test that when a parent task has subtasks it does emit taskCompleted only once"""
        self.finished = 0
        self.completed = 0

        def _on_finished(e):
            self.finished += 1

        def _on_completed():
            self.completed += 1

        task = QgsTask.fromFunction('test task', run_no_result, on_finished=_on_finished)
        task.taskCompleted.connect(_on_completed)
        spy = QSignalSpy(task.taskCompleted)
        sub_task_1 = QgsTask.fromFunction('test subtask 1', run_no_result, on_finished=_on_finished)
        sub_task_2 = QgsTask.fromFunction('test subtask 2', run_no_result, on_finished=_on_finished)
        task.addSubTask(sub_task_1, [], QgsTask.ParentDependsOnSubTask)
        task.addSubTask(sub_task_2, [], QgsTask.ParentDependsOnSubTask)
        QgsApplication.taskManager().addTask(task)
        while task.status() not in [QgsTask.Complete, QgsTask.Terminated]:
            QCoreApplication.processEvents()
        while QgsApplication.taskManager().countActiveTasks() > 0:
            QCoreApplication.processEvents()
        # parent must complete exactly once, but all three finished callbacks fire
        self.assertEqual(self.completed, 1)
        self.assertEqual(self.finished, 3)
        self.assertEqual(len(spy), 1)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
"""Functions to convert NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph is through the
graph constructor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist']
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
If create_using.clear() works, assume it returns a graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
return nx.Graph()
try:
create_using.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return create_using
def to_networkx_graph(data, create_using=None, multigraph_input=False):
    """Make a NetworkX graph from a known data structure.

    The preferred way to call this is automatically
    from the class constructor

    >>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
    >>> G=nx.Graph(d)

    instead of the equivalent

    >>> G=nx.from_dict_of_dicts(d)

    Parameters
    ----------
    data : a object to be converted
       Current known types are:
         any NetworkX graph
         dict-of-dicts
         dict-of-lists
         list of edges
         numpy matrix
         numpy ndarray
         scipy sparse matrix
         pygraphviz agraph
    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.
    multigraph_input : bool (default False)
      If True and data is a dict_of_dicts,
      try to create a multigraph assuming dict_of_dict_of_lists.
      If data and create_using are both multigraphs then create
      a multigraph from a multigraph.
    """
    # NOTE: all former bare ``except:`` clauses below now catch Exception so
    # that KeyboardInterrupt/SystemExit are no longer swallowed.
    # NX graph
    if hasattr(data, "adj"):
        try:
            result = from_dict_of_dicts(data.adj,
                                        create_using=create_using,
                                        multigraph_input=data.is_multigraph())
            if hasattr(data, 'graph') and isinstance(data.graph, dict):
                result.graph = data.graph.copy()
            if hasattr(data, 'node') and isinstance(data.node, dict):
                result.node = dict((n, dd.copy()) for n, dd in data.node.items())
            return result
        except Exception:
            raise nx.NetworkXError("Input is not a correct NetworkX graph.")
    # pygraphviz agraph
    if hasattr(data, "is_strict"):
        try:
            return nx.nx_agraph.from_agraph(data, create_using=create_using)
        except Exception:
            raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
    # dict of dicts/lists
    if isinstance(data, dict):
        try:
            return from_dict_of_dicts(data, create_using=create_using,
                                      multigraph_input=multigraph_input)
        except Exception:
            try:
                return from_dict_of_lists(data, create_using=create_using)
            except Exception:
                raise TypeError("Input is not known type.")
    # list or generator of edges
    if (isinstance(data, list)
            or isinstance(data, tuple)
            or hasattr(data, 'next')
            or hasattr(data, '__next__')):
        try:
            return from_edgelist(data, create_using=create_using)
        except Exception:
            raise nx.NetworkXError("Input is not a valid edge list")
    # Pandas DataFrame
    try:
        import pandas as pd
        if isinstance(data, pd.DataFrame):
            try:
                return nx.from_pandas_dataframe(data, create_using=create_using)
            except Exception:
                msg = "Input is not a correct Pandas DataFrame."
                raise nx.NetworkXError(msg)
    except ImportError:
        msg = 'pandas not found, skipping conversion test.'
        warnings.warn(msg, ImportWarning)
    # numpy matrix or ndarray
    try:
        import numpy
        if isinstance(data, numpy.matrix) or \
                isinstance(data, numpy.ndarray):
            try:
                return nx.from_numpy_matrix(data, create_using=create_using)
            except Exception:
                raise nx.NetworkXError(
                    "Input is not a correct numpy matrix or array.")
    except ImportError:
        warnings.warn('numpy not found, skipping conversion test.',
                      ImportWarning)
    # scipy sparse matrix - any format
    try:
        import scipy
        if hasattr(data, "format"):
            try:
                return nx.from_scipy_sparse_matrix(data, create_using=create_using)
            except Exception:
                raise nx.NetworkXError(
                    "Input is not a correct scipy sparse matrix type.")
    except ImportError:
        warnings.warn('scipy not found, skipping conversion test.',
                      ImportWarning)
    # FIX: removed an unreachable bare ``return`` that followed this raise.
    raise nx.NetworkXError(
        "Input is not a known data type for conversion.")
def convert_to_undirected(G):
    """Return a new undirected representation of the graph G.

    Simply delegates to ``G.to_undirected()``.
    """
    undirected = G.to_undirected()
    return undirected
def convert_to_directed(G):
    """Return a new directed representation of the graph G.

    Simply delegates to ``G.to_directed()``.
    """
    directed = G.to_directed()
    return directed
def to_dict_of_lists(G,nodelist=None):
    """Return adjacency representation of graph as a dictionary of lists.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list
       Use only nodes specified in nodelist

    Notes
    -----
    Completely ignores edge data for MultiGraph and MultiDiGraph.
    """
    # With no nodelist we iterate the graph itself (its nodes) and keep
    # every neighbor; otherwise both nodes and neighbors are restricted
    # to the supplied nodelist.
    if nodelist is None:
        nodelist = G
    return {node: [nbr for nbr in G.neighbors(node) if nbr in nodelist]
            for node in nodelist}
def from_dict_of_lists(d,create_using=None):
    """Return a graph from a dictionary of lists.

    Parameters
    ----------
    d : dictionary of lists
       A dictionary of lists adjacency representation.

    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    Examples
    --------
    >>> dol= {0:[1]} # single edge (0,1)
    >>> G=nx.from_dict_of_lists(dol)

    or

    >>> G=nx.Graph(dol) # use Graph constructor
    """
    G=_prep_create_using(create_using)
    # Add nodes first so isolated nodes (empty neighbor lists) are kept.
    G.add_nodes_from(d)
    if G.is_multigraph() and not G.is_directed():
        # a dict_of_lists can't show multiedges. BUT for undirected graphs,
        # each edge shows up twice in the dict_of_lists.
        # So we need to treat this case separately.
        seen={}
        for node,nbrlist in d.items():
            for nbr in nbrlist:
                if nbr not in seen:
                    G.add_edge(node,nbr)
                # NOTE(review): `node` is marked seen while its own neighbor
                # list is still being scanned, so a later self-loop entry
                # (node in its own nbrlist) would be skipped — confirm this
                # is the intended behavior before changing the placement.
                seen[node]=1 # don't allow reverse edge to show up
    else:
        # Directed or non-multi graphs: duplicates are harmless, add all pairs.
        G.add_edges_from( ((node,nbr) for node,nbrlist in d.items()
                           for nbr in nbrlist) )
    return G
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
    """Return adjacency representation of graph as a dictionary of dictionaries.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list
       Use only nodes specified in nodelist

    edge_data : list, optional
       If provided,  the value of the dictionary will be
       set to edge_data for all edges.  This is useful to make
       an adjacency matrix type representation with 1 as the edge data.
       If edgedata is None, the edgedata in G is used to fill the values.
       If G is a multigraph, the edgedata is a dict for each pair (u,v).
    """
    dod = {}
    # Four cases: (nodelist given or not) x (edge_data override or not).
    if nodelist is None:
        if edge_data is None:
            # Shallow-copy each neighbor dict so callers can't mutate G.
            for node, nbr_dict in G.adjacency_iter():
                dod[node] = nbr_dict.copy()
        else:
            # Same keys, but every edge value replaced by edge_data.
            for node, nbr_dict in G.adjacency_iter():
                dod[node] = dict.fromkeys(nbr_dict, edge_data)
    elif edge_data is None:
        for node in nodelist:
            dod[node] = {nbr: data for nbr, data in G[node].items()
                         if nbr in nodelist}
    else:
        for node in nodelist:
            dod[node] = {nbr: edge_data for nbr in G[node]
                         if nbr in nodelist}
    return dod
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
    """Return a graph from a dictionary of dictionaries.

    Parameters
    ----------
    d : dictionary of dictionaries
      A dictionary of dictionaries adjacency representation.

    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    multigraph_input : bool (default False)
       When True, the values of the inner dict are assumed
       to be containers of edge data for multiple edges.
       Otherwise this routine assumes the edge data are singletons.

    Examples
    --------
    >>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
    >>> G=nx.from_dict_of_dicts(dod)

    or

    >>> G=nx.Graph(dod) # use Graph constructor
    """
    G=_prep_create_using(create_using)
    # Add nodes first so isolated nodes (empty inner dicts) are kept.
    G.add_nodes_from(d)
    # is dict a MultiGraph or MultiDiGraph?
    if multigraph_input:
        # Inner dicts map neighbor -> {key: data} (one entry per parallel edge).
        # make a copy of the list of edge data (but not the edge data)
        if G.is_directed():
            if G.is_multigraph():
                # Preserve the original multi-edge keys.
                G.add_edges_from( (u,v,key,data)
                                  for u,nbrs in d.items()
                                  for v,datadict in nbrs.items()
                                  for key,data in datadict.items()
                                )
            else:
                # Collapsing multi-edges onto a simple digraph: the last
                # (key,data) pair for a given (u,v) wins.
                G.add_edges_from( (u,v,data)
                                  for u,nbrs in d.items()
                                  for v,datadict in nbrs.items()
                                  for key,data in datadict.items()
                                )
        else: # Undirected
            if G.is_multigraph():
                seen=set()   # don't add both directions of undirected graph
                for u,nbrs in d.items():
                    for v,datadict in nbrs.items():
                        if (u,v) not in seen:
                            G.add_edges_from( (u,v,key,data)
                                              for key,data in datadict.items()
                                            )
                            seen.add((v,u))
            else:
                seen=set()   # don't add both directions of undirected graph
                for u,nbrs in d.items():
                    for v,datadict in nbrs.items():
                        if (u,v) not in seen:
                            G.add_edges_from( (u,v,data)
                                              for key,data in datadict.items() )
                            seen.add((v,u))
    else: # not a multigraph to multigraph transfer
        # Inner dicts map neighbor -> single edge-data dict.
        if G.is_multigraph() and not G.is_directed():
            # d can have both representations u-v, v-u in dict.  Only add one.
            # We don't need this check for digraphs since we add both directions,
            # or for Graph() since it is done implicitly (parallel edges not allowed)
            seen=set()
            for u,nbrs in d.items():
                for v,data in nbrs.items():
                    if (u,v) not in seen:
                        G.add_edge(u,v,attr_dict=data)
                        seen.add((v,u))
        else:
            G.add_edges_from( ( (u,v,data)
                                for u,nbrs in d.items()
                                for v,data in nbrs.items()) )
    return G
def to_edgelist(G,nodelist=None):
    """Return a list of edges in the graph.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list
       Use only nodes specified in nodelist
    """
    # Edge data dicts are always included (data=True).
    if nodelist is not None:
        return G.edges(nodelist, data=True)
    return G.edges(data=True)
def from_edgelist(edgelist,create_using=None):
    """Return a graph from a list of edges.

    Parameters
    ----------
    edgelist : list or iterator
      Edge tuples

    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    Examples
    --------
    >>> edgelist= [(0,1)] # single edge (0,1)
    >>> G=nx.from_edgelist(edgelist)

    or

    >>> G=nx.Graph(edgelist) # use Graph constructor
    """
    graph = _prep_create_using(create_using)
    # add_edges_from accepts 2-tuples, 3-tuples with data, or an iterator.
    graph.add_edges_from(edgelist)
    return graph
"""Base interface that all chains should implement."""
import builtins
import contextlib
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, cast
import yaml
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManager,
AsyncCallbackManagerForChainRun,
BaseCallbackManager,
CallbackManager,
CallbackManagerForChainRun,
Callbacks,
)
from langchain_core.outputs import RunInfo
from langchain_core.runnables import (
RunnableConfig,
RunnableSerializable,
ensure_config,
run_in_executor,
)
from langchain_core.utils.pydantic import create_model
from pydantic import (
BaseModel,
ConfigDict,
Field,
field_validator,
model_validator,
)
from typing_extensions import override
from langchain_classic.base_memory import BaseMemory
from langchain_classic.schema import RUN_KEY
# Module-level logger named after this module so it participates in the
# standard hierarchical logging configuration.
logger = logging.getLogger(__name__)
def _get_verbosity() -> bool:
    """Return the global ``verbose`` flag from ``langchain_classic.globals``."""
    # Imported lazily inside the function — presumably to avoid a circular
    # import with langchain_classic.globals at module load time (confirm).
    from langchain_classic.globals import get_verbose
    return get_verbose()
class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
    """Abstract base class for creating structured sequences of calls to components.

    Chains should be used to encode a sequence of calls to components like
    models, document retrievers, other chains, etc., and provide a simple interface
    to this sequence.

    The Chain interface makes it easy to create apps that are:

    - Stateful: add Memory to any Chain to give it state,
    - Observable: pass Callbacks to a Chain to execute additional functionality,
    like logging, outside the main sequence of component calls,
    - Composable: the Chain API is flexible enough that it is easy to combine
    Chains with other components, including other Chains.

    The main methods exposed by chains are:

    - `__call__`: Chains are callable. The `__call__` method is the primary way to
    execute a Chain. This takes inputs as a dictionary and returns a
    dictionary output.
    - `run`: A convenience method that takes inputs as args/kwargs and returns the
    output as a string or object. This method can only be used for a subset of
    chains and cannot return as rich of an output as `__call__`.
    """

    memory: BaseMemory | None = None
    """Optional memory object.

    Memory is a class that gets called at the start
    and at the end of every chain. At the start, memory loads variables and passes
    them along in the chain. At the end, it saves any returned variables.
    There are many different types of memory - please see memory docs
    for the full catalog."""
    callbacks: Callbacks = Field(default=None, exclude=True)
    """Optional list of callback handlers (or callback manager).

    Callback handlers are called throughout the lifecycle of a call to a chain,
    starting with on_chain_start, ending with on_chain_end or on_chain_error.
    Each custom chain can optionally call additional callback methods, see Callback docs
    for full details."""
    verbose: bool = Field(default_factory=_get_verbosity)
    """Whether or not run in verbose mode. In verbose mode, some intermediate logs
    will be printed to the console. Defaults to the global `verbose` value,
    accessible via `langchain.globals.get_verbose()`."""
    tags: list[str] | None = None
    """Optional list of tags associated with the chain.

    These tags will be associated with each call to this chain,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a chain with its use case.
    """
    metadata: builtins.dict[str, Any] | None = None
    """Optional metadata associated with the chain.

    This metadata will be associated with each call to this chain,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a chain with its use case.
    """
    callback_manager: BaseCallbackManager | None = Field(default=None, exclude=True)
    """[DEPRECATED] Use `callbacks` instead."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    @override
    def get_input_schema(
        self,
        config: RunnableConfig | None = None,
    ) -> type[BaseModel]:
        # This is correct, but pydantic typings/mypy don't think so.
        return create_model("ChainInput", **dict.fromkeys(self.input_keys, (Any, None)))

    @override
    def get_output_schema(
        self,
        config: RunnableConfig | None = None,
    ) -> type[BaseModel]:
        # This is correct, but pydantic typings/mypy don't think so.
        return create_model(
            "ChainOutput",
            **dict.fromkeys(self.output_keys, (Any, None)),
        )

    @override
    def invoke(
        self,
        input: dict[str, Any],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> dict[str, Any]:
        """Transform a single chain input into an output (Runnable interface)."""
        config = ensure_config(config)
        callbacks = config.get("callbacks")
        tags = config.get("tags")
        metadata = config.get("metadata")
        run_name = config.get("run_name") or self.get_name()
        run_id = config.get("run_id")
        include_run_info = kwargs.get("include_run_info", False)
        return_only_outputs = kwargs.get("return_only_outputs", False)
        inputs = self.prep_inputs(input)
        callback_manager = CallbackManager.configure(
            callbacks,
            self.callbacks,
            self.verbose,
            tags,
            self.tags,
            metadata,
            self.metadata,
        )
        # Older `_call` implementations may not accept `run_manager`.
        new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
        run_manager = callback_manager.on_chain_start(
            None,
            inputs,
            run_id,
            name=run_name,
        )
        try:
            self._validate_inputs(inputs)
            outputs = (
                self._call(inputs, run_manager=run_manager)
                if new_arg_supported
                else self._call(inputs)
            )
            final_outputs: dict[str, Any] = self.prep_outputs(
                inputs,
                outputs,
                return_only_outputs,
            )
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise
        run_manager.on_chain_end(outputs)
        if include_run_info:
            final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
        return final_outputs

    @override
    async def ainvoke(
        self,
        input: dict[str, Any],
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> dict[str, Any]:
        """Async counterpart of `invoke`."""
        config = ensure_config(config)
        callbacks = config.get("callbacks")
        tags = config.get("tags")
        metadata = config.get("metadata")
        run_name = config.get("run_name") or self.get_name()
        run_id = config.get("run_id")
        include_run_info = kwargs.get("include_run_info", False)
        return_only_outputs = kwargs.get("return_only_outputs", False)
        inputs = await self.aprep_inputs(input)
        callback_manager = AsyncCallbackManager.configure(
            callbacks,
            self.callbacks,
            self.verbose,
            tags,
            self.tags,
            metadata,
            self.metadata,
        )
        # Older `_acall` implementations may not accept `run_manager`.
        new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
        run_manager = await callback_manager.on_chain_start(
            None,
            inputs,
            run_id,
            name=run_name,
        )
        try:
            self._validate_inputs(inputs)
            outputs = (
                await self._acall(inputs, run_manager=run_manager)
                if new_arg_supported
                else await self._acall(inputs)
            )
            final_outputs: dict[str, Any] = await self.aprep_outputs(
                inputs,
                outputs,
                return_only_outputs,
            )
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise
        await run_manager.on_chain_end(outputs)
        if include_run_info:
            final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
        return final_outputs

    @property
    def _chain_type(self) -> str:
        msg = "Saving not supported for this chain type."
        raise NotImplementedError(msg)

    @model_validator(mode="before")
    @classmethod
    def raise_callback_manager_deprecation(cls, values: dict) -> Any:
        """Raise deprecation warning if callback_manager is used."""
        if values.get("callback_manager") is not None:
            if values.get("callbacks") is not None:
                msg = (
                    "Cannot specify both callback_manager and callbacks. "
                    "callback_manager is deprecated, callbacks is the preferred "
                    "parameter to pass in."
                )
                raise ValueError(msg)
            warnings.warn(
                "callback_manager is deprecated. Please use callbacks instead.",
                DeprecationWarning,
                stacklevel=4,
            )
            values["callbacks"] = values.pop("callback_manager", None)
        return values

    @field_validator("verbose", mode="before")
    @classmethod
    def set_verbose(
        cls,
        verbose: bool | None,  # noqa: FBT001
    ) -> bool:
        """Set the chain verbosity.

        Defaults to the global setting if not specified by the user.
        """
        if verbose is None:
            return _get_verbosity()
        return verbose

    @property
    @abstractmethod
    def input_keys(self) -> list[str]:
        """Keys expected to be in the chain input."""

    @property
    @abstractmethod
    def output_keys(self) -> list[str]:
        """Keys expected to be in the chain output."""

    def _validate_inputs(self, inputs: Any) -> None:
        """Check that all inputs are present."""
        if not isinstance(inputs, dict):
            _input_keys = set(self.input_keys)
            if self.memory is not None:
                # If there are multiple input keys, but some get set by memory so that
                # only one is not set, we can still figure out which key it is.
                _input_keys = _input_keys.difference(self.memory.memory_variables)
            if len(_input_keys) != 1:
                msg = (
                    f"A single string input was passed in, but this chain expects "
                    f"multiple inputs ({_input_keys}). When a chain expects "
                    f"multiple inputs, please call it by passing in a dictionary, "
                    "eg `chain({'foo': 1, 'bar': 2})`"
                )
                raise ValueError(msg)
        missing_keys = set(self.input_keys).difference(inputs)
        if missing_keys:
            msg = f"Missing some input keys: {missing_keys}"
            raise ValueError(msg)

    def _validate_outputs(self, outputs: dict[str, Any]) -> None:
        missing_keys = set(self.output_keys).difference(outputs)
        if missing_keys:
            msg = f"Missing some output keys: {missing_keys}"
            raise ValueError(msg)

    @abstractmethod
    def _call(
        self,
        inputs: builtins.dict[str, Any],
        run_manager: CallbackManagerForChainRun | None = None,
    ) -> builtins.dict[str, Any]:
        """Execute the chain.

        This is a private method that is not user-facing. It is only called within
        `Chain.__call__`, which is the user-facing wrapper method that handles
        callbacks configuration and some input/output processing.

        Args:
            inputs: A dict of named inputs to the chain. Assumed to contain all inputs
                specified in `Chain.input_keys`, including any inputs added by memory.
            run_manager: The callbacks manager that contains the callback handlers for
                this run of the chain.

        Returns:
            A dict of named outputs. Should contain all outputs specified in
                `Chain.output_keys`.
        """

    async def _acall(
        self,
        inputs: builtins.dict[str, Any],
        run_manager: AsyncCallbackManagerForChainRun | None = None,
    ) -> builtins.dict[str, Any]:
        """Asynchronously execute the chain.

        This is a private method that is not user-facing. It is only called within
        `Chain.acall`, which is the user-facing wrapper method that handles
        callbacks configuration and some input/output processing.

        Args:
            inputs: A dict of named inputs to the chain. Assumed to contain all inputs
                specified in `Chain.input_keys`, including any inputs added by memory.
            run_manager: The callbacks manager that contains the callback handlers for
                this run of the chain.

        Returns:
            A dict of named outputs. Should contain all outputs specified in
                `Chain.output_keys`.
        """
        # Default implementation: run the sync `_call` in an executor so
        # subclasses only have to override one of the two methods.
        return await run_in_executor(
            None,
            self._call,
            inputs,
            run_manager.get_sync() if run_manager else None,
        )

    @deprecated("0.1.0", alternative="invoke", removal="1.0")
    def __call__(
        self,
        inputs: dict[str, Any] | Any,
        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
        callbacks: Callbacks = None,
        *,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        run_name: str | None = None,
        include_run_info: bool = False,
    ) -> dict[str, Any]:
        """Execute the chain.

        Args:
            inputs: Dictionary of inputs, or single input if chain expects
                only one param. Should contain all inputs specified in
                `Chain.input_keys` except for inputs that will be set by the chain's
                memory.
            return_only_outputs: Whether to return only outputs in the
                response. If `True`, only new keys generated by this chain will be
                returned. If `False`, both input keys and new keys generated by this
                chain will be returned.
            callbacks: Callbacks to use for this chain run. These will be called in
                addition to callbacks passed to the chain during construction, but only
                these runtime callbacks will propagate to calls to other objects.
            tags: List of string tags to pass to all callbacks. These will be passed in
                addition to tags passed to the chain during construction, but only
                these runtime tags will propagate to calls to other objects.
            metadata: Optional metadata associated with the chain.
            run_name: Optional name for this run of the chain.
            include_run_info: Whether to include run info in the response. Defaults
                to False.

        Returns:
            A dict of named outputs. Should contain all outputs specified in
                `Chain.output_keys`.
        """
        config = {
            "callbacks": callbacks,
            "tags": tags,
            "metadata": metadata,
            "run_name": run_name,
        }
        # Drop unset options so `ensure_config` can apply its defaults.
        return self.invoke(
            inputs,
            cast("RunnableConfig", {k: v for k, v in config.items() if v is not None}),
            return_only_outputs=return_only_outputs,
            include_run_info=include_run_info,
        )

    @deprecated("0.1.0", alternative="ainvoke", removal="1.0")
    async def acall(
        self,
        inputs: dict[str, Any] | Any,
        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
        callbacks: Callbacks = None,
        *,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        run_name: str | None = None,
        include_run_info: bool = False,
    ) -> dict[str, Any]:
        """Asynchronously execute the chain.

        Args:
            inputs: Dictionary of inputs, or single input if chain expects
                only one param. Should contain all inputs specified in
                `Chain.input_keys` except for inputs that will be set by the chain's
                memory.
            return_only_outputs: Whether to return only outputs in the
                response. If `True`, only new keys generated by this chain will be
                returned. If `False`, both input keys and new keys generated by this
                chain will be returned.
            callbacks: Callbacks to use for this chain run. These will be called in
                addition to callbacks passed to the chain during construction, but only
                these runtime callbacks will propagate to calls to other objects.
            tags: List of string tags to pass to all callbacks. These will be passed in
                addition to tags passed to the chain during construction, but only
                these runtime tags will propagate to calls to other objects.
            metadata: Optional metadata associated with the chain.
            run_name: Optional name for this run of the chain.
            include_run_info: Whether to include run info in the response. Defaults
                to False.

        Returns:
            A dict of named outputs. Should contain all outputs specified in
                `Chain.output_keys`.
        """
        config = {
            "callbacks": callbacks,
            "tags": tags,
            "metadata": metadata,
            "run_name": run_name,
        }
        # Filter on the *values* (keys are never None) so unset options are
        # dropped and `ensure_config` can apply its defaults — mirrors
        # `__call__`; the previous `if k is not None` kept every entry.
        return await self.ainvoke(
            inputs,
            cast("RunnableConfig", {k: v for k, v in config.items() if v is not None}),
            return_only_outputs=return_only_outputs,
            include_run_info=include_run_info,
        )

    def prep_outputs(
        self,
        inputs: dict[str, str],
        outputs: dict[str, str],
        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
    ) -> dict[str, str]:
        """Validate and prepare chain outputs, and save info about this run to memory.

        Args:
            inputs: Dictionary of chain inputs, including any inputs added by chain
                memory.
            outputs: Dictionary of initial chain outputs.
            return_only_outputs: Whether to only return the chain outputs. If `False`,
                inputs are also added to the final outputs.

        Returns:
            A dict of the final chain outputs.
        """
        self._validate_outputs(outputs)
        if self.memory is not None:
            self.memory.save_context(inputs, outputs)
        if return_only_outputs:
            return outputs
        return {**inputs, **outputs}

    async def aprep_outputs(
        self,
        inputs: dict[str, str],
        outputs: dict[str, str],
        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
    ) -> dict[str, str]:
        """Validate and prepare chain outputs, and save info about this run to memory.

        Args:
            inputs: Dictionary of chain inputs, including any inputs added by chain
                memory.
            outputs: Dictionary of initial chain outputs.
            return_only_outputs: Whether to only return the chain outputs. If `False`,
                inputs are also added to the final outputs.

        Returns:
            A dict of the final chain outputs.
        """
        self._validate_outputs(outputs)
        if self.memory is not None:
            await self.memory.asave_context(inputs, outputs)
        if return_only_outputs:
            return outputs
        return {**inputs, **outputs}

    def prep_inputs(self, inputs: dict[str, Any] | Any) -> dict[str, str]:
        """Prepare chain inputs, including adding inputs from memory.

        Args:
            inputs: Dictionary of raw inputs, or single input if chain expects
                only one param. Should contain all inputs specified in
                `Chain.input_keys` except for inputs that will be set by the chain's
                memory.

        Returns:
            A dictionary of all inputs, including those added by the chain's memory.
        """
        if not isinstance(inputs, dict):
            _input_keys = set(self.input_keys)
            if self.memory is not None:
                # If there are multiple input keys, but some get set by memory so that
                # only one is not set, we can still figure out which key it is.
                _input_keys = _input_keys.difference(self.memory.memory_variables)
            inputs = {next(iter(_input_keys)): inputs}
        if self.memory is not None:
            external_context = self.memory.load_memory_variables(inputs)
            inputs = dict(inputs, **external_context)
        return inputs

    async def aprep_inputs(self, inputs: dict[str, Any] | Any) -> dict[str, str]:
        """Prepare chain inputs, including adding inputs from memory.

        Args:
            inputs: Dictionary of raw inputs, or single input if chain expects
                only one param. Should contain all inputs specified in
                `Chain.input_keys` except for inputs that will be set by the chain's
                memory.

        Returns:
            A dictionary of all inputs, including those added by the chain's memory.
        """
        if not isinstance(inputs, dict):
            _input_keys = set(self.input_keys)
            if self.memory is not None:
                # If there are multiple input keys, but some get set by memory so that
                # only one is not set, we can still figure out which key it is.
                _input_keys = _input_keys.difference(self.memory.memory_variables)
            inputs = {next(iter(_input_keys)): inputs}
        if self.memory is not None:
            external_context = await self.memory.aload_memory_variables(inputs)
            inputs = dict(inputs, **external_context)
        return inputs

    @property
    def _run_output_key(self) -> str:
        if len(self.output_keys) != 1:
            msg = (
                f"`run` not supported when there is not exactly "
                f"one output key. Got {self.output_keys}."
            )
            raise ValueError(msg)
        return self.output_keys[0]

    @deprecated("0.1.0", alternative="invoke", removal="1.0")
    def run(
        self,
        *args: Any,
        callbacks: Callbacks = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Convenience method for executing chain.

        The main difference between this method and `Chain.__call__` is that this
        method expects inputs to be passed directly in as positional arguments or
        keyword arguments, whereas `Chain.__call__` expects a single input dictionary
        with all the inputs

        Args:
            *args: If the chain expects a single input, it can be passed in as the
                sole positional argument.
            callbacks: Callbacks to use for this chain run. These will be called in
                addition to callbacks passed to the chain during construction, but only
                these runtime callbacks will propagate to calls to other objects.
            tags: List of string tags to pass to all callbacks. These will be passed in
                addition to tags passed to the chain during construction, but only
                these runtime tags will propagate to calls to other objects.
            metadata: Optional metadata associated with the chain.
            **kwargs: If the chain expects multiple inputs, they can be passed in
                directly as keyword arguments.

        Returns:
            The chain output.

        Example:
            ```python
            # Suppose we have a single-input chain that takes a 'question' string:
            chain.run("What's the temperature in Boise, Idaho?")
            # -> "The temperature in Boise is..."

            # Suppose we have a multi-input chain that takes a 'question' string
            # and 'context' string:
            question = "What's the temperature in Boise, Idaho?"
            context = "Weather report for Boise, Idaho on 07/03/23..."
            chain.run(question=question, context=context)
            # -> "The temperature in Boise is..."
            ```
        """
        # Run at start to make sure this is possible/defined
        _output_key = self._run_output_key
        if args and not kwargs:
            if len(args) != 1:
                msg = "`run` supports only one positional argument."
                raise ValueError(msg)
            return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
                _output_key
            ]
        if kwargs and not args:
            return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
                _output_key
            ]
        if not kwargs and not args:
            msg = (
                "`run` supported with either positional arguments or keyword arguments,"
                " but none were provided."
            )
            raise ValueError(msg)
        msg = (
            f"`run` supported with either positional arguments or keyword arguments"
            f" but not both. Got args: {args} and kwargs: {kwargs}."
        )
        raise ValueError(msg)

    @deprecated("0.1.0", alternative="ainvoke", removal="1.0")
    async def arun(
        self,
        *args: Any,
        callbacks: Callbacks = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Convenience method for executing chain.

        The main difference between this method and `Chain.__call__` is that this
        method expects inputs to be passed directly in as positional arguments or
        keyword arguments, whereas `Chain.__call__` expects a single input dictionary
        with all the inputs

        Args:
            *args: If the chain expects a single input, it can be passed in as the
                sole positional argument.
            callbacks: Callbacks to use for this chain run. These will be called in
                addition to callbacks passed to the chain during construction, but only
                these runtime callbacks will propagate to calls to other objects.
            tags: List of string tags to pass to all callbacks. These will be passed in
                addition to tags passed to the chain during construction, but only
                these runtime tags will propagate to calls to other objects.
            metadata: Optional metadata associated with the chain.
            **kwargs: If the chain expects multiple inputs, they can be passed in
                directly as keyword arguments.

        Returns:
            The chain output.

        Example:
            ```python
            # Suppose we have a single-input chain that takes a 'question' string:
            await chain.arun("What's the temperature in Boise, Idaho?")
            # -> "The temperature in Boise is..."

            # Suppose we have a multi-input chain that takes a 'question' string
            # and 'context' string:
            question = "What's the temperature in Boise, Idaho?"
            context = "Weather report for Boise, Idaho on 07/03/23..."
            await chain.arun(question=question, context=context)
            # -> "The temperature in Boise is..."
            ```
        """
        # Run at start to make sure this is possible/defined (same validation
        # and error message as `run`, via the shared property).
        _output_key = self._run_output_key
        if args and not kwargs:
            if len(args) != 1:
                msg = "`run` supports only one positional argument."
                raise ValueError(msg)
            return (
                await self.acall(
                    args[0],
                    callbacks=callbacks,
                    tags=tags,
                    metadata=metadata,
                )
            )[_output_key]
        if kwargs and not args:
            return (
                await self.acall(
                    kwargs,
                    callbacks=callbacks,
                    tags=tags,
                    metadata=metadata,
                )
            )[_output_key]
        if not kwargs and not args:
            # Mirror `run`: calling with nothing gets its own clear message
            # instead of the misleading "but not both" error.
            msg = (
                "`run` supported with either positional arguments or keyword arguments,"
                " but none were provided."
            )
            raise ValueError(msg)
        msg = (
            f"`run` supported with either positional arguments or keyword arguments"
            f" but not both. Got args: {args} and kwargs: {kwargs}."
        )
        raise ValueError(msg)

    def dict(self, **kwargs: Any) -> dict:
        """Dictionary representation of chain.

        Expects `Chain._chain_type` property to be implemented and for memory to be
            null.

        Args:
            **kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`
                method.

        Returns:
            A dictionary representation of the chain.

        Example:
            ```python
            chain.model_dump(exclude_unset=True)
            # -> {"_type": "foo", "verbose": False, ...}
            ```
        """
        _dict = super().model_dump(**kwargs)
        # Chains that don't implement `_chain_type` simply omit the "_type" key.
        with contextlib.suppress(NotImplementedError):
            _dict["_type"] = self._chain_type
        return _dict

    def save(self, file_path: Path | str) -> None:
        """Save the chain.

        Expects `Chain._chain_type` property to be implemented and for memory to be
            null.

        Args:
            file_path: Path to file to save the chain to.

        Example:
            ```python
            chain.save(file_path="path/chain.yaml")
            ```
        """
        if self.memory is not None:
            msg = "Saving of memory is not yet supported."
            raise ValueError(msg)

        # Fetch dictionary to save
        chain_dict = self.model_dump()
        if "_type" not in chain_dict:
            msg = f"Chain {self} does not support saving."
            raise NotImplementedError(msg)

        # Convert file to Path object.
        save_path = Path(file_path) if isinstance(file_path, str) else file_path

        directory_path = save_path.parent
        directory_path.mkdir(parents=True, exist_ok=True)

        if save_path.suffix == ".json":
            with save_path.open("w") as f:
                json.dump(chain_dict, f, indent=4)
        elif save_path.suffix.endswith((".yaml", ".yml")):
            with save_path.open("w") as f:
                yaml.dump(chain_dict, f, default_flow_style=False)
        else:
            msg = f"{save_path} must be json or yaml"
            raise ValueError(msg)

    @deprecated("0.1.0", alternative="batch", removal="1.0")
    def apply(
        self,
        input_list: list[builtins.dict[str, Any]],
        callbacks: Callbacks = None,
    ) -> list[builtins.dict[str, str]]:
        """Call the chain on all inputs in the list."""
        return [self(inputs, callbacks=callbacks) for inputs in input_list]
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
class TestMultiplexOp(OpTest):
    """Checks the forward output and gradients of the `multiplex` operator.

    Given four candidate tensors x1..x4 and an index column `Ids`, row i of
    the output is row i of candidate `Ids[i]`.
    """

    def setUp(self):
        self.op_type = "multiplex"
        rows = 4
        # A random permutation of [0, rows) selecting one candidate per row.
        index = np.arange(0, rows).astype('int32')
        np.random.shuffle(index)
        index = np.reshape(index, (rows, 1))
        candidates = [
            np.random.random((rows, 10)).astype("float32") for _ in range(4)
        ]
        self.inputs = {
            'Ids': index,
            'X': [('x%d' % (i + 1), arr) for i, arr in enumerate(candidates)],
        }
        # Reference output: row i comes from candidate index[i][0].
        output = np.zeros_like(candidates[0])
        for i in range(0, rows):
            k = index[i][0]
            output[i] = self.inputs['X'][k][1][i]
        self.outputs = {'Out': output}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['x1', 'x2', 'x3', 'x4'], 'Out')

    def test_check_grad_ignore_x1(self):
        # BUG FIX: `set('x1')` iterates the string and yields {'x', '1'},
        # which matches no input name, so x1 was never actually treated as
        # a stop-gradient input. Use a proper one-element set.
        self.check_grad(['x2', 'x3', 'x4'], 'Out', no_grad_set={'x1'})

    def test_check_grad_ignore_x1_x2(self):
        self.check_grad(['x3', 'x4'], 'Out', no_grad_set={'x1', 'x2'})

    def test_check_grad_ignore_x3(self):
        # BUG FIX: same `set('x3')` -> {'x', '3'} issue as above.
        self.check_grad(['x1', 'x2', 'x4'], 'Out', no_grad_set={'x3'})
# Allow running this test file directly, outside the Paddle test runner.
if __name__ == '__main__':
    unittest.main()
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"errors"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// Put gin into test mode for the whole package so that middleware under test
// does not print the debug banner and warnings during test runs.
func init() {
	SetMode(TestMode)
}
// TestLogger verifies that the default Logger middleware writes the status
// code, HTTP method, path and query string for every routed request, and
// that unmatched routes are logged with a 404.
func TestLogger(t *testing.T) {
	buffer := new(strings.Builder)
	router := New()
	router.Use(LoggerWithWriter(buffer))
	router.GET("/example", func(c *Context) {})
	router.POST("/example", func(c *Context) {})
	router.PUT("/example", func(c *Context) {})
	router.DELETE("/example", func(c *Context) {})
	router.PATCH("/example", func(c *Context) {})
	router.HEAD("/example", func(c *Context) {})
	router.OPTIONS("/example", func(c *Context) {})

	// GET also checks that the query string shows up in the log line.
	PerformRequest(router, http.MethodGet, "/example?a=100")
	assert.Contains(t, buffer.String(), "200")
	assert.Contains(t, buffer.String(), http.MethodGet)
	assert.Contains(t, buffer.String(), "/example")
	assert.Contains(t, buffer.String(), "a=100")

	// The remaining methods hit the same handler with the same assertions;
	// exercise them in a loop instead of copy-pasting each section.
	for _, method := range []string{
		http.MethodPost, http.MethodPut, http.MethodDelete,
		"PATCH", "HEAD", "OPTIONS",
	} {
		buffer.Reset()
		PerformRequest(router, method, "/example")
		assert.Contains(t, buffer.String(), "200")
		assert.Contains(t, buffer.String(), method)
		assert.Contains(t, buffer.String(), "/example")
	}

	buffer.Reset()
	PerformRequest(router, http.MethodGet, "/notfound")
	assert.Contains(t, buffer.String(), "404")
	assert.Contains(t, buffer.String(), http.MethodGet)
	assert.Contains(t, buffer.String(), "/notfound")
}
// TestLoggerWithConfig mirrors TestLogger but configures the middleware via
// LoggerWithConfig with an explicit Output writer.
func TestLoggerWithConfig(t *testing.T) {
	buffer := new(strings.Builder)
	router := New()
	router.Use(LoggerWithConfig(LoggerConfig{Output: buffer}))
	router.GET("/example", func(c *Context) {})
	router.POST("/example", func(c *Context) {})
	router.PUT("/example", func(c *Context) {})
	router.DELETE("/example", func(c *Context) {})
	router.PATCH("/example", func(c *Context) {})
	router.HEAD("/example", func(c *Context) {})
	router.OPTIONS("/example", func(c *Context) {})

	// GET also checks that the query string shows up in the log line.
	PerformRequest(router, http.MethodGet, "/example?a=100")
	assert.Contains(t, buffer.String(), "200")
	assert.Contains(t, buffer.String(), http.MethodGet)
	assert.Contains(t, buffer.String(), "/example")
	assert.Contains(t, buffer.String(), "a=100")

	// The remaining methods hit the same handler with the same assertions;
	// exercise them in a loop instead of copy-pasting each section.
	for _, method := range []string{
		http.MethodPost, http.MethodPut, http.MethodDelete,
		"PATCH", "HEAD", "OPTIONS",
	} {
		buffer.Reset()
		PerformRequest(router, method, "/example")
		assert.Contains(t, buffer.String(), "200")
		assert.Contains(t, buffer.String(), method)
		assert.Contains(t, buffer.String(), "/example")
	}

	buffer.Reset()
	PerformRequest(router, http.MethodGet, "/notfound")
	assert.Contains(t, buffer.String(), "404")
	assert.Contains(t, buffer.String(), http.MethodGet)
	assert.Contains(t, buffer.String(), "/notfound")
}
// TestLoggerWithFormatter checks that a custom formatter passed to
// LoggerWithFormatter is applied to the emitted log line.
func TestLoggerWithFormatter(t *testing.T) {
	out := new(strings.Builder)
	// Temporarily redirect the package-level DefaultWriter; restore on exit.
	restore := DefaultWriter
	DefaultWriter = out
	defer func() {
		DefaultWriter = restore
	}()

	router := New()
	router.Use(LoggerWithFormatter(func(param LogFormatterParams) string {
		return fmt.Sprintf("[FORMATTER TEST] %v | %3d | %13v | %15s | %-7s %#v\n%s",
			param.TimeStamp.Format("2006/01/02 - 15:04:05"),
			param.StatusCode,
			param.Latency,
			param.ClientIP,
			param.Method,
			param.Path,
			param.ErrorMessage,
		)
	}))
	router.GET("/example", func(c *Context) {})
	PerformRequest(router, http.MethodGet, "/example?a=100")

	// The custom prefix and all request details must appear in the output.
	for _, want := range []string{"[FORMATTER TEST]", "200", http.MethodGet, "/example", "a=100"} {
		assert.Contains(t, out.String(), want)
	}
}
// TestLoggerWithConfigFormatting checks that a Formatter supplied through
// LoggerConfig receives fully-populated LogFormatterParams.
func TestLoggerWithConfigFormatting(t *testing.T) {
	var capturedParam LogFormatterParams
	var capturedKeys map[any]any
	out := new(strings.Builder)

	router := New()
	router.engine.trustedCIDRs, _ = router.engine.prepareTrustedCIDRs()
	router.Use(LoggerWithConfig(LoggerConfig{
		Output: out,
		Formatter: func(param LogFormatterParams) string {
			capturedParam = param // keep a copy for the assertions below
			return fmt.Sprintf("[FORMATTER TEST] %v | %3d | %13v | %15s | %-7s %s\n%s",
				param.TimeStamp.Format("2006/01/02 - 15:04:05"),
				param.StatusCode,
				param.Latency,
				param.ClientIP,
				param.Method,
				param.Path,
				param.ErrorMessage,
			)
		},
	}))
	router.GET("/example", func(c *Context) {
		// set dummy ClientIP
		c.Request.Header.Set("X-Forwarded-For", "20.20.20.20")
		capturedKeys = c.Keys
		time.Sleep(time.Millisecond) // guarantee a non-zero latency
	})
	PerformRequest(router, http.MethodGet, "/example?a=100")

	// Output assertions: prefix plus every request detail.
	for _, want := range []string{"[FORMATTER TEST]", "200", http.MethodGet, "/example", "a=100"} {
		assert.Contains(t, out.String(), want)
	}

	// LogFormatterParams assertions.
	assert.NotNil(t, capturedParam.Request)
	assert.NotEmpty(t, capturedParam.TimeStamp)
	assert.Equal(t, 200, capturedParam.StatusCode)
	assert.NotEmpty(t, capturedParam.Latency)
	assert.Equal(t, "20.20.20.20", capturedParam.ClientIP)
	assert.Equal(t, http.MethodGet, capturedParam.Method)
	assert.Equal(t, "/example?a=100", capturedParam.Path)
	assert.Empty(t, capturedParam.ErrorMessage)
	assert.Equal(t, capturedKeys, capturedParam.Keys)
}
// TestDefaultLogFormatter pins the exact text produced by defaultLogFormatter
// for terminal and non-terminal output, with short and long latencies.
func TestDefaultLogFormatter(t *testing.T) {
	timeStamp := time.Unix(1544173902, 0).UTC()

	// Helper: build params that differ only in latency and terminal flag.
	newParams := func(latency time.Duration, isTerm bool) LogFormatterParams {
		return LogFormatterParams{
			TimeStamp:    timeStamp,
			StatusCode:   200,
			Latency:      latency,
			ClientIP:     "20.20.20.20",
			Method:       http.MethodGet,
			Path:         "/",
			ErrorMessage: "",
			isTerm:       isTerm,
		}
	}

	shortLatency := time.Second * 5
	longLatency := time.Millisecond * 9876543210

	assert.Equal(t, "[GIN] 2018/12/07 - 09:11:42 | 200 | 5s | 20.20.20.20 | GET \"/\"\n", defaultLogFormatter(newParams(shortLatency, false)))
	assert.Equal(t, "[GIN] 2018/12/07 - 09:11:42 | 200 | 2743h29m0s | 20.20.20.20 | GET \"/\"\n", defaultLogFormatter(newParams(longLatency, false)))
	assert.Equal(t, "[GIN] 2018/12/07 - 09:11:42 |\x1b[97;42m 200 \x1b[0m|\x1b[97;41m 5s \x1b[0m| 20.20.20.20 |\x1b[97;44m GET \x1b[0m \"/\"\n", defaultLogFormatter(newParams(shortLatency, true)))
	assert.Equal(t, "[GIN] 2018/12/07 - 09:11:42 |\x1b[97;42m 200 \x1b[0m|\x1b[97;41m 2743h29m0s \x1b[0m| 20.20.20.20 |\x1b[97;44m GET \x1b[0m \"/\"\n", defaultLogFormatter(newParams(longLatency, true)))
}
// TestColorForMethod checks the ANSI color chosen for each HTTP method.
func TestColorForMethod(t *testing.T) {
	methodColor := func(method string) string {
		p := LogFormatterParams{
			Method: method,
		}
		return p.MethodColor()
	}

	cases := []struct {
		method string
		want   string
		reason string
	}{
		{http.MethodGet, blue, "get should be blue"},
		{http.MethodPost, cyan, "post should be cyan"},
		{http.MethodPut, yellow, "put should be yellow"},
		{http.MethodDelete, red, "delete should be red"},
		{"PATCH", green, "patch should be green"},
		{"HEAD", magenta, "head should be magenta"},
		{"OPTIONS", white, "options should be white"},
		{"TRACE", reset, "trace is not defined and should be the reset color"},
	}
	for _, tc := range cases {
		assert.Equal(t, tc.want, methodColor(tc.method), tc.reason)
	}
}
// TestColorForStatus checks the ANSI color chosen for each status class.
func TestColorForStatus(t *testing.T) {
	statusColor := func(code int) string {
		p := LogFormatterParams{
			StatusCode: code,
		}
		return p.StatusCodeColor()
	}

	cases := []struct {
		code   int
		want   string
		reason string
	}{
		{http.StatusContinue, white, "1xx should be white"},
		{http.StatusOK, green, "2xx should be green"},
		{http.StatusMovedPermanently, white, "3xx should be white"},
		{http.StatusNotFound, yellow, "4xx should be yellow"},
		{2, red, "other things should be red"},
	}
	for _, tc := range cases {
		assert.Equal(t, tc.want, statusColor(tc.code), tc.reason)
	}
}
// TestColorForLatency checks the ANSI color chosen for each latency bucket.
func TestColorForLatency(t *testing.T) {
	// (renamed from the original's misspelled "colorForLantency")
	latencyColor := func(latency time.Duration) string {
		p := LogFormatterParams{
			Latency: latency,
		}
		return p.LatencyColor()
	}

	cases := []struct {
		latency time.Duration
		want    string
		reason  string
	}{
		{time.Duration(0), white, "0 should be white"},
		{time.Millisecond * 20, white, "20ms should be white"},
		{time.Millisecond * 150, green, "150ms should be green"},
		{time.Millisecond * 250, cyan, "250ms should be cyan"},
		{time.Millisecond * 600, yellow, "600ms should be yellow"},
		{time.Millisecond * 1500, magenta, "1.5s should be magenta"},
		{time.Second * 3, red, "other things should be red"},
	}
	for _, tc := range cases {
		assert.Equal(t, tc.want, latencyColor(tc.latency), tc.reason)
	}
}
// TestResetColor checks that ResetColor emits the ANSI reset sequence.
func TestResetColor(t *testing.T) {
	var p LogFormatterParams
	// "\x1b[0m" is the same byte sequence as []byte{27, 91, 48, 109}.
	assert.Equal(t, "\x1b[0m", p.ResetColor())
}
// TestIsOutputColor checks how the global console color mode interacts with
// the per-request terminal flag.
func TestIsOutputColor(t *testing.T) {
	check := func(isTerm, wantAuto, wantForce, wantDisable bool) {
		p := LogFormatterParams{
			isTerm: isTerm,
		}
		consoleColorMode = autoColor
		assert.Equal(t, wantAuto, p.IsOutputColor())
		ForceConsoleColor()
		assert.Equal(t, wantForce, p.IsOutputColor())
		DisableConsoleColor()
		assert.Equal(t, wantDisable, p.IsOutputColor())
	}

	// Attached to a terminal: auto enables color; force/disable override.
	check(true, true, true, false)
	// Not a terminal: auto disables color; force/disable still override.
	check(false, false, true, false)

	// reset console color mode.
	consoleColorMode = autoColor
}
// TestErrorLogger checks that the ErrorLogger middleware appends collected
// context errors to the response body as JSON.
func TestErrorLogger(t *testing.T) {
	router := New()
	router.Use(ErrorLogger())
	router.GET("/error", func(c *Context) {
		c.Error(errors.New("this is an error")) //nolint: errcheck
	})
	router.GET("/abort", func(c *Context) {
		c.AbortWithError(http.StatusUnauthorized, errors.New("no authorized")) //nolint: errcheck
	})
	router.GET("/print", func(c *Context) {
		c.Error(errors.New("this is an error")) //nolint: errcheck
		c.String(http.StatusInternalServerError, "hola!")
	})

	resp := PerformRequest(router, http.MethodGet, "/error")
	assert.Equal(t, http.StatusOK, resp.Code)
	assert.JSONEq(t, `{"error":"this is an error"}`, resp.Body.String())

	resp = PerformRequest(router, http.MethodGet, "/abort")
	assert.Equal(t, http.StatusUnauthorized, resp.Code)
	assert.JSONEq(t, `{"error":"no authorized"}`, resp.Body.String())

	resp = PerformRequest(router, http.MethodGet, "/print")
	assert.Equal(t, http.StatusInternalServerError, resp.Code)
	assert.Equal(t, `hola!{"error":"this is an error"}`, resp.Body.String())
}
// TestLoggerWithWriterSkippingPaths verifies that paths passed to
// LoggerWithWriter are excluded from logging.
func TestLoggerWithWriterSkippingPaths(t *testing.T) {
	buffer := new(strings.Builder)
	router := New()
	router.Use(LoggerWithWriter(buffer, "/skipped"))
	router.GET("/logged", func(c *Context) {})
	router.GET("/skipped", func(c *Context) {})

	PerformRequest(router, http.MethodGet, "/logged")
	assert.Contains(t, buffer.String(), "200")

	buffer.Reset()
	PerformRequest(router, http.MethodGet, "/skipped")
	// BUG FIX: assert.Contains(buffer.String(), "") is vacuously true; the
	// intent is that a skipped path produces no log output at all.
	assert.Empty(t, buffer.String())
}
// TestLoggerWithConfigSkippingPaths verifies that SkipPaths entries in
// LoggerConfig are excluded from logging.
func TestLoggerWithConfigSkippingPaths(t *testing.T) {
	buffer := new(strings.Builder)
	router := New()
	router.Use(LoggerWithConfig(LoggerConfig{
		Output:    buffer,
		SkipPaths: []string{"/skipped"},
	}))
	router.GET("/logged", func(c *Context) {})
	router.GET("/skipped", func(c *Context) {})

	PerformRequest(router, http.MethodGet, "/logged")
	assert.Contains(t, buffer.String(), "200")

	buffer.Reset()
	PerformRequest(router, http.MethodGet, "/skipped")
	// BUG FIX: assert.Contains(buffer.String(), "") is vacuously true; the
	// intent is that a skipped path produces no log output at all.
	assert.Empty(t, buffer.String())
}
// TestLoggerWithConfigSkipper verifies that the Skip predicate suppresses
// logging for requests it matches (here: 204 responses).
func TestLoggerWithConfigSkipper(t *testing.T) {
	buffer := new(strings.Builder)
	router := New()
	router.Use(LoggerWithConfig(LoggerConfig{
		Output: buffer,
		Skip: func(c *Context) bool {
			return c.Writer.Status() == http.StatusNoContent
		},
	}))
	router.GET("/logged", func(c *Context) { c.Status(http.StatusOK) })
	router.GET("/skipped", func(c *Context) { c.Status(http.StatusNoContent) })

	PerformRequest(router, http.MethodGet, "/logged")
	assert.Contains(t, buffer.String(), "200")

	buffer.Reset()
	PerformRequest(router, http.MethodGet, "/skipped")
	// BUG FIX: assert.Contains(buffer.String(), "") is vacuously true; the
	// intent is that a skipped request produces no log output at all.
	assert.Empty(t, buffer.String())
}
// TestDisableConsoleColor checks that DisableConsoleColor switches the global
// console color mode from the default autoColor to disableColor.
func TestDisableConsoleColor(t *testing.T) {
	New()
	assert.Equal(t, autoColor, consoleColorMode)
	DisableConsoleColor()
	assert.Equal(t, disableColor, consoleColorMode)
	// reset console color mode.
	consoleColorMode = autoColor
}
// TestForceConsoleColor checks that ForceConsoleColor switches the global
// console color mode from the default autoColor to forceColor.
func TestForceConsoleColor(t *testing.T) {
	New()
	assert.Equal(t, autoColor, consoleColorMode)
	ForceConsoleColor()
	assert.Equal(t, forceColor, consoleColorMode)
	// reset console color mode.
	consoleColorMode = autoColor
}
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing;
import static java.lang.System.arraycopy;
import com.google.common.annotations.GwtCompatible;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.jspecify.annotations.NullMarked;
import org.jspecify.annotations.Nullable;
/**
* Creates map entries using sample keys and sample values.
*
* @author Jesse Wilson
*/
@GwtCompatible
@NullMarked
public abstract class TestMapEntrySetGenerator<
K extends @Nullable Object, V extends @Nullable Object>
implements TestSetGenerator<Map.Entry<K, V>> {
private final SampleElements<K> keys;
private final SampleElements<V> values;
protected TestMapEntrySetGenerator(SampleElements<K> keys, SampleElements<V> values) {
this.keys = keys;
this.values = values;
}
@Override
public SampleElements<Entry<K, V>> samples() {
return SampleElements.mapEntries(keys, values);
}
@Override
public Set<Entry<K, V>> create(Object... elements) {
Entry<K, V>[] entries = createArray(elements.length);
arraycopy(elements, 0, entries, 0, elements.length);
return createFromEntries(entries);
}
public abstract Set<Entry<K, V>> createFromEntries(Entry<K, V>[] entries);
@Override
@SuppressWarnings("unchecked") // generic arrays make typesafety sad
public Entry<K, V>[] createArray(int length) {
return (Entry<K, V>[]) new Entry<?, ?>[length];
}
/** Returns the original element list, unchanged. */
@Override
public List<Entry<K, V>> order(List<Entry<K, V>> insertionOrder) {
return insertionOrder;
}
} | java | github | https://github.com/google/guava | android/guava-testlib/src/com/google/common/collect/testing/TestMapEntrySetGenerator.java |
// NOTE(review): this appears to be analysis-API test data — the <expr> markers
// presumably delimit the expression whose control-flow exit points are under
// test, and expected outputs may be offset-sensitive; confirm before editing.
fun test() {
    while (cond()) {
        <expr>if (foo() == 5) {
            break
        } else if (foo() == 6) {
            continue
        }</expr>
        consume("foo")
    }
}

fun cond(): Boolean = true

fun foo(): Int = 0

fun consume(text: String?) = {}
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cudnn RNN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import unittest
import numpy as np
from tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.contrib.rnn.python.ops import rnn as contrib_rnn_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl as gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn as rnn_lib
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
# Short aliases for the cuDNN cell-type and direction constants, plus the
# number of canonical parameter regions cuDNN stores per layer for each cell
# type (used below to slice canonical weight/bias lists per layer).
CUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM
CUDNN_GRU = cudnn_rnn_ops.CUDNN_GRU
CUDNN_RNN_RELU = cudnn_rnn_ops.CUDNN_RNN_RELU
CUDNN_RNN_TANH = cudnn_rnn_ops.CUDNN_RNN_TANH

CUDNN_RNN_UNIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
CUDNN_RNN_BIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION

CUDNN_LSTM_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_LSTM_PARAMS_PER_LAYER
CUDNN_GRU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_GRU_PARAMS_PER_LAYER
CUDNN_RNN_TANH_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_TANH_PARAMS_PER_LAYER
CUDNN_RNN_RELU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_RELU_PARAMS_PER_LAYER
class CudnnTestModel(object):
  """Model with convenient APIs for easier building and running test graph.

  The graph built is used by all tests below to avoid repeatedly building
  similar test graphs.
  """

  def __init__(self,
               rnn_mode,
               num_layers,
               num_units,
               input_size,
               direction=CUDNN_RNN_UNIDIRECTION,
               dropout=0.,
               dtype=dtypes.float32,
               training=False,
               kernel_initializer=None,
               bias_initializer=None):
    """Builds the Cudnn RNN forward graph for the given cell type.

    Args:
      rnn_mode: one of CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_TANH, CUDNN_RNN_RELU.
      num_layers: number of stacked RNN layers.
      num_units: hidden size per layer.
      input_size: size of the last input dimension.
      direction: CUDNN_RNN_UNIDIRECTION or CUDNN_RNN_BIDIRECTION.
      dropout: dropout probability passed to the cudnn_rnn layer.
      dtype: dtypes.float32 or dtypes.float64 only.
      training: whether the graph is built in training mode.
      kernel_initializer: optional kernel initializer.
      bias_initializer: optional bias initializer.

    Raises:
      ValueError: if dtype or rnn_mode is unsupported.
    """
    if dtype not in (dtypes.float32, dtypes.float64):
      raise ValueError("Invalid dtype: %s" % dtype)
    self._dtype = dtype

    # Leading dims are left unknown (None) so feeds of any sequence length
    # and batch size are accepted.
    self._inputs = array_ops.placeholder(
        dtype=dtype, shape=[None, None, input_size], name="inputs")
    h = array_ops.placeholder(
        dtype=dtype, shape=[None, None, num_units], name="h")
    c = array_ops.placeholder(
        dtype=dtype, shape=[None, None, num_units], name="c")
    # LSTM state is (h, c); the other cell types carry only (h,).
    if rnn_mode == CUDNN_LSTM:
      model_fn = cudnn_rnn.CudnnLSTM
      self._initial_state = (h, c)
    elif rnn_mode == CUDNN_GRU:
      model_fn = cudnn_rnn.CudnnGRU
      self._initial_state = (h,)
    elif rnn_mode == CUDNN_RNN_TANH:
      model_fn = cudnn_rnn.CudnnRNNTanh
      self._initial_state = (h,)
    elif rnn_mode == CUDNN_RNN_RELU:
      model_fn = cudnn_rnn.CudnnRNNRelu
      self._initial_state = (h,)
    else:
      raise ValueError("Invalid rnn_mode: %s" % rnn_mode)
    self._rnn = model_fn(
        num_layers,
        num_units,
        direction=direction,
        dropout=dropout,
        dtype=dtype,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer)
    self._rnn.build([None, None, input_size])

    self._outputs, self._output_state = self._rnn(
        self._inputs, initial_state=self._initial_state, training=training)

  def _AddUp(self, outputs, output_state):
    # Scalar reduction over outputs and all final-state tensors; used as a
    # simple differentiable summary of the whole forward pass.
    total = math_ops.reduce_sum(outputs)
    for s in output_state:
      total += math_ops.reduce_sum(s)
    return total

  @property
  def inputs(self):
    # Placeholder for the input sequence: [seq_length, batch, input_size].
    return self._inputs

  @property
  def initial_state(self):
    # Tuple of state placeholders: (h, c) for LSTM, (h,) otherwise.
    return self._initial_state

  @property
  def outputs(self):
    return self._outputs

  @property
  def output_state(self):
    return self._output_state

  @property
  def rnn(self):
    # The underlying cudnn_rnn layer object.
    return self._rnn

  @property
  def total_sum(self):
    # Scalar tensor: sum of outputs plus final states.
    return self._AddUp(self.outputs, self.output_state)

  def SynthesizeInput(self, seq_length, batch_size, seed=1234):
    """Synthesizes input and initial state values for testing."""
    np.random.seed(seed)
    num_layers = self._rnn.num_layers
    dir_count = self._rnn.num_dirs
    num_units = self._rnn.num_units
    input_size = self._rnn.input_size

    np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64
    inputs = np.random.randn(seq_length, batch_size,
                             input_size).astype(np_dtype)
    input_h = np.random.randn(num_layers * dir_count, batch_size,
                              num_units).astype(np_dtype)
    if self._rnn.rnn_mode == CUDNN_LSTM:
      input_c = np.random.randn(num_layers * dir_count, batch_size,
                                num_units).astype(np_dtype)
      initial_state = (input_h, input_c)
    else:
      initial_state = (input_h,)
    return inputs, initial_state

  def ZeroState(self, batch_size):
    """Returns all-zero initial-state numpy arrays for `batch_size`."""
    num_layers = self._rnn.num_layers
    dir_count = self._rnn.num_dirs
    num_units = self._rnn.num_units

    np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64
    input_h = np.zeros((num_layers * dir_count, batch_size,
                        num_units)).astype(np_dtype)
    if self._rnn.rnn_mode == CUDNN_LSTM:
      input_c = np.zeros((num_layers * dir_count, batch_size,
                          num_units)).astype(np_dtype)
      initial_state = (input_h, input_c)
    else:
      initial_state = (input_h,)
    return initial_state

  def FProp(self, inputs_t, initial_state_t, training):
    """Builds additional subgraph with given inputs and state.

    Args:
      inputs_t: a tensor.
      initial_state_t: a tensor.
      training: boolean, true if training mode.

    Returns:
      A tensor of the forward pass output of the model.
    """
    outputs, output_state = self._rnn(
        inputs_t, initial_state=initial_state_t, training=training)
    return self._AddUp(outputs, output_state)

  def Feed(self, sess, inputs, initial_state=None, return_sum=True):
    """Runs graph with given inputs and initial state."""
    batch_size = inputs.shape[1]
    if initial_state is None:
      initial_state = self.ZeroState(batch_size)
    if return_sum:
      return sess.run(
          self.total_sum,
          feed_dict={self.inputs: inputs,
                     self.initial_state: initial_state})
    else:
      return sess.run(
          [self.outputs, self.output_state],
          feed_dict={self.inputs: inputs,
                     self.initial_state: initial_state})
def _CreateCudnnCompatibleCanonicalRNN(rnn, inputs, is_bidi=False, scope=None):
  """Builds a canonical (non-fused) TF RNN equivalent to the given cudnn rnn.

  Args:
    rnn: a cudnn_rnn layer whose mode/size/depth are mirrored.
    inputs: time-major input tensor.
    is_bidi: whether to build a bidirectional stack.
    scope: optional variable scope for the canonical rnn.

  Returns:
    (outputs, output_states) from the canonical dynamic rnn.

  Raises:
    ValueError: if the rnn mode is unsupported.
  """
  mode = rnn.rnn_mode
  num_units = rnn.num_units
  num_layers = rnn.num_layers

  # To reuse cuDNN-trained models, must use cudnn compatible rnn cells.
  cell_factories = {
      CUDNN_LSTM: lambda: cudnn_rnn_ops.CudnnCompatibleLSTMCell(num_units),
      CUDNN_GRU: lambda: cudnn_rnn_ops.CudnnCompatibleGRUCell(num_units),
      CUDNN_RNN_TANH:
          lambda: rnn_cell_impl.BasicRNNCell(num_units, math_ops.tanh),
      CUDNN_RNN_RELU:
          lambda: rnn_cell_impl.BasicRNNCell(num_units, gen_nn_ops.relu),
  }
  if mode not in cell_factories:
    raise ValueError("%s is not supported!" % mode)
  single_cell = cell_factories[mode]

  if is_bidi:
    cells_fw = [single_cell() for _ in range(num_layers)]
    cells_bw = [single_cell() for _ in range(num_layers)]
    (outputs, output_state_fw,
     output_state_bw) = contrib_rnn_lib.stack_bidirectional_dynamic_rnn(
         cells_fw,
         cells_bw,
         inputs,
         dtype=dtypes.float32,
         time_major=True,
         scope=scope)
    return outputs, (output_state_fw, output_state_bw)

  cell = rnn_cell_impl.MultiRNNCell([single_cell() for _ in range(num_layers)])
  return rnn_lib.dynamic_rnn(
      cell, inputs, dtype=dtypes.float32, time_major=True, scope=scope)
class CudnnRNNTestBasic(TensorFlowTestCase):

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testLayerBasic(self):
    """Checks layer construction, variable reuse, and zero-init outputs."""
    num_layers = 4
    num_units = 2
    batch_size = 8
    direction = CUDNN_RNN_UNIDIRECTION
    dir_count = 1

    with vs.variable_scope("main"):
      kernel_initializer = init_ops.constant_initializer(0.)
      bias_initializer = init_ops.constant_initializer(0.)
      inputs = random_ops.random_uniform([
          num_layers * dir_count, batch_size, num_units], dtype=dtypes.float32)

      lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
                                 direction=direction,
                                 kernel_initializer=kernel_initializer,
                                 bias_initializer=bias_initializer,
                                 name="awesome_lstm")

      # Build the layer
      outputs1, _ = lstm(inputs)
      # Reuse the layer
      outputs2, _ = lstm(inputs)

      total_sum1 = math_ops.reduce_sum(outputs1)
      total_sum2 = math_ops.reduce_sum(outputs2)

    with vs.variable_scope("main", reuse=True):
      lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
                                 direction=direction,
                                 kernel_initializer=kernel_initializer,
                                 bias_initializer=bias_initializer,
                                 name="awesome_lstm")

      # Reuse the layer
      outputs3, _ = lstm(inputs)
      total_sum3 = math_ops.reduce_sum(outputs3)

    # All three invocations must share a single opaque-kernel variable.
    self.assertEqual(1, len(variables.trainable_variables()))
    self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS)))
    self.assertEqual("main/awesome_lstm/opaque_kernel",
                     variables.trainable_variables()[0].op.name)

    with self.test_session(use_gpu=True) as sess:
      sess.run(variables.global_variables_initializer())
      (total_sum1_v, total_sum2_v, total_sum3_v) = sess.run(
          [total_sum1, total_sum2, total_sum3])
      # Kernel and bias were zero-initialized, so every output sum is zero.
      self.assertEqual(0, total_sum1_v)
      self.assertEqual(0, total_sum2_v)
      self.assertEqual(0, total_sum3_v)
# TODO(jamesqin): Transform to parameterized test after it is included in the
# TF open source codebase.
class CudnnRNNTestSaveRestore(TensorFlowTestCase):
def _CompareWeights(self, lhs, rhs):
self.assertEqual(len(lhs), len(rhs))
for lw, rw in zip(lhs, rhs):
self.assertAllEqual(lw, rw)
def _CompareBiases(self, lhs, rhs, rnn_mode, num_layers, direction):
self.assertEqual(len(lhs), len(rhs))
if rnn_mode == CUDNN_LSTM:
num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER
elif rnn_mode == CUDNN_GRU:
num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER
elif rnn_mode == CUDNN_RNN_TANH:
num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER
else:
num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER
num_dirs = 1 if direction == CUDNN_RNN_UNIDIRECTION else 2
num_params_per_layer *= num_dirs
self.assertEqual(num_params_per_layer * num_layers, len(lhs))
for i in range(num_layers):
layer_lhs = lhs[i * num_params_per_layer: (i+1) * num_params_per_layer]
layer_rhs = rhs[i * num_params_per_layer: (i+1) * num_params_per_layer]
if direction == CUDNN_RNN_UNIDIRECTION:
self._CompareSingleLayerBiases(layer_lhs, layer_rhs)
else:
size = len(layer_lhs)
fw_lhs, bw_lhs = layer_lhs[:size//2], layer_lhs[size//2:]
fw_rhs, bw_rhs = layer_rhs[:size//2], layer_rhs[size//2:]
self._CompareSingleLayerBiases(fw_lhs, fw_rhs)
self._CompareSingleLayerBiases(bw_lhs, bw_rhs)
def _CompareSingleLayerBiases(self, lhs, rhs):
self.assertEqual(len(lhs), len(rhs))
lf_lhs, rt_lhs = lhs[:len(lhs)//2], lhs[len(lhs)//2:]
lf_rhs, rt_rhs = rhs[:len(rhs)//2], rhs[len(rhs)//2:]
self.assertEqual(len(lf_lhs), len(rt_lhs))
self.assertEqual(len(lf_rhs), len(rt_rhs))
sum_lhs, sum_rhs = [], []
for lf, rt in zip(lf_lhs, rt_lhs):
sum_lhs.append(lf + rt)
for lf, rt in zip(lf_rhs, rt_rhs):
sum_rhs.append(lf + rt)
self.assertEqual(len(sum_lhs), len(sum_rhs))
for lf, rt in zip(sum_lhs, sum_rhs):
self.assertAllEqual(lf, rt)
  def _TestSaveRestoreVariable(self, rnn_mode, direction, dtype):
    """Saves one Cudnn RNN variable, zeroes it, restores, and compares.

    The opaque cudnn param blob is converted to canonical weights/biases for
    the comparison so the check is layout-independent.
    """
    input_size = 3
    num_layers = 2
    num_units = 7
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(1234)
      model = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dtype=dtype)
      rnn = model.rnn
      save_path = os.path.join(self.get_temp_dir(),
                               "save-restore-variable-test")
      saver = saver_lib.Saver()
      weights, biases = model.rnn.saveable._OpaqueParamsToCanonical()
      opaque_params = rnn.trainable_variables[0]
      # CudnnTestModel() creates CudnnOpaqueParamsSaveable that helps saver save
      # Cudnn vars in canonical format.
      reset_op = state_ops.assign(
          opaque_params,
          array_ops.zeros(array_ops.shape(opaque_params), dtype=dtype))
      # Passing graph explicitly, otherwise an old sess would be reused.
      with self.test_session(use_gpu=True, graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        val = saver.save(sess, save_path)
        self.assertEqual(save_path, val)

        weights_v, biases_v = sess.run([weights, biases])

        # Reset opaque param
        sess.run(reset_op)
        saver.restore(sess, save_path)
        weights_v_restored, biases_v_restored = sess.run([weights, biases])

        # Restored canonical params must match the pre-reset snapshot.
        self._CompareWeights(weights_v, weights_v_restored)
        self._CompareBiases(biases_v, biases_v_restored, rnn_mode, num_layers,
                            direction)
def _TestSaveRestoreTwoVariables(self, rnn_mode, direction, dtype):
  """Round-trips two independent Cudnn models through a single checkpoint.

  Builds two models in separate variable scopes, saves both opaque blobs,
  zeroes them, restores, and checks each model's canonical weights/biases
  survived unchanged.
  """
  input_size = 3
  num_layers = 2
  num_units = 7
  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(1234)
    with vs.variable_scope("m1"):
      model1 = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dtype=dtype)
    with vs.variable_scope("m2"):
      model2 = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dtype=dtype)
    opaque_params = (model1.rnn.trainable_variables[0],
                     model2.rnn.trainable_variables[0])
    # NOTE(review): private helper expands each opaque blob into canonical
    # per-layer weight/bias tensors for comparison.
    weights1, biases1 = model1.rnn.saveable._OpaqueParamsToCanonical()
    weights2, biases2 = model2.rnn.saveable._OpaqueParamsToCanonical()
    reset_params = [
        state_ops.assign(params,
                         array_ops.zeros_like(params, dtype=dtype))
        for params in opaque_params
    ]
    reset_op = control_flow_ops.group(*reset_params)
    save_path = os.path.join(self.get_temp_dir(),
                             "save-restore-variable-test2")
    saver = saver_lib.Saver()
    # Passing graph explicitly, otherwise an old sess would be reused.
    with self.test_session(use_gpu=True, graph=g) as sess:
      sess.run(variables.global_variables_initializer())
      val = saver.save(sess, save_path)
      self.assertEqual(save_path, val)
      weights1_v, biases1_v = sess.run([weights1, biases1])
      weights2_v, biases2_v = sess.run([weights2, biases2])
      # Zero both blobs, then restore and re-read.
      sess.run(reset_op)
      saver.restore(sess, save_path)
      weights1_v_restored, biases1_v_restored = sess.run([weights1, biases1])
      weights2_v_restored, biases2_v_restored = sess.run([weights2, biases2])
      self._CompareWeights(weights1_v, weights1_v_restored)
      self._CompareWeights(weights2_v, weights2_v_restored)
      self._CompareBiases(biases1_v, biases1_v_restored, rnn_mode, num_layers,
                          direction)
      self._CompareBiases(biases2_v, biases2_v_restored, rnn_mode, num_layers,
                          direction)
def _TestSaveRestoreOutput(self, rnn_mode, direction, dtype):
  """Checks inference output is identical after a save/zero/restore cycle."""
  with ops.Graph().as_default() as g:
    num_layers = 2
    num_units = 7
    input_size = 7
    seq_length = 8
    batch_size = 4
    model = CudnnTestModel(
        rnn_mode,
        num_layers,
        num_units,
        input_size,
        direction=direction,
        dtype=dtype,
        training=False)
    rnn = model.rnn
    save_path = os.path.join(self.get_temp_dir(), "save-restore-output-test")
    saver = saver_lib.Saver()
    # Only one opaque var in a cudnn layer.
    assert len(rnn.trainable_variables) == 1
    reset_params = state_ops.assign(
        rnn.trainable_variables[0],
        array_ops.zeros(
            array_ops.shape(rnn.trainable_variables[0]), dtype=dtype))
    # Passing graph explicitly, otherwise an old sess would be reused.
    with self.test_session(use_gpu=True, graph=g) as sess:
      sess.run(variables.global_variables_initializer())
      inputs, initial_state = model.SynthesizeInput(seq_length, batch_size)
      total_sum_v = model.Feed(sess, inputs, initial_state)
      val = saver.save(sess, save_path)
      self.assertEqual(save_path, val)
      # Wipe the opaque params, then restore and re-run the same input.
      sess.run(reset_params)
      saver.restore(sess, save_path)
      total_sum_v_restored = model.Feed(sess, inputs, initial_state)
      # Pre-save and post-restore outputs must agree (small float slack).
      self.assertAllClose(total_sum_v, total_sum_v_restored, atol=1e-5)
def _TestSaveRestoreHelper(self, rnn_mode):
  """Runs all three save/restore tests over every direction/dtype combo."""
  for direction in (CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION):
    for dtype in (dtypes.float32, dtypes.float64):
      self._TestSaveRestoreVariable(rnn_mode, direction, dtype)
      self._TestSaveRestoreTwoVariables(rnn_mode, direction, dtype)
      self._TestSaveRestoreOutput(rnn_mode, direction, dtype)
@unittest.skipUnless(test.is_built_with_cuda(),
                     "Test only applicable when running on GPUs")
def testSaveRestoreRepeatedlyCreateCustomSaveable(self):
  """A second creation of the custom Cudnn saveable must raise."""
  input_size = 3
  num_layers = 2
  num_units = 7
  with ops.Graph().as_default():
    random_seed.set_random_seed(1234)
    model = CudnnTestModel(
        CUDNN_LSTM,
        num_layers,
        num_units,
        input_size,
        direction=CUDNN_RNN_UNIDIRECTION,
        dtype=dtypes.float32)
    # CudnnTestModel already registered the saveable once; creating it
    # again is rejected.
    with self.assertRaisesRegexp(RuntimeError,
                                 "Cudnn saveable already created"):
      model.rnn._create_saveable()
@unittest.skipUnless(test.is_built_with_cuda(),
                     "Test only applicable when running on GPUs")
def testSaveRestoreLSTM(self):
  """Save/restore coverage for the Cudnn LSTM variant."""
  self._TestSaveRestoreHelper(CUDNN_LSTM)
@unittest.skipUnless(test.is_built_with_cuda(),
                     "Test only applicable when running on GPUs")
def testSaveRestoreGRU(self):
  """Save/restore coverage for the Cudnn GRU variant."""
  self._TestSaveRestoreHelper(CUDNN_GRU)
@unittest.skipUnless(test.is_built_with_cuda(),
                     "Test only applicable when running on GPUs")
def testSaveRestoreRNNTanh(self):
  """Save/restore coverage for the Cudnn tanh RNN variant."""
  self._TestSaveRestoreHelper(CUDNN_RNN_TANH)
@unittest.skipUnless(test.is_built_with_cuda(),
                     "Test only applicable when running on GPUs")
def testSaveRestoreRNNRelu(self):
  """Save/restore coverage for the Cudnn relu RNN variant."""
  self._TestSaveRestoreHelper(CUDNN_RNN_RELU)
# TODO(jamesqin): Transform to parameterized test after it is included in the
# TF open source codebase.
class CudnnRNNTestCompatibleRNNCells(TensorFlowTestCase):
  """Checks Cudnn checkpoints load into Cudnn-compatible canonical cells.

  A Cudnn model is trained and checkpointed; the same checkpoint is then
  restored both into a Cudnn inference graph and into a graph built by
  _CreateCudnnCompatibleCanonicalRNN, and their outputs and final states
  are compared.
  """

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testCudnnCompatibleLSTM(self):
    self._TestCudnnCompatibleRnnCellsHelper(CUDNN_LSTM)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testCudnnCompatibleGRU(self):
    self._TestCudnnCompatibleRnnCellsHelper(CUDNN_GRU)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testCudnnCompatibleRNNTanh(self):
    self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_TANH)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testCudnnCompatibleRNNRelu(self):
    self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_RELU)

  def _TestCudnnCompatibleRnnCellsHelper(self, rnn_mode):
    """Runs the compatibility check over shape/direction combinations."""
    configs = [
        {
            "num_layers": 1,
            "seq_length": 3,
            "num_units": 4,
            "input_size": 5,
            "batch_size": 6,
        },
        {
            "num_layers": 2,
            "seq_length": 8,
            "num_units": 4,
            "input_size": 8,
            "batch_size": 16,
        },
        {
            "num_layers": 2,
            "seq_length": 3,
            "num_units": 4,
            "input_size": 5,
            "batch_size": 6,
        },
        {
            "num_layers": 1,
            "seq_length": 2,
            "num_units": 2,
            "input_size": 4,
            "batch_size": 1,
        },
    ]
    directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
    # NOTE(review): zip() stops at the shorter sequence (2 directions), so
    # only the first two of the four configs are ever exercised — confirm
    # whether itertools.product(configs, directions) was intended.
    for cfg, direction in zip(configs, directions):
      self._TestCudnnCompatibleRnnCells(cfg["num_layers"], cfg["seq_length"],
                                        cfg["num_units"], cfg["input_size"],
                                        cfg["batch_size"], rnn_mode, direction)

  def _TestCudnnCompatibleRnnCells(self, num_layers, seq_length, num_units,
                                   input_size, batch_size, rnn_mode, direction):
    """Trains a Cudnn model, then compares Cudnn vs canonical-cell inference."""
    dtype = dtypes.float32
    # Train graph
    with ops.Graph().as_default() as g:
      model = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dtype=dtype,
          training=True)
      target_output = array_ops.placeholder(dtype=dtype)
      loss_op = losses.log_loss(
          labels=target_output, predictions=model.total_sum)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1e-2)
      train_op = optimizer.minimize(loss_op)

      saver = saver_lib.Saver()

      # Train Cudnn model
      seed = 0
      with self.test_session(use_gpu=True, graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        # Train 128 steps
        num_steps = 128
        for _ in range(num_steps):
          inputs, _ = model.SynthesizeInput(seq_length, batch_size, seed)
          targets = np.random.rand()
          sess.run(
              train_op,
              feed_dict={
                  model.inputs: inputs,
                  model.initial_state: model.ZeroState(batch_size),
                  target_output: targets
              })
          seed += 1

        save_path = os.path.join(self.get_temp_dir(),
                                 ("cudnn-rnn-%s-test" % rnn_mode))
        save_v = saver.save(sess, save_path)
        self.assertEqual(save_path, save_v)

    # Cudnn inference graph
    with ops.Graph().as_default() as g:
      model = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dtype=dtype,
          training=False)
      rnn = model.rnn
      saver = saver_lib.Saver()

      inference_input = np.random.rand(seq_length, batch_size,
                                       input_size).astype(np.float32)
      with self.test_session(use_gpu=True, graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        saver.restore(sess, save_path)

        # Cudnn inference
        cudnn_outputs_v, cudnn_output_states_v = model.Feed(
            sess, inference_input, return_sum=False)

    # Canonical RNN inference graph
    with ops.Graph().as_default() as g:
      cell_inputs = array_ops.placeholder(
          dtype, shape=[seq_length, batch_size, input_size])
      if direction == CUDNN_RNN_UNIDIRECTION:
        # outputs is one tensor, states are num_layer tuples, each 2 tensors
        (outputs, states) = _CreateCudnnCompatibleCanonicalRNN(rnn, cell_inputs)
        if rnn_mode == CUDNN_LSTM:
          output_h = array_ops.stack([s.h for s in states])
          output_c = array_ops.stack([s.c for s in states])
        else:
          output_state = array_ops.stack([s for s in states])
      else:
        # outputs is one tensor.
        # states is a tuple of 2 tuples:
        # each sub tuple is num_layer tuples, each with 2 tensors.
        (outputs, states) = _CreateCudnnCompatibleCanonicalRNN(
            rnn, cell_inputs, is_bidi=True)
        output_state_fw, output_state_bw = states
        if rnn_mode == CUDNN_LSTM:
          output_h, output_c = [], []
          for s_fw, s_bw in zip(output_state_fw, output_state_bw):
            output_h.append(array_ops.stack([s_fw.h, s_bw.h]))
            output_c.append(array_ops.stack([s_fw.c, s_bw.c]))
          output_h = array_ops.concat(output_h, axis=0)
          output_c = array_ops.concat(output_c, axis=0)
        else:
          output_state = []
          for s_fw, s_bw in zip(output_state_fw, output_state_bw):
            output_state.append(array_ops.stack([s_fw, s_bw]))
          output_state = array_ops.concat(output_state, axis=0)
      saver = saver_lib.Saver()
      with self.test_session(use_gpu=True, graph=g) as sess:
        saver.restore(sess, save_path)
        # BlockCell inference
        if rnn_mode == CUDNN_LSTM:
          outputs_v, output_h_v, output_c_v = sess.run(
              [outputs, output_h, output_c],
              feed_dict={cell_inputs: inference_input})
          self.assertAllClose(cudnn_outputs_v, outputs_v)
          cudnn_output_h_v, cudnn_output_c_v = cudnn_output_states_v
          self.assertAllClose(cudnn_output_h_v, output_h_v)
          self.assertAllClose(cudnn_output_c_v, output_c_v)
        else:
          outputs_v, output_state_v = sess.run(
              [outputs, output_state],
              feed_dict={cell_inputs: inference_input})
          self.assertAllClose(cudnn_outputs_v, outputs_v, atol=1e-5, rtol=1e-5)
          (cudnn_output_h_v,) = cudnn_output_states_v
          self.assertAllClose(cudnn_output_h_v, output_state_v, atol=1e-5,
                              rtol=1e-5)
class CudnnRNNTestParamsSize(TensorFlowTestCase):
  """Sanity-checks the size of the opaque Cudnn parameter blob."""

  def _TestOpaqueParamsSize(self, rnn_mode, num_layers, num_units, input_size,
                            direction):
    """Asserts the opaque blob holds at least all canonical weights/biases.

    Args:
      rnn_mode: one of the CUDNN_* rnn-type constants.
      num_layers: number of stacked RNN layers.
      num_units: hidden size per layer.
      input_size: input feature dimension.
      direction: CUDNN_RNN_UNIDIRECTION or CUDNN_RNN_BIDIRECTION.
    """
    logging.info("Testing one lstm param size with config: %s", locals())
    dtype = dtypes.float32

    model = CudnnTestModel(
        rnn_mode,
        num_layers,
        num_units,
        input_size,
        dtype=dtype,
        direction=direction)
    rnn = model.rnn

    # Min param size estimate = sum(weights.size) + sum(biases.size)
    # Use a list comprehension instead of np.sum(map(np.prod, ...)):
    # under Python 3, `map` returns an iterator, which np.sum does not
    # reduce element-wise, yielding a wrong result instead of the total.
    min_params_size = (
        np.sum([np.prod(shape) for shape in rnn.canonical_weight_shapes]) +
        np.sum([sp[0] for sp in rnn.canonical_bias_shapes]))

    opaque_params = rnn.trainable_variables[0]
    with self.test_session(use_gpu=True, graph=ops.get_default_graph()):
      variables.global_variables_initializer().run()
      opaque_params_size_v = opaque_params.eval().size
      self.assertLessEqual(min_params_size, opaque_params_size_v)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testOpaqueParamsSize(self):
    """Covers all rnn types over several shapes and both directions."""
    test_configs = [
        [4, 200, 200],
        [4, 200, 300],
        [4, 200, 100],
        [1, 100, 200],
        [2, 200, 100],
        [3, 200, 400],
    ]
    directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
    rnns = [CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_RELU, CUDNN_RNN_TANH]
    for (rnn, config, direction) in itertools.product(rnns, test_configs,
                                                      directions):
      num_layers, num_units, input_size = config
      with ops.Graph().as_default():
        self._TestOpaqueParamsSize(rnn, num_layers, num_units, input_size,
                                   direction)
class CudnnRNNTestTraining(TensorFlowTestCase):
  """Compares symbolic Cudnn RNN gradients to numeric finite differences."""

  def _ComputeNumericGrad(self, sess, y, x, delta=1e-4, step=1):
    """Compute the numeric gradient of y wrt to x.

    Args:
      sess: The TF session constructed with a graph containing x and y.
      y: A scalar TF Tensor in the graph constructed in sess.
      x: A TF Tensor in the graph constructed in sess.
      delta: Gradient checker's small perturbation of x[i].
      step: Only compute numerical gradients for a subset of x values.
        I.e. dy/dx[i] is computed if i % step == 0.

    Returns:
      A Tensor of the same shape and dtype as x. If x[i] is not chosen
      to compute the numerical gradient dy/x[i], the corresponding
      value is set to 0.
    """
    x_data = sess.run(x)
    x_size = x_data.size
    x_shape = x_data.shape

    numeric_grad = np.zeros(x_size, dtype=x_data.dtype)

    for i in range(0, x_size, step):
      # Central difference: perturb element i by +/- delta and feed the
      # perturbed value in by tensor name, overriding x for this run only.
      x_pos = x_data.copy()
      if x_size == 1:
        x_pos += delta
      else:
        x_pos.flat[i] += delta
      y_pos_feed_dict = dict([(x.name, x_pos)])
      y_pos = sess.run(y, feed_dict=y_pos_feed_dict)

      x_neg = x_data.copy()
      if x_size == 1:
        x_neg -= delta
      else:
        x_neg.flat[i] -= delta
      y_neg_feed_dict = dict([(x.name, x_neg)])
      y_neg = sess.run(y, feed_dict=y_neg_feed_dict)
      numeric_grad[i] = (y_pos - y_neg) / (2 * delta)
    return numeric_grad.reshape(x_shape)

  def _GradientCheck(self, sess, y, xs, tolerance=1e-6, delta=1e-4):
    """Checks symbolic gradients of y wrt each x in xs against numeric ones."""
    sym_grads_t = gradients.gradients(y, xs)
    sym_grads = sess.run(sym_grads_t)

    num_grads = [self._ComputeNumericGrad(sess, y, x, delta) for x in xs]
    self.assertEqual(len(sym_grads), len(num_grads))
    for sym, num in zip(sym_grads, num_grads):
      self.assertFalse(np.any(np.isnan(sym)))
      self.assertFalse(np.any(np.isnan(num)))
      self.assertAllClose(sym, num, atol=tolerance, rtol=tolerance)

  def _TestOneSimpleTraining(self, rnn_mode, num_layers, num_units, input_size,
                             batch_size, seq_length, dir_count, dropout, dtype,
                             delta, tolerance):
    """Builds one model and gradient-checks inputs, params, initial state."""
    # Gradient checking runs two forward ops with almost the same input. Need to
    # make sure the drop patterns across the two runs are the same.
    logging.info("Training test with config: %s", locals())
    # NOTE(review): if the env var was unset beforehand, it is "restored"
    # to the string "False" rather than removed — confirm that is intended.
    old_env_state = os.environ.get("TF_CUDNN_RESET_RND_GEN_STATE", str(False))
    os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = str(True)

    random_seed.set_random_seed(5678)
    has_input_c = (rnn_mode == CUDNN_LSTM)
    direction = (CUDNN_RNN_UNIDIRECTION
                 if dir_count == 1 else CUDNN_RNN_BIDIRECTION)
    model = CudnnTestModel(
        rnn_mode,
        num_layers,
        num_units,
        input_size,
        direction=direction,
        dropout=dropout,
        dtype=dtype,
        training=True,
        bias_initializer=init_ops.random_normal_initializer(
            mean=1., dtype=dtype))
    rnn = model.rnn
    params = rnn.trainable_variables[0]

    inputs = variables.Variable(
        random_ops.random_uniform(
            [seq_length, batch_size, input_size], dtype=dtype),
        dtype=dtype)
    input_h = variables.Variable(
        random_ops.random_uniform(
            [num_layers * dir_count, batch_size, num_units], dtype=dtype),
        dtype=dtype)
    if has_input_c:
      # LSTM additionally carries a cell state.
      input_c = variables.Variable(
          random_ops.random_uniform(
              [num_layers * dir_count, batch_size, num_units], dtype=dtype),
          dtype=dtype)
      initial_state = (input_h, input_c)
    else:
      initial_state = (input_h,)
    total_sum = model.FProp(inputs, initial_state, training=True)

    with self.test_session(use_gpu=True, graph=ops.get_default_graph()) as sess:
      sess.run(variables.global_variables_initializer())
      all_inputs = [inputs, params]
      for s in initial_state:
        all_inputs.append(s)
      self._GradientCheck(
          sess, total_sum, all_inputs, tolerance=tolerance, delta=delta)
    os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = old_env_state

  def _TestSimpleTrainingHelper(self, rnn_mode, test_configs):
    """Runs each config at dropout 0, 0.5 and 1."""
    dropouts = [0., 0.5, 1.]
    for config, dropout in itertools.product(test_configs, dropouts):
      dtype = config.get("dtype", dtypes.float32)
      delta = config.get("delta", 1e-4)
      tolerance = config.get("tolerance", 1e-6)
      dir_count = config.get("dir_count", 1)
      shape = config["shape"]
      with ops.Graph().as_default():
        self._TestOneSimpleTraining(rnn_mode, shape["num_layers"],
                                    shape["num_units"], shape["input_size"],
                                    shape["batch_size"], shape["seq_length"],
                                    dir_count, dropout, dtype, delta, tolerance)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingLSTM64(self):
    test_configs = [
        {
            "dtype": dtypes.float64,
            "tolerance": 5e-6,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingLSTM32(self):
    test_configs = [
        {
            "dtype": dtypes.float32,
            "delta": 1e-4,
            "tolerance": 9e-2,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingGRU64(self):
    test_configs = [
        {
            "dtype": dtypes.float64,
            "tolerance": 5e-6,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            }
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingGRU32(self):
    test_configs = [
        {
            "dtype": dtypes.float32,
            "delta": 1e-3,
            "tolerance": 4e-3,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNTanh64(self):
    test_configs = [
        {
            "dtype": dtypes.float64,
            "tolerance": 5e-6,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNTanh32(self):
    test_configs = [
        {
            "dtype": dtypes.float32,
            "delta": 1e-3,
            "tolerance": 5e-3,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNRelu64(self):
    test_configs = [
        {
            "dtype": dtypes.float64,
            "tolerance": 5e-6,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNRelu32(self):
    test_configs = [
        {
            "dtype": dtypes.float32,
            "delta": 1e-3,
            "tolerance": 7e-2,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)
# Allow running this test file directly.
if __name__ == "__main__":
  googletest.main()
#ifndef SEND_PACK_H
#define SEND_PACK_H
#include "string-list.h"
struct child_process;
struct oid_array;
struct ref;
struct repository;
/* Possible values for push_cert field in send_pack_args. */
#define SEND_PACK_PUSH_CERT_NEVER 0
#define SEND_PACK_PUSH_CERT_IF_ASKED 1
#define SEND_PACK_PUSH_CERT_ALWAYS 2
/* At least one reference has been rejected by the remote side. */
#define ERROR_SEND_PACK_BAD_REF_STATUS 1
/* Options accepted by send_pack(); see the macros above for push_cert. */
struct send_pack_args {
	const char *url;
	/* Single-bit behavior toggles; push_cert takes two bits. */
	unsigned verbose:1,
		quiet:1,
		porcelain:1,
		progress:1,
		send_mirror:1,
		force_update:1,
		use_thin_pack:1,
		use_ofs_delta:1,
		dry_run:1,
		/* One of the SEND_PACK_PUSH_CERT_* constants. */
		push_cert:2,
		stateless_rpc:1,
		atomic:1,
		disable_bitmaps:1;
	/* NOTE(review): presumably the push options sent to the remote;
	 * confirm semantics in send_pack(). */
	const struct string_list *push_options;
};
struct option;
int option_parse_push_signed(const struct option *opt,
const char *arg, int unset);
/*
* Compute a packfile and write it to a file descriptor. The `fd` array needs
* to contain two file descriptors: `fd[0]` is the file descriptor used as
* input for the packet reader, whereas `fd[1]` is the file descriptor the
* packfile will be written to.
*
* Returns 0 on success, non-zero otherwise. Negative return values indicate a
* generic error, whereas positive return values indicate specific error
* conditions as documented with the `ERROR_SEND_PACK_*` constants.
*/
int send_pack(struct repository *r, struct send_pack_args *args,
int fd[], struct child_process *conn,
struct ref *remote_refs, struct oid_array *extra_have);
#endif | c | github | https://github.com/git/git | send-pack.h |
import unittest
import builtins
import os
from platform import system as platform_system
class ExceptionClassTests(unittest.TestCase):
"""Tests for anything relating to exception objects themselves (e.g.,
inheritance hierarchy)"""
def test_builtins_new_style(self):
self.assertIsSubclass(Exception, object)
def verify_instance_interface(self, ins):
for attr in ("args", "__str__", "__repr__"):
self.assertHasAttr(ins, attr)
def test_inheritance(self):
# Make sure the inheritance hierarchy matches the documentation
exc_set = set()
for object_ in builtins.__dict__.values():
try:
if issubclass(object_, BaseException):
exc_set.add(object_.__name__)
except TypeError:
pass
inheritance_tree = open(
os.path.join(os.path.split(__file__)[0], 'exception_hierarchy.txt'),
encoding="utf-8")
try:
superclass_name = inheritance_tree.readline().rstrip()
try:
last_exc = getattr(builtins, superclass_name)
except AttributeError:
self.fail("base class %s not a built-in" % superclass_name)
self.assertIn(superclass_name, exc_set,
'%s not found' % superclass_name)
exc_set.discard(superclass_name)
superclasses = [] # Loop will insert base exception
last_depth = 0
for exc_line in inheritance_tree:
exc_line = exc_line.rstrip()
depth = exc_line.rindex('─')
exc_name = exc_line[depth+2:] # Slice past space
if '(' in exc_name:
paren_index = exc_name.index('(')
platform_name = exc_name[paren_index+1:-1]
exc_name = exc_name[:paren_index-1] # Slice off space
if platform_system() != platform_name:
exc_set.discard(exc_name)
continue
if '[' in exc_name:
left_bracket = exc_name.index('[')
exc_name = exc_name[:left_bracket-1] # cover space
try:
exc = getattr(builtins, exc_name)
except AttributeError:
self.fail("%s not a built-in exception" % exc_name)
if last_depth < depth:
superclasses.append((last_depth, last_exc))
elif last_depth > depth:
while superclasses[-1][0] >= depth:
superclasses.pop()
self.assertIsSubclass(exc, superclasses[-1][1],
"%s is not a subclass of %s" % (exc.__name__,
superclasses[-1][1].__name__))
try: # Some exceptions require arguments; just skip them
self.verify_instance_interface(exc())
except TypeError:
pass
self.assertIn(exc_name, exc_set)
exc_set.discard(exc_name)
last_exc = exc
last_depth = depth
finally:
inheritance_tree.close()
# Underscore-prefixed (private) exceptions don't need to be documented
exc_set = set(e for e in exc_set if not e.startswith('_'))
self.assertEqual(len(exc_set), 0, "%s not accounted for" % exc_set)
interface_tests = ("length", "args", "str", "repr")
def interface_test_driver(self, results):
for test_name, (given, expected) in zip(self.interface_tests, results):
self.assertEqual(given, expected, "%s: %s != %s" % (test_name,
given, expected))
def test_interface_single_arg(self):
# Make sure interface works properly when given a single argument
arg = "spam"
exc = Exception(arg)
results = ([len(exc.args), 1], [exc.args[0], arg],
[str(exc), str(arg)],
[repr(exc), '%s(%r)' % (exc.__class__.__name__, arg)])
self.interface_test_driver(results)
def test_interface_multi_arg(self):
# Make sure interface correct when multiple arguments given
arg_count = 3
args = tuple(range(arg_count))
exc = Exception(*args)
results = ([len(exc.args), arg_count], [exc.args, args],
[str(exc), str(args)],
[repr(exc), exc.__class__.__name__ + repr(exc.args)])
self.interface_test_driver(results)
def test_interface_no_arg(self):
# Make sure that with no args that interface is correct
exc = Exception()
results = ([len(exc.args), 0], [exc.args, tuple()],
[str(exc), ''],
[repr(exc), exc.__class__.__name__ + '()'])
self.interface_test_driver(results)
def test_setstate_refcount_no_crash(self):
# gh-97591: Acquire strong reference before calling tp_hash slot
# in PyObject_SetAttr.
import gc
d = {}
class HashThisKeyWillClearTheDict(str):
def __hash__(self) -> int:
d.clear()
return super().__hash__()
class Value(str):
pass
exc = Exception()
d[HashThisKeyWillClearTheDict()] = Value() # refcount of Value() is 1 now
# Exception.__setstate__ should acquire a strong reference of key and
# value in the dict. Otherwise, Value()'s refcount would go below
# zero in the tp_hash call in PyObject_SetAttr(), and it would cause
# crash in GC.
exc.__setstate__(d) # __hash__() is called again here, clearing the dict.
# This GC would crash if the refcount of Value() goes below zero.
gc.collect()
class UsageTests(unittest.TestCase):

    """Test usage of exceptions"""

    def raise_fails(self, object_):
        """Make sure that raising 'object_' triggers a TypeError."""
        try:
            raise object_
        except TypeError:
            return  # What is expected.
        self.fail("TypeError expected for raising %s" % type(object_))

    def catch_fails(self, object_):
        """Catching 'object_' should raise a TypeError."""
        # First form: bare `except object_`.
        try:
            try:
                raise Exception
            except object_:
                pass
        except TypeError:
            # Expected; fall through to also check the tuple form.
            pass
        except Exception:
            self.fail("TypeError expected when catching %s" % type(object_))

        # Second form: `except (object_,)` — object_ inside a tuple.
        try:
            try:
                raise Exception
            except (object_,):
                pass
        except TypeError:
            return  # Both forms behaved as expected.
        except Exception:
            self.fail("TypeError expected when catching %s as specified in a "
                      "tuple" % type(object_))

    def test_raise_new_style_non_exception(self):
        # You cannot raise a new-style class that does not inherit from
        # BaseException; the ability was not possible until BaseException's
        # introduction so no need to support new-style objects that do not
        # inherit from it.
        class NewStyleClass(object):
            pass
        self.raise_fails(NewStyleClass)
        self.raise_fails(NewStyleClass())

    def test_raise_string(self):
        # Raising a string raises TypeError.
        self.raise_fails("spam")

    def test_catch_non_BaseException(self):
        # Trying to catch an object that does not inherit from BaseException
        # is not allowed.
        class NonBaseException(object):
            pass
        self.catch_fails(NonBaseException)
        self.catch_fails(NonBaseException())

    def test_catch_BaseException_instance(self):
        # Catching an instance of a BaseException subclass won't work.
        self.catch_fails(BaseException())

    def test_catch_string(self):
        # Catching a string is bad.
        self.catch_fails("spam")
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
//
// stream_file.hpp
// ~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_STREAM_FILE_HPP
#define BOOST_ASIO_STREAM_FILE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#if defined(BOOST_ASIO_HAS_FILE) \
|| defined(GENERATING_DOCUMENTATION)
#include <boost/asio/basic_stream_file.hpp>
namespace boost {
namespace asio {

/// Typedef for the typical usage of a stream-oriented file.
/// Instantiates basic_stream_file with its default template arguments.
typedef basic_stream_file<> stream_file;

} // namespace asio
} // namespace boost
#endif // defined(BOOST_ASIO_HAS_FILE)
// || defined(GENERATING_DOCUMENTATION)
#endif // BOOST_ASIO_STREAM_FILE_HPP | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/asio/stream_file.hpp |
use std::borrow::Cow;
use std::error::Error;
use std::fmt;
use rustc_error_messages::fluent_bundle::resolver::errors::{ReferenceKind, ResolverError};
use rustc_error_messages::{FluentArgs, FluentError};
/// A failure to turn a diagnostic's Fluent message into rendered text.
#[derive(Debug)]
pub enum TranslateError<'args> {
    /// A single translation attempt failed.
    One {
        /// Fluent message identifier being formatted.
        id: &'args Cow<'args, str>,
        /// Arguments supplied to the Fluent message.
        args: &'args FluentArgs<'args>,
        /// Why this translation attempt failed.
        kind: TranslateErrorKind<'args>,
    },
    /// Both the primary-bundle attempt and the fallback-bundle attempt failed.
    Two {
        primary: Box<TranslateError<'args>>,
        fallback: Box<TranslateError<'args>>,
    },
}
impl<'args> TranslateError<'args> {
    /// The message `id` was missing entirely.
    pub fn message(id: &'args Cow<'args, str>, args: &'args FluentArgs<'args>) -> Self {
        Self::One { id, args, kind: TranslateErrorKind::MessageMissing }
    }

    /// The primary bundle was missing while formatting `id`.
    pub fn primary(id: &'args Cow<'args, str>, args: &'args FluentArgs<'args>) -> Self {
        Self::One { id, args, kind: TranslateErrorKind::PrimaryBundleMissing }
    }

    /// The attribute `attr` was missing on message `id`.
    pub fn attribute(
        id: &'args Cow<'args, str>,
        args: &'args FluentArgs<'args>,
        attr: &'args str,
    ) -> Self {
        Self::One { id, args, kind: TranslateErrorKind::AttributeMissing { attr } }
    }

    /// The message `id` had no value to format.
    pub fn value(id: &'args Cow<'args, str>, args: &'args FluentArgs<'args>) -> Self {
        Self::One { id, args, kind: TranslateErrorKind::ValueMissing }
    }

    /// Fluent itself reported errors while resolving message `id`.
    pub fn fluent(
        id: &'args Cow<'args, str>,
        args: &'args FluentArgs<'args>,
        errs: Vec<FluentError>,
    ) -> Self {
        Self::One { id, args, kind: TranslateErrorKind::Fluent { errs } }
    }

    /// Pairs `self` (the primary-bundle failure) with the fallback-bundle failure.
    pub fn and(self, fallback: TranslateError<'args>) -> TranslateError<'args> {
        Self::Two { primary: Box::new(self), fallback: Box::new(fallback) }
    }
}
/// The specific reason a single translation attempt failed.
#[derive(Debug)]
pub enum TranslateErrorKind<'args> {
    /// The message itself was not found in the bundle.
    MessageMissing,
    /// No primary bundle was available.
    PrimaryBundleMissing,
    /// The requested attribute was not present on the message.
    AttributeMissing { attr: &'args str },
    /// The message had no value.
    ValueMissing,
    /// Errors reported by the Fluent resolver.
    Fluent { errs: Vec<FluentError> },
}
impl fmt::Display for TranslateError<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use TranslateErrorKind::*;

        match self {
            Self::One { id, args, kind } => {
                writeln!(f, "failed while formatting fluent string `{id}`: ")?;
                match kind {
                    MessageMissing => writeln!(f, "message was missing")?,
                    PrimaryBundleMissing => writeln!(f, "the primary bundle was missing")?,
                    AttributeMissing { attr } => {
                        writeln!(f, "the attribute `{attr}` was missing")?;
                        writeln!(f, "help: add `.{attr} = <message>`")?;
                    }
                    ValueMissing => writeln!(f, "the value was missing")?,
                    Fluent { errs } => {
                        for err in errs {
                            match err {
                                FluentError::ResolverError(ResolverError::Reference(
                                    ReferenceKind::Message { id, .. }
                                    | ReferenceKind::Variable { id, .. },
                                )) => {
                                    // Distinguish "argument exists but was referenced
                                    // without `$`" from "argument genuinely absent".
                                    if args.iter().any(|(arg_id, _)| arg_id == id) {
                                        writeln!(
                                            f,
                                            "argument `{id}` exists but was not referenced correctly"
                                        )?;
                                        writeln!(f, "help: try using `{{${id}}}` instead")?;
                                    } else {
                                        writeln!(
                                            f,
                                            "the fluent string has an argument `{id}` that was not found."
                                        )?;
                                        // Suggest what arguments ARE available.
                                        let vars: Vec<&str> =
                                            args.iter().map(|(a, _v)| a).collect();
                                        match &*vars {
                                            [] => writeln!(f, "help: no arguments are available")?,
                                            [one] => writeln!(
                                                f,
                                                "help: the argument `{one}` is available"
                                            )?,
                                            [first, middle @ .., last] => {
                                                write!(f, "help: the arguments `{first}`")?;
                                                for a in middle {
                                                    write!(f, ", `{a}`")?;
                                                }
                                                writeln!(f, " and `{last}` are available")?;
                                            }
                                        }
                                    }
                                }
                                _ => writeln!(f, "{err}")?,
                            }
                        }
                    }
                }
            }
            // If someone cares about primary bundles, they'll probably notice it's missing
            // regardless or will be using `debug_assertions`
            // so we skip the arm below this one to avoid confusing the regular user.
            Self::Two { primary: box Self::One { kind: PrimaryBundleMissing, .. }, fallback } => {
                fmt::Display::fmt(fallback, f)?;
            }
            Self::Two { primary, fallback } => {
                writeln!(
                    f,
                    "first, fluent formatting using the primary bundle failed:\n {primary}\n \
                     while attempting to recover by using the fallback bundle instead, another error occurred:\n{fallback}"
                )?;
            }
        }
        Ok(())
    }
}
impl Error for TranslateError<'_> {} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_errors/src/error.rs |
#!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation tooling: the module is
# community-supported and still in preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_servicebustopicsubscription
version_added: "2.8"
short_description: Manage Azure Service Bus subscription
description:
- Create, update or delete an Azure Service Bus subscriptions.
options:
resource_group:
description:
- Name of resource group.
required: true
name:
description:
- Name of the servicebus subscription.
required: true
state:
description:
- Assert the state of the servicebus subscription. Use C(present) to create or update and use C(absent) to delete.
default: present
choices:
- absent
- present
namespace:
description:
- Servicebus namespace name.
- A namespace is a scoping container for all messaging components.
- Multiple subscriptions and topics can reside within a single namespace, and namespaces often serve as application containers.
required: true
topic:
description:
- Topic name which the subscription subscribe to.
required: true
auto_delete_on_idle_in_seconds:
description:
- Time idle interval after which a subscription is automatically deleted.
- The minimum duration is 5 minutes.
type: int
dead_lettering_on_message_expiration:
description:
- A value that indicates whether a subscription has dead letter support when a message expires.
type: bool
dead_lettering_on_filter_evaluation_exceptions:
description:
- Value that indicates whether a subscription has dead letter support on filter evaluation exceptions.
type: bool
default_message_time_to_live_seconds:
description:
- Default message timespan to live value.
- This is the duration after which the message expires, starting from when the message is sent to Service Bus.
- This is the default value used when TimeToLive is not set on a message itself.
type: int
enable_batched_operations:
description:
- Value that indicates whether server-side batched operations are enabled.
type: bool
forward_dead_lettered_messages_to:
description:
- Queue or topic name to forward the Dead Letter message for a subscription.
forward_to:
description:
- Queue or topic name to forward the messages for a subscription.
lock_duration_in_seconds:
description:
- Timespan duration of a peek-lock.
- The amount of time that the message is locked for other receivers.
- The maximum value for LockDuration is 5 minutes.
type: int
max_delivery_count:
description:
            - The maximum delivery count.
- A message is automatically deadlettered after this number of deliveries.
type: int
requires_session:
description:
- A value that indicates whether the subscription supports the concept of sessions.
type: bool
duplicate_detection_time_in_seconds:
description:
- TimeSpan structure that defines the duration of the duplicate detection history.
type: int
status:
description:
- Status of the entity.
choices:
- active
- disabled
- send_disabled
- receive_disabled
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Create a subscription
azure_rm_servicebustopicsubscription:
name: sbsub
resource_group: myResourceGroup
namespace: bar
topic: subtopic
'''
RETURN = '''
id:
description:
- Current state of the subscription.
returned: success
type: str
sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/
namespaces/nsb57dc95979/topics/topicb57dc95979/subscriptions/subsb57dc95979"
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
from ansible.module_utils._text import to_native
from datetime import datetime, timedelta
# Maps timedelta-valued Azure SDK attribute names to the second-based integer
# option names this module exposes (used both when building SDK params and
# when flattening results in to_dict()).
duration_spec_map = dict(
    default_message_time_to_live='default_message_time_to_live_seconds',
    duplicate_detection_history_time_window='duplicate_detection_time_in_seconds',
    auto_delete_on_idle='auto_delete_on_idle_in_seconds',
    lock_duration='lock_duration_in_seconds'
)
class AzureRMServiceSubscription(AzureRMModuleBase):
    """Ansible module managing an Azure Service Bus topic subscription.

    Implements create/update (state=present) and delete (state=absent),
    with check-mode support. Instantiating the class runs the module.
    """
    def __init__(self):
        # One spec entry per documented module option; the *_seconds integer
        # options are converted to timedelta before being sent to the SDK.
        self.module_arg_spec = dict(
            auto_delete_on_idle_in_seconds=dict(type='int'),
            dead_lettering_on_filter_evaluation_exceptions=dict(type='bool'),
            dead_lettering_on_message_expiration=dict(type='bool'),
            default_message_time_to_live_seconds=dict(type='int'),
            duplicate_detection_time_in_seconds=dict(type='int'),
            enable_batched_operations=dict(type='bool'),
            forward_dead_lettered_messages_to=dict(type='str'),
            forward_to=dict(type='str'),
            lock_duration_in_seconds=dict(type='int'),
            max_delivery_count=dict(type='int'),
            name=dict(type='str', required=True),
            namespace=dict(type='str', required=True),
            requires_session=dict(type='bool'),
            resource_group=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            status=dict(type='str',
                        choices=['active', 'disabled', 'send_disabled', 'receive_disabled']),
            topic=dict(type='str', required=True)
        )
        # Option values are copied onto the instance by exec_module().
        self.auto_delete_on_idle_in_seconds = None
        self.dead_lettering_on_filter_evaluation_exceptions = None
        self.dead_lettering_on_message_expiration = None
        self.default_message_time_to_live_seconds = None
        self.duplicate_detection_time_in_seconds = None
        self.enable_batched_operations = None
        self.forward_dead_lettered_messages_to = None
        self.forward_to = None
        self.lock_duration_in_seconds = None
        self.max_delivery_count = None
        self.name = None
        self.namespace = None
        self.requires_session = None
        self.resource_group = None
        self.state = None
        self.status = None
        self.topic = None
        self.results = dict(
            changed=False,
            id=None
        )
        super(AzureRMServiceSubscription, self).__init__(self.module_arg_spec,
                                                         supports_check_mode=True)
    def exec_module(self, **kwargs):
        """Reconcile the requested state against the live subscription."""
        for key in list(self.module_arg_spec.keys()):
            setattr(self, key, kwargs[key])
        changed = False
        original = self.get()
        if self.state == 'present':
            # Create the resource instance
            params = dict(
                dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions,
                dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration,
                enable_batched_operations=self.enable_batched_operations,
                forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to,
                forward_to=self.forward_to,
                max_delivery_count=self.max_delivery_count,
                requires_session=self.requires_session
            )
            if self.status:
                # NOTE(review): str.capitalize() lowercases everything after the
                # first character, so 'send_disabled' becomes 'Senddisabled',
                # not 'SendDisabled' -- verify EntityStatus accepts this form.
                params['status'] = self.servicebus_models.EntityStatus(str.capitalize(_snake_to_camel(self.status)))
            # Convert the second-based integer options into timedelta values.
            for k, v in duration_spec_map.items():
                seconds = getattr(self, v)
                if seconds:
                    params[k] = timedelta(seconds=seconds)
            instance = self.servicebus_models.SBSubscription(**params)
            result = original
            if not original:
                changed = True
                result = instance
            else:
                result = original
                # Diff only attributes not listed in _validation
                # (presumably the read-only model fields -- TODO confirm
                # against the msrest SBSubscription definition).
                attribute_map_keys = set(self.servicebus_models.SBSubscription._attribute_map.keys())
                validation_keys = set(self.servicebus_models.SBSubscription._validation.keys())
                attribute_map = attribute_map_keys - validation_keys
                for attribute in attribute_map:
                    value = getattr(instance, attribute)
                    if value and value != getattr(original, attribute):
                        changed = True
            if changed and not self.check_mode:
                result = self.create_or_update(instance)
            self.results = self.to_dict(result)
        elif original:
            # state == 'absent' and the subscription exists: remove it.
            changed = True
            if not self.check_mode:
                self.delete()
                self.results['deleted'] = True
        self.results['changed'] = changed
        return self.results
    def create_or_update(self, param):
        """Create or update the subscription; fail the module on any SDK error."""
        try:
            client = self._get_client()
            return client.create_or_update(self.resource_group, self.namespace, self.topic, self.name, param)
        except Exception as exc:
            self.fail("Error creating or updating servicebus subscription {0} - {1}".format(self.name, str(exc)))
    def delete(self):
        """Delete the subscription; fail the module on any SDK error."""
        try:
            client = self._get_client()
            client.delete(self.resource_group, self.namespace, self.topic, self.name)
            return True
        except Exception as exc:
            self.fail("Error deleting servicebus subscription {0} - {1}".format(self.name, str(exc)))
    def _get_client(self):
        """Return the Service Bus subscriptions operations client."""
        return self.servicebus_client.subscriptions
    def get(self):
        """Return the existing subscription, or None if the lookup fails."""
        try:
            client = self._get_client()
            return client.get(self.resource_group, self.namespace, self.topic, self.name)
        except Exception:
            # Any lookup failure (including "not found") maps to None.
            return None
    def to_dict(self, instance):
        """Flatten an SBSubscription model into the module's result dict."""
        result = dict()
        attribute_map = self.servicebus_models.SBSubscription._attribute_map
        for attribute in attribute_map.keys():
            value = getattr(instance, attribute)
            if not value:
                continue
            if attribute_map[attribute]['type'] == 'duration':
                # Durations equal to the "never expires" sentinel are dropped.
                if is_valid_timedelta(value):
                    key = duration_spec_map.get(attribute) or attribute
                    result[key] = int(value.total_seconds())
            elif attribute == 'status':
                result['status'] = _camel_to_snake(value)
            elif isinstance(value, self.servicebus_models.MessageCountDetails):
                result[attribute] = value.as_dict()
            elif isinstance(value, self.servicebus_models.SBSku):
                result[attribute] = value.name.lower()
            elif isinstance(value, datetime):
                result[attribute] = str(value)
            elif isinstance(value, str):
                result[attribute] = to_native(value)
            elif attribute == 'max_size_in_megabytes':
                result['max_size_in_mb'] = value
            else:
                result[attribute] = value
        return result
def is_valid_timedelta(value):
    """Filter out the Service Bus "never expires" sentinel duration.

    Azure reports unset durations as TimeSpan.MaxValue
    (10675199 days, 02:48:05.4775807). Return ``None`` for that sentinel
    and the value itself otherwise, so callers can use it in a truth test.
    """
    sentinel = timedelta(days=10675199, seconds=10085, microseconds=477581)
    return None if value == sentinel else value
def main():
    """Module entry point: instantiating the class runs the module logic."""
    AzureRMServiceSubscription()
if __name__ == '__main__':
    main()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None, name=None):
  """Computes the percentage of times that predictions matches labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
      matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
      bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.
    name: A name for the operation (optional).

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or
      if dtype is not bool, integer, or string.
  """
  # Only bool, integer and string labels support an exact-equality notion
  # of "correct"; reject everything else up front.
  label_dtype_ok = labels.dtype.is_integer or labels.dtype in (dtypes.bool,
                                                               dtypes.string)
  if not label_dtype_ok:
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope(name, 'accuracy', values=[predictions, labels]):
    matches = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is None:
      # Unweighted case: plain fraction of matching elements.
      return math_ops.reduce_mean(matches)
    # Weighted case: weighted matches over total weight.
    matches = math_ops.multiply(matches, weights)
    total_weight = math_ops.multiply(weights, array_ops.ones_like(matches))
    return math_ops.div(math_ops.reduce_sum(matches),
                        math_ops.reduce_sum(total_weight))
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import TestGyp
# Generator-specific gyp test harness for this script.
test = TestGyp.TestGyp()
# The xcode-ninja generator handles gypfiles which are not at the
# project root incorrectly.
# cf. https://code.google.com/p/gyp/issues/detail?id=460
if test.format == 'xcode-ninja':
  test.skip_test()
test.run_gyp('build/all.gyp', chdir='src')
# Move the tree before building to verify the generated files are relocatable.
test.relocate('src', 'relocate/src')
test.build('build/all.gyp', test.ALL, chdir='relocate/src')
chdir = 'relocate/src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format in ('make', 'ninja', 'cmake'):
  chdir = 'relocate/src'
# xcode puts each built product next to its own project.
if test.format == 'xcode':
  chdir = 'relocate/src/prog1'
test.run_built_executable('program1',
                          chdir=chdir,
                          stdout="Hello from prog1.c\n")
if test.format == 'xcode':
  chdir = 'relocate/src/prog2'
test.run_built_executable('program2',
                          chdir=chdir,
                          stdout="Hello from prog2.c\n")
test.pass_test()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import re
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from typing import Annotated, Literal
from pydantic import (
AfterValidator,
AliasGenerator,
AwareDatetime,
BaseModel,
BeforeValidator,
ConfigDict,
field_validator,
model_serializer,
model_validator,
)
from airflow._shared.timezones import timezone
# Timezone-aware datetime that is normalized to UTC during validation.
UtcDateTime = Annotated[AwareDatetime, AfterValidator(lambda d: d.astimezone(timezone.utc))]
"""UTCDateTime is a datetime with timezone information"""
def _validate_timedelta_field(td: timedelta | None) -> TimeDelta | None:
    """Convert a ``datetime.timedelta`` into the API ``TimeDelta`` model.

    ``None`` passes through unchanged so optional fields stay optional.
    """
    if td is None:
        return None
    days, seconds, microseconds = td.days, td.seconds, td.microseconds
    return TimeDelta(days=days, seconds=seconds, microseconds=microseconds)
class TimeDelta(BaseModel):
    """TimeDelta can be used to interact with datetime.timedelta objects.

    Mirrors timedelta's normalized (days, seconds, microseconds) fields.
    """
    # Discriminator field; serialized under the alias "__type" (see below).
    object_type: str = "TimeDelta"
    days: int
    seconds: int
    microseconds: int
    # Rename only object_type -> "__type" on serialization; all other
    # fields keep their own names.
    model_config = ConfigDict(
        alias_generator=AliasGenerator(
            serialization_alias=lambda field_name: {
                "object_type": "__type",
            }.get(field_name, field_name),
        )
    )
class Mimetype(str, Enum):
    """Mimetype for the `Content-Type` header."""
    TEXT = "text/plain"
    JSON = "application/json"
    FORM = "application/x-www-form-urlencoded"
    NDJSON = "application/x-ndjson"
    # Wildcard: matches any content type.
    ANY = "*/*"
@dataclass
class ExtraMenuItem:
    """Define a menu item that can be added to the menu by auth managers or plugins."""
    # Label displayed for the menu entry.
    text: str
    # Destination URL of the menu entry.
    href: str
class MenuItem(Enum):
    """Define all menu items defined in the menu.

    Enum values are the human-readable labels shown in the UI.
    """
    REQUIRED_ACTIONS = "Required Actions"
    ASSETS = "Assets"
    AUDIT_LOG = "Audit Log"
    CONFIG = "Config"
    CONNECTIONS = "Connections"
    DAGS = "Dags"
    DOCS = "Docs"
    JOBS = "Jobs"
    PLUGINS = "Plugins"
    POOLS = "Pools"
    PROVIDERS = "Providers"
    VARIABLES = "Variables"
    XCOMS = "XComs"
class UIAlert(BaseModel):
    """Optional alert to be shown at the top of the page."""
    # Alert message body.
    text: str
    # Visual severity of the alert.
    category: Literal["info", "warning", "error"]
class OklchColor(BaseModel):
    """Validates OKLCH color format from string oklch(l c h).

    Accepts either a mapping of components or a raw ``oklch(l c h)`` string;
    serializes back to the string form. Component ranges: lightness in
    [0, 1], chroma in [0, 0.5], hue in [0, 360].
    """
    lightness: float
    chroma: float
    hue: float
    @model_validator(mode="before")
    @classmethod
    def parse_oklch_string(cls, data):
        """Parse a raw ``oklch(l c h)`` string into a component mapping."""
        if isinstance(data, str):
            # Negative numbers are matched here so they reach the per-field
            # validators, which produce the more specific range errors.
            oklch_regex_pattern = r"^oklch\((-?\d+(?:\.\d+)?) (-?\d+(?:\.\d+)?) (-?\d+(?:\.\d+)?)\)$"
            match = re.match(oklch_regex_pattern, data)
            if not match:
                raise ValueError(f"Invalid OKLCH format: {data} Expected format oklch(l c h)")
            # Fixed local-name typo: was `ligthness_str`.
            lightness_str, chroma_str, hue_str = match.groups()
            return {
                "lightness": float(lightness_str),
                "chroma": float(chroma_str),
                "hue": float(hue_str),
            }
        return data
    @field_validator("lightness")
    @classmethod
    def validate_lightness(cls, value: float) -> float:
        """Reject lightness outside [0, 1]."""
        if value < 0 or value > 1:
            raise ValueError(f"Invalid lightness: {value} Must be between 0 and 1")
        return value
    @field_validator("chroma")
    @classmethod
    def validate_chroma(cls, value: float) -> float:
        """Reject chroma outside [0, 0.5]."""
        if value < 0 or value > 0.5:
            raise ValueError(f"Invalid chroma: {value} Must be between 0 and 0.5")
        return value
    @field_validator("hue")
    @classmethod
    def validate_hue(cls, value: float) -> float:
        """Reject hue outside [0, 360] degrees."""
        if value < 0 or value > 360:
            raise ValueError(f"Invalid hue: {value} Must be between 0 and 360")
        return value
    @model_serializer(mode="plain")
    def serialize_model(self) -> str:
        """Serialize back to the canonical ``oklch(l c h)`` string."""
        return f"oklch({self.lightness} {self.chroma} {self.hue})"
class Theme(BaseModel):
    """JSON to modify Chakra's theme.

    The nested Literal keys pin the accepted payload to exactly
    ``tokens.colors.brand.<shade>.value`` with OKLCH color values.
    """
    tokens: dict[
        Literal["colors"],
        dict[
            Literal["brand"],
            dict[
                Literal["50", "100", "200", "300", "400", "500", "600", "700", "800", "900", "950"],
                dict[Literal["value"], OklchColor],
            ],
        ],
    ]
    # Free-form global CSS overrides, passed through unchanged.
    globalCss: dict[str, dict] | None = None
from config import config, ConfigSelection, ConfigSubsection
from boxbranding import getBoxType, getMachineBuild
def InitHdmiRecord():
	"""Register the config.hdmirecord settings tree used for HDMI-in recording.

	Defines bitrate, frame width/height, framerate, interlacing and aspect
	ratio options; boxes in the full-HD list additionally get 1920x1080
	choices.
	"""
	# Boxes whose HDMI-in path supports full 1920x1080 capture.
	full_hd = getMachineBuild() in ('et10000','dm900', 'dm920', 'et13000', 'sf5008', 'vuuno4kse', 'vuduo4k') or getBoxType() in ('spycat4k','spycat4kcombo','gbquad4k')
	config.hdmirecord = ConfigSubsection()
	choices = [
		("512000", "0.5 Mb/s"),
		("1024000", "1 Mb/s"),
		("2048000", "2 Mb/s"),
		("3072000", "3 Mb/s"),
		("4096000", "4 Mb/s"),
		("5120000", "5 Mb/s"),
		("6144000", "6 Mb/s"),
		("7168000", "7 Mb/s"),
		("8192000", "8 Mb/s"),
		("9216000", "9 Mb/s"),
		("10240000", "10 Mb/s"),
		("15360000", "15 Mb/s"),
		("20480000", "20 Mb/s"),
		("25600000", "25 Mb/s"),
	]
	config.hdmirecord.bitrate = ConfigSelection(choices, default="5120000")
	# Frame widths, expressed as common fractions of SD (720) / FullHD (1920).
	choices = [
		("180", "180"), # SD / 4
		("240", "240"), # FullHD / 8, SD / 3
		("320", "320"), # FullHD / 6
		("360", "360"), # SD / 2
		("384", "384"), # FullHD / 5
		("480", "480"), # FullHD / 4
		("640", "640"), # FullHD / 3
		("720", "720"), # SD
		("960", "960"), # FullHD / 2
		("1280", "1280"), # FullHD / 1.5
	]
	if(full_hd):
		choices.append(("1920", "1920")) # FullHD
	config.hdmirecord.width = ConfigSelection(choices, default="1280")
	# Frame heights, matching the width fractions above (SD=576, FullHD=1080).
	choices = [
		("144", "144"), # SD / 4
		("135", "135"), # FullHD / 8
		("192", "192"), # SD / 3
		("180", "180"), # FullHD / 6
		("288", "288"), # SD / 2
		("216", "216"), # FullHD / 5
		("270", "270"), # FullHD / 4
		("360", "360"), # FullHD / 3
		("576", "576"), # SD
		("540", "540"), # FullHD / 2
		("720", "720"), # FullHD / 1.5
	]
	if(full_hd):
		choices.append(("1080", "1080")) # FullHD
	config.hdmirecord.height = ConfigSelection(choices, default="720")
	# Values are millihertz-style strings (e.g. "60000" == 60 fps).
	config.hdmirecord.framerate = ConfigSelection(
		choices=[
			("24000", "24"),
			("25000", "25"),
			("30000", "30"),
			("50000", "50"),
			("60000", "60"),
		], default="60000")
	# Intentionally not a boolean because the API expects an integer parsed from the string
	config.hdmirecord.interlaced = ConfigSelection(
		choices=[
			("0", _("No")),
			("1", _("Yes")),
		], default="0")
	config.hdmirecord.aspectratio = ConfigSelection(
		choices=[
			("0", "Auto"),
			("1", "4:3"),
			("2", "16:9"),
		], default="0")
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from ast import literal_eval
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class Project(models.Model):
    _inherit = 'project.project'
    # Sales order item the whole project bills against; used as the fallback
    # SO item for timesheets with no task-level mapping.
    sale_line_id = fields.Many2one(
        'sale.order.line', 'Sales Order Item', copy=False,
        domain="[('is_expense', '=', False), ('order_id', '=', sale_order_id), ('state', 'in', ['sale', 'done']), '|', ('company_id', '=', False), ('company_id', '=', company_id)]",
        help="Sales order item to which the project is linked. If an employee timesheets on a task that does not have a "
             "sale order item defines, and if this employee is not in the 'Employee/Sales Order Item Mapping' of the project, "
             "the timesheet entry will be linked to the sales order item defined on the project.")
    sale_order_id = fields.Many2one('sale.order', 'Sales Order', domain="[('partner_id', '=', partner_id)]", readonly=True, copy=False, help="Sales order to which the project is linked.")
    _sql_constraints = [
        ('sale_order_required_if_sale_line', "CHECK((sale_line_id IS NOT NULL AND sale_order_id IS NOT NULL) OR (sale_line_id IS NULL))", 'The Project should be linked to a Sale Order to select an Sale Order Items.'),
    ]
    @api.model
    def _map_tasks_default_valeus(self, task, project):
        """Never propagate the SO item when duplicating tasks to a new project.

        NOTE(review): the name keeps the upstream typo ('valeus') because it
        overrides the base project hook of the same name -- do not rename.
        """
        defaults = super()._map_tasks_default_valeus(task, project)
        defaults['sale_line_id'] = False
        return defaults
class ProjectTask(models.Model):
    _inherit = "project.task"
    sale_order_id = fields.Many2one('sale.order', 'Sales Order', help="Sales order to which the task is linked.")
    sale_line_id = fields.Many2one(
        'sale.order.line', 'Sales Order Item', domain="[('is_service', '=', True), ('order_partner_id', 'child_of', commercial_partner_id), ('is_expense', '=', False), ('state', 'in', ['sale', 'done']), ('order_id', '=?', project_sale_order_id)]",
        compute='_compute_sale_line', store=True, readonly=False, copy=False,
        help="Sales order item to which the task is linked. If an employee timesheets on a this task, "
             "and if this employee is not in the 'Employee/Sales Order Item Mapping' of the project, the "
             "timesheet entry will be linked to this sales order item.")
    project_sale_order_id = fields.Many2one('sale.order', string="project's sale order", related='project_id.sale_order_id')
    invoice_count = fields.Integer("Number of invoices", related='sale_order_id.invoice_count')
    task_to_invoice = fields.Boolean("To invoice", compute='_compute_task_to_invoice', search='_search_task_to_invoice', groups='sales_team.group_sale_salesman_all_leads')
    @api.depends('project_id.sale_line_id.order_partner_id')
    def _compute_partner_id(self):
        """Default the task customer from the project's SO item customer."""
        for task in self:
            if not task.partner_id:
                task.partner_id = task.project_id.sale_line_id.order_partner_id
        super()._compute_partner_id()
    @api.depends('partner_id.commercial_partner_id', 'sale_line_id.order_partner_id.commercial_partner_id', 'parent_id.sale_line_id', 'project_id.sale_line_id')
    def _compute_sale_line(self):
        """Default the SO item from the parent task or project, and drop it
        when it no longer matches the task's customer."""
        for task in self:
            if not task.sale_line_id:
                task.sale_line_id = task.parent_id.sale_line_id or task.project_id.sale_line_id
            # check sale_line_id and customer are coherent
            if task.sale_line_id.order_partner_id.commercial_partner_id != task.partner_id.commercial_partner_id:
                task.sale_line_id = False
    @api.constrains('sale_line_id')
    def _check_sale_line_type(self):
        """Only service, non-expense order lines may be linked to a task."""
        for task in self.sudo():
            if task.sale_line_id:
                if not task.sale_line_id.is_service or task.sale_line_id.is_expense:
                    # Interpolate AFTER translation so the untranslated template
                    # can be found in the translation catalog (the previous code
                    # translated the already-interpolated string, which always
                    # missed the catalog).
                    raise ValidationError(_(
                        'You cannot link the order item %s - %s to this task because it is a re-invoiced expense.'
                    ) % (task.sale_line_id.order_id.id, task.sale_line_id.product_id.name))
    def unlink(self):
        """Refuse deletion while the task is still linked to an SO item."""
        if any(task.sale_line_id for task in self):
            raise ValidationError(_('You have to unlink the task from the sale order item in order to delete it.'))
        return super().unlink()
    # ---------------------------------------------------
    # Actions
    # ---------------------------------------------------
    def _get_action_view_so_ids(self):
        """Sales order ids shown by action_view_so(); hook for extension."""
        return self.sale_order_id.ids
    def action_view_so(self):
        """Open the linked sales order(s): form view for one, list for many."""
        self.ensure_one()
        so_ids = self._get_action_view_so_ids()
        action_window = {
            "type": "ir.actions.act_window",
            "res_model": "sale.order",
            "name": "Sales Order",
            "views": [[False, "tree"], [False, "form"]],
            "context": {"create": False, "show_sale": True},
            "domain": [["id", "in", so_ids]],
        }
        if len(so_ids) == 1:
            action_window["views"] = [[False, "form"]]
            action_window["res_id"] = so_ids[0]
        return action_window
    def rating_get_partner_id(self):
        """Rate against the task customer, falling back to the SO customer."""
        partner = self.partner_id or self.sale_line_id.order_id.partner_id
        if partner:
            return partner
        return super().rating_get_partner_id()
    @api.depends('sale_order_id.invoice_status', 'sale_order_id.order_line')
    def _compute_task_to_invoice(self):
        """A task is "to invoice" while its SO is neither fully invoiced nor
        marked as nothing-to-invoice."""
        for task in self:
            if task.sale_order_id:
                task.task_to_invoice = bool(task.sale_order_id.invoice_status not in ('no', 'invoiced'))
            else:
                task.task_to_invoice = False
    @api.model
    def _search_task_to_invoice(self, operator, value):
        """Search method for task_to_invoice: matches tasks whose SO still has
        something to invoice (invoice_status not in ('no', 'invoiced'))."""
        query = """
            SELECT so.id
            FROM sale_order so
            WHERE so.invoice_status != 'invoiced'
                AND so.invoice_status != 'no'
        """
        operator_new = 'inselect'
        # Negate the subquery when the domain asks for the falsy side
        # (('=', False) or ('!=', True)).
        if(bool(operator == '=') ^ bool(value)):
            operator_new = 'not inselect'
        return [('sale_order_id', operator_new, (query, ()))]
    def action_create_invoice(self):
        """Confirm draft/sent SOs, then open the standard invoicing wizard."""
        # ensure the SO exists before invoicing, then confirm it
        so_to_confirm = self.filtered(
            lambda task: task.sale_order_id and task.sale_order_id.state in ['draft', 'sent']
        ).mapped('sale_order_id')
        so_to_confirm.action_confirm()
        # redirect create invoice wizard (of the Sales Order)
        action = self.env.ref('sale.action_view_sale_advance_payment_inv').read()[0]
        context = literal_eval(action.get('context', "{}"))
        context.update({
            'active_id': self.sale_order_id.id if len(self) == 1 else False,
            'active_ids': self.mapped('sale_order_id').ids,
            'default_company_id': self.company_id.id,
        })
        action['context'] = context
        return action
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The group_snapshots api."""
from oslo_log import log as logging
import six
import webob
from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.v3.views import group_snapshots as group_snapshot_views
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _, _LI
LOG = logging.getLogger(__name__)
GROUP_SNAPSHOT_API_VERSION = '3.14'
class GroupSnapshotsController(wsgi.Controller):
    """The group_snapshots API controller for the OpenStack API."""
    _view_builder_class = group_snapshot_views.ViewBuilder
    def __init__(self):
        # All snapshot operations are delegated to the group API layer.
        self.group_snapshot_api = group_api.API()
        super(GroupSnapshotsController, self).__init__()
    @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION)
    def show(self, req, id):
        """Return data about the given group_snapshot."""
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']
        # NotFound from the API layer propagates and is mapped at the
        # wsgi level.
        group_snapshot = self.group_snapshot_api.get_group_snapshot(
            context,
            group_snapshot_id=id)
        return self._view_builder.detail(req, group_snapshot)
    @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION)
    def delete(self, req, id):
        """Delete a group_snapshot."""
        LOG.debug('delete called for member %s', id)
        context = req.environ['cinder.context']
        LOG.info(_LI('Delete group_snapshot with id: %s'), id, context=context)
        try:
            group_snapshot = self.group_snapshot_api.get_group_snapshot(
                context,
                group_snapshot_id=id)
            self.group_snapshot_api.delete_group_snapshot(context,
                                                          group_snapshot)
        except exception.InvalidGroupSnapshot as e:
            raise exc.HTTPBadRequest(explanation=six.text_type(e))
        except exception.GroupSnapshotNotFound:
            # Not found exception will be handled at the wsgi level
            raise
        except Exception:
            # Anything else becomes a 400 with a generic message; the
            # details are kept in the log.
            msg = _("Error occurred when deleting group snapshot %s.") % id
            LOG.exception(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        # 202: deletion is asynchronous.
        return webob.Response(status_int=202)
    @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION)
    def index(self, req):
        """Returns a summary list of group_snapshots."""
        return self._get_group_snapshots(req, is_detail=False)
    @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION)
    def detail(self, req):
        """Returns a detailed list of group_snapshots."""
        return self._get_group_snapshots(req, is_detail=True)
    def _get_group_snapshots(self, req, is_detail):
        """Returns a list of group_snapshots through view builder."""
        context = req.environ['cinder.context']
        group_snapshots = self.group_snapshot_api.get_all_group_snapshots(
            context)
        # Apply the standard marker/limit pagination to the full list.
        limited_list = common.limited(group_snapshots, req)
        if is_detail:
            group_snapshots = self._view_builder.detail_list(req, limited_list)
        else:
            group_snapshots = self._view_builder.summary_list(req,
                                                              limited_list)
        return group_snapshots
    @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION)
    @wsgi.response(202)
    def create(self, req, body):
        """Create a new group_snapshot."""
        LOG.debug('Creating new group_snapshot %s', body)
        self.assert_valid_body(body, 'group_snapshot')
        context = req.environ['cinder.context']
        group_snapshot = body['group_snapshot']
        self.validate_name_and_description(group_snapshot)
        try:
            group_id = group_snapshot['group_id']
        except KeyError:
            msg = _("'group_id' must be specified")
            raise exc.HTTPBadRequest(explanation=msg)
        # Look up the source group; errors propagate to the wsgi layer.
        group = self.group_snapshot_api.get(context, group_id)
        name = group_snapshot.get('name', None)
        description = group_snapshot.get('description', None)
        LOG.info(_LI("Creating group_snapshot %(name)s."),
                 {'name': name},
                 context=context)
        try:
            new_group_snapshot = self.group_snapshot_api.create_group_snapshot(
                context, group, name, description)
        except (exception.InvalidGroup,
                exception.InvalidGroupSnapshot,
                exception.InvalidVolume) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        retval = self._view_builder.summary(req, new_group_snapshot)
        return retval
def create_resource():
    """Build the WSGI resource wrapping the group_snapshots controller."""
    return wsgi.Resource(GroupSnapshotsController())
# Copyright 2013 Huawei Technologies Co.,LTD.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from neutron.tests.api import base
from neutron.tests.tempest import test
class NetworksNegativeTestJSON(base.BaseNetworkTest):
    """Negative API tests against non-existent networks, subnets and ports.

    Each test issues an operation on a randomly generated (hence
    non-existent) identifier and expects the client to raise NotFound (404).
    """
    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('9293e937-824d-42d2-8d5b-e985ea67002a')
    def test_show_non_existent_network(self):
        non_exist_id = data_utils.rand_name('network')
        self.assertRaises(lib_exc.NotFound, self.client.show_network,
                          non_exist_id)
    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('d746b40c-5e09-4043-99f7-cba1be8b70df')
    def test_show_non_existent_subnet(self):
        non_exist_id = data_utils.rand_name('subnet')
        self.assertRaises(lib_exc.NotFound, self.client.show_subnet,
                          non_exist_id)
    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('a954861d-cbfd-44e8-b0a9-7fab111f235d')
    def test_show_non_existent_port(self):
        non_exist_id = data_utils.rand_name('port')
        self.assertRaises(lib_exc.NotFound, self.client.show_port,
                          non_exist_id)
    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('98bfe4e3-574e-4012-8b17-b2647063de87')
    def test_update_non_existent_network(self):
        non_exist_id = data_utils.rand_name('network')
        self.assertRaises(lib_exc.NotFound, self.client.update_network,
                          non_exist_id, name="new_name")
    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('03795047-4a94-4120-a0a1-bd376e36fd4e')
    def test_delete_non_existent_network(self):
        non_exist_id = data_utils.rand_name('network')
        self.assertRaises(lib_exc.NotFound, self.client.delete_network,
                          non_exist_id)
# etcd Governance
## Principles
The etcd community adheres to the following principles:
- Open: etcd is open source.
- Welcoming and respectful: See [Code of Conduct].
- Transparent and accessible: Changes to the etcd code repository and CNCF related
activities (e.g. level, involvement, etc) are done in public.
- Merit: Ideas and contributions are accepted according to their technical merit for
the betterment of the project. For specific guidance on practical contribution steps
please see [contributor guide] guide.
## Roles and responsibilities
Etcd project roles along with their requirements and responsibilities are defined
in [community membership].
## Decision making process
Decisions are built on consensus between [maintainers] publicly. Proposals and ideas
can either be submitted for agreement via a GitHub issue or PR, or by sending an email
to `etcd-maintainers@googlegroups.com`.
## Conflict resolution
In general, we prefer that technical issues and maintainer membership are amicably
worked out between the persons involved. However, for any technical dispute that has
reached an impasse with a subset of the community, any contributor may open a GitHub
issue or PR, or send an email to `etcd-maintainers@googlegroups.com`. If the
maintainers themselves cannot decide an issue, the issue will be resolved by a
supermajority of the maintainers, with a fallback on lazy consensus after a
three-business-week inactive voting period, as long as at least two maintainers are on board.
## Changes in Governance
Changes in project governance could be initiated by opening a GitHub PR.
## SIG-etcd Governance
[SIG-etcd Governance] is documented in the Kubernetes/community repository.
[community membership]: /Documentation/contributor-guide/community-membership.md
[Code of Conduct]: /code-of-conduct.md
[contributor guide]: /CONTRIBUTING.md
[maintainers]: /OWNERS
[SIG-etcd Governance]: https://github.com/kubernetes/community/blob/master/sig-etcd/charter.md#deviations-from-sig-governance | unknown | github | https://github.com/etcd-io/etcd | GOVERNANCE.md |
// NOTE(review): this looks like a generated transform-output fixture for the
// next/dynamic `ssr: false` case — the dynamic import() is mirrored into
// `loadableGenerated.webpack` via require.resolveWeak. If a snapshot test
// compares this file byte-for-byte, this comment must be regenerated into the
// expected output as well — confirm before keeping it.
import dynamic from 'next/dynamic';
export const NextDynamicNoSSRServerComponent = dynamic(()=>import('../text-dynamic-no-ssr-server'), {
    loadableGenerated: {
        webpack: ()=>[
            require.resolveWeak("../text-dynamic-no-ssr-server")
        ]
    },
    ssr: false
});
#!/usr/bin/python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_ozw as sensorObj
def main():
    """Poll an Aeotec Door/Window 2nd Edition Z-Wave sensor and print status."""

    # Print a farewell message when the interpreter shuts down.
    def exitHandler():
        print("Exiting")
        sys.exit(0)

    atexit.register(exitHandler)

    # Serial node of the Z-Wave controller; may be overridden on the
    # command line.
    device_node = "/dev/ttyACM0"
    if len(sys.argv) > 1:
        device_node = sys.argv[1]
    print("Using device", device_node)

    # Aeotec Door/Window 2nd Edition sensor on Z-Wave node 10.  You will
    # almost certainly need to change the node id to match your own
    # network; use the ozwdump example to see which nodes are available.
    door_sensor = sensorObj.AeotecDW2E(10)

    # OpenZWave requires options to be created and locked before init.
    door_sensor.optionsCreate()
    door_sensor.optionsLock()

    print("Initializing, this may take awhile depending on your ZWave network")
    door_sensor.init(device_node)
    print("Initialization complete")

    print("Querying data...")
    while True:
        if door_sensor.isDeviceAvailable():
            # One print() per label/value pair emits exactly the same bytes
            # as the original print(..., end=' ') / print(value) pairs.
            print("Alarm status:", door_sensor.isAlarmTripped())
            print("Tamper Switch status:", door_sensor.isTamperTripped())
            print("Battery Level:", door_sensor.getBatteryLevel(), "%")
            print()
        else:
            print("Device has not yet responded to probe.")
            print("Try waking it, or wait until it wakes itself if ", end=' ')
            print("configured to do so.")
            print()
        time.sleep(1)


if __name__ == '__main__':
    main()
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "spdlog/fmt/fmt.h"
#include "LIEF/Visitor.hpp"
#include "LIEF/MachO/FunctionStarts.hpp"
#include "MachO/Structures.hpp"
namespace LIEF {
namespace MachO {
// Build the load command from a raw LC_FUNCTION_STARTS linkedit_data_command:
// copy the command type and size into the LoadCommand base and record the
// offset/size of the packed function-start table inside __LINKEDIT.
FunctionStarts::FunctionStarts(const details::linkedit_data_command& cmd) :
  LoadCommand::LoadCommand{LoadCommand::TYPE(cmd.cmd), cmd.cmdsize},
  data_offset_{cmd.dataoff},
  data_size_{cmd.datasize}
{}

// Double-dispatch hook for the LIEF visitor machinery.
void FunctionStarts::accept(Visitor& visitor) const {
  visitor.visit(*this);
}
// Pretty-print the command header followed by one line per recorded
// function start, expressed as an offset from the __TEXT segment.
std::ostream& FunctionStarts::print(std::ostream& os) const {
  LoadCommand::print(os) << '\n';

  const std::vector<uint64_t> addresses = functions();
  os << fmt::format("offset=0x{:06x}, size=0x{:06x}, #functions={}",
                    data_offset(), data_size(), addresses.size()) << '\n';

  size_t idx = 0;
  for (uint64_t addr : addresses) {
    os << fmt::format("  [{}] __TEXT + 0x{:06x}\n", idx, addr);
    ++idx;
  }
  return os;
}
}
} | cpp | github | https://github.com/nodejs/node | deps/LIEF/src/MachO/FunctionStarts.cpp |
#!/usr/bin/python
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import sys
import locale
import unittest
import time
from spacewalk.common import rhnLib
TIMEZONE_SHIFT = time.timezone / 3600
class Tests(unittest.TestCase):
    """Unit tests for the spacewalk.common.rhnLib helper functions."""
    # pylint: disable=R0904

    ###########################################################################
    # Tests for rhnLib.rfc822time()
    ###########################################################################

    def test_rfc822time_normal_tuple(self):
        "rfc822time: Simple call using a valid tuple argument."
        test_arg = (2006, 1, 27, (14 - TIMEZONE_SHIFT), 12, 5, 4, 27, -1)
        target = "Fri, 27 Jan 2006 14:12:05 GMT"
        result = rhnLib.rfc822time(test_arg)
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(result, target, result + " != " + target)

    def test_rfc822time_normal_list(self):
        "rfc822time: Simple call using a valid list argument."
        test_arg = [2006, 1, 27, (14 - TIMEZONE_SHIFT), 12, 5, 4, 27, -1]
        target = "Fri, 27 Jan 2006 14:12:05 GMT"
        result = rhnLib.rfc822time(test_arg)
        self.assertEqual(result, target, result + " != " + target)

    def test_rfc822time_normal_float(self):
        "rfc822time: Simple call using a valid float argument."
        test_arg = 1138371125
        target = "Fri, 27 Jan 2006 14:12:05 GMT"
        result = rhnLib.rfc822time(test_arg)
        self.assertEqual(result, target, result + " != " + target)

    def test_rfc822time_japan_locale(self):
        "rfc822time: Test result in ja_JP locale."
        test_arg = 1138371125
        target = "Fri, 27 Jan 2006 14:12:05 GMT"
        old_locale = locale.getlocale(locale.LC_TIME)
        locale.setlocale(locale.LC_TIME, 'ja_JP')
        try:
            result = rhnLib.rfc822time(test_arg)
        finally:
            # Restore the caller's locale even if rfc822time() raises, so a
            # failure here cannot leak ja_JP into the other tests.
            locale.setlocale(locale.LC_TIME, old_locale)
        self.assertEqual(result, target, result + " != " + target)

    def testParseUrl(self):
        """parseUrl: scheme/netloc/path splitting across URL shapes."""
        self.assertEqual(('', '', '', '', '', ''),
                         rhnLib.parseUrl(''))
        self.assertEqual(('', 'somehostname', '', '', '', ''),
                         rhnLib.parseUrl('somehostname'))
        self.assertEqual(('http', 'somehostname', '', '', '', ''),
                         rhnLib.parseUrl('http://somehostname'))
        self.assertEqual(('https', 'somehostname', '', '', '', ''),
                         rhnLib.parseUrl('https://somehostname'))
        self.assertEqual(('https', 'somehostname:123', '', '', '', ''),
                         rhnLib.parseUrl('https://somehostname:123'))
        self.assertEqual(('https', 'somehostname:123', '/ABCDE', '', '', ''),
                         rhnLib.parseUrl('https://somehostname:123/ABCDE'))
if __name__ == '__main__':
    # Exit non-zero if any test fails so shell callers can detect failure.
    sys.exit(unittest.main() or 0)
"""
Functions for fitting temperature (and density and column) from the line ratio
plus whatever other constraints are available.

.. deprecated:: superseded by h2co_modeling.constrain_parameters (see below).
"""
# Fail fast on import so nobody uses this stale implementation by accident.
# The original `raise "..."` raised a bare string, which is invalid in
# Python >= 2.6/3 (exceptions must derive from BaseException) and would
# surface as a confusing TypeError instead of the intended message.
raise ImportError(
    "Use https://github.com/keflavich/h2co_modeling/blob/master/"
    "h2co_modeling/constrain_parameters.py instead")
import inspect
import time
import os
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from astropy import units as u
from astropy import log
import pylab as pl
from astropy.io import fits
from h2co_modeling import grid_fitter
from h2co_modeling.paraH2COmodel import generic_paraH2COmodel
def gpath(fn, gridpath='/Users/adam/work/h2co/radex/thermom/'):
    """Return the absolute path of model-grid file *fn* under *gridpath*."""
    full_path = os.path.join(gridpath, fn)
    return full_path
class paraH2COmodel(generic_paraH2COmodel):
def __init__(self, tbackground=2.73, gridsize=[250.,101.,100.]):
t0 = time.time()
self.texgrid303 = texgrid303 = fits.getdata(gpath('fjdu_pH2CO_303_tex_5kms.fits'))
self.taugrid303 = taugrid303 = fits.getdata(gpath('fjdu_pH2CO_303_tau_5kms.fits'))
self.texgrid321 = texgrid321 = fits.getdata(gpath('fjdu_pH2CO_321_tex_5kms.fits'))
self.taugrid321 = taugrid321 = fits.getdata(gpath('fjdu_pH2CO_321_tau_5kms.fits'))
self.texgrid322 = texgrid322 = fits.getdata(gpath('fjdu_pH2CO_322_tex_5kms.fits'))
self.taugrid322 = taugrid322 = fits.getdata(gpath('fjdu_pH2CO_322_tau_5kms.fits'))
self.texgrid404 = texgrid404 = fits.getdata(gpath('fjdu_pH2CO_404_tex_5kms.fits'))
self.taugrid404 = taugrid404 = fits.getdata(gpath('fjdu_pH2CO_404_tau_5kms.fits'))
self.texgrid422 = texgrid422 = fits.getdata(gpath('fjdu_pH2CO_422_tex_5kms.fits'))
self.taugrid422 = taugrid422 = fits.getdata(gpath('fjdu_pH2CO_422_tau_5kms.fits'))
self.texgrid423 = texgrid423 = fits.getdata(gpath('fjdu_pH2CO_423_tex_5kms.fits'))
self.taugrid423 = taugrid423 = fits.getdata(gpath('fjdu_pH2CO_423_tau_5kms.fits'))
self.hdr = hdr = hdrb = fits.getheader(gpath('fjdu_pH2CO_303_tex_5kms.fits'))
t1 = time.time()
log.debug("Loading grids took {0:0.1f} seconds".format(t1-t0))
self.Tbackground = tbackground
self.tline303a = ((1.0-np.exp(-np.array(self.taugrid303))) *
(self.texgrid303-self.Tbackground))
self.tline321a = ((1.0-np.exp(-np.array(self.taugrid321))) *
(self.texgrid321-self.Tbackground))
self.tline322a = ((1.0-np.exp(-np.array(self.taugrid322))) *
(self.texgrid322-self.Tbackground))
self.tline404a = ((1.0-np.exp(-np.array(self.taugrid404))) *
(self.texgrid404-self.Tbackground))
self.tline423a = ((1.0-np.exp(-np.array(self.taugrid423))) *
(self.texgrid423-self.Tbackground))
self.tline422a = ((1.0-np.exp(-np.array(self.taugrid422))) *
(self.texgrid422-self.Tbackground))
zinds,yinds,xinds = np.indices(self.tline303a.shape)
upsample_factor = np.array([gridsize[0]/self.tline303a.shape[0], # temperature
gridsize[1]/self.tline303a.shape[1], # density
gridsize[2]/self.tline303a.shape[2]], # column
dtype='float')
uzinds,uyinds,uxinds = upsinds = np.indices([int(x*us)
for x,us in zip(self.tline303a.shape,
upsample_factor)],
dtype='float')
self.tline303 = map_coordinates(self.tline303a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline321 = map_coordinates(self.tline321a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline322 = map_coordinates(self.tline322a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline404 = map_coordinates(self.tline404a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline422 = map_coordinates(self.tline422a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline423 = map_coordinates(self.tline423a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline = {303: self.tline303,
321: self.tline321,
322: self.tline322,
422: self.tline422,
423: self.tline423,
404: self.tline404,
}
assert self.hdr['CTYPE2'].strip() == 'LOG-DENS'
assert self.hdr['CTYPE1'].strip() == 'LOG-COLU'
self.columnarr = ((uxinds + self.hdr['CRPIX1']-1)*self.hdr['CDELT1'] /
float(upsample_factor[2])+self.hdr['CRVAL1']) # log column
self.densityarr = ((uyinds + self.hdr['CRPIX2']-1)*self.hdr['CDELT2'] /
float(upsample_factor[1])+self.hdr['CRVAL2']) # log density
self.temparr = ((uzinds + self.hdr['CRPIX3']-1)*self.hdr['CDELT3'] /
float(upsample_factor[0])+self.hdr['CRVAL3']) # lin temperature
self.drange = [self.densityarr.min(), self.densityarr.max()]
self.crange = [self.columnarr.min(), self.columnarr.max()]
self.trange = [self.temparr.min(), self.temparr.max()]
self.darr = self.densityarr[0,:,0]
self.carr = self.columnarr[0,0,:]
self.tarr = self.temparr[:,0,0]
self.axes = {'dens': self.darr,
'col': self.carr,
'tem': self.tarr}
self.labels = {'dens': 'Density $n(\mathrm{H}_2)$ [log cm$^{-3}$]',
'col': 'p-H$_2$CO [log cm$^{-2}$/(km s$^{-1}$ pc)]',
'tem': 'Temperature (K)'}
# While the individual lines are subject to filling factor uncertainties, the
# ratio is not.
self.modelratio1 = self.tline321/self.tline303
self.modelratio2 = self.tline322/self.tline321
self.modelratio_423_404 = self.tline423/self.tline404
self.modelratio_422_404 = self.tline422/self.tline404
self.modelratio_404_303 = self.tline404/self.tline303
self.model_logabundance = np.log10(10**self.columnarr / u.pc.to(u.cm) /
10**self.densityarr)
t2 = time.time()
log.debug("Grid initialization took {0:0.1f} seconds total,"
" {1:0.1f} since loading grids.".format(t2-t0,t2-t1))
    def grid_getmatch_321to303(self, ratio, eratio):
        # Chi^2 cube of the observed 321/303 ratio against the model grid.
        match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
                                                        self.modelratio1)
        return chi2r

    def grid_getmatch_404to303(self, ratio, eratio):
        # Chi^2 cube of the observed 404/303 ratio against the model grid.
        match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
                                                        self.modelratio_404_303)
        return chi2r

    def grid_getmatch_422to404(self, ratio, eratio):
        # Chi^2 cube of the observed 422/404 ratio against the model grid.
        match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
                                                        self.modelratio_422_404)
        return chi2r

    def grid_getmatch_423to404(self, ratio, eratio):
        # Chi^2 cube of the observed 423/404 ratio against the model grid.
        match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
                                                        self.modelratio_423_404)
        return chi2r

    def grid_getmatch_322to321(self, ratio, eratio):
        # Chi^2 cube of the observed 322/321 ratio against the model grid.
        match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
                                                        self.modelratio2)
        return chi2r
def list_parameters():
raise NotImplementedError("Not implemented yet for 4-3")
return ['taline303', 'etaline303', 'taline321', 'etaline321',
'taline322', 'etaline322', 'logabundance', 'elogabundance',
'logh2column', 'elogh2column', 'ratio321303', 'eratio321303',
'ratio321322', 'eratio321322', 'linewidth']
def set_constraints_fromrow(self, row, **kwargs):
raise NotImplementedError("Not implemented yet for 4-3")
mapping = {'e321':'etaline321',
'Smean321':'taline321',
'Smean303':'taline303',
'er321303':'eratio321303',
'eratio321303':'eratio321303',
'e303':'etaline303',
'r321303':'ratio321303',
'ratio321303':'ratio321303',
'r321303':'ratio321303',
'er321303':'eratio321303',
'logabundance':'logabundance',
'elogabundance':'elogabundance',
'logh2column':'logh2column',
'elogh2column':'elogh2column',
'dustmindens':'linmindens',
'v_rms':'linewidth',
}
pars = {mapping[k]: row[k] for k in row.colnames if k in mapping}
pars.update(**kwargs)
self.set_constraints(**pars)
def set_constraints(self,
taline303=None, etaline303=None,
taline321=None, etaline321=None,
taline322=None, etaline322=None,
taline404=None, etaline404=None,
taline422=None, etaline422=None,
taline423=None, etaline423=None,
logabundance=None, elogabundance=None,
logh2column=None, elogh2column=None,
ratio321303=None, eratio321303=None,
ratio321322=None, eratio321322=None,
ratio404303=None, eratio404303=None,
ratio422404=None, eratio422404=None,
ratio423404=None, eratio423404=None,
linmindens=None,
mindens=None, emindens=0.2,
linewidth=None):
"""
Set parameter constraints from a variety of inputs. This will fill in
a variety of .chi2_[x] values.
All errors are 1-sigma Gaussian errors.
The ``taline`` parameters are only used as lower limits.
Logabundance and logh2column are both log_10 values, so the errorbars
are effectively lognormal 1-sigma errors.
The ratios are generally the most important constraints.
A minimum volume density, with 1-sigma lognormal one-sided error
``emindens``, can be included. ``mindens`` is logarithmic, but you can
use ``linmindens`` instead. ``linewidth`` also needs to be specified
in km/s.
"""
argspec=inspect.getargvalues(inspect.currentframe())
for arg in argspec.args:
if argspec.locals[arg] is not None:
setattr(self, arg, argspec.locals[arg])
self.chi2_X = (self.chi2_abundance(logabundance, elogabundance)
if not any(arg is None for arg in (logabundance,
elogabundance))
else 0)
self.chi2_h2 = (self.chi2_column(logh2column, elogh2column,
logabundance, linewidth)
if not
any(arg is None for arg in (logabundance, logh2column,
elogh2column, linewidth))
else 0)
self.chi2_ff1 = (self.chi2_fillingfactor(taline303, etaline303, 303)
if not any(arg is None for arg in (taline303,
etaline303))
else 0)
self.chi2_ff2 = (self.chi2_fillingfactor(taline321, etaline321, 321)
if not any(arg is None for arg in (taline321,
etaline321))
else 0)
self.chi2_r321303 = (self.grid_getmatch_321to303(ratio321303,
eratio321303)
if not any(arg is None for arg in (ratio321303,
eratio321303))
else 0)
if np.all(~np.isfinite(self.chi2_r321303)):
self.chi2_r321303 = 0
self.chi2_r423404 = (self.grid_getmatch_423to404(ratio423404,
eratio423404)
if not any(arg is None for arg in (ratio423404,
eratio423404))
else 0)
if np.all(~np.isfinite(self.chi2_r423404)):
self.chi2_r423404 = 0
self.chi2_r422404 = (self.grid_getmatch_422to404(ratio422404,
eratio422404)
if not any(arg is None for arg in (ratio422404,
eratio422404))
else 0)
if np.all(~np.isfinite(self.chi2_r422404)):
self.chi2_r422404 = 0
self.chi2_r404303 = (self.grid_getmatch_404to303(ratio404303,
eratio404303)
if not any(arg is None for arg in (ratio404303,
eratio404303))
else 0)
if np.all(~np.isfinite(self.chi2_r404303)):
self.chi2_r404303 = 0
self.chi2_r321322 = (self.grid_getmatch_322to321(ratio321322,
eratio321322)
if not any(arg is None for arg in (ratio321322,
eratio321322))
else 0)
if np.all(~np.isfinite(self.chi2_r321322)):
self.chi2_r321322 = 0
if linmindens is not None:
if mindens is not None:
raise ValueError("Both linmindens and logmindens were set.")
mindens = np.log10(linmindens)
if mindens is not None:
self.chi2_dens = (((self.densityarr - mindens)/emindens)**2
* (self.densityarr < (mindens-emindens)))
else:
self.chi2_dens = 0
self.compute_chi2_fromcomponents()
def compute_chi2_fromcomponents(self):
"""
Compute the total chi2 from the individual chi2 components
"""
self.chi2 = (self.chi2_X + self.chi2_h2 + self.chi2_ff1 + self.chi2_ff2
+ self.chi2_r321322 + self.chi2_r321303 + self.chi2_dens +
self.chi2_r404303 + self.chi2_r423404 + self.chi2_r422404)
def denstemplot(self):
self.parplot('dens','tem')
def denscolplot(self):
self.parplot('col','dens')
def coltemplot(self):
self.parplot('col','tem')
def parplot(self, par1='col', par2='dens', nlevs=5, levels=None,
colors=[(0.5,0,0), (0.75,0,0), (1.0,0,0), (1.0,0.25,0), (0.75,0.5,0)],
colorsf=[0.0, 0.33, 0.66, 1.0, 'w']):
cdict = {x: [(0.0, 0.0, 0.0),
(1.0, 1.0, 1.0)]
for x in ('red','green','blue')}
cdict['blue'] = [(0.0, 1., 1.), (1.0, 1.0, 1.0)]
cm = matplotlib.colors.LinearSegmentedColormap('mycm', cdict)
colorsf = [cm(float(ii)) if isinstance(ii, (float,int))
else ii
for ii in colorsf]
xax = self.axes[par1]
yax = self.axes[par2]
xlabel = self.labels[par1]
ylabel = self.labels[par2]
amapping = {('col','dens'): 0,
('dens','tem'): 2,
('col','tem'): 1}
if (par1,par2) in amapping:
axis = amapping[(par1,par2)]
swaps = (0,0)
elif (par2,par1) in amapping:
axis = amapping[(par2,par1)]
swaps = (0,1)
if levels is None:
levels = ([0]+[(stats.norm.cdf(ii)-stats.norm.cdf(-ii))
for ii in range(1,nlevs)]+[1])
xmaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par1])]
ymaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par2])]
xexpect = self.parconstraints['expected_{0}'.format(short_mapping[par1])]
yexpect = self.parconstraints['expected_{0}'.format(short_mapping[par2])]
fig = pl.gcf()
fig.clf()
ax1 = pl.subplot(2,2,1)
if 'chi2_r321303' in self.individual_likelihoods:
like = (self.individual_likelihoods['chi2_r321303'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
if self.chi2_r321322 is not 0:
like = cdf_of_like(self.individual_likelihoods['chi2_r321322'])
pl.contour(xax, yax, like.sum(axis=axis).swapaxes(*swaps),
levels=levels,
cmap=pl.cm.bone)
pl.title("Ratio $3_{0,3}-2_{0,2}/3_{2,1}-2_{2,0}$")
ax4 = pl.subplot(2,2,2)
if hasattr(self.chi2_X, 'size'):
like = self.individual_likelihoods['chi2_X']
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
pl.title("log(p-H$_2$CO/H$_2$) "
"$= {0:0.1f}\pm{1:0.1f}$".format(self.logabundance,
self.elogabundance))
ax3 = pl.subplot(2,2,3)
if hasattr(self.chi2_h2, 'size'):
like = (self.individual_likelihoods['chi2_h2'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
pl.title("Total log$(N(\\mathrm{{H}}_2))$ ")
# "= {0:0.1f}\pm{1:0.1f}$".format(self.logh2column,
# self.elogh2column))
ax5 = pl.subplot(2,2,4)
if hasattr(self.chi2_ff1, 'size'):
cdict = {x: [(0.0, 0.5, 0.5),
(1.0, 0.0, 0.0)]
for x in ('red','green','blue')}
cdict['green'] = [(0, 0.5, 0.5), (1,1,1)]
cdict['red'] = [(0, 0.5, 0.5), (1,0.7,0.7)]
cdict['blue'] = [(0, 0.0, 0.0), (1,0,0)]
#cdict['alpha'] = [(0.0, 0.0, 0.0), (1.0, 0.3, 0.3)]
darker = matplotlib.colors.LinearSegmentedColormap('darker', cdict)
like = (self.individual_likelihoods['chi2_ff1'])
plim = cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps)
pl.contour(xax, yax, plim, levels=levels,
cmap=darker, zorder=5)
if hasattr(self.chi2_dens, 'size'):
like = (self.individual_likelihoods['chi2_dens'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
#if hasattr(self, 'taline303'):
# ff1_mask = (self.tline303 < 10*self.taline303)
# pl.contour(xax, yax, ff1_mask.max(axis=axis).swapaxes(*swaps),
# levels=[0.5], colors='k')
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
#pl.contour(xax, yax, (tline303 < 100*par1).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k')
#pl.contour(xax, yax, (tline321 < 10*par2).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k', linestyles='--')
#pl.contour(xax, yax, (tline321 < 100*par2).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k', linestyles='--')
#pl.title("Line Brightness + $ff\leq1$")
pl.title("Minimum Density & $ff$")
fig.text(0.05, 0.5, ylabel, horizontalalignment='center',
verticalalignment='center',
rotation='vertical', transform=fig.transFigure)
fig.text(0.5, 0.02, xlabel, horizontalalignment='center', transform=fig.transFigure)
if par1 == 'col':
for ss in range(1,5):
ax = pl.subplot(2,2,ss)
ax.xaxis.set_ticks(np.arange(self.carr.min(), self.carr.max()))
pl.subplots_adjust(wspace=0.25, hspace=0.45)
def parplot1d(self, par='col', levels=None, clf=True,
legend=True, legendfontsize=14):
xax = self.axes[par]
xlabel = self.labels[par]
amapping = {'col':(2,(0,1)),
'dens':(1,(0,2)),
'tem':(0,(1,2))}
axis,axes = amapping[par]
xmaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par])]
xexpect = self.parconstraints['expected_{0}'.format(short_mapping[par])]
like = self.likelihood.sum(axis=axes)
like /= like.sum()
inds_cdf = np.argsort(like)
cdf = like[inds_cdf]
fig = pl.gcf()
if clf:
fig.clf()
ax = fig.gca()
ax.plot(xax, like, 'k-', label='Posterior')
for key in self.individual_likelihoods:
if key in ('chi2','_chi2'):
continue # already done
ilike = self.individual_likelihoods[key].sum(axis=axes)
ilike /= ilike.sum()
ax.plot(xax, ilike, label=chi2_mapping[key.replace("chi2_","")])
ax.vlines((xmaxlike,), 0, like.max(), linestyle='--', color='r',
label='Maximum Likelihood')
ax.vlines((xexpect,), 0, like.max(), linestyle='--', color='b',
label='E[{0}]'.format(xlabel))
xexpect_v2 = (like*xax).sum()/like.sum()
ax.vlines((xexpect_v2,), 0, like.max(), linestyle='--', color='c',
zorder=-1)
print("par:{4} xmaxlike: {0}, xexpect: {1}, xexpect_v2: {2},"
"maxlike: {3}, diff:{5}"
.format(xmaxlike, xexpect, xexpect_v2, like.max(), par,
xexpect-xmaxlike))
if levels is not None:
if not isinstance(levels, collections.Iterable):
levels = [levels]
cdf_inds = np.argsort(like)
ppf = 1-like[cdf_inds].cumsum()
cutoff_likes = [like[cdf_inds[np.argmin(np.abs(ppf-lev))]]
for lev in levels]
for fillind,cutoff in enumerate(sorted(cutoff_likes)):
selection = like > cutoff
ax.fill_between(xax[selection], like[selection]*0,
like[selection], alpha=0.1, zorder=fillind-20)
if np.abs(like[selection].sum() - levels[0]) > 0.05:
# we want the sum of the likelihood to be right!
#import ipdb; ipdb.set_trace()
warnings.warn("Likelihood is not self-consistent.")
if legend:
ax.legend(loc='best', fontsize=legendfontsize)
ax.set_xlabel(xlabel)
ax.set_ylabel('$P(${0}$)$'.format(xlabel))
def parplot1d_all(self, legendfontsize=14, **kwargs):
fig = pl.gcf()
if not all(fig.get_size_inches() == [12,16]):
num = fig.number
pl.close(fig)
fig = pl.figure(num, figsize=(12,16))
for axindex,par in enumerate(('col','dens','tem')):
ax = fig.add_subplot(3,1,axindex+1)
self.parplot1d(par=par, clf=False, legend=False, **kwargs)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if axindex == 1:
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fontsize=legendfontsize)
pl.subplots_adjust(hspace=0.45)
@property
def individual_likelihoods(self):
if hasattr(self, '_likelihoods') and self._likelihoods is not None:
return self._likelihoods
else:
self._likelihoods = {}
for key in self.__dict__:
if 'chi2' in key and getattr(self,key) is not 0:
self._likelihoods[key] = np.exp(-getattr(self,key)/2.)
self._likelihoods[key] /= self._likelihoods[key].sum()
return self._likelihoods
def cdf_of_like(like):
    """
    Turn a likelihood image into a CDF image.

    After normalizing so the total is 1, each pixel receives the cumulative
    sum of every likelihood value greater than or equal to its own; the
    maximum-likelihood pixel(s) are then zeroed so contour levels start at
    the peak.
    """
    normalized = like / like.sum()
    descending = np.argsort(normalized.flat)[::-1]
    running_total = normalized.flat[descending].cumsum()
    # Scatter the running totals back to their original positions.
    cdf = running_total[np.argsort(descending)].reshape(normalized.shape)
    cdf[normalized == normalized.max()] = 0
    return cdf


def ppf_of_like(like):
    """Complement of :func:`cdf_of_like` (i.e. ``1 - cdf``)."""
    return 1 - cdf_of_like(like)
# -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser2
class source:
    """Scraper "source" plugin for moviego.cc (Python 2 add-on framework).

    The framework discovers these attributes/methods by convention:
    ``movie`` resolves a title/year to a site URL, ``sources`` extracts
    playable hoster links from that page, and ``resolve`` maps a chosen
    link to its final URL.
    """

    def __init__(self):
        # Plugin metadata read by the scraper framework.
        self.priority = 1
        self.language = ['en']
        self.domains = ['moviego.cc']
        self.base_link = 'http://moviego.cc'
        self.search_link = 'index.php?do=search'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the site URL for a movie, or None if the search fails."""
        try:
            clean_title = cleantitle.geturl(title)
            search_url = urlparse.urljoin(self.base_link, self.search_link)
            # DLE-style POST search form; '+' joins the title words.
            post = ('do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s+%s' % (clean_title.replace('-','+'), year))
            r = client.request(search_url, post=post)
            # First search hit: <article class="shortstory cf"> -> first <a href>.
            r = dom_parser2.parse_dom(r, 'article', {'class': ['shortstory','cf']})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req='href')[0]
            url = r.attrs['href']
            return url
        except Exception:
            # Best-effort scraper: any parse/network failure means "not found".
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # TV shows are not supported by this site/scraper.
        return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # TV episodes are not supported by this site/scraper.
        return

    def sources(self, url, hostDict, hostprDict):
        """Extract hoster links from a movie page.

        Returns a list of source dicts understood by the framework (possibly
        empty), or None if the page cannot be fetched/parsed.
        """
        try:
            sources = []
            r = client.request(url)
            # The player lives in an <iframe> inside <div class="tab_box">.
            r = dom_parser2.parse_dom(r, 'div', {'class': 'tab_box'})[0]
            r = dom_parser2.parse_dom(r.content, 'iframe', req='src')[0]
            url = r.attrs['src']
            if r:
                try:
                    # Keep only iframes whose host is a known/supported hoster.
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host in hostDict:
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/','/'),
                            'direct': False,
                            'debridonly': False
                        })
                except: pass
            return sources
        except Exception:
            return

    def resolve(self, url):
        # Links are already direct hoster URLs; nothing to resolve.
        return url
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package addrs
import (
"fmt"
"testing"
)
// TestUniqueKeyer aims to ensure that all of the types that have unique keys
// will continue to meet the UniqueKeyer contract under future changes.
//
// If you add a new implementation of UniqueKey, consider adding a test case
// for it here.
func TestUniqueKeyer(t *testing.T) {
tests := []UniqueKeyer{
CountAttr{Name: "index"},
ForEachAttr{Name: "key"},
TerraformAttr{Name: "workspace"},
PathAttr{Name: "module"},
InputVariable{Name: "foo"},
ModuleCall{Name: "foo"},
ModuleCallInstance{
Call: ModuleCall{Name: "foo"},
Key: StringKey("a"),
},
ModuleCallOutput{
Call: ModuleCall{Name: "foo"},
Name: "bar",
},
ModuleCallInstanceOutput{
Call: ModuleCallInstance{
Call: ModuleCall{Name: "foo"},
Key: StringKey("a"),
},
Name: "bar",
},
Resource{
Mode: ManagedResourceMode,
Type: "foo",
Name: "bar",
},
ResourceInstance{
Resource: Resource{
Mode: ManagedResourceMode,
Type: "foo",
Name: "bar",
},
Key: IntKey(1),
},
RootModuleInstance,
RootModuleInstance.Child("foo", NoKey),
RootModuleInstance.ResourceInstance(
DataResourceMode,
"boop",
"beep",
NoKey,
),
Self,
}
for _, test := range tests {
t.Run(fmt.Sprintf("%s", test), func(t *testing.T) {
a := test.UniqueKey()
b := test.UniqueKey()
// The following comparison will panic if the unique key is not
// of a comparable type.
if a != b {
t.Fatalf("the two unique keys are not equal\na: %#v\b: %#v", a, b)
}
})
}
} | go | github | https://github.com/hashicorp/terraform | internal/addrs/unique_key_test.go |
import base64
import boto3
import json
import logging
import os
import random
from botocore.stub import Stubber
from cis_profile import profile
from cis_profile import fake_profile
from everett.ext.inifile import ConfigIniEnv
from everett.manager import ConfigManager
from everett.manager import ConfigOSEnv
from moto import mock_dynamodb2
from mock import patch
# Module-level logger plus a basicConfig that sends INFO-and-above records
# to the root handler with a timestamped format.
logger = logging.getLogger(__name__)
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", datefmt="%m-%d %H:%M"
)
# define a Handler which writes INFO messages or higher to the sys.stderr
# NOTE(review): `console` is created and leveled here but never attached to
# any logger in this module — presumably leftover boilerplate; confirm.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
def get_config():
    """Build the everett ConfigManager: ini-file candidates first, then OS env vars."""
    ini_candidates = [os.environ.get("CIS_CONFIG_INI"), "~/.mozilla-cis.ini", "/etc/mozilla-cis.ini"]
    config_sources = [ConfigIniEnv(ini_candidates), ConfigOSEnv()]
    return ConfigManager(config_sources)
def profile_to_vault_structure(user_profile):
    """Flatten a CIS user-profile dict into the identity-vault record layout.

    The sequence number is a fresh random value on every call; the profile
    itself is stored JSON-serialized alongside the indexed identity fields.
    """
    vault_record = dict(
        sequence_number=str(random.randint(100000, 100000000)),
        primary_email=user_profile["primary_email"]["value"],
        profile=json.dumps(user_profile),
        user_uuid=user_profile["uuid"]["value"],
        primary_username=user_profile["primary_username"]["value"],
    )
    vault_record["id"] = user_profile["user_id"]["value"]
    return vault_record
def kinesis_event_generate(user_profile):
    """Load the canned Kinesis event fixture and embed *user_profile* as its payload.

    The profile is JSON-encoded then base64-encoded exactly as Kinesis
    delivers record data, so the processor under test can decode it normally.
    Returns the full event structure (dict with a "Records" list).
    """
    # Use a context manager so the fixture file handle is always closed
    # (the original left it open on a json.loads failure).
    with open("tests/fixture/kinesis-event.json") as fh:
        kinesis_event_structure = json.loads(fh.read())
    kinesis_event_structure["Records"][0]["kinesis"]["sequenceNumber"] = "900000000000"
    # NOTE(review): "parititionKey" looks like a typo for "partitionKey" —
    # kept as-is to preserve behavior; confirm against the fixture/consumers.
    kinesis_event_structure["Records"][0]["kinesis"]["parititionKey"] = "generic_publisher"
    kinesis_event_structure["Records"][0]["kinesis"]["data"] = base64.b64encode(
        json.dumps(user_profile).encode()
    ).decode()
    return kinesis_event_structure
@mock_dynamodb2
class TestOperation(object):
    """Tests for cis_processor.operation.BaseProcessor against a mocked vault.

    DynamoDB is faked with moto's @mock_dynamodb2; signature/publisher
    verification on cis_profile.User is patched per test so each scenario
    can force verification to pass or fail.
    """

    def setup(self):
        # Point everett at the fixture config before any CIS module reads it.
        os.environ["CIS_CONFIG_INI"] = "tests/fixture/mozilla-cis.ini"
        self.config = get_config()
        from cis_profile import WellKnown
        from cis_identity_vault import vault

        os.environ["CIS_CONFIG_INI"] = "tests/fixture/mozilla-cis.ini"
        well_known = WellKnown()
        self.well_known_json = well_known.get_well_known()
        # Throwaway credentials: moto intercepts these clients, so nothing
        # ever reaches real AWS.
        self.dynamodb_client = boto3.client(
            "dynamodb", region_name="us-west-2", aws_access_key_id="ak", aws_secret_access_key="sk"
        )
        self.dynamodb_resource = boto3.resource(
            "dynamodb", region_name="us-west-2", aws_access_key_id="ak", aws_secret_access_key="sk"
        )
        self.vault_client = vault.IdentityVault()
        self.vault_client.boto_session = Stubber(boto3.session.Session(region_name="us-west-2")).client
        self.vault_client.dynamodb_client = self.dynamodb_client
        self.vault_client.find_or_create()
        self.table = self.dynamodb_resource.Table("purple-identity-vault")
        # Seed the vault with one deterministic fake user (fixed seed) that
        # the update scenarios below mutate.
        self.mr_mozilla_profile = fake_profile.FakeUser(seed=1337).as_dict()
        from cis_identity_vault.models import user

        vault_interface = user.Profile(self.table, self.dynamodb_client, False)
        vault_interface.create(profile_to_vault_structure(user_profile=self.mr_mozilla_profile))
        self.mr_mozilla_change_event = kinesis_event_generate(self.mr_mozilla_profile)

    @patch.object(profile.User, "verify_all_publishers")
    @patch.object(profile.User, "verify_all_signatures")
    def test_base_operation_object_it_should_succeed(self, verify_sigs, verify_pubs):
        """A changed profile with all verifications passing integrates cleanly."""
        verify_sigs.return_value = True
        verify_pubs.return_value = True
        os.environ["CIS_PROCESSOR_VERIFY_SIGNATURES"] = "False"
        patched_profile = self.mr_mozilla_profile
        patched_profile["last_name"]["value"] = "anupdatedlastname"
        kinesis_event = kinesis_event_generate(patched_profile)
        from cis_processor import operation

        for kinesis_record in kinesis_event["Records"]:
            base_operation = operation.BaseProcessor(
                event_record=kinesis_record, dynamodb_client=self.dynamodb_client, dynamodb_table=self.table
            )
            base_operation._load_profiles()
            needs_integration = base_operation.needs_integration(
                base_operation.profiles["new_profile"], base_operation.profiles["old_profile"]
            )
            assert needs_integration is True
            assert (
                base_operation.profiles["new_profile"].verify_all_publishers(base_operation.profiles["old_profile"])
                is True
            )
            assert base_operation.process() is True
            from cis_identity_vault.models import user

            # Confirm the integrated profile is retrievable from the vault.
            p = user.Profile(self.table, self.dynamodb_client, False)
            p.find_by_id(id=base_operation.profiles["new_profile"].as_dict()["user_id"]["value"])

    @patch.object(profile.User, "verify_all_publishers")
    @patch.object(profile.User, "verify_all_signatures")
    def test_base_operation_object_with_signature_testing_it_should_fail(self, verify_sigs, verify_pubs):
        """With signature checking enforced and failing, process() must refuse."""
        verify_sigs.return_value = False
        verify_pubs.return_value = True
        os.environ["CIS_PROCESSOR_VERIFY_SIGNATURES"] = "True"
        patched_profile = self.mr_mozilla_profile
        patched_profile["first_name"]["value"] = "anupdatedfirstname"
        kinesis_event = kinesis_event_generate(patched_profile)
        from cis_processor import operation

        for kinesis_record in kinesis_event["Records"]:
            base_operation = operation.BaseProcessor(
                event_record=kinesis_record, dynamodb_client=self.dynamodb_client, dynamodb_table=self.table
            )
            base_operation._load_profiles()
            needs_integration = base_operation.needs_integration(
                base_operation.profiles["new_profile"], base_operation.profiles["old_profile"]
            )
            assert needs_integration is True
            assert (
                base_operation.profiles["new_profile"].verify_all_publishers(base_operation.profiles["old_profile"])
                is True
            )
            assert base_operation.process() is False

    @patch.object(profile.User, "verify_all_publishers")
    @patch.object(profile.User, "verify_all_signatures")
    def test_new_user_scenario(self, verify_sigs, verify_pubs):
        """A never-seen user id with failing signatures must not be created."""
        verify_sigs.return_value = False
        verify_pubs.return_value = True
        os.environ["CIS_PROCESSOR_VERIFY_SIGNATURES"] = "True"
        new_user_profile = fake_profile.FakeUser().as_dict()
        new_user_profile["user_id"]["value"] = "harrypotter"
        kinesis_event = kinesis_event_generate(new_user_profile)
        from cis_processor import operation

        for kinesis_record in kinesis_event["Records"]:
            base_operation = operation.BaseProcessor(
                event_record=kinesis_record, dynamodb_client=self.dynamodb_client, dynamodb_table=self.table
            )
            base_operation._load_profiles()
            needs_integration = base_operation.needs_integration(
                base_operation.profiles["new_profile"], base_operation.profiles["old_profile"]
            )
            assert needs_integration is True
            assert (
                base_operation.profiles["new_profile"].verify_all_publishers(base_operation.profiles["old_profile"])
                is True
            )
            assert base_operation.process() is False
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time
import cPickle
from shinken.util import get_obj_name_two_args_and_void
from shinken.objects.item import Item, Items
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp, DictProp, AddrProp
from shinken.log import logger
from shinken.http_client import HTTPClient, HTTPExceptions
class SatelliteLink(Item):
    """SatelliteLink is a common Class for link to satellite for
    Arbiter with Conf Dispatcher.

    It wraps the HTTP connection to a remote daemon (scheduler, poller,
    reactionner, broker, receiver) and tracks its liveness state.
    """
    # id = 0 each Class will have it's own id

    properties = Item.properties.copy()
    properties.update({
        'address': StringProp(fill_brok=['full_status']),
        'timeout': IntegerProp(default='3', fill_brok=['full_status']),
        'data_timeout': IntegerProp(default='120', fill_brok=['full_status']),
        'check_interval': IntegerProp(default='60', fill_brok=['full_status']),
        'max_check_attempts': IntegerProp(default='3', fill_brok=['full_status']),
        'spare': BoolProp(default='0', fill_brok=['full_status']),
        'manage_sub_realms': BoolProp(default='1', fill_brok=['full_status']),
        'manage_arbiters': BoolProp(default='0', fill_brok=['full_status'], to_send=True),
        'modules': ListProp(default='', to_send=True),
        'polling_interval': IntegerProp(default='1', fill_brok=['full_status'], to_send=True),
        'use_timezone': StringProp(default='NOTSET', to_send=True),
        'realm': StringProp(default='', fill_brok=['full_status'], brok_transformation=get_obj_name_two_args_and_void),
        'satellitemap': DictProp(default=None, elts_prop=AddrProp, to_send=True, override=True),
        'use_ssl': BoolProp(default='0', fill_brok=['full_status']),
        'hard_ssl_name_check': BoolProp(default='0', fill_brok=['full_status']),
        'passive': BoolProp(default='0', fill_brok=['full_status'], to_send=True),
    })

    running_properties = Item.running_properties.copy()
    running_properties.update({
        'con': StringProp(default=None),
        'alive': StringProp(default=True, fill_brok=['full_status']),
        'broks': StringProp(default=[]),
        'attempt': StringProp(default=0, fill_brok=['full_status']),  # the number of failed attempt
        'reachable': StringProp(default=False, fill_brok=['full_status']),  # can be network ask or not (dead or check in timeout or error)
        'last_check': IntegerProp(default=0, fill_brok=['full_status']),
        'managed_confs': StringProp(default={}),
    })

    def __init__(self, *args, **kwargs):
        # arb_satmap is the address/port the Arbiter uses to reach this
        # satellite; it starts from the configured address/port and may be
        # overridden by set_arbiter_satellitemap().
        super(SatelliteLink, self).__init__(*args, **kwargs)
        self.arb_satmap = {'address': '0.0.0.0', 'port': 0}
        if hasattr(self, 'address'):
            self.arb_satmap['address'] = self.address
        if hasattr(self, 'port'):
            try:
                self.arb_satmap['port'] = int(self.port)
            except:
                pass

    def set_arbiter_satellitemap(self, satellitemap):
        """
        arb_satmap is the satellitemap in current context:
          - A SatelliteLink is owned by an Arbiter
          - satellitemap attribute of SatelliteLink is the map defined IN THE satellite configuration
            but for creating connections, we need the have the satellitemap of the Arbiter
        """
        self.arb_satmap = {'address': self.address, 'port': self.port, 'use_ssl': self.use_ssl, 'hard_ssl_name_check': self.hard_ssl_name_check}
        self.arb_satmap.update(satellitemap)

    def create_connection(self):
        # Build the HTTP client toward the satellite using the Arbiter-side map.
        self.con = HTTPClient(address=self.arb_satmap['address'], port=self.arb_satmap['port'],
                              timeout=self.timeout, data_timeout=self.data_timeout, use_ssl=self.use_ssl,
                              strong_ssl=self.hard_ssl_name_check
                              )
        self.uri = self.con.uri

    def put_conf(self, conf):
        """Push a (serialized) configuration to the satellite.

        Returns True on success, False on any connection/HTTP failure.
        """
        if self.con is None:
            self.create_connection()

        # Maybe the connection was not ok, bail out
        if not self.con:
            return False

        try:
            # Ping first to avoid a long blocking post on a dead peer.
            self.con.get('ping')
            self.con.post('put_conf', {'conf': conf}, wait='long')
            print "PUT CONF SUCESS", self.get_name()
            return True
        except HTTPExceptions, exp:
            self.con = None
            logger.error("Failed sending configuration for %s: %s", self.get_name(), str(exp))
            return False

    # Get and clean all of our broks
    def get_all_broks(self):
        res = self.broks
        self.broks = []
        return res

    # Set alive, reachable, and reset attempts.
    # If we change state, raise a status brok update
    def set_alive(self):
        was_alive = self.alive
        self.alive = True
        self.attempt = 0
        self.reachable = True

        # We came from dead to alive
        # so we must add a brok update
        if not was_alive:
            b = self.get_update_status_brok()
            self.broks.append(b)

    def set_dead(self):
        # Mark the satellite dead and drop the connection; emit a status
        # brok only on an actual alive -> dead transition.
        was_alive = self.alive
        self.alive = False
        self.con = None

        # We are dead now. Must raise
        # a brok to say it
        if was_alive:
            logger.warning("Setting the satellite %s to a dead state.", self.get_name())
            b = self.get_update_status_brok()
            self.broks.append(b)

    # Go in reachable=False and add a failed attempt
    # if we reach the max, go dead
    def add_failed_check_attempt(self, reason=''):
        self.reachable = False
        self.attempt += 1
        self.attempt = min(self.attempt, self.max_check_attempts)

        # Don't need to warn again and again if the satellite is already dead
        if self.alive:
            logger.warning("Add failed attempt to %s (%d/%d) %s", self.get_name(), self.attempt, self.max_check_attempts, reason)

        # check when we just go HARD (dead)
        if self.attempt == self.max_check_attempts:
            self.set_dead()

    # Update satellite info each self.check_interval seconds
    # so we smooth arbiter actions for just useful actions
    # and not cry for a little timeout
    def update_infos(self):
        # First look if it's not too early to ping
        now = time.time()
        since_last_check = now - self.last_check
        if since_last_check < self.check_interval:
            return

        self.last_check = now

        # We ping and update the managed list
        self.ping()
        self.update_managed_list()

        # Update the state of this element
        b = self.get_update_status_brok()
        self.broks.append(b)

    # The elements just got a new conf_id, we put it in our list
    # because maybe the satellite is too busy to answer now
    def known_conf_managed_push(self, cfg_id, push_flavor):
        self.managed_confs[cfg_id] = push_flavor

    def ping(self):
        """Ping the satellite; success resets liveness, failure counts an attempt."""
        logger.debug("Pinging %s", self.get_name())
        try:
            if self.con is None:
                self.create_connection()
            logger.debug(" (%s)", self.uri)

            # If the connection failed to initialize, bail out
            if self.con is None:
                self.add_failed_check_attempt()
                return

            r = self.con.get('ping')

            # Should return us pong string
            if r == 'pong':
                self.set_alive()
            else:
                self.add_failed_check_attempt()
        except HTTPExceptions, exp:
            self.add_failed_check_attempt(reason=str(exp))

    def wait_new_conf(self):
        # Ask the satellite to drop its configuration and wait for a new one.
        if self.con is None:
            self.create_connection()
        try:
            r = self.con.get('wait_new_conf')
            return True
        except HTTPExceptions, exp:
            self.con = None
            return False

    # To know if the satellite have a conf (magic_hash = None)
    # OR to know if the satellite have THIS conf (magic_hash != None)
    # Magic_hash is for arbiter check only
    def have_conf(self, magic_hash=None):
        if self.con is None:
            self.create_connection()

        # If the connection failed to initialize, bail out
        if self.con is None:
            return False

        try:
            if magic_hash is None:
                r = self.con.get('have_conf')
            else:
                r = self.con.get('have_conf', {'magic_hash': magic_hash})
            print "have_conf RAW CALL", r, type(r)
            # Protect against a non-boolean (malformed) answer.
            if not isinstance(r, bool):
                return False
            return r
        except HTTPExceptions, exp:
            self.con = None
            return False

    # To know if a receiver got a conf or not
    def got_conf(self):
        if self.con is None:
            self.create_connection()

        # If the connection failed to initialize, bail out
        if self.con is None:
            return False

        try:
            r = self.con.get('got_conf')
            # Protect against bad return
            if not isinstance(r, bool):
                return False
            return r
        except HTTPExceptions, exp:
            self.con = None
            return False

    def remove_from_conf(self, sched_id):
        """Ask the satellite to forget the configuration part *sched_id*."""
        if self.con is None:
            self.create_connection()

        # If the connection failed to initialize, bail out
        if self.con is None:
            return

        try:
            self.con.get('remove_from_conf', {'sched_id': sched_id})
            return True
        except HTTPExceptions, exp:
            self.con = None
            return False

    def update_managed_list(self):
        """Refresh self.managed_confs from the satellite's what_i_managed call.

        On any failure the managed list is reset to empty.
        """
        if self.con is None:
            self.create_connection()

        # If the connection failed to initialize, bail out
        if self.con is None:
            self.managed_confs = {}
            return

        try:
            tab = self.con.get('what_i_managed')
            print "[%s]What i managed raw value is %s" % (self.get_name(), tab)

            # Protect against bad return
            if not isinstance(tab, dict):
                print "[%s]What i managed: Got exception: bad what_i_managed returns" % self.get_name(), tab
                self.con = None
                self.managed_confs = {}
                return

            # Ok, protect against json that is changing dict keys to strings
            # instead of ints: coerce them back.
            tab_cleaned = {}
            for (k, v) in tab.iteritems():
                try:
                    tab_cleaned[int(k)] = v
                except ValueError:
                    print "[%s]What i managed: Got exception: bad what_i_managed returns" % self.get_name(), tab
            # We can update our list now
            self.managed_confs = tab_cleaned
        except HTTPExceptions, exp:
            print "EXCEPTION INwhat_i_managed", str(exp)
            # A timeout is not a crime, put this case aside
            # TODO: fix the timeout part?
            self.con = None
            print "[%s]What i managed: Got exception: %s %s %s" % (self.get_name(), exp, type(exp), exp.__dict__)
            self.managed_confs = {}

    # Return True if the satellite said to managed a configuration
    def do_i_manage(self, cfg_id, push_flavor):
        # If not even the cfg_id in the managed_conf, bail out
        if not cfg_id in self.managed_confs:
            return False

        # maybe it's in but with a false push_flavor. check it :)
        return self.managed_confs[cfg_id] == push_flavor

    def push_broks(self, broks):
        """Send a batch of broks to the satellite; True on success."""
        if self.con is None:
            self.create_connection()

        # If the connection failed to initialize, bail out
        if self.con is None:
            return False

        try:
            # Always do a simple ping to avoid a LOOOONG lock
            self.con.get('ping')
            self.con.post('push_broks', {'broks': broks}, wait='long')
            return True
        except HTTPExceptions, exp:
            self.con = None
            return False

    def get_external_commands(self):
        """Fetch pending external commands (pickled list) from the satellite.

        Returns [] on any failure or malformed answer.
        """
        if self.con is None:
            self.create_connection()

        # If the connection failed to initialize, bail out
        if self.con is None:
            return []

        try:
            self.con.get('ping')
            tab = self.con.get('get_external_commands', wait='long')
            tab = cPickle.loads(str(tab))
            # Protect against bad return
            if not isinstance(tab, list):
                self.con = None
                return []
            return tab
        except HTTPExceptions, exp:
            self.con = None
            return []
        except AttributeError:
            self.con = None
            return []

    def prepare_for_conf(self):
        # Build the skeleton of the configuration to send: every property
        # flagged to_send, plus the daemon api_key/secret.
        self.cfg = {'global': {}, 'schedulers': {}, 'arbiters': {}}
        properties = self.__class__.properties
        for prop, entry in properties.items():
            if entry.to_send:
                self.cfg['global'][prop] = getattr(self, prop)
        # Also add global values
        self.cfg['global']['api_key'] = self.__class__.api_key
        self.cfg['global']['secret'] = self.__class__.secret

    # Some parameters for satellites are not defined in the satellites conf
    # but in the global configuration. We can pass them in the global
    # property
    def add_global_conf_parameters(self, params):
        for prop in params:
            self.cfg['global'][prop] = params[prop]

    def get_my_type(self):
        return self.__class__.my_type

    # Here for poller and reactionner. Scheduler have its own function
    def give_satellite_cfg(self):
        return {'port': self.port,
                'address': self.address,
                'use_ssl': self.use_ssl,
                'hard_ssl_name_check': self.hard_ssl_name_check,
                'name': self.get_name(),
                'instance_id': self.id,
                'active': True,
                'passive': self.passive,
                'poller_tags': getattr(self, 'poller_tags', []),
                'reactionner_tags': getattr(self, 'reactionner_tags', []),
                'api_key': self.__class__.api_key,
                'secret': self.__class__.secret,
                }

    # Called by pickle to serialize this object
    # because we DO NOT WANT REFs (like 'con') in this serialization!
    def __getstate__(self):
        cls = self.__class__
        # id is not in *_properties
        res = {'id': self.id}
        for prop in cls.properties:
            if prop != 'realm':
                if hasattr(self, prop):
                    res[prop] = getattr(self, prop)
        for prop in cls.running_properties:
            if prop != 'con':
                if hasattr(self, prop):
                    res[prop] = getattr(self, prop)
        return res

    # Inverted function of getstate
    def __setstate__(self, state):
        cls = self.__class__
        self.id = state['id']
        for prop in cls.properties:
            if prop in state:
                setattr(self, prop, state[prop])
        for prop in cls.running_properties:
            if prop in state:
                setattr(self, prop, state[prop])
        # con needs to be explicitly set:
        self.con = None
class SatelliteLinks(Items):
    """Collection of SatelliteLink items; resolves their realm and modules."""

    # name_property = "name"
    # inner_class = SchedulerLink

    def linkify(self, realms, modules):
        # Attach each satellite to its Realm object, then to its modules.
        self.linkify_s_by_p(realms)
        self.linkify_s_by_plug(modules)

    def linkify_s_by_p(self, realms):
        # Turn each satellite's textual realm name into a real Realm object
        # (the default realm when no name is given); record a configuration
        # error when the name is unknown.
        for satellite in self:
            realm_name = satellite.realm.strip()
            if realm_name == '':
                realm = realms.get_default()
            else:
                realm = realms.find_by_name(realm_name)
            satellite.realm = realm
            if realm is not None:
                satellite.register_to_my_realm()
            else:
                err = "The %s %s got a unknown realm '%s'" % (satellite.__class__.my_type, satellite.get_name(), realm_name)
                satellite.configuration_errors.append(err)
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rafthttp
import (
"errors"
"fmt"
"sync"
"time"
"go.uber.org/zap"
"go.etcd.io/etcd/client/pkg/v3/types"
)
// failureType identifies a peer failure: which transport component
// observed it (source) and what that component was doing (action).
type failureType struct {
	source string
	action string
}

// peerStatus tracks whether the remote peer `id` is currently reachable
// from the local member, and since when it has been active.
type peerStatus struct {
	lg    *zap.Logger
	local types.ID
	id    types.ID

	mu     sync.Mutex // protect variables below
	active bool
	since  time.Time
}
// newPeerStatus creates a peerStatus for peer `id` as seen from the local
// member. A nil logger is replaced with a no-op logger, so the methods on
// the returned value may log unconditionally.
func newPeerStatus(lg *zap.Logger, local, id types.ID) *peerStatus {
	if lg == nil {
		lg = zap.NewNop()
	}
	return &peerStatus{lg: lg, local: local, id: id}
}
// activate marks the peer as reachable and records when it came up.
// The log line and metric increment fire only on an actual
// inactive -> active transition; repeated calls are no-ops.
func (s *peerStatus) activate() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.active {
		return
	}
	s.lg.Info("peer became active", zap.String("peer-id", s.id.String()))
	s.active = true
	s.since = time.Now()
	activePeers.WithLabelValues(s.local.String(), s.id.String()).Inc()
}
// deactivate marks the peer as unreachable, recording which transport
// component failed and why. Only the first deactivation after being
// active logs at warn level and updates metrics; repeats log at debug.
func (s *peerStatus) deactivate(failure failureType, reason string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	msg := fmt.Sprintf("failed to %s %s on %s (%s)", failure.action, s.id, failure.source, reason)
	if !s.active {
		if s.lg != nil {
			s.lg.Debug("peer deactivated again", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg)))
		}
		return
	}
	s.lg.Warn("peer became inactive (message send to peer failed)", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg)))
	s.active = false
	s.since = time.Time{}
	activePeers.WithLabelValues(s.local.String(), s.id.String()).Dec()
	disconnectedPeers.WithLabelValues(s.local.String(), s.id.String()).Inc()
}
// isActive reports whether the peer is currently considered reachable.
func (s *peerStatus) isActive() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.active
}
// activeSince returns the time the peer last became active, or the zero
// time if it is currently inactive.
func (s *peerStatus) activeSince() time.Time {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.since
}
from __future__ import unicode_literals
import logging
import sys
import types
import warnings
from django import http
from django.conf import settings
from django.core import signals, urlresolvers
from django.core.exceptions import (
MiddlewareNotUsed, PermissionDenied, SuspiciousOperation,
)
from django.db import connections, transaction
from django.http.multipartparser import MultiPartParserError
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.views import debug
# Module-level logger used by the request/exception reporting below.
logger = logging.getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.conditional_content_removal,
]
def __init__(self):
self._request_middleware = None
self._view_middleware = None
self._template_response_middleware = None
self._response_middleware = None
self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
"""
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
mw_class = import_string(middleware_path)
try:
mw_instance = mw_class()
except MiddlewareNotUsed as exc:
if settings.DEBUG:
if six.text_type(exc):
logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
else:
logger.debug('MiddlewareNotUsed: %r', middleware_path)
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def make_view_atomic(self, view):
non_atomic_requests = getattr(view, '_non_atomic_requests', set())
for db in connections.all():
if (db.settings_dict['ATOMIC_REQUESTS']
and db.alias not in non_atomic_requests):
view = transaction.atomic(using=db.alias)(view)
return view
def get_exception_response(self, request, resolver, status_code, exception):
try:
callback, param_dict = resolver.resolve_error_handler(status_code)
# Unfortunately, inspect.getargspec result is not trustable enough
# depending on the callback wrapping in decorators (frequent for handlers).
# Falling back on try/except:
try:
response = callback(request, **dict(param_dict, exception=exception))
except TypeError:
warnings.warn(
"Error handlers should accept an exception parameter. Update "
"your code as this parameter will be required in Django 2.0",
RemovedInDjango20Warning, stacklevel=2
)
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
# Setup default url resolver for this thread, this code is outside
# the try/except so we don't get a spurious "unbound local
# variable" exception in the event an exception is raised before
# resolver is set
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.get_resolver(urlconf)
# Use a flag to check if the response was rendered to prevent
# multiple renderings or to force rendering if necessary.
response_is_rendered = False
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, 'urlconf'):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.get_resolver(urlconf)
resolver_match = resolver.resolve(request.path_info)
callback, callback_args, callback_kwargs = resolver_match
request.resolver_match = resolver_match
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
wrapped_callback = self.make_view_atomic(callback)
try:
response = wrapped_callback(request, *callback_args, **callback_kwargs)
except Exception as e:
response = self.process_exception_by_middleware(e, request)
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError("The view %s.%s didn't return an HttpResponse object. It returned None instead."
% (callback.__module__, view_name))
# If the response supports deferred rendering, apply template
# response middleware and then render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
# Complain if the template response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_template_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__))
try:
response = response.render()
except Exception as e:
response = self.process_exception_by_middleware(e, request)
response_is_rendered = True
except http.Http404 as exc:
logger.warning('Not Found: %s', request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
response = debug.technical_404_response(request, exc)
else:
response = self.get_exception_response(request, resolver, 404, exc)
except PermissionDenied as exc:
logger.warning(
'Forbidden (Permission denied): %s', request.path,
extra={
'status_code': 403,
'request': request
})
response = self.get_exception_response(request, resolver, 403, exc)
except MultiPartParserError as exc:
logger.warning(
'Bad request (Unable to parse request body): %s', request.path,
extra={
'status_code': 400,
'request': request
})
response = self.get_exception_response(request, resolver, 400, exc)
except SuspiciousOperation as exc:
# The request logger receives events for any problematic request
# The security logger receives events for all SuspiciousOperations
security_logger = logging.getLogger('django.security.%s' %
exc.__class__.__name__)
security_logger.error(
force_text(exc),
extra={
'status_code': 400,
'request': request
})
if settings.DEBUG:
return debug.technical_500_response(request, *sys.exc_info(), status_code=400)
response = self.get_exception_response(request, resolver, 400, exc)
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else.
# Get the exception info now, in case another exception is thrown later.
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
# Complain if the response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__))
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
response._closable_objects.append(request)
# If the exception handler returns a TemplateResponse that has not
# been rendered, force it to be rendered.
if not response_is_rendered and callable(getattr(response, 'render', None)):
response = response.render()
return response
def process_exception_by_middleware(self, exception, request):
"""
Pass the exception to the exception middleware. If no middleware
return a response for this exception, raise it.
"""
for middleware_method in self._exception_middleware:
response = middleware_method(request, exception)
if response:
return response
raise
    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.
        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            # Bare ``raise`` re-raises the exception currently being handled;
            # callers invoke this method from inside an ``except`` block.
            raise
        # Log with the original traceback (exc_info) so the 500 is traceable.
        logger.error('Internal Server Error: %s', request.path,
            exc_info=exc_info,
            extra={
                'status_code': 500,
                'request': request
            }
        )
        if settings.DEBUG:
            return debug.technical_500_response(request, *exc_info)
        # If Http500 handler is not installed, re-raise last exception
        # (six.reraise preserves the original traceback across py2/py3).
        if resolver.urlconf_module is None:
            six.reraise(*exc_info)
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve_error_handler(500)
        return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response | unknown | codeparrot/codeparrot-clean | ||
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from skimage._shared.testing import assert_greater, test_parallel
from skimage.segmentation import felzenszwalb
from skimage import data
@test_parallel()
def test_grey():
    # Weak smoke test; the algorithm itself is fairly unstable.
    image = np.zeros((20, 21))
    image[:10, 10:] = 0.2
    image[10:, :10] = 0.4
    image[10:, 10:] = 0.6
    labels = felzenszwalb(image, sigma=0)
    # The four constant quadrants should yield exactly four segments...
    assert_equal(len(np.unique(labels)), 4)
    # ...and each segment should mostly cover its own quadrant.
    for label in range(4):
        counts = np.histogram(image[labels == label],
                              bins=[0, 0.1, 0.3, 0.5, 1])[0]
        assert_greater(counts[label], 40)
def test_minsize():
    # Single-channel image.
    gray = data.coins()[20:168, 0:128]
    for min_size in np.arange(10, 100, 10):
        labels = felzenszwalb(gray, min_size=min_size, sigma=3)
        sizes = np.bincount(labels.ravel())
        # ``+ 1`` because we really want to test greater-or-equal.
        assert_greater(sizes.min() + 1, min_size)
    # Multi-channel image.
    rgb = data.coffee()[::4, ::4]
    for min_size in np.arange(10, 100, 10):
        labels = felzenszwalb(rgb, min_size=min_size, sigma=3)
        sizes = np.bincount(labels.ravel())
        # Only check the mean here: the construction does not guarantee
        # min_size is respected after the per-channel segmentations are
        # intersected.
        assert_greater(np.mean(sizes) + 1, min_size)
def test_color():
    # Weak smoke test; the algorithm itself is fairly unstable.
    image = np.zeros((20, 21, 3))
    image[:10, :10, 0] = 1
    image[10:, :10, 1] = 1
    image[10:, 10:, 2] = 1
    labels = felzenszwalb(image, sigma=0)
    # Four color quadrants -> four segments...
    assert_equal(len(np.unique(labels)), 4)
    # ...with a deterministic label per quadrant.
    assert_array_equal(labels[:10, :10], 0)
    assert_array_equal(labels[10:, :10], 2)
    assert_array_equal(labels[:10, 10:], 1)
    assert_array_equal(labels[10:, 10:], 3)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    from numpy import testing
    testing.run_module_suite()
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.tokenizer
from dns._compat import text_type
class HINFO(dns.rdata.Rdata):
    """HINFO record
    @ivar cpu: the CPU type
    @type cpu: string
    @ivar os: the OS type
    @type os: string
    @see: RFC 1035"""
    __slots__ = ['cpu', 'os']
    def __init__(self, rdclass, rdtype, cpu, os):
        """Initialize an HINFO rdata, normalizing text inputs to bytes."""
        super(HINFO, self).__init__(rdclass, rdtype)
        # Accept either text or bytes for both fields; store bytes internally.
        if isinstance(cpu, text_type):
            self.cpu = cpu.encode()
        else:
            self.cpu = cpu
        if isinstance(os, text_type):
            self.os = os.encode()
        else:
            self.os = os
    def to_text(self, origin=None, relativize=True, **kw):
        # Render both fields as quoted, escaped character-strings.
        return '"%s" "%s"' % (dns.rdata._escapify(self.cpu),
                              dns.rdata._escapify(self.os))
    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Parse an HINFO record from zone-file text: two strings then EOL."""
        cpu = tok.get_string()
        os = tok.get_string()
        tok.get_eol()
        return cls(rdclass, rdtype, cpu, os)
    def to_wire(self, file, compress=None, origin=None):
        """Write the record as two length-prefixed character-strings."""
        l = len(self.cpu)
        # DNS character-strings carry a single-octet length prefix, so each
        # field must be shorter than 256 bytes.
        assert l < 256
        file.write(struct.pack('!B', l))
        file.write(self.cpu)
        l = len(self.os)
        assert l < 256
        file.write(struct.pack('!B', l))
        file.write(self.os)
    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        """Parse the wire form: two length-prefixed character-strings."""
        l = wire[current]
        current += 1
        rdlen -= 1
        # The cpu string may not run past the rdata boundary.
        if l > rdlen:
            raise dns.exception.FormError
        cpu = wire[current:current + l].unwrap()
        current += l
        rdlen -= l
        l = wire[current]
        current += 1
        rdlen -= 1
        # The os string must consume exactly the remaining rdata.
        if l != rdlen:
            raise dns.exception.FormError
        os = wire[current: current + l].unwrap()
        return cls(rdclass, rdtype, cpu, os)
# Safely load fast C Yaml loader/dumper if they are available
try:
    from yaml import CSafeLoader as Loader
except ImportError:
    # Pure-Python fallback when PyYAML was built without libyaml.
    from yaml import SafeLoader as Loader  # type: ignore[assignment, misc]
try:
    from yaml import CSafeDumper as Dumper
except ImportError:
    from yaml import SafeDumper as Dumper  # type: ignore[assignment, misc]
# Public alias so callers do not depend on which dumper was selected.
YamlDumper = Dumper
# A custom loader for YAML that errors on duplicate keys.
# This doesn't happen by default: see https://github.com/yaml/pyyaml/issues/165
class YamlLoader(Loader):
    def construct_mapping(self, node, deep=False):  # type: ignore[no-untyped-def]
        """Construct a mapping, raising on duplicate keys."""
        seen_keys = []
        for key_node, _value_node in node.value:
            key = self.construct_object(key_node, deep=deep)  # type: ignore[no-untyped-call]
            if key in seen_keys:
                raise AssertionError(
                    f"Found a duplicate key in the yaml. key={key}, line={node.start_mark.line}"
                )
            seen_keys.append(key)
        # Delegate the actual mapping construction to the base loader.
        return super().construct_mapping(node, deep=deep)  # type: ignore[no-untyped-call]
#!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import digital_swig as digital
class test_probe_density(gr_unittest.TestCase):
    """Tests for digital.probe_density_b's filtered bit-density estimate."""
    def setUp(self):
        self.tb = gr.top_block()
    def tearDown(self):
        # Drop the flowgraph so each test starts from a clean state.
        self.tb = None
    def _measure_density(self, src_data, alpha):
        # Build src -> probe, run the flowgraph, return the measured density.
        src = gr.vector_source_b(src_data)
        op = digital.probe_density_b(alpha)
        self.tb.connect(src, op)
        self.tb.run()
        return op.density()
    def test_001(self):
        # With alpha = 1 the probe tracks the most recent bit exactly.
        self.assertEqual(1, self._measure_density([0, 1, 0, 1], 1))
    def test_002(self):
        # An all-ones stream keeps the density saturated at 1.
        self.assertEqual(1, self._measure_density([1, 1, 1, 1], 0.01))
    def test_003(self):
        # Alternating bits with a slow filter stay near the initial value.
        # (Stray debug ``print`` removed; tests should not write to stdout.)
        result = self._measure_density([0, 1, 0, 1, 0, 1, 0, 1, 0, 1], 0.01)
        self.assertAlmostEqual(0.95243, result, 5)
# Run the suite and emit the XML report consumed by GNU Radio's test harness.
if __name__ == '__main__':
    gr_unittest.run(test_probe_density, "test_probe_density.xml")
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import abstractmethod
from typing import List
import numpy as np
from scipy.sparse import issparse
from sklearn import get_config
from sklearn.metrics._dist_metrics import BOOL_METRICS, METRIC_MAPPING64, DistanceMetric
from sklearn.metrics._pairwise_distances_reduction._argkmin import ArgKmin32, ArgKmin64
from sklearn.metrics._pairwise_distances_reduction._argkmin_classmode import (
ArgKminClassMode32,
ArgKminClassMode64,
)
from sklearn.metrics._pairwise_distances_reduction._base import (
_sqeuclidean_row_norms32,
_sqeuclidean_row_norms64,
)
from sklearn.metrics._pairwise_distances_reduction._radius_neighbors import (
RadiusNeighbors32,
RadiusNeighbors64,
)
from sklearn.metrics._pairwise_distances_reduction._radius_neighbors_classmode import (
RadiusNeighborsClassMode32,
RadiusNeighborsClassMode64,
)
def sqeuclidean_row_norms(X, num_threads):
    """Squared euclidean norms of the rows of X, computed in parallel.
    Parameters
    ----------
    X : ndarray or CSR matrix of shape (n_samples, n_features)
        Input data. Must be c-contiguous.
    num_threads : int
        The number of OpenMP threads to use.
    Returns
    -------
    sqeuclidean_row_norms : ndarray of shape (n_samples,)
        Arrays containing the squared euclidean norm of each row of X.
    """
    dtype = X.dtype
    if dtype == np.float64:
        norms = _sqeuclidean_row_norms64(X, num_threads)
    elif dtype == np.float32:
        norms = _sqeuclidean_row_norms32(X, num_threads)
    else:
        raise ValueError(
            "Only float64 or float32 datasets are supported at this time, "
            f"got: X.dtype={X.dtype}."
        )
    return np.asarray(norms)
class BaseDistancesReductionDispatcher:
    """Abstract base dispatcher for pairwise distance computation & reduction.
    Each dispatcher extending the base :class:`BaseDistancesReductionDispatcher`
    dispatcher must implement the :meth:`compute` classmethod.
    """
    @classmethod
    def valid_metrics(cls) -> List[str]:
        # Metrics the Cython back-ends can handle; a few are excluded below.
        excluded = {
            # PyFunc cannot be supported because it necessitates interacting with
            # the CPython interpreter to call user defined functions.
            "pyfunc",
            "mahalanobis",  # is numerically unstable
            # In order to support discrete distance metrics, we need to have a
            # stable simultaneous sort which preserves the order of the indices
            # because there generally is a lot of occurrences for a given values
            # of distances in this case.
            # TODO: implement a stable simultaneous_sort.
            "hamming",
            *BOOL_METRICS,
        }
        return sorted(({"sqeuclidean"} | set(METRIC_MAPPING64.keys())) - excluded)
    @classmethod
    def is_usable_for(cls, X, Y, metric) -> bool:
        """Return True if the dispatcher can be used for the
        given parameters.
        Parameters
        ----------
        X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)
            Input data.
        Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features)
            Input data.
        metric : str, default='euclidean'
            The distance metric to use.
            For a list of available metrics, see the documentation of
            :class:`~sklearn.metrics.DistanceMetric`.
        Returns
        -------
        True if the dispatcher can be used, else False.
        """
        # FIXME: the current Cython implementation is too slow for a large number of
        # features. We temporarily disable it to fallback on SciPy's implementation.
        # See: https://github.com/scikit-learn/scikit-learn/issues/28191
        if (
            issparse(X)
            and issparse(Y)
            and isinstance(metric, str)
            and "euclidean" in metric
        ):
            return False
        def is_numpy_c_ordered(X):
            # Dense inputs must be C-contiguous ndarrays.
            return hasattr(X, "flags") and getattr(X.flags, "c_contiguous", False)
        def is_valid_sparse_matrix(X):
            return (
                issparse(X)
                and X.format == "csr"
                and
                # TODO: support CSR matrices without non-zeros elements
                X.nnz > 0
                and
                # TODO: support CSR matrices with int64 indices and indptr
                # See: https://github.com/scikit-learn/scikit-learn/issues/23653
                X.indices.dtype == X.indptr.dtype == np.int32
            )
        # Both operands must be dense C-ordered or 32-bit-indexed CSR, share
        # the same float dtype, and use a supported metric.
        is_usable = (
            get_config().get("enable_cython_pairwise_dist", True)
            and (is_numpy_c_ordered(X) or is_valid_sparse_matrix(X))
            and (is_numpy_c_ordered(Y) or is_valid_sparse_matrix(Y))
            and X.dtype == Y.dtype
            and X.dtype in (np.float32, np.float64)
            and (metric in cls.valid_metrics() or isinstance(metric, DistanceMetric))
        )
        return is_usable
    @classmethod
    @abstractmethod
    def compute(
        cls,
        X,
        Y,
        **kwargs,
    ):
        """Compute the reduction.
        Parameters
        ----------
        X : ndarray or CSR matrix of shape (n_samples_X, n_features)
            Input data.
        Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
            Input data.
        **kwargs : additional parameters for the reduction
        Notes
        -----
        This method is an abstract class method: it has to be implemented
        for all subclasses.
        """
class ArgKmin(BaseDistancesReductionDispatcher):
    """Compute the argkmin of row vectors of X on the ones of Y.
    For each row vector of X, computes the indices of k first the rows
    vectors of Y with the smallest distances.
    ArgKmin is typically used to perform
    bruteforce k-nearest neighbors queries.
    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """
    @classmethod
    def compute(
        cls,
        X,
        Y,
        k,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
        return_distance=False,
    ):
        """Compute the argkmin reduction.
        Parameters
        ----------
        X : ndarray or CSR matrix of shape (n_samples_X, n_features)
            Input data.
        Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
            Input data.
        k : int
            The k for the argkmin reduction.
        metric : str, default='euclidean'
            The distance metric to use for argkmin.
            For a list of available metrics, see the documentation of
            :class:`~sklearn.metrics.DistanceMetric`.
        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.
        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.
        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.
            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differs on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:
            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.
            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.
            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient
            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
        return_distance : boolean, default=False
            Return distances between each X vector and its
            argkmin if set to True.
        Returns
        -------
        If return_distance=False:
          - argkmin_indices : ndarray of shape (n_samples_X, k)
            Indices of the argkmin for each vector in X.
        If return_distance=True:
          - argkmin_distances : ndarray of shape (n_samples_X, k)
            Distances to the argkmin for each vector in X.
          - argkmin_indices : ndarray of shape (n_samples_X, k)
            Indices of the argkmin for each vector in X.
        Notes
        -----
        This classmethod inspects the arguments values to dispatch to the
        dtype-specialized implementation of :class:`ArgKmin`.
        This allows decoupling the API entirely from the implementation details
        whilst maintaining RAII: all temporarily allocated datastructures necessary
        for the concrete implementation are therefore freed when this classmethod
        returns.
        """
        # Dtype-specialized dispatch: both operands must share one float dtype;
        # mixed or unsupported dtypes fall through to the ValueError below.
        if X.dtype == Y.dtype == np.float64:
            return ArgKmin64.compute(
                X=X,
                Y=Y,
                k=k,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
                return_distance=return_distance,
            )
        if X.dtype == Y.dtype == np.float32:
            return ArgKmin32.compute(
                X=X,
                Y=Y,
                k=k,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
                return_distance=return_distance,
            )
        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )
class RadiusNeighbors(BaseDistancesReductionDispatcher):
    """Compute radius-based neighbors for two sets of vectors.
    For each row-vector X[i] of the queries X, find all the indices j of
    row-vectors in Y such that:
    dist(X[i], Y[j]) <= radius
    The distance function `dist` depends on the values of the `metric`
    and `metric_kwargs` parameters.
    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """
    @classmethod
    def compute(
        cls,
        X,
        Y,
        radius,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
        return_distance=False,
        sort_results=False,
    ):
        """Return the results of the reduction for the given arguments.
        Parameters
        ----------
        X : ndarray or CSR matrix of shape (n_samples_X, n_features)
            Input data.
        Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
            Input data.
        radius : float
            The radius defining the neighborhood.
        metric : str, default='euclidean'
            The distance metric to use.
            For a list of available metrics, see the documentation of
            :class:`~sklearn.metrics.DistanceMetric`.
        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.
        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.
        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.
            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differs on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:
            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.
            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.
            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient
              despite the synchronization step at each iteration of the outer loop
              on chunks of `X`.
            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
        return_distance : boolean, default=False
            Return distances between each X vector and its neighbors if set to True.
        sort_results : boolean, default=False
            Sort results with respect to distances between each X vector and its
            neighbors if set to True.
        Returns
        -------
        If return_distance=False:
          - neighbors_indices : ndarray of n_samples_X ndarray
            Indices of the neighbors for each vector in X.
        If return_distance=True:
          - neighbors_indices : ndarray of n_samples_X ndarray
            Indices of the neighbors for each vector in X.
          - neighbors_distances : ndarray of n_samples_X ndarray
            Distances to the neighbors for each vector in X.
        Notes
        -----
        This classmethod inspects the arguments values to dispatch to the
        dtype-specialized implementation of :class:`RadiusNeighbors`.
        This allows decoupling the API entirely from the implementation details
        whilst maintaining RAII: all temporarily allocated datastructures necessary
        for the concrete implementation are therefore freed when this classmethod
        returns.
        """
        # Dtype-specialized dispatch: both operands must share one float dtype;
        # mixed or unsupported dtypes fall through to the ValueError below.
        if X.dtype == Y.dtype == np.float64:
            return RadiusNeighbors64.compute(
                X=X,
                Y=Y,
                radius=radius,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
                sort_results=sort_results,
                return_distance=return_distance,
            )
        if X.dtype == Y.dtype == np.float32:
            return RadiusNeighbors32.compute(
                X=X,
                Y=Y,
                radius=radius,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
                sort_results=sort_results,
                return_distance=return_distance,
            )
        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )
class ArgKminClassMode(BaseDistancesReductionDispatcher):
    """Compute the argkmin of row vectors of X on the ones of Y with labels.
    For each row vector of X, computes the indices of k first the rows
    vectors of Y with the smallest distances. Computes weighted mode of labels.
    ArgKminClassMode is typically used to perform bruteforce k-nearest neighbors
    queries when the weighted mode of the labels for the k-nearest neighbors
    are required, such as in `predict` methods.
    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """
    @classmethod
    def valid_metrics(cls) -> List[str]:
        excluded = {
            # Euclidean is technically usable for ArgKminClassMode
            # but its current implementation would not be competitive.
            # TODO: implement Euclidean specialization using GEMM.
            "euclidean",
            "sqeuclidean",
        }
        # Sort for a deterministic order, consistently with the base class
        # and RadiusNeighborsClassMode (set iteration order is arbitrary).
        return sorted(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded)
    @classmethod
    def compute(
        cls,
        X,
        Y,
        k,
        weights,
        Y_labels,
        unique_Y_labels,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
    ):
        """Compute the argkmin reduction.
        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            The input array to be labelled.
        Y : ndarray of shape (n_samples_Y, n_features)
            The input array whose class membership are provided through the
            `Y_labels` parameter.
        k : int
            The number of nearest neighbors to consider.
        weights : ndarray
            The weights applied over the `Y_labels` of `Y` when computing the
            weighted mode of the labels.
        Y_labels : ndarray
            An array containing the index of the class membership of the
            associated samples in `Y`. This is used in labeling `X`.
        unique_Y_labels : ndarray
            An array containing all unique indices contained in the
            corresponding `Y_labels` array.
        metric : str, default='euclidean'
            The distance metric to use. For a list of available metrics, see
            the documentation of :class:`~sklearn.metrics.DistanceMetric`.
            Currently does not support `'precomputed'`.
        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.
        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.
        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.
            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differs on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:
            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.
            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.
            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient
              despite the synchronization step at each iteration of the outer loop
              on chunks of `X`.
            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
        Returns
        -------
        probabilities : ndarray of shape (n_samples_X, n_classes)
            An array containing the class probabilities for each sample.
        Notes
        -----
        This classmethod is responsible for introspecting the arguments
        values to dispatch to the most appropriate implementation of
        :class:`PairwiseDistancesArgKmin`.
        This allows decoupling the API entirely from the implementation details
        whilst maintaining RAII: all temporarily allocated datastructures necessary
        for the concrete implementation are therefore freed when this classmethod
        returns.
        """
        if weights not in {"uniform", "distance"}:
            raise ValueError(
                "Only the 'uniform' or 'distance' weights options are supported"
                f" at this time. Got: {weights=}."
            )
        # Dtype-specialized dispatch: both operands must share one float dtype;
        # mixed or unsupported dtypes fall through to the ValueError below.
        if X.dtype == Y.dtype == np.float64:
            return ArgKminClassMode64.compute(
                X=X,
                Y=Y,
                k=k,
                weights=weights,
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )
        if X.dtype == Y.dtype == np.float32:
            return ArgKminClassMode32.compute(
                X=X,
                Y=Y,
                k=k,
                weights=weights,
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )
        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )
class RadiusNeighborsClassMode(BaseDistancesReductionDispatcher):
    """Compute radius-based class modes of row vectors of X using the
    those of Y.
    For each row-vector X[i] of the queries X, find all the indices j of
    row-vectors in Y such that:
    dist(X[i], Y[j]) <= radius
    RadiusNeighborsClassMode is typically used to perform bruteforce
    radius neighbors queries when the weighted mode of the labels for
    the nearest neighbors within the specified radius are required,
    such as in `predict` methods.
    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """
    @classmethod
    def valid_metrics(cls) -> List[str]:
        excluded = {
            # Euclidean is technically usable for RadiusNeighborsClassMode
            # but it would not be competitive.
            # TODO: implement Euclidean specialization using GEMM.
            "euclidean",
            "sqeuclidean",
        }
        return sorted(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded)
    @classmethod
    def compute(
        cls,
        X,
        Y,
        radius,
        weights,
        Y_labels,
        unique_Y_labels,
        outlier_label,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
    ):
        """Return the results of the reduction for the given arguments.
        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            The input array to be labelled.
        Y : ndarray of shape (n_samples_Y, n_features)
            The input array whose class membership is provided through
            the `Y_labels` parameter.
        radius : float
            The radius defining the neighborhood.
        weights : ndarray
            The weights applied to the `Y_labels` when computing the
            weighted mode of the labels.
        Y_labels : ndarray
            An array containing the index of the class membership of the
            associated samples in `Y`. This is used in labeling `X`.
        unique_Y_labels : ndarray
            An array containing all unique class labels.
        outlier_label : int, default=None
            Label for outlier samples (samples with no neighbors in given
            radius). In the default case when the value is None if any
            outlier is detected, a ValueError will be raised. The outlier
            label should be selected from among the unique 'Y' labels. If
            it is specified with a different value a warning will be raised
            and all class probabilities of outliers will be assigned to be 0.
        metric : str, default='euclidean'
            The distance metric to use. For a list of available metrics, see
            the documentation of :class:`~sklearn.metrics.DistanceMetric`.
            Currently does not support `'precomputed'`.
        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.
        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.
        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.
            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differs on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:
            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.
            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.
            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient
              despite the synchronization step at each iteration of the outer loop
              on chunks of `X`.
            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
        Returns
        -------
        probabilities : ndarray of shape (n_samples_X, n_classes)
            An array containing the class probabilities for each sample.
        """
        if weights not in {"uniform", "distance"}:
            raise ValueError(
                "Only the 'uniform' or 'distance' weights options are supported"
                f" at this time. Got: {weights=}."
            )
        # Dtype-specialized dispatch: both operands must share one float dtype;
        # mixed or unsupported dtypes fall through to the ValueError below.
        if X.dtype == Y.dtype == np.float64:
            return RadiusNeighborsClassMode64.compute(
                X=X,
                Y=Y,
                radius=radius,
                weights=weights,
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                outlier_label=outlier_label,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )
        if X.dtype == Y.dtype == np.float32:
            return RadiusNeighborsClassMode32.compute(
                X=X,
                Y=Y,
                radius=radius,
                weights=weights,
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                outlier_label=outlier_label,
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )
        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )
// Package splunk provides the log driver for forwarding server logs to
// Splunk HTTP Event Collector endpoint.
package splunk
import (
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/containerd/log"
"github.com/google/uuid"
"github.com/moby/moby/v2/daemon/logger"
"github.com/moby/moby/v2/daemon/logger/loggerutils"
"github.com/moby/moby/v2/pkg/pools"
)
// Driver name and the log-opt keys accepted by this driver (validated in
// ValidateLogOpt, consumed in New).
const (
	driverName                    = "splunk"
	splunkURLKey                  = "splunk-url"
	splunkTokenKey                = "splunk-token"
	splunkSourceKey               = "splunk-source"
	splunkSourceTypeKey           = "splunk-sourcetype"
	splunkIndexKey                = "splunk-index"
	splunkCAPathKey               = "splunk-capath"
	splunkCANameKey               = "splunk-caname"
	splunkInsecureSkipVerifyKey   = "splunk-insecureskipverify"
	splunkFormatKey               = "splunk-format"
	splunkVerifyConnectionKey     = "splunk-verify-connection" // #nosec G101 -- ignoring: Potential hardcoded credentials (gosec)
	splunkGzipCompressionKey      = "splunk-gzip"
	splunkGzipCompressionLevelKey = "splunk-gzip-level"
	splunkIndexAcknowledgment     = "splunk-index-acknowledgment"
	envKey                        = "env"
	envRegexKey                   = "env-regex"
	labelsKey                     = "labels"
	labelsRegexKey                = "labels-regex"
	tagKey                        = "tag"
)
// Default batching parameters; each can be overridden through the
// environment variables declared below (see getAdvancedOption*).
const (
	// How often do we send messages (if we are not reaching batch size)
	defaultPostMessagesFrequency = 5 * time.Second
	// How big can be batch of messages
	defaultPostMessagesBatchSize = 1000
	// Maximum number of messages we can store in buffer
	defaultBufferMaximum = 10 * defaultPostMessagesBatchSize
	// Number of messages allowed to be queued in the channel
	defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize
	// maxResponseSize is the max amount that will be read from an http response
	maxResponseSize = 1024
)
// Environment variables that override the batching defaults above.
const (
	envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY"
	envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE"
	envVarBufferMaximum         = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX"
	envVarStreamChannelSize     = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE"
)

// batchSendTimeout bounds one postMessages delivery attempt (a package-level
// var rather than a const so tests can shorten it).
var batchSendTimeout = 30 * time.Second
// splunkLoggerInterface is what New returns: a logger.Logger whose worker
// goroutine (started by New) drains the message stream in the background.
type splunkLoggerInterface interface {
	logger.Logger
	worker()
}
// splunkLogger is the shared core of all three message formats: it owns the
// HTTP client, the message template, the batching knobs, and the channel
// feeding the background worker.
type splunkLogger struct {
	client    *http.Client
	transport *http.Transport
	url       string
	auth      string // value for the Authorization header ("Splunk <token>")
	nullMessage *splunkMessage // template copied for every outgoing message
	// http compression
	gzipCompression      bool
	gzipCompressionLevel int
	// Advanced options
	postMessagesFrequency time.Duration
	postMessagesBatchSize int
	bufferMaximum         int
	indexAck              bool
	// For synchronization between background worker and logger.
	// We use channel to send messages to worker go routine.
	// All other variables for blocking Close call before we flush all messages to HEC
	stream     chan *splunkMessage
	lock       sync.RWMutex
	closed     bool       // set by the worker once the final flush is done
	closedCond *sync.Cond // non-nil once Close has been called
}
// splunkLoggerInline sends each log line as a structured event
// (line/source/tag/attrs); nullEvent is the per-logger event template.
type splunkLoggerInline struct {
	*splunkLogger
	nullEvent *splunkMessageEvent
}

// splunkLoggerJSON behaves like the inline format but embeds the line as raw
// JSON when it parses as such (see its Log method).
type splunkLoggerJSON struct {
	*splunkLoggerInline
}

// splunkLoggerRaw sends the line as plain text, prefixed with the rendered
// tag and attributes.
type splunkLoggerRaw struct {
	*splunkLogger
	prefix []byte
}

// splunkMessage is the JSON envelope posted to the HEC event endpoint.
type splunkMessage struct {
	Event      any    `json:"event"`
	Time       string `json:"time"` // epoch seconds with fractional part
	Host       string `json:"host"`
	Source     string `json:"source,omitempty"`
	SourceType string `json:"sourcetype,omitempty"`
	Index      string `json:"index,omitempty"`
}

// splunkMessageEvent is the structured payload used by the inline and json
// formats.
type splunkMessageEvent struct {
	Line   any               `json:"line"`
	Source string            `json:"source"`
	Tag    string            `json:"tag,omitempty"`
	Attrs  map[string]string `json:"attrs,omitempty"`
}
// Accepted values for the splunk-format log option.
const (
	splunkFormatRaw    = "raw"
	splunkFormatJSON   = "json"
	splunkFormatInline = "inline"
)
// init registers the driver constructor and its option validator with the
// logger registry; registration failure (e.g. duplicate name) is fatal.
func init() {
	if err := logger.RegisterLogDriver(driverName, New); err != nil {
		panic(err)
	}
	if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil {
		panic(err)
	}
}
// New creates a splunk logger driver from the configuration passed in info.
// It validates the HEC endpoint and TLS settings, reads the batching knobs
// from the environment, optionally probes the endpoint, and starts the
// background worker goroutine for the selected message format.
func New(info logger.Info) (logger.Logger, error) {
	hostname, err := info.Hostname()
	if err != nil {
		return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
	}
	// Parse and validate Splunk URL
	splunkURL, err := parseURL(info)
	if err != nil {
		return nil, err
	}
	// Splunk Token is required parameter
	splunkToken, ok := info.Config[splunkTokenKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
	}
	// FIXME set minimum TLS version for splunk (see https://github.com/moby/moby/issues/42443)
	tlsConfig := &tls.Config{} //nolint: gosec // G402: TLS MinVersion too low.
	// Splunk is using autogenerated certificates by default,
	// allow users to trust them with skipping verification
	if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok {
		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
		if err != nil {
			return nil, err
		}
		tlsConfig.InsecureSkipVerify = insecureSkipVerify
	}
	// If path to the root certificate is provided - load it
	if caPath, ok := info.Config[splunkCAPathKey]; ok {
		caCert, err := os.ReadFile(caPath)
		if err != nil {
			return nil, err
		}
		caPool := x509.NewCertPool()
		caPool.AppendCertsFromPEM(caCert)
		tlsConfig.RootCAs = caPool
	}
	if caName, ok := info.Config[splunkCANameKey]; ok {
		tlsConfig.ServerName = caName
	}
	gzipCompression := false
	if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok {
		gzipCompression, err = strconv.ParseBool(gzipCompressionStr)
		if err != nil {
			return nil, err
		}
	}
	gzipCompressionLevel := gzip.DefaultCompression
	if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok {
		var err error
		gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32)
		if err != nil {
			return nil, err
		}
		gzipCompressionLevel = int(gzipCompressionLevel64)
		if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression {
			err := fmt.Errorf("not supported level '%s' for %s (supported values between %d and %d)",
				gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression)
			return nil, err
		}
	}
	indexAck := false
	if indexAckStr, ok := info.Config[splunkIndexAcknowledgment]; ok {
		indexAck, err = strconv.ParseBool(indexAckStr)
		if err != nil {
			return nil, err
		}
	}
	// Allow user to remove tag from the messages by setting tag to empty string
	var tag string
	if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" {
		tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
		if err != nil {
			return nil, err
		}
	}
	extraAttrs, err := info.ExtraAttributes(nil)
	if err != nil {
		return nil, err
	}
	// Batching knobs: environment overrides with documented defaults.
	var (
		postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency)
		postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize)
		bufferMaximum         = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum)
		streamChannelSize     = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize)
	)
	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
		Proxy:           http.ProxyFromEnvironment,
	}
	splLogger := &splunkLogger{
		client:    &http.Client{Transport: transport},
		transport: transport,
		url:       splunkURL.String(),
		auth:      "Splunk " + splunkToken,
		nullMessage: &splunkMessage{
			Host:       hostname,
			Source:     info.Config[splunkSourceKey],
			SourceType: info.Config[splunkSourceTypeKey],
			Index:      info.Config[splunkIndexKey],
		},
		gzipCompression:       gzipCompression,
		gzipCompressionLevel:  gzipCompressionLevel,
		stream:                make(chan *splunkMessage, streamChannelSize),
		postMessagesFrequency: postMessagesFrequency,
		postMessagesBatchSize: postMessagesBatchSize,
		bufferMaximum:         bufferMaximum,
		indexAck:              indexAck,
	}
	// By default we verify connection, but we allow users to skip that
	verifyConnection := true
	if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok {
		var err error
		verifyConnection, err = strconv.ParseBool(verifyConnectionStr)
		if err != nil {
			return nil, err
		}
	}
	if verifyConnection {
		err = verifySplunkConnection(splLogger)
		if err != nil {
			return nil, err
		}
	}
	splunkFormat := splunkFormatInline
	if f, ok := info.Config[splunkFormatKey]; ok {
		splunkFormat = f
	}
	// Wrap the core logger in the format-specific type and start its worker.
	var loggerWrapper splunkLoggerInterface
	switch splunkFormat {
	case splunkFormatInline:
		nullEvent := &splunkMessageEvent{
			Tag:   tag,
			Attrs: extraAttrs,
		}
		loggerWrapper = &splunkLoggerInline{splLogger, nullEvent}
	case splunkFormatJSON:
		nullEvent := &splunkMessageEvent{
			Tag:   tag,
			Attrs: extraAttrs,
		}
		loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{splLogger, nullEvent}}
	case splunkFormatRaw:
		// Raw format: pre-render "<tag> k=v k=v " once; it is prepended to
		// every line in splunkLoggerRaw.Log.
		var prefix bytes.Buffer
		if tag != "" {
			prefix.WriteString(tag)
			prefix.WriteString(" ")
		}
		for key, value := range extraAttrs {
			prefix.WriteString(key)
			prefix.WriteString("=")
			prefix.WriteString(value)
			prefix.WriteString(" ")
		}
		loggerWrapper = &splunkLoggerRaw{splLogger, prefix.Bytes()}
	default:
		return nil, fmt.Errorf("unknown format specified %s, supported formats are inline, json and raw", splunkFormat)
	}
	go loggerWrapper.worker()
	return loggerWrapper, nil
}
// Log queues one message in the inline format: the line is carried as a
// string inside a splunkMessageEvent copied from the template.
func (l *splunkLoggerInline) Log(msg *logger.Message) error {
	message := l.createSplunkMessage(msg)
	event := *l.nullEvent // copy so the shared template is never mutated
	event.Line = string(msg.Line)
	event.Source = msg.Source
	message.Event = &event
	logger.PutMessage(msg) // msg buffers are pooled; return after last use
	return l.queueMessageAsync(message)
}
// Log queues one message in the json format: if the line is itself valid
// JSON it is embedded as a raw JSON value, otherwise it falls back to the
// inline string form.
func (l *splunkLoggerJSON) Log(msg *logger.Message) error {
	message := l.createSplunkMessage(msg)
	event := *l.nullEvent // copy so the shared template is never mutated
	var rawJSONMessage json.RawMessage
	if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil {
		event.Line = &rawJSONMessage
	} else {
		event.Line = string(msg.Line)
	}
	event.Source = msg.Source
	message.Event = &event
	logger.PutMessage(msg) // msg buffers are pooled; return after last use
	return l.queueMessageAsync(message)
}
// Log queues one message in the raw format: the pre-rendered tag/attr
// prefix followed by the plain log line.
// NOTE(review): append on l.prefix may write into the buffer's spare
// capacity; safe if Log is never called concurrently for one logger —
// confirm the caller's serialization guarantee.
func (l *splunkLoggerRaw) Log(msg *logger.Message) error {
	// empty or whitespace-only messages are not accepted by HEC
	if strings.TrimSpace(string(msg.Line)) == "" {
		return nil
	}
	message := l.createSplunkMessage(msg)
	message.Event = string(append(l.prefix, msg.Line...))
	logger.PutMessage(msg) // msg buffers are pooled; return after last use
	return l.queueMessageAsync(message)
}
// queueMessageAsync hands the message to the background worker. The read
// lock is held across the closed-check and the channel send so that Close
// (which takes the write lock before closing the stream) cannot close the
// channel in between — sending on a closed channel would panic.
func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error {
	l.lock.RLock()
	defer l.lock.RUnlock()
	if l.closedCond != nil { // non-nil closedCond marks "Close was called"
		return fmt.Errorf("%s: driver is closed", driverName)
	}
	l.stream <- message
	return nil
}
// worker is the background goroutine started by New. It accumulates
// messages from the stream channel and flushes them to HEC either when a
// full batch has been collected or when the periodic ticker fires. When
// Close closes the stream, it performs a final flush, marks the logger
// closed, and wakes the goroutine blocked in Close.
func (l *splunkLogger) worker() {
	timer := time.NewTicker(l.postMessagesFrequency)
	// Fix: the ticker was never stopped, leaking its runtime timer after
	// the worker exits.
	defer timer.Stop()
	var messages []*splunkMessage
	for {
		select {
		case message, open := <-l.stream:
			if !open {
				// Stream closed by Close(): flush everything that is left
				// (lastChance=true) and signal Close that we are done.
				l.postMessages(messages, true)
				l.lock.Lock()
				l.transport.CloseIdleConnections()
				l.closed = true
				l.closedCond.Signal()
				l.lock.Unlock()
				return
			}
			messages = append(messages, message)
			// Only sending when we get exactly to the batch size,
			// This also helps not to fire postMessages on every new message,
			// when previous try failed.
			if len(messages)%l.postMessagesBatchSize == 0 {
				messages = l.postMessages(messages, false)
			}
		case <-timer.C:
			messages = l.postMessages(messages, false)
		}
	}
}
// postMessages tries to deliver the buffered messages to HEC in batches of
// postMessagesBatchSize and returns the slice of messages still pending.
// On a send failure the remainder is normally kept for retry; but if the
// backlog has reached bufferMaximum, or this is the final flush
// (lastChance), the undeliverable messages are written to the daemon log
// and dropped instead.
func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage {
	ctx, cancel := context.WithTimeout(context.Background(), batchSendTimeout)
	defer cancel()
	messagesLen := len(messages)
	for i := 0; i < messagesLen; i += l.postMessagesBatchSize {
		upperBound := min(i+l.postMessagesBatchSize, messagesLen)
		if err := l.tryPostMessages(ctx, messages[i:upperBound]); err != nil {
			log.G(ctx).WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs")
			if messagesLen-i >= l.bufferMaximum || lastChance {
				// If this is last chance - print them all to the daemon log
				if lastChance {
					upperBound = messagesLen
				}
				// Not all sent, but buffer has got to its maximum, let's log all messages
				// we could not send and return buffer minus one batch size
				for j := i; j < upperBound; j++ {
					if jsonEvent, err := json.Marshal(messages[j]); err != nil {
						log.G(ctx).WithError(err).Error("Failed to send a message and failed to encode to JSON")
					} else {
						log.G(ctx).WithField("message", string(jsonEvent)).Error("Failed to send a message")
					}
				}
				return messages[upperBound:messagesLen]
			}
			// Not all sent, returning buffer from where we have not sent messages
			return messages[i:messagesLen]
		}
	}
	// All sent, return empty buffer
	return messages[:0]
}
// tryPostMessages sends one batch of messages to the HEC endpoint as a
// single HTTP POST of concatenated JSON events, optionally gzip-compressed.
// It returns an error if the payload cannot be built, the request fails, or
// Splunk responds with a non-200 status (the first maxResponseSize bytes of
// the response are included in the error).
func (l *splunkLogger) tryPostMessages(ctx context.Context, messages []*splunkMessage) error {
	if len(messages) == 0 {
		return nil
	}
	var buffer bytes.Buffer
	var writer io.Writer
	var gzipWriter *gzip.Writer
	var err error
	// If gzip compression is enabled - create gzip writer with specified compression
	// level. If gzip compression is disabled, use standard buffer as a writer
	if l.gzipCompression {
		gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel)
		if err != nil {
			return err
		}
		writer = gzipWriter
	} else {
		writer = &buffer
	}
	for _, message := range messages {
		jsonEvent, err := json.Marshal(message)
		if err != nil {
			return err
		}
		if _, err := writer.Write(jsonEvent); err != nil {
			return err
		}
	}
	// If gzip compression is enabled, tell it, that we are done
	if l.gzipCompression {
		err = gzipWriter.Close()
		if err != nil {
			return err
		}
	}
	// Idiom fix: build the request with the context attached in one step
	// instead of NewRequest followed by req.WithContext (which shallow-copies
	// the request).
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, l.url, bytes.NewReader(buffer.Bytes()))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", l.auth)
	// Tell if we are sending gzip compressed body
	if l.gzipCompression {
		req.Header.Set("Content-Encoding", "gzip")
	}
	// Set the correct header if index acknowledgment is enabled
	if l.indexAck {
		requestChannel, err := uuid.NewRandom()
		if err != nil {
			return err
		}
		req.Header.Set("X-Splunk-Request-Channel", requestChannel.String())
	}
	resp, err := l.client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		// Drain and close the body to let the transport reuse the connection.
		// see https://github.com/google/go-github/pull/317/files#r57536827
		_, _ = pools.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != http.StatusOK {
		rdr := io.LimitReader(resp.Body, maxResponseSize)
		body, err := io.ReadAll(rdr)
		if err != nil {
			return err
		}
		return fmt.Errorf("%s: failed to send event - %s - %s", driverName, resp.Status, string(body))
	}
	return nil
}
// Close shuts the driver down. Under the write lock it creates closedCond
// (which doubles as the "already closing" marker checked by
// queueMessageAsync) and closes the stream, then waits on the condition
// until the worker has flushed everything and set l.closed. A second Close
// is a no-op because closedCond is already non-nil.
func (l *splunkLogger) Close() error {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.closedCond == nil {
		l.closedCond = sync.NewCond(&l.lock)
		close(l.stream)
		for !l.closed {
			l.closedCond.Wait()
		}
	}
	return nil
}
// Name returns the name of this log driver ("splunk").
func (l *splunkLogger) Name() string {
	return driverName
}
// createSplunkMessage copies the pre-filled message template and stamps it
// with the log entry's timestamp formatted as fractional epoch seconds for
// the HEC "time" field.
func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage {
	message := *l.nullMessage
	message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second))
	return &message
}
// ValidateLogOpt checks that every option in cfg is one supported by the
// splunk log driver, returning an error for the first unknown key.
func ValidateLogOpt(cfg map[string]string) error {
	for key := range cfg {
		switch key {
		case splunkURLKey, splunkTokenKey, splunkSourceKey, splunkSourceTypeKey,
			splunkIndexKey, splunkCAPathKey, splunkCANameKey,
			splunkInsecureSkipVerifyKey, splunkFormatKey,
			splunkVerifyConnectionKey, splunkGzipCompressionKey,
			splunkGzipCompressionLevelKey, splunkIndexAcknowledgment,
			envKey, envRegexKey, labelsKey, labelsRegexKey, tagKey:
			// recognized key; nothing further to validate here
		default:
			return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName)
		}
	}
	return nil
}
// parseURL extracts the HEC endpoint from the driver configuration,
// validates that it is an absolute http(s) URL with no path, query, or
// fragment, and rewrites its path to the event-collector endpoint.
func parseURL(info logger.Info) (*url.URL, error) {
	rawURL, ok := info.Config[splunkURLKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey)
	}
	parsed, err := url.Parse(rawURL)
	if err != nil {
		return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, rawURL, splunkURLKey)
	}
	schemeOK := parsed.Scheme == "http" || parsed.Scheme == "https"
	pathOK := parsed.Path == "" || parsed.Path == "/"
	if !parsed.IsAbs() || !schemeOK || !pathOK || parsed.RawQuery != "" || parsed.Fragment != "" {
		return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey)
	}
	parsed.Path = "/services/collector/event/1.0"
	return parsed, nil
}
// verifySplunkConnection probes the configured HEC endpoint with an OPTIONS
// request (no body) and returns an error unless it answers 200 OK, quoting
// up to maxResponseSize bytes of the response in the error message.
func verifySplunkConnection(l *splunkLogger) error {
	req, err := http.NewRequest(http.MethodOptions, l.url, http.NoBody)
	if err != nil {
		return err
	}
	resp, err := l.client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		// Drain and close the body to let the transport reuse the connection.
		// see https://github.com/google/go-github/pull/317/files#r57536827
		_, _ = pools.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != http.StatusOK {
		rdr := io.LimitReader(resp.Body, maxResponseSize)
		body, err := io.ReadAll(rdr)
		if err != nil {
			return err
		}
		return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, resp.Status, string(body))
	}
	return nil
}
// getAdvancedOptionDuration reads a duration override from the environment
// variable envName, returning defaultValue when the variable is unset or
// does not parse (a parse failure is logged).
func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration {
	raw := os.Getenv(envName)
	if raw == "" {
		return defaultValue
	}
	parsed, parseErr := time.ParseDuration(raw)
	if parseErr != nil {
		log.G(context.TODO()).Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, parseErr))
		return defaultValue
	}
	return parsed
}
func getAdvancedOptionInt(envName string, defaultValue int) int {
valueStr := os.Getenv(envName)
if valueStr == "" {
return defaultValue
}
parsedValue, err := strconv.ParseInt(valueStr, 10, 32)
if err != nil {
log.G(context.TODO()).Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err))
return defaultValue
}
return int(parsedValue)
} | go | github | https://github.com/moby/moby | daemon/logger/splunk/splunk.go |
import { ReactNode } from "react";
type ContainerProps = {
children: ReactNode;
};
const Container = (props: ContainerProps) => {
const { children } = props;
return <div className="container mx-auto px-5">{children}</div>;
};
export default Container; | typescript | github | https://github.com/vercel/next.js | examples/cms-cosmic/components/container.tsx |
# Copyright (C) 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import time
from functools import wraps
from itertools import chain
# Module-level logger used by the @log decorator below.
logger = logging.getLogger(__name__)
# Current call-nesting depth; rendered as one '|' per level in the trace.
tabbing = 0
def log(fn):
    """Decorate *fn* to trace calls, return values, and slow timings.

    The logger level is checked once, at decoration time: if DEBUG logging
    is not enabled when the module is imported, *fn* is returned unwrapped
    and the decorator has zero runtime cost.

    Fixes vs. the previous version:
    - ``functools.wraps`` preserves ``fn``'s name/docstring on the wrapper;
    - the elapsed-time format was ``%02f`` (a no-op width of 2), now
      ``%.2f`` as clearly intended (two decimal places).
    """
    if logger.getEffectiveLevel() > logging.DEBUG:
        return fn

    @wraps(fn)
    def wrapped(*v, **k):
        global tabbing
        name = fn.__name__
        module = fn.__module__
        filename = fn.__code__.co_filename.split('/')[-1]
        lineno = fn.__code__.co_firstlineno
        # repr() of positional args followed by keyword-arg values.
        params = ", ".join(map(repr, chain(v, k.values())))

        tabbing += 1
        start = time.time()
        retval = fn(*v, **k)
        elapsed = time.time() - start
        tabbing -= 1

        # Only annotate calls that took noticeably long (> 0.5 s).
        elapsed_time = ''
        if elapsed > 0.5:
            elapsed_time = ', took %.2f' % elapsed

        logger.debug("%s:%s\t%s%s.%s(%s), returned %s%s",
                     filename, lineno, '|' * tabbing, module, name, params, retval, elapsed_time)
        return retval
    return wrapped
/*
* Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
* Copyright (C) 2009-2011, 2014-2016, 2018, 2022, 2024, D. R. Commander.
* Copyright (C) 2015-2016, 2018, 2022, Matthieu Darbois.
*
* Based on the x86 SIMD extension for IJG JPEG library,
* Copyright (C) 1999-2006, MIYASAKA Masaru.
* For conditions of distribution and use, see copyright notice in jsimdext.inc
*
* This file contains the interface between the "normal" portions
* of the library and the SIMD implementations when running on a
* PowerPC architecture.
*/
#ifdef __amigaos4__
/* This must be defined first as it re-defines GLOBAL otherwise */
#include <proto/exec.h>
#endif
#define JPEG_INTERNALS
#include "../../src/jinclude.h"
#include "../../src/jpeglib.h"
#include "../../src/jsimd.h"
#include "../../src/jdct.h"
#include "../../src/jsimddct.h"
#include "../jsimd.h"
#include <ctype.h>
#if defined(__APPLE__)
#include <sys/types.h>
#include <sys/sysctl.h>
#elif defined(__OpenBSD__)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#elif defined(__FreeBSD__)
#include <machine/cpu.h>
#include <sys/auxv.h>
#endif
/* Bitmask of detected SIMD features (JSIMD_* flags).  The sentinel ~0 means
   "not yet probed" and triggers detection on first use in init_simd(); the
   value is thread-local, so each thread probes once. */
static THREAD_LOCAL unsigned int simd_support = ~0;

#if !defined(__ALTIVEC__) && (defined(__linux__) || defined(ANDROID) || defined(__ANDROID__))

/* Upper bound on the /proc/cpuinfo line-buffer growth in init_simd(). */
#define SOMEWHAT_SANE_PROC_CPUINFO_SIZE_LIMIT  (1024 * 1024)
/*
 * Return 1 if 'feature' occurs as a standalone, whitespace-delimited word
 * in 'buffer', which must be a "cpu..." line from /proc/cpuinfo; return 0
 * otherwise (including for an empty feature name or a non-"cpu" line).
 */
LOCAL(int)
check_feature(char *buffer, char *feature)
{
  char *p;

  if (*feature == 0)
    return 0;
  if (strncmp(buffer, "cpu", 3) != 0)
    return 0;
  buffer += 3;
  while (isspace(*buffer))
    buffer++;

  /* Check if 'feature' is present in the buffer as a separate word */
  while ((p = strstr(buffer, feature))) {
    /* Reject matches that are a suffix of a longer word... */
    if (p > buffer && !isspace(*(p - 1))) {
      buffer++;
      continue;
    }
    /* ...or a prefix of a longer word. */
    p += strlen(feature);
    if (*p != 0 && !isspace(*p)) {
      buffer++;
      continue;
    }
    return 1;
  }

  return 0;
}
/*
 * Scan /proc/cpuinfo for the "altivec" CPU feature, OR-ing JSIMD_ALTIVEC
 * into simd_support when found (simd_support is reset to 0 first).
 * 'bufsize' is the line-buffer size to use: the function returns 0 when a
 * line does not fit, so the caller can retry with a larger buffer, and 1
 * once the file has been fully processed (or could not be opened, leaving
 * simd_support at 0).
 */
LOCAL(int)
parse_proc_cpuinfo(int bufsize)
{
  char *buffer = (char *)malloc(bufsize);
  FILE *fd;

  simd_support = 0;

  if (!buffer)
    return 0;

  fd = fopen("/proc/cpuinfo", "r");
  if (fd) {
    while (fgets(buffer, bufsize, fd)) {
      if (!strchr(buffer, '\n') && !feof(fd)) {
        /* "impossible" happened - insufficient size of the buffer! */
        fclose(fd);
        free(buffer);
        return 0;
      }
      if (check_feature(buffer, "altivec"))
        simd_support |= JSIMD_ALTIVEC;
    }
    fclose(fd);
  }

  free(buffer);
  return 1;
}
#endif
/*
 * Check what SIMD accelerations are supported.
 *
 * Lazily probes for AltiVec the first time it is called per thread
 * (simd_support is thread-local), using whichever mechanism the platform
 * provides: compile-time __ALTIVEC__, /proc/cpuinfo parsing on Linux or
 * Android, or an OS-specific query on AmigaOS, macOS, OpenBSD, or FreeBSD.
 * The JSIMD_FORCEALTIVEC / JSIMD_FORCENONE environment variables override
 * the detected result.
 */
LOCAL(void)
init_simd(void)
{
#ifndef NO_GETENV
  char *env = NULL;
#endif
#if !defined(__ALTIVEC__) && (defined(__linux__) || defined(ANDROID) || defined(__ANDROID__))
  int bufsize = 1024; /* an initial guess for the line buffer size limit */
#elif defined(__amigaos4__)
  uint32 altivec = 0;
#elif defined(__APPLE__)
  int mib[2] = { CTL_HW, HW_VECTORUNIT };
  int altivec;
  size_t len = sizeof(altivec);
#elif defined(__OpenBSD__)
  int mib[2] = { CTL_MACHDEP, CPU_ALTIVEC };
  int altivec;
  size_t len = sizeof(altivec);
#elif defined(__FreeBSD__)
  unsigned long cpufeatures = 0;
#endif

  if (simd_support != ~0U)
    return;                     /* already probed in this thread */

  simd_support = 0;

#if defined(__ALTIVEC__)
  simd_support |= JSIMD_ALTIVEC;
#elif defined(__linux__) || defined(ANDROID) || defined(__ANDROID__)
  /* Double the line buffer until /proc/cpuinfo parses cleanly or the
     sanity limit is reached. */
  while (!parse_proc_cpuinfo(bufsize)) {
    bufsize *= 2;
    if (bufsize > SOMEWHAT_SANE_PROC_CPUINFO_SIZE_LIMIT)
      break;
  }
#elif defined(__amigaos4__)
  IExec->GetCPUInfoTags(GCIT_VectorUnit, &altivec, TAG_DONE);
  if (altivec == VECTORTYPE_ALTIVEC)
    simd_support |= JSIMD_ALTIVEC;
#elif defined(__APPLE__) || defined(__OpenBSD__)
  if (sysctl(mib, 2, &altivec, &len, NULL, 0) == 0 && altivec != 0)
    simd_support |= JSIMD_ALTIVEC;
#elif defined(__FreeBSD__)
  elf_aux_info(AT_HWCAP, &cpufeatures, sizeof(cpufeatures));
  if (cpufeatures & PPC_FEATURE_HAS_ALTIVEC)
    simd_support |= JSIMD_ALTIVEC;
#endif

#ifndef NO_GETENV
  /* Force different settings through environment variables */
  env = getenv("JSIMD_FORCEALTIVEC");
  if ((env != NULL) && (strcmp(env, "1") == 0))
    simd_support = JSIMD_ALTIVEC;
  env = getenv("JSIMD_FORCENONE");
  if ((env != NULL) && (strcmp(env, "1") == 0))
    simd_support = 0;
#endif
}
/* Report whether the AltiVec RGB->YCbCr color converter is usable with the
   current build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_rgb_ycc(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4 ||
      (RGB_PIXELSIZE != 3 && RGB_PIXELSIZE != 4))
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* Report whether the AltiVec RGB->grayscale converter is usable with the
   current build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_rgb_gray(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4 ||
      (RGB_PIXELSIZE != 3 && RGB_PIXELSIZE != 4))
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* Report whether the AltiVec YCbCr->RGB converter is usable with the
   current build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_ycc_rgb(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4 ||
      (RGB_PIXELSIZE != 3 && RGB_PIXELSIZE != 4))
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* YCbCr->RGB565 conversion is not SIMD-accelerated in this port. */
GLOBAL(int)
jsimd_can_ycc_rgb565(void)
{
  return 0;
}
/* Convert num_rows rows of packed RGB to planar YCbCr, dispatching to the
   AltiVec routine that matches the input colorspace's component order. */
GLOBAL(void)
jsimd_rgb_ycc_convert(j_compress_ptr cinfo, JSAMPARRAY input_buf,
                      JSAMPIMAGE output_buf, JDIMENSION output_row,
                      int num_rows)
{
  void (*altivecfct) (JDIMENSION, JSAMPARRAY, JSAMPIMAGE, JDIMENSION, int);

  switch (cinfo->in_color_space) {
  case JCS_EXT_RGB:
    altivecfct = jsimd_extrgb_ycc_convert_altivec;
    break;
  case JCS_EXT_RGBX:
  case JCS_EXT_RGBA:
    altivecfct = jsimd_extrgbx_ycc_convert_altivec;
    break;
  case JCS_EXT_BGR:
    altivecfct = jsimd_extbgr_ycc_convert_altivec;
    break;
  case JCS_EXT_BGRX:
  case JCS_EXT_BGRA:
    altivecfct = jsimd_extbgrx_ycc_convert_altivec;
    break;
  case JCS_EXT_XBGR:
  case JCS_EXT_ABGR:
    altivecfct = jsimd_extxbgr_ycc_convert_altivec;
    break;
  case JCS_EXT_XRGB:
  case JCS_EXT_ARGB:
    altivecfct = jsimd_extxrgb_ycc_convert_altivec;
    break;
  default:
    altivecfct = jsimd_rgb_ycc_convert_altivec;
    break;
  }

  altivecfct(cinfo->image_width, input_buf, output_buf, output_row, num_rows);
}
/* Convert num_rows rows of packed RGB to grayscale, dispatching to the
   AltiVec routine that matches the input colorspace's component order. */
GLOBAL(void)
jsimd_rgb_gray_convert(j_compress_ptr cinfo, JSAMPARRAY input_buf,
                       JSAMPIMAGE output_buf, JDIMENSION output_row,
                       int num_rows)
{
  void (*altivecfct) (JDIMENSION, JSAMPARRAY, JSAMPIMAGE, JDIMENSION, int);

  switch (cinfo->in_color_space) {
  case JCS_EXT_RGB:
    altivecfct = jsimd_extrgb_gray_convert_altivec;
    break;
  case JCS_EXT_RGBX:
  case JCS_EXT_RGBA:
    altivecfct = jsimd_extrgbx_gray_convert_altivec;
    break;
  case JCS_EXT_BGR:
    altivecfct = jsimd_extbgr_gray_convert_altivec;
    break;
  case JCS_EXT_BGRX:
  case JCS_EXT_BGRA:
    altivecfct = jsimd_extbgrx_gray_convert_altivec;
    break;
  case JCS_EXT_XBGR:
  case JCS_EXT_ABGR:
    altivecfct = jsimd_extxbgr_gray_convert_altivec;
    break;
  case JCS_EXT_XRGB:
  case JCS_EXT_ARGB:
    altivecfct = jsimd_extxrgb_gray_convert_altivec;
    break;
  default:
    altivecfct = jsimd_rgb_gray_convert_altivec;
    break;
  }

  altivecfct(cinfo->image_width, input_buf, output_buf, output_row, num_rows);
}
/* Convert num_rows rows of planar YCbCr to packed RGB, dispatching to the
   AltiVec routine that matches the output colorspace's component order. */
GLOBAL(void)
jsimd_ycc_rgb_convert(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                      JDIMENSION input_row, JSAMPARRAY output_buf,
                      int num_rows)
{
  void (*altivecfct) (JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY, int);

  switch (cinfo->out_color_space) {
  case JCS_EXT_RGB:
    altivecfct = jsimd_ycc_extrgb_convert_altivec;
    break;
  case JCS_EXT_RGBX:
  case JCS_EXT_RGBA:
    altivecfct = jsimd_ycc_extrgbx_convert_altivec;
    break;
  case JCS_EXT_BGR:
    altivecfct = jsimd_ycc_extbgr_convert_altivec;
    break;
  case JCS_EXT_BGRX:
  case JCS_EXT_BGRA:
    altivecfct = jsimd_ycc_extbgrx_convert_altivec;
    break;
  case JCS_EXT_XBGR:
  case JCS_EXT_ABGR:
    altivecfct = jsimd_ycc_extxbgr_convert_altivec;
    break;
  case JCS_EXT_XRGB:
  case JCS_EXT_ARGB:
    altivecfct = jsimd_ycc_extxrgb_convert_altivec;
    break;
  default:
    altivecfct = jsimd_ycc_rgb_convert_altivec;
    break;
  }

  altivecfct(cinfo->output_width, input_buf, input_row, output_buf, num_rows);
}
/* Stub: should be unreachable, since jsimd_can_ycc_rgb565() returns 0. */
GLOBAL(void)
jsimd_ycc_rgb565_convert(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                         JDIMENSION input_row, JSAMPARRAY output_buf,
                         int num_rows)
{
}
/* Report whether the AltiVec h2v2 (2x2) downsampler is usable with the
   current build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_h2v2_downsample(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4)
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* Report whether the AltiVec h2v1 (2x1) downsampler is usable with the
   current build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_h2v1_downsample(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4)
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* h2v2 (2x2) chroma downsampling via the AltiVec implementation. */
GLOBAL(void)
jsimd_h2v2_downsample(j_compress_ptr cinfo, jpeg_component_info *compptr,
                      JSAMPARRAY input_data, JSAMPARRAY output_data)
{
  jsimd_h2v2_downsample_altivec(cinfo->image_width, cinfo->max_v_samp_factor,
                                compptr->v_samp_factor,
                                compptr->width_in_blocks, input_data,
                                output_data);
}
/* h2v1 (2x1) chroma downsampling via the AltiVec implementation. */
GLOBAL(void)
jsimd_h2v1_downsample(j_compress_ptr cinfo, jpeg_component_info *compptr,
                      JSAMPARRAY input_data, JSAMPARRAY output_data)
{
  jsimd_h2v1_downsample_altivec(cinfo->image_width, cinfo->max_v_samp_factor,
                                compptr->v_samp_factor,
                                compptr->width_in_blocks, input_data,
                                output_data);
}
/* Report whether the AltiVec h2v2 upsampler is usable with the current
   build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_h2v2_upsample(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4)
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* Report whether the AltiVec h2v1 upsampler is usable with the current
   build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_h2v1_upsample(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4)
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* Plain (non-fancy) h2v2 chroma upsampling via the AltiVec implementation. */
GLOBAL(void)
jsimd_h2v2_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                    JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
{
  jsimd_h2v2_upsample_altivec(cinfo->max_v_samp_factor, cinfo->output_width,
                              input_data, output_data_ptr);
}
/* Plain (non-fancy) h2v1 chroma upsampling via the AltiVec implementation. */
GLOBAL(void)
jsimd_h2v1_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                    JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
{
  jsimd_h2v1_upsample_altivec(cinfo->max_v_samp_factor, cinfo->output_width,
                              input_data, output_data_ptr);
}
/* Report whether the AltiVec fancy h2v2 upsampler is usable with the
   current build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_h2v2_fancy_upsample(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4)
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* Report whether the AltiVec fancy h2v1 upsampler is usable with the
   current build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_h2v1_fancy_upsample(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4)
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* Fancy (triangular-filter) h2v2 chroma upsampling via AltiVec. */
GLOBAL(void)
jsimd_h2v2_fancy_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                          JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
{
  jsimd_h2v2_fancy_upsample_altivec(cinfo->max_v_samp_factor,
                                    compptr->downsampled_width, input_data,
                                    output_data_ptr);
}
/* Fancy (triangular-filter) h2v1 chroma upsampling via AltiVec. */
GLOBAL(void)
jsimd_h2v1_fancy_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                          JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
{
  jsimd_h2v1_fancy_upsample_altivec(cinfo->max_v_samp_factor,
                                    compptr->downsampled_width, input_data,
                                    output_data_ptr);
}
/* Report whether the AltiVec merged h2v2 upsample/color-convert path is
   usable with the current build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_h2v2_merged_upsample(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4)
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* Report whether the AltiVec merged h2v1 upsample/color-convert path is
   usable with the current build parameters and detected CPU features. */
GLOBAL(int)
jsimd_can_h2v1_merged_upsample(void)
{
  init_simd();

  /* The code is optimised for these values only */
  if (BITS_IN_JSAMPLE != 8 || sizeof(JDIMENSION) != 4)
    return 0;

  return (simd_support & JSIMD_ALTIVEC) ? 1 : 0;
}
/* Merged h2v2 upsampling + YCbCr->RGB conversion, dispatching to the
   AltiVec routine that matches the output colorspace's component order. */
GLOBAL(void)
jsimd_h2v2_merged_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                           JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf)
{
  void (*altivecfct) (JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY);

  switch (cinfo->out_color_space) {
  case JCS_EXT_RGB:
    altivecfct = jsimd_h2v2_extrgb_merged_upsample_altivec;
    break;
  case JCS_EXT_RGBX:
  case JCS_EXT_RGBA:
    altivecfct = jsimd_h2v2_extrgbx_merged_upsample_altivec;
    break;
  case JCS_EXT_BGR:
    altivecfct = jsimd_h2v2_extbgr_merged_upsample_altivec;
    break;
  case JCS_EXT_BGRX:
  case JCS_EXT_BGRA:
    altivecfct = jsimd_h2v2_extbgrx_merged_upsample_altivec;
    break;
  case JCS_EXT_XBGR:
  case JCS_EXT_ABGR:
    altivecfct = jsimd_h2v2_extxbgr_merged_upsample_altivec;
    break;
  case JCS_EXT_XRGB:
  case JCS_EXT_ARGB:
    altivecfct = jsimd_h2v2_extxrgb_merged_upsample_altivec;
    break;
  default:
    altivecfct = jsimd_h2v2_merged_upsample_altivec;
    break;
  }

  altivecfct(cinfo->output_width, input_buf, in_row_group_ctr, output_buf);
}
/* Merged h2v1 upsampling + YCbCr->RGB conversion, dispatching to the
   AltiVec routine that matches the output colorspace's component order. */
GLOBAL(void)
jsimd_h2v1_merged_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                           JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf)
{
  void (*altivecfct) (JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY);

  switch (cinfo->out_color_space) {
  case JCS_EXT_RGB:
    altivecfct = jsimd_h2v1_extrgb_merged_upsample_altivec;
    break;
  case JCS_EXT_RGBX:
  case JCS_EXT_RGBA:
    altivecfct = jsimd_h2v1_extrgbx_merged_upsample_altivec;
    break;
  case JCS_EXT_BGR:
    altivecfct = jsimd_h2v1_extbgr_merged_upsample_altivec;
    break;
  case JCS_EXT_BGRX:
  case JCS_EXT_BGRA:
    altivecfct = jsimd_h2v1_extbgrx_merged_upsample_altivec;
    break;
  case JCS_EXT_XBGR:
  case JCS_EXT_ABGR:
    altivecfct = jsimd_h2v1_extxbgr_merged_upsample_altivec;
    break;
  case JCS_EXT_XRGB:
  case JCS_EXT_ARGB:
    altivecfct = jsimd_h2v1_extxrgb_merged_upsample_altivec;
    break;
  default:
    altivecfct = jsimd_h2v1_merged_upsample_altivec;
    break;
  }

  altivecfct(cinfo->output_width, input_buf, in_row_group_ctr, output_buf);
}
GLOBAL(int)
jsimd_can_convsamp(void)
{
  init_simd();

  /* AltiVec sample conversion requires the standard 8x8 DCT, 8-bit
   * samples, 32-bit JDIMENSION and 16-bit DCTELEM. */
  return DCTSIZE == 8 && BITS_IN_JSAMPLE == 8 &&
         sizeof(JDIMENSION) == 4 && sizeof(DCTELEM) == 2 &&
         (simd_support & JSIMD_ALTIVEC) != 0;
}
GLOBAL(int)
jsimd_can_convsamp_float(void)
{
  /* No AltiVec implementation of the float sample converter. */
  return 0;
}
GLOBAL(void)
jsimd_convsamp(JSAMPARRAY sample_data, JDIMENSION start_col,
               DCTELEM *workspace)
{
  /* Delegate sample conversion to the AltiVec kernel. */
  jsimd_convsamp_altivec(sample_data, start_col, workspace);
}
GLOBAL(void)
jsimd_convsamp_float(JSAMPARRAY sample_data, JDIMENSION start_col,
                     FAST_FLOAT *workspace)
{
  /* Empty stub; jsimd_can_convsamp_float() returns 0, so this is never
   * selected. */
}
GLOBAL(int)
jsimd_can_fdct_islow(void)
{
  init_simd();

  /* AltiVec slow-integer FDCT requires 8x8 blocks and 16-bit DCTELEM. */
  return DCTSIZE == 8 && sizeof(DCTELEM) == 2 &&
         (simd_support & JSIMD_ALTIVEC) != 0;
}
GLOBAL(int)
jsimd_can_fdct_ifast(void)
{
  init_simd();

  /* AltiVec fast-integer FDCT requires 8x8 blocks and 16-bit DCTELEM. */
  return DCTSIZE == 8 && sizeof(DCTELEM) == 2 &&
         (simd_support & JSIMD_ALTIVEC) != 0;
}
GLOBAL(int)
jsimd_can_fdct_float(void)
{
  /* No AltiVec implementation of the float FDCT. */
  return 0;
}
GLOBAL(void)
jsimd_fdct_islow(DCTELEM *data)
{
  /* Delegate to the AltiVec slow-integer forward DCT. */
  jsimd_fdct_islow_altivec(data);
}
GLOBAL(void)
jsimd_fdct_ifast(DCTELEM *data)
{
  /* Delegate to the AltiVec fast-integer forward DCT. */
  jsimd_fdct_ifast_altivec(data);
}
GLOBAL(void)
jsimd_fdct_float(FAST_FLOAT *data)
{
  /* Empty stub; jsimd_can_fdct_float() returns 0, so this is never
   * selected. */
}
GLOBAL(int)
jsimd_can_quantize(void)
{
  init_simd();

  /* AltiVec quantization requires 8x8 blocks with 16-bit coefficients
   * and DCT elements. */
  return DCTSIZE == 8 && sizeof(JCOEF) == 2 && sizeof(DCTELEM) == 2 &&
         (simd_support & JSIMD_ALTIVEC) != 0;
}
GLOBAL(int)
jsimd_can_quantize_float(void)
{
  /* No AltiVec implementation of float quantization. */
  return 0;
}
GLOBAL(void)
jsimd_quantize(JCOEFPTR coef_block, DCTELEM *divisors, DCTELEM *workspace)
{
  /* Delegate coefficient quantization to the AltiVec kernel. */
  jsimd_quantize_altivec(coef_block, divisors, workspace);
}
GLOBAL(void)
jsimd_quantize_float(JCOEFPTR coef_block, FAST_FLOAT *divisors,
                     FAST_FLOAT *workspace)
{
  /* Empty stub; jsimd_can_quantize_float() returns 0, so this is never
   * selected. */
}
GLOBAL(int)
jsimd_can_idct_2x2(void)
{
  /* No AltiVec implementation of the reduced-size 2x2 IDCT. */
  return 0;
}
GLOBAL(int)
jsimd_can_idct_4x4(void)
{
  /* No AltiVec implementation of the reduced-size 4x4 IDCT. */
  return 0;
}
GLOBAL(void)
jsimd_idct_2x2(j_decompress_ptr cinfo, jpeg_component_info *compptr,
               JCOEFPTR coef_block, JSAMPARRAY output_buf,
               JDIMENSION output_col)
{
  /* Empty stub; jsimd_can_idct_2x2() returns 0, so this is never
   * selected. */
}
GLOBAL(void)
jsimd_idct_4x4(j_decompress_ptr cinfo, jpeg_component_info *compptr,
               JCOEFPTR coef_block, JSAMPARRAY output_buf,
               JDIMENSION output_col)
{
  /* Empty stub; jsimd_can_idct_4x4() returns 0, so this is never
   * selected. */
}
GLOBAL(int)
jsimd_can_idct_islow(void)
{
  init_simd();

  /* AltiVec slow-integer IDCT requires 8x8 blocks and 16-bit JCOEF. */
  return DCTSIZE == 8 && sizeof(JCOEF) == 2 &&
         (simd_support & JSIMD_ALTIVEC) != 0;
}
GLOBAL(int)
jsimd_can_idct_ifast(void)
{
  init_simd();

  /* AltiVec fast-integer IDCT requires 8x8 blocks and 16-bit JCOEF. */
  return DCTSIZE == 8 && sizeof(JCOEF) == 2 &&
         (simd_support & JSIMD_ALTIVEC) != 0;
}
GLOBAL(int)
jsimd_can_idct_float(void)
{
  /* No AltiVec implementation of the float IDCT. */
  return 0;
}
GLOBAL(void)
jsimd_idct_islow(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  /* Delegate to the AltiVec slow-integer IDCT; only the component's
   * dequantization table is needed from compptr. */
  jsimd_idct_islow_altivec(compptr->dct_table, coef_block, output_buf,
                           output_col);
}
GLOBAL(void)
jsimd_idct_ifast(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  /* Delegate to the AltiVec fast-integer IDCT. */
  jsimd_idct_ifast_altivec(compptr->dct_table, coef_block, output_buf,
                           output_col);
}
GLOBAL(void)
jsimd_idct_float(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  /* Empty stub; jsimd_can_idct_float() returns 0, so this is never
   * selected. */
}
GLOBAL(int)
jsimd_can_huff_encode_one_block(void)
{
  /* No AltiVec implementation of Huffman block encoding. */
  return 0;
}
GLOBAL(JOCTET *)
jsimd_huff_encode_one_block(void *state, JOCTET *buffer, JCOEFPTR block,
                            int last_dc_val, c_derived_tbl *dctbl,
                            c_derived_tbl *actbl)
{
  /* Stub; jsimd_can_huff_encode_one_block() returns 0, so this is never
   * selected. */
  return NULL;
}
GLOBAL(int)
jsimd_can_encode_mcu_AC_first_prepare(void)
{
  /* No AltiVec implementation of progressive AC-first preparation. */
  return 0;
}
GLOBAL(void)
jsimd_encode_mcu_AC_first_prepare(const JCOEF *block,
                                  const int *jpeg_natural_order_start, int Sl,
                                  int Al, UJCOEF *values, size_t *zerobits)
{
  /* Empty stub; jsimd_can_encode_mcu_AC_first_prepare() returns 0, so
   * this is never selected. */
}
GLOBAL(int)
jsimd_can_encode_mcu_AC_refine_prepare(void)
{
  /* No AltiVec implementation of progressive AC-refine preparation. */
  return 0;
}
GLOBAL(int)
jsimd_encode_mcu_AC_refine_prepare(const JCOEF *block,
                                   const int *jpeg_natural_order_start, int Sl,
                                   int Al, UJCOEF *absvalues, size_t *bits)
{
  /* Stub; jsimd_can_encode_mcu_AC_refine_prepare() returns 0, so this is
   * never selected. */
  return 0;
}
#!/usr/bin/env python
import os
import pexpect
import sys
from ansible.module_utils.six import PY2
# pexpect writes raw bytes to its logfile, so on Python 3 we must hand it
# the underlying binary buffer rather than the text-mode sys.stdout.
if PY2:
    log_buffer = sys.stdout
else:
    log_buffer = sys.stdout.buffer

# Environment overrides applied to every ansible-playbook invocation below.
env_vars = {
    'ANSIBLE_ROLES_PATH': './roles',
    'ANSIBLE_NOCOLOR': 'True',
    'ANSIBLE_RETRY_FILES_ENABLED': 'False',
}
def run_test(playbook, test_spec, args=None, timeout=10, env=None):
    """Drive one ansible-playbook run under pexpect through test_spec.

    test_spec is a two-element list: element 0 is a list of
    (expect_regexp, text_to_send) tuples replayed in order, and element 1
    is the final string expected in the output before EOF.
    """
    if not env:
        env = os.environ.copy()
        env.update(env_vars)
    if not args:
        args = sys.argv[1:]

    child = pexpect.spawn(
        'ansible-playbook',
        args=[playbook] + args,
        timeout=timeout,
        env=env,
    )
    child.logfile = log_buffer

    for pattern, response in test_spec[0]:
        child.expect(pattern)
        if response:
            child.send(response)

    child.expect(test_spec[1])
    child.expect(pexpect.EOF)
    child.close()
# These are the tests to run. Each test is a playbook and a test_spec.
#
# The test_spec is a list with two elements.
#
# The first element is a list of two-element tuples. The first is the regexp
# to look for in the output, the second is the line to send in response.
#
# The last element is the last string of text to look for in the output.
#
tests = [

    # Basic vars_prompt
    {'playbook': 'vars_prompt-1.yml',
     'test_spec': [
         [('input:', 'some input\r')],
         '"input": "some input"']},

    # Custom prompt
    {'playbook': 'vars_prompt-2.yml',
     'test_spec': [
         [('Enter some input:', 'some more input\r')],
         '"input": "some more input"']},

    # Test confirm, both correct and incorrect
    {'playbook': 'vars_prompt-3.yml',
     'test_spec': [
         [('input:', 'confirm me\r'),
          ('confirm input:', 'confirm me\r')],
         '"input": "confirm me"']},

    {'playbook': 'vars_prompt-3.yml',
     'test_spec': [
         [('input:', 'confirm me\r'),
          ('confirm input:', 'incorrect\r'),
          (r'\*\*\*\*\* VALUES ENTERED DO NOT MATCH \*\*\*\*', ''),
          ('input:', 'confirm me\r'),
          ('confirm input:', 'confirm me\r')],
         '"input": "confirm me"']},

    # Test private
    {'playbook': 'vars_prompt-4.yml',
     'test_spec': [
         [('not_secret', 'this is displayed\r'),
          ('this is displayed', '')],
         '"not_secret": "this is displayed"']},

    # Test hashing
    {'playbook': 'vars_prompt-5.yml',
     'test_spec': [
         [('password', 'Scenic-Improving-Payphone\r'),
          ('confirm password', 'Scenic-Improving-Payphone\r')],
         r'"password": "\$6\$']},

    # Test variables in prompt field
    # https://github.com/ansible/ansible/issues/32723
    {'playbook': 'vars_prompt-6.yml',
     'test_spec': [
         [('prompt from variable:', 'input\r')],
         '']},

    # Test play vars coming from vars_prompt
    # https://github.com/ansible/ansible/issues/37984
    {'playbook': 'vars_prompt-7.yml',
     'test_spec': [
         [('prompting for host:', 'testhost\r')],
         r'testhost.*ok=1']},
]

# Run every scenario; run_test raises (via pexpect) on any mismatch.
for t in tests:
    run_test(playbook=t['playbook'], test_spec=t['test_spec'])
"""distutils.msvccompiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: msvccompiler.py 62197 2008-04-07 01:53:39Z mark.hammond $"
import sys, os, string
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils import log
_can_read_reg = 0
# Probe for a usable registry-access module: prefer the stdlib _winreg,
# fall back to the pywin32 modules, and degrade gracefully (registry
# lookups disabled) when neither is importable.
try:
    import _winreg

    _can_read_reg = 1
    hkey_mod = _winreg

    RegOpenKeyEx = _winreg.OpenKeyEx
    RegEnumKey = _winreg.EnumKey
    RegEnumValue = _winreg.EnumValue
    RegError = _winreg.error

except ImportError:
    try:
        import win32api
        import win32con
        _can_read_reg = 1
        hkey_mod = win32con

        RegOpenKeyEx = win32api.RegOpenKeyEx
        RegEnumKey = win32api.RegEnumKey
        RegEnumValue = win32api.RegEnumValue
        RegError = win32api.error

    except ImportError:
        log.info("Warning: Can't read registry to find the "
                 "necessary compiler setting\n"
                 "Make sure that Python modules _winreg, "
                 "win32api or win32con are installed.")
        pass

if _can_read_reg:
    # Registry hives searched, in order, by read_keys()/read_values().
    HKEYS = (hkey_mod.HKEY_USERS,
             hkey_mod.HKEY_CURRENT_USER,
             hkey_mod.HKEY_LOCAL_MACHINE,
             hkey_mod.HKEY_CLASSES_ROOT)
def read_keys(base, key):
    """Return the list of subkey names under `key`, or None if the key
    cannot be opened."""
    try:
        handle = RegOpenKeyEx(base, key)
    except RegError:
        return None
    names = []
    index = 0
    while 1:
        try:
            names.append(RegEnumKey(handle, index))
        except RegError:
            # RegEnumKey raises once the index runs past the last subkey.
            break
        index = index + 1
    return names
def read_values(base, key):
    """Return a dict mapping value names to data under `key`, or None if
    the key cannot be opened.

    Value names are lowercased, and both names and data are passed
    through convert_mbcs().
    """
    try:
        handle = RegOpenKeyEx(base, key)
    except RegError:
        return None
    values = {}
    index = 0
    while 1:
        try:
            name, data, type = RegEnumValue(handle, index)
        except RegError:
            # Enumeration past the last value raises; we are done.
            break
        values[convert_mbcs(name.lower())] = convert_mbcs(data)
        index = index + 1
    return values
def convert_mbcs(s):
    """Best-effort encode of `s` to the Windows "mbcs" codec.

    Objects without an encode() method, and strings that fail to encode,
    are returned unchanged.
    """
    encode = getattr(s, "encode", None)
    if encode is None:
        return s
    try:
        return encode("mbcs")
    except UnicodeError:
        return s
class MacroExpander:
    # Expands Visual-Studio-style "$(Name)" macros in registry path values,
    # using product/framework directories read from the registry.

    def __init__(self, version):
        # macros maps "$(Name)" -> expansion text; populated from the
        # registry for the given Visual Studio version.
        self.macros = {}
        self.load_macros(version)

    def set_macro(self, macro, path, key):
        # Record the first hive in HKEYS that yields a value for `key`
        # under `path`.
        for base in HKEYS:
            d = read_values(base, path)
            if d:
                self.macros["$(%s)" % macro] = d[key]
                break

    def load_macros(self, version):
        vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
        self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
        self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
        net = r"Software\Microsoft\.NETFramework"
        self.set_macro("FrameworkDir", net, "installroot")
        try:
            if version > 7.0:
                self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
            else:
                self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
        except KeyError, exc: # registry entry for the SDK root is missing
            raise DistutilsPlatformError, \
                  ("""Python was built with Visual Studio 2003;
extensions must be built with a compiler than can generate compatible binaries.
Visual Studio 2003 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")

        # FrameworkVersion comes from the first enumerable product key.
        p = r"Software\Microsoft\NET Framework Setup\Product"
        for base in HKEYS:
            try:
                h = RegOpenKeyEx(base, p)
            except RegError:
                continue
            key = RegEnumKey(h, 0)
            d = read_values(base, r"%s\%s" % (p, key))
            self.macros["$(FrameworkVersion)"] = d["version"]

    def sub(self, s):
        """Return `s` with every known "$(Name)" macro expanded."""
        for k, v in self.macros.items():
            s = string.replace(s, k, v)
        return s
def get_build_version():
    """Return the version of MSVC that was used to build Python.

    For Python 2.3 and up, the version number is included in
    sys.version. For earlier versions, assume the compiler is MSVC 6.
    """
    marker = "MSC v."
    pos = sys.version.find(marker)
    if pos == -1:
        return 6
    pos = pos + len(marker)
    ver_str, rest = sys.version[pos:].split(" ", 1)
    major = int(ver_str[:-2]) - 6
    minor = int(ver_str[2:3]) / 10.0
    # I don't think paths are affected by minor version in version 6
    if major == 6:
        minor = 0
    if major >= 6:
        return major + minor
    # else we don't know what version of the compiler this is
    return None
def get_build_architecture():
    """Return the processor architecture Python was built for.

    Possible results are "Intel", "Itanium", or "AMD64".
    """
    marker = " bit ("
    start = sys.version.find(marker)
    if start == -1:
        return "Intel"
    end = sys.version.find(")", start)
    return sys.version[start + len(marker):end]
def normalize_and_reduce_paths(paths):
    """Return `paths` with each entry normalized and duplicates dropped.

    The first occurrence of each normalized path wins, so the relative
    ordering of the input is preserved.
    """
    # Normalizing first means e.g. "/a" and "/a/" collapse to one entry.
    unique = []
    for entry in paths:
        normalized = os.path.normpath(entry)
        if normalized not in unique:
            unique.append(normalized)
    return unique
class MSVCCompiler (CCompiler) :
    """Concrete class that implements an interface to Microsoft Visual C++,
       as defined by the CCompiler abstract class."""

    compiler_type = 'msvc'

    # Just set this so CCompiler's constructor doesn't barf.  We currently
    # don't use the 'set_executables()' bureaucracy provided by CCompiler,
    # as it really isn't necessary for this sort of single-compiler class.
    # Would be nice to have a consistent interface with UnixCCompiler,
    # though, so it's worth thinking about.
    executables = {}

    # Private class data (need to distinguish C from C++ source for compiler)
    _c_extensions = ['.c']
    _cpp_extensions = ['.cc', '.cpp', '.cxx']
    _rc_extensions = ['.rc']
    _mc_extensions = ['.mc']

    # Needed for the filename generation methods provided by the
    # base class, CCompiler.
    src_extensions = (_c_extensions + _cpp_extensions +
                      _rc_extensions + _mc_extensions)
    res_extension = '.res'
    obj_extension = '.obj'
    static_lib_extension = '.lib'
    shared_lib_extension = '.dll'
    static_lib_format = shared_lib_format = '%s%s'
    exe_extension = '.exe'

    def __init__ (self, verbose=0, dry_run=0, force=0):
        CCompiler.__init__ (self, verbose, dry_run, force)
        self.__version = get_build_version()
        self.__arch = get_build_architecture()
        if self.__arch == "Intel":
            # x86
            if self.__version >= 7:
                self.__root = r"Software\Microsoft\VisualStudio"
                self.__macros = MacroExpander(self.__version)
            else:
                self.__root = r"Software\Microsoft\Devstudio"
            self.__product = "Visual Studio version %s" % self.__version
        else:
            # Win64. Assume this was built with the platform SDK
            self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)

        # Tool discovery is deferred to initialize(), which runs on first
        # compile/link rather than at construction time.
        self.initialized = False

    def initialize(self):
        # Locate cl/link/lib/rc/mc and prepare compile/link option sets.
        # NOTE: this mutates os.environ ('path', 'lib', 'include') for the
        # benefit of the spawned tools.
        self.__paths = []
        if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
            # Assume that the SDK set up everything alright; don't try to be
            # smarter
            self.cc = "cl.exe"
            self.linker = "link.exe"
            self.lib = "lib.exe"
            self.rc = "rc.exe"
            self.mc = "mc.exe"
        else:
            self.__paths = self.get_msvc_paths("path")

            if len (self.__paths) == 0:
                raise DistutilsPlatformError, \
                      ("Python was built with %s, "
                       "and extensions need to be built with the same "
                       "version of the compiler, but it isn't installed." % self.__product)

            self.cc = self.find_exe("cl.exe")
            self.linker = self.find_exe("link.exe")
            self.lib = self.find_exe("lib.exe")
            self.rc = self.find_exe("rc.exe")   # resource compiler
            self.mc = self.find_exe("mc.exe")   # message compiler
            self.set_path_env_var('lib')
            self.set_path_env_var('include')

        # extend the MSVC path with the current path
        try:
            for p in string.split(os.environ['path'], ';'):
                self.__paths.append(p)
        except KeyError:
            pass
        self.__paths = normalize_and_reduce_paths(self.__paths)
        os.environ['path'] = string.join(self.__paths, ';')

        self.preprocess_options = None
        if self.__arch == "Intel":
            self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
                                     '/DNDEBUG']
            self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
                                          '/Z7', '/D_DEBUG']
        else:
            # Win64
            self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
                                     '/DNDEBUG']
            self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
                                          '/Z7', '/D_DEBUG']

        self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
        if self.__version >= 7:
            self.ldflags_shared_debug = [
                '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
                ]
        else:
            self.ldflags_shared_debug = [
                '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
                ]
        self.ldflags_static = [ '/nologo']

        self.initialized = True

    # -- Worker methods ------------------------------------------------

    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        # Copied from ccompiler.py, extended to return .res as 'object'-file
        # for .rc input file
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            (base, ext) = os.path.splitext (src_name)
            base = os.path.splitdrive(base)[1] # Chop off the drive
            base = base[os.path.isabs(base):]  # If abs, chop off leading /
            if ext not in self.src_extensions:
                # Better to raise an exception instead of silently continuing
                # and later complain about sources and targets having
                # different lengths
                raise CompileError ("Don't know how to compile %s" % src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext in self._rc_extensions:
                obj_names.append (os.path.join (output_dir,
                                                base + self.res_extension))
            elif ext in self._mc_extensions:
                obj_names.append (os.path.join (output_dir,
                                                base + self.res_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()


    def compile(self, sources,
                output_dir=None, macros=None, include_dirs=None, debug=0,
                extra_preargs=None, extra_postargs=None, depends=None):

        if not self.initialized: self.initialize()
        macros, objects, extra_postargs, pp_opts, build = \
                self._setup_compile(output_dir, macros, include_dirs, sources,
                                    depends, extra_postargs)

        compile_opts = extra_preargs or []
        compile_opts.append ('/c')
        if debug:
            compile_opts.extend(self.compile_options_debug)
        else:
            compile_opts.extend(self.compile_options)

        # Each extension class is dispatched to the matching tool: C/C++
        # to cl.exe, .rc to rc.exe, .mc through mc.exe then rc.exe.
        for obj in objects:
            try:
                src, ext = build[obj]
            except KeyError:
                continue
            if debug:
                # pass the full pathname to MSVC in debug mode,
                # this allows the debugger to find the source file
                # without asking the user to browse for it
                src = os.path.abspath(src)

            if ext in self._c_extensions:
                input_opt = "/Tc" + src
            elif ext in self._cpp_extensions:
                input_opt = "/Tp" + src
            elif ext in self._rc_extensions:
                # compile .RC to .RES file
                input_opt = src
                output_opt = "/fo" + obj
                try:
                    self.spawn ([self.rc] + pp_opts +
                                [output_opt] + [input_opt])
                except DistutilsExecError, msg:
                    raise CompileError, msg
                continue
            elif ext in self._mc_extensions:

                # Compile .MC to .RC file to .RES file.
                #   * '-h dir' specifies the directory for the
                #     generated include file
                #   * '-r dir' specifies the target directory of the
                #     generated RC file and the binary message resource
                #     it includes
                #
                # For now (since there are no options to change this),
                # we use the source-directory for the include file and
                # the build directory for the RC file and message
                # resources. This works at least for win32all.

                h_dir = os.path.dirname (src)
                rc_dir = os.path.dirname (obj)
                try:
                    # first compile .MC to .RC and .H file
                    self.spawn ([self.mc] +
                                ['-h', h_dir, '-r', rc_dir] + [src])
                    base, _ = os.path.splitext (os.path.basename (src))
                    rc_file = os.path.join (rc_dir, base + '.rc')
                    # then compile .RC to .RES file
                    self.spawn ([self.rc] +
                                ["/fo" + obj] + [rc_file])

                except DistutilsExecError, msg:
                    raise CompileError, msg
                continue
            else:
                # how to handle this file?
                raise CompileError (
                    "Don't know how to compile %s to %s" % \
                    (src, obj))

            output_opt = "/Fo" + obj
            try:
                self.spawn ([self.cc] + compile_opts + pp_opts +
                            [input_opt, output_opt] +
                            extra_postargs)
            except DistutilsExecError, msg:
                raise CompileError, msg

        return objects

    # compile ()


    def create_static_lib (self,
                           objects,
                           output_libname,
                           output_dir=None,
                           debug=0,
                           target_lang=None):

        if not self.initialized: self.initialize()
        (objects, output_dir) = self._fix_object_args (objects, output_dir)
        output_filename = \
            self.library_filename (output_libname, output_dir=output_dir)

        if self._need_link (objects, output_filename):
            lib_args = objects + ['/OUT:' + output_filename]
            if debug:
                pass                    # XXX what goes here?
            try:
                self.spawn ([self.lib] + lib_args)
            except DistutilsExecError, msg:
                raise LibError, msg

        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    # create_static_lib ()

    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):

        if not self.initialized: self.initialize()
        (objects, output_dir) = self._fix_object_args (objects, output_dir)
        (libraries, library_dirs, runtime_library_dirs) = \
            self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)

        if runtime_library_dirs:
            self.warn ("I don't know what to do with 'runtime_library_dirs': "
                       + str (runtime_library_dirs))

        lib_opts = gen_lib_options (self,
                                    library_dirs, runtime_library_dirs,
                                    libraries)
        if output_dir is not None:
            output_filename = os.path.join (output_dir, output_filename)

        if self._need_link (objects, output_filename):

            if target_desc == CCompiler.EXECUTABLE:
                # Executables drop the leading '/DLL' switch.
                if debug:
                    ldflags = self.ldflags_shared_debug[1:]
                else:
                    ldflags = self.ldflags_shared[1:]
            else:
                if debug:
                    ldflags = self.ldflags_shared_debug
                else:
                    ldflags = self.ldflags_shared

            export_opts = []
            for sym in (export_symbols or []):
                export_opts.append("/EXPORT:" + sym)

            ld_args = (ldflags + lib_opts + export_opts +
                       objects + ['/OUT:' + output_filename])

            # The MSVC linker generates .lib and .exp files, which cannot be
            # suppressed by any linker switches. The .lib files may even be
            # needed! Make sure they are generated in the temporary build
            # directory. Since they have different names for debug and release
            # builds, they can go into the same directory.
            if export_symbols is not None:
                (dll_name, dll_ext) = os.path.splitext(
                    os.path.basename(output_filename))
                implib_file = os.path.join(
                    os.path.dirname(objects[0]),
                    self.library_filename(dll_name))
                ld_args.append ('/IMPLIB:' + implib_file)

            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)

            self.mkpath (os.path.dirname (output_filename))
            try:
                self.spawn ([self.linker] + ld_args)
            except DistutilsExecError, msg:
                raise LinkError, msg

        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    # link ()


    # -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options() function, in
    # ccompiler.py.

    def library_dir_option (self, dir):
        return "/LIBPATH:" + dir

    def runtime_library_dir_option (self, dir):
        raise DistutilsPlatformError, \
              "don't know how to set runtime library search path for MSVC++"

    def library_option (self, lib):
        return self.library_filename (lib)


    def find_library_file (self, dirs, lib, debug=0):
        # Prefer a debugging library if found (and requested), but deal
        # with it if we don't have one.
        if debug:
            try_names = [lib + "_d", lib]
        else:
            try_names = [lib]
        for dir in dirs:
            for name in try_names:
                libfile = os.path.join(dir, self.library_filename (name))
                if os.path.exists(libfile):
                    return libfile
        else:
            # Oops, didn't find it in *any* of 'dirs'
            return None

    # find_library_file ()

    # Helper methods for using the MSVC registry settings

    def find_exe(self, exe):
        """Return path to an MSVC executable program.

        Tries to find the program in several places: first, one of the
        MSVC program search paths from the registry; next, the directories
        in the PATH environment variable.  If any of those work, return an
        absolute path that is known to exist.  If none of them work, just
        return the original program name, 'exe'.
        """

        for p in self.__paths:
            fn = os.path.join(os.path.abspath(p), exe)
            if os.path.isfile(fn):
                return fn

        # didn't find it; try existing path
        for p in string.split(os.environ['Path'],';'):
            fn = os.path.join(os.path.abspath(p),exe)
            if os.path.isfile(fn):
                return fn

        return exe

    def get_msvc_paths(self, path, platform='x86'):
        """Get a list of devstudio directories (include, lib or path).

        Return a list of strings.  The list will be empty if unable to
        access the registry or appropriate registry keys not found.
        """

        if not _can_read_reg:
            return []

        path = path + " dirs"
        if self.__version >= 7:
            key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
                   % (self.__root, self.__version))
        else:
            key = (r"%s\6.0\Build System\Components\Platforms"
                   r"\Win32 (%s)\Directories" % (self.__root, platform))

        for base in HKEYS:
            d = read_values(base, key)
            if d:
                if self.__version >= 7:
                    # VS7+ stores paths with $(Macro) placeholders.
                    return string.split(self.__macros.sub(d[path]), ";")
                else:
                    return string.split(d[path], ";")
        # MSVC 6 seems to create the registry entries we need only when
        # the GUI is run.
        if self.__version == 6:
            for base in HKEYS:
                if read_values(base, r"%s\6.0" % self.__root) is not None:
                    self.warn("It seems you have Visual Studio 6 installed, "
                        "but the expected registry settings are not present.\n"
                        "You must at least run the Visual Studio GUI once "
                        "so that these entries are created.")
                    break
        return []

    def set_path_env_var(self, name):
        """Set environment variable 'name' to an MSVC path type value.

        This is equivalent to a SET command prior to execution of spawned
        commands.
        """

        if name == "lib":
            p = self.get_msvc_paths("library")
        else:
            p = self.get_msvc_paths(name)
        if p:
            os.environ[name] = string.join(p, ';')
# For VS 2005 (MSC 14 / build version 8.0) and newer, delegate to the
# msvc9compiler implementation, keeping the old class reachable as
# OldMSVCCompiler.
if get_build_version() >= 8.0:
    log.debug("Importing new compiler from distutils.msvc9compiler")
    OldMSVCCompiler = MSVCCompiler
    from distutils.msvc9compiler import MSVCCompiler
    # get_build_architecture not really relevant now we support cross-compile
    from distutils.msvc9compiler import MacroExpander
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides thread-related generators
This module defines classes for threading-related
generators for generating the models in
:mod:`openstack.common.report.models.threading`.
"""
from __future__ import absolute_import
import sys
import threading
import greenlet
from nova.openstack.common.report.models import threading as tm
from nova.openstack.common.report.models import with_default_views as mwdv
from nova.openstack.common.report import utils as rutils
from nova.openstack.common.report.views.text import generic as text_views
class ThreadReportGenerator(object):
    """A Thread Data Generator

    Produces a mapping of thread id to
    :class:`openstack.common.report.models.threading.ThreadModel`,
    built from :func:`sys._current_frames()`.  If a traceback was given
    to the constructor, it replaces the stack recorded for the thread
    that is running the generator at call time.
    """

    def __init__(self, curr_thread_traceback=None):
        self.traceback = curr_thread_traceback

    def __call__(self):
        models = {}
        for thread_id, stack in sys._current_frames().items():
            models[thread_id] = tm.ThreadModel(thread_id, stack)

        if self.traceback is not None:
            current_id = threading.current_thread().ident
            models[current_id] = tm.ThreadModel(current_id, self.traceback)

        return mwdv.ModelWithDefaultViews(models,
                                          text_view=text_views.MultiView())
class GreenThreadReportGenerator(object):
    """A Green Thread Data Generator

    Produces a list of
    :class:`openstack.common.report.models.threading.GreenThreadModel`
    objects, one per :class:`greenlet.greenlet` instance found in the
    garbage collector's tracked objects.

    .. seealso::

        Function :func:`openstack.common.report.utils._find_objects`
    """

    def __call__(self):
        models = []
        for gt in rutils._find_objects(greenlet.greenlet):
            models.append(tm.GreenThreadModel(gt.gr_frame))

        return mwdv.ModelWithDefaultViews(models,
                                          text_view=text_views.MultiView())
// Analysis-API test data: the <expr> marker selects the expression whose
// compile-time constant is evaluated (the receiver of an indexed increment).
fun main(args: Array<String>) {
    val a = ArrayList<String>()
    a.add("")
    <expr>a</expr>[0]++
}
// Error-test fixture: exporting `getStaticProps` from this component is
// expected to be rejected by the react-server-components transform.
export function getStaticProps() {}
export default function() {
  return null;
}
import pytest
import json
class TestMDSs(object):
    # Verification tests for a deployed ceph-mds daemon; `node` and `host`
    # are testinfra/ceph-ansible fixtures describing the target machine.

    @pytest.mark.no_docker
    def test_mds_is_installed(self, node, host):
        assert host.package("ceph-mds").is_installed

    def test_mds_service_is_running(self, node, host):
        # systemd unit is templated on the inventory hostname.
        service_name = "ceph-mds@{hostname}".format(
            hostname=node["vars"]["inventory_hostname"]
        )
        assert host.service(service_name).is_running

    def test_mds_service_is_enabled(self, node, host):
        service_name = "ceph-mds@{hostname}".format(
            hostname=node["vars"]["inventory_hostname"]
        )
        assert host.service(service_name).is_enabled

    @pytest.mark.no_docker
    def test_mds_is_up(self, node, host):
        # Query cluster status with the bootstrap-mds key and check this
        # host appears as the rank-0 MDS name in the fsmap.
        hostname = node["vars"]["inventory_hostname"]
        cmd = "sudo ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(cluster=node['cluster_name'])
        output = host.check_output(cmd)
        daemons = json.loads(output)["fsmap"]["by_rank"][0]["name"]
        assert hostname in daemons

    @pytest.mark.docker
    def test_docker_mds_is_up(self, node, host):
        # Containerized variant: run the same status query inside the
        # ceph-mds container.  If this host is not the active rank-0 MDS,
        # it must be counted as a standby.
        hostname = node["vars"]["inventory_hostname"]
        cmd = "sudo docker exec ceph-mds-{hostname} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
            hostname=node["vars"]["inventory_hostname"],
            cluster=node["cluster_name"]
        )
        output_raw = host.check_output(cmd)
        output_json = json.loads(output_raw)

        active_daemon = output_json["fsmap"]["by_rank"][0]["name"]
        if active_daemon != hostname:
            assert output_json['fsmap']['up:standby'] == 1
use serde_derive::Serialize;
#[derive(Serialize)]
#[serde(field_identifier)] // UI test: field_identifier only supports Deserialize, so this derive must error
enum F {
    A,
    B,
}
fn main() {} // entry point required by the compile-fail harness
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sample app for Google Apps Email Settings features.
EmailSettingsSample: demonstrates getting and setting/updating email settings
"""
__author__ = 'Prashant Tiwari <pti@google.com>'
from optparse import OptionParser
from gdata.apps.emailsettings.client import EmailSettingsClient
# Defaults for the "send as" alias settings demo.
SEND_AS_NAME = 'test-alias'
# Update SEND_AS_ADDRESS to a valid account on your domain.
SEND_AS_ADDRESS = 'johndoe@domain.com'
SEND_AS_REPLY_TO = 'replyto@example.com'
SEND_AS_MAKE_DEFAULT = False

# Defaults for the label settings demo.
LABEL_NAME = 'label'

# Defaults for the forwarding settings demo.
# Update FORWARD_TO to a valid account on your domain.
FORWARD_TO = 'account@domain.com'
FORWARDING_ACTION = 'ARCHIVE'

# Defaults for the POP settings demo.
POP_ENABLE_FOR = 'MAIL_FROM_NOW_ON'
POP_ACTION = 'ARCHIVE'

# Defaults for the signature settings demo.
SIGNATURE = "<Insert witty signature here>"

# Defaults for the vacation-responder settings demo.
VACATION_SUBJECT = "On vacation"
VACATION_MESSAGE = "I'm on vacation, will respond when I return."
VACATION_CONTACTS_ONLY = True

# Defaults for the mail-filter settings demo.
FILTER_FROM = 'me@domain.com'
FILTER_TO = 'you@domain.com'
FILTER_SUBJECT = 'subject'
FILTER_HAS_THE_WORD = 'has'
FILTER_DOES_NOT_HAVE_THE_WORD = 'no'
FILTER_HAS_ATTACHMENT = True
FILTER_SHOULD_MARK_AS_READ = True
FILTER_SHOULD_ARCHIVE = True
FILTER_LABEL = 'label'

# Defaults for the general settings demo.
GENERAL_PAGE_SIZE = '50'
GENERAL_ENABLE_SHORTCUTS = True
GENERAL_ENABLE_ARROWS = True
GENERAL_ENABLE_SNIPPETS = True
GENERAL_ENABLE_UNICODE = True

# Defaults for the language settings demo.
LANGUAGE = 'en-US'

# Module-level holders for the OptionParser and its parsed options;
# presumably populated elsewhere in this script — not visible in this chunk.
parser = None
options = None
class EmailSettingsSample(object):
"""EmailsSettingsSample object demos the Email Settings API."""
def __init__(self, domain, email, password, app):
"""Constructor for the EmailSettingsSample object.
Takes an email, password and an app id corresponding to a google apps admin
account to demo the Email Settings API.
Args:
domain: [string] The domain name (e.g. domain.com)
email: [string] The e-mail address of a domain admin account.
password: [string] The domain admin's password.
app: [string] The app name of the form
companyName-applicationName-versionID
"""
self.client = EmailSettingsClient(domain=domain)
self.client.ClientLogin(email=email, password=password,
source=app)
def run(self, username, setting, method, args):
"""Method that invokes the EmailSettingsClient services
Args:
username: [string] The name of the account for whom to get/set settings
setting: [string] The email setting to be got/set/updated
method: [string] Specifies the get or set method
"""
if setting == 'label':
if method == 'get':
print "getting labels for %s...\n" % (username)
print self.client.RetrieveLabels(username=username)
elif method == 'set':
print "creating label for %s...\n" % (username)
print self.client.CreateLabel(username=username, name=LABEL_NAME)
else:
print "deleting labels isn't supported"
elif setting == 'forwarding':
if method == 'get':
print "getting forwarding for %s...\n" % (username)
print self.client.RetrieveForwarding(username)
elif method == 'set':
print "updating forwarding settings for %s...\n" % (username)
print self.client.UpdateForwarding(username=username,
enable=not(options.disable),
forward_to=FORWARD_TO,
action=FORWARDING_ACTION)
else:
print "deleting forwarding settings isn't supported"
elif setting == 'sendas':
if method == 'get':
print "getting sendAs alias for %s...\n" % (username)
print self.client.RetrieveSendAs(username=username)
elif method == 'set':
print "creating sendAs alias for %s...\n" % (username)
print self.client.CreateSendAs(username=username, name=SEND_AS_NAME,
address=SEND_AS_ADDRESS,
reply_to=SEND_AS_REPLY_TO,
make_default=SEND_AS_MAKE_DEFAULT)
else:
print "deleting send-as settings isn't supported"
elif setting == 'pop':
if method == 'get':
print "getting pop settings for %s...\n" % (username)
print self.client.RetrievePop(username=username)
elif method == 'set':
print "updating pop settings for %s...\n" % (username)
print self.client.UpdatePop(username=username,
enable=not(options.disable),
enable_for=POP_ENABLE_FOR,
action=POP_ACTION)
else:
print "deleting pop settings isn't supported"
elif setting == 'signature':
if method == 'get':
print "getting signature for %s...\n" % (username)
print self.client.RetrieveSignature(username=username)
elif method == 'set':
print "updating signature for %s...\n" % (username)
print self.client.UpdateSignature(username=username,
signature=SIGNATURE)
else:
print "deleting signature settings isn't supported"
elif setting == 'vacation':
if method == 'get':
print "getting vacation settings for %s...\n" % (username)
print self.client.RetrieveVacation(username=username)
elif method == 'set':
print "updating vacation settings for %s...\n" % (username)
print self.client.UpdateVacation(username=username,
enable=not(options.disable),
subject=VACATION_SUBJECT,
message=VACATION_MESSAGE,
contacts_only=VACATION_CONTACTS_ONLY)
else:
print "deleting vacation settings isn't supported"
elif setting == 'imap':
if method == 'get':
print "getting imap settings for %s...\n" % (username)
print self.client.RetrieveImap(username)
elif setting == 'set':
print "updating imap settings for %s...\n" % (username)
print self.client.UpdateImap(username=username,
enable=not(options.disable))
else:
print "deleting imap settings isn't supported"
elif setting == 'filter':
if method == 'get':
print "getting email filters is not yet possible\n"
parser.print_help()
elif method == 'set':
print "creating an email filter for %s...\n" % (username)
print self.client.CreateFilter(username=username,
from_address=FILTER_FROM,
to_address=FILTER_TO,
subject=FILTER_SUBJECT,
has_the_word=FILTER_HAS_THE_WORD,
does_not_have_the_word=
FILTER_DOES_NOT_HAVE_THE_WORD,
has_attachments=FILTER_HAS_ATTACHMENT,
label=FILTER_LABEL,
mark_as_read=FILTER_SHOULD_MARK_AS_READ,
archive=FILTER_SHOULD_ARCHIVE)
else:
print "deleting filters isn't supported"
elif setting == 'general':
if method == 'get':
print "getting general email settings is not yet possible\n"
parser.print_help()
elif method == 'set':
print "updating general settings for %s...\n" % (username)
print self.client.UpdateGeneralSettings(username=username,
page_size=GENERAL_PAGE_SIZE,
shortcuts=
GENERAL_ENABLE_SHORTCUTS,
arrows=
GENERAL_ENABLE_ARROWS,
snippets=
GENERAL_ENABLE_SNIPPETS,
use_unicode=
GENERAL_ENABLE_UNICODE)
else:
print "deleting general settings isn't supported"
elif setting == 'language':
if method == 'get':
print "getting language settings is not yet possible\n"
parser.print_help()
elif method == 'set':
print "updating language for %s...\n" % (username)
print self.client.UpdateLanguage(username=username, language=LANGUAGE)
else:
print "deleting language settings isn't supported"
elif setting == 'webclip':
if method == 'get':
print "getting webclip settings is not yet possible\n"
parser.print_help()
elif method == 'get':
print "updating webclip settings for %s...\n" % (username)
print self.client.UpdateWebclip(username=username,
enable=not(options.disable))
else:
print "deleting webclip settings isn't supported"
elif setting == 'delegation':
if method == 'get':
print "getting email delegates for %s..." % (username)
print self.client.RetrieveEmailDelegates(username=username)
elif method == 'set':
address = args['delegationId']
print "adding %s as an email delegate to %s..." % (address, username)
print self.client.AddEmailDelegate(username=username, address=address)
else:
address = args['delegationId']
print "deleting %s as an email delegate for %s..." % (address, username)
print self.client.DeleteEmailDelegate(username=username,
address=address)
else:
parser.print_help()
def main():
  """Demos the Email Settings API using the EmailSettingsSample object.

  Parses command-line flags, validates required options and dispatches to
  EmailSettingsSample.run(). Prints usage help if validation fails.
  """
  usage = 'usage: %prog [options]'
  global parser
  global options
  parser = OptionParser(usage=usage)
  parser.add_option('--domain',
                    help="The Google Apps domain, e.g. 'domain.com'.")
  parser.add_option('--email',
                    help="The admin's email account, e.g. 'admin@domain.com'.")
  parser.add_option('--password',
                    help="The admin's password.")
  parser.add_option('--app',
                    help="The name of the app.")
  parser.add_option('--username',
                    help="The user account on which to perform operations.")
  parser.add_option('--setting',
                    choices=['filter', 'label', 'forwarding', 'sendas', 'pop',
                             'signature', 'vacation', 'imap', 'general',
                             'language', 'webclip', 'delegation'],
                    help="The email setting to use. Choose from filter, label, \
forwarding, sendas, pop, signature, vacation, imap, \
general, language, webclip, and delegation.")
  parser.add_option('--method',
                    default='get',
                    choices=['get', 'set', 'delete'],
                    help="Specify whether to get, set/update or delete \
setting. Choose between get (default), set, and delete.")
  parser.add_option('--disable',
                    action="store_true",
                    default=False,
                    dest="disable",
                    help="Disable a setting when using the set method with the\
 --disable option. The default is to enable the setting.")
  parser.add_option('--delegationId',
                    default=None,
                    help="The emailId of the account to which email access has\
 to be delegated. Required for adding or deleting an \
email delegate.")
  (options, args) = parser.parse_args()
  # All of these flags are mandatory; use `is None` (identity) rather than
  # `== None` for the standard Python null check.
  missing_required = (options.domain is None or options.email is None or
                      options.password is None or options.username is None or
                      options.app is None or options.setting is None)
  # delegationId is only required when modifying delegation settings.
  missing_delegate = (options.setting == 'delegation' and
                      options.method != 'get' and
                      options.delegationId is None)
  if missing_required or missing_delegate:
    parser.print_help()
    return
  args = {'delegationId': options.delegationId}
  sample = EmailSettingsSample(options.domain, options.email, options.password,
                               options.app)
  sample.run(options.username, options.setting, options.method, args)
# urllib3/request.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# Python 3 moved urlencode to urllib.parse; fall back to the Python 2
# location if that import fails.
try:
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlencode
from .filepost import encode_multipart_formdata
# Public API of this module.
__all__ = ['RequestMethods']
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # HTTP verbs whose fields belong in the query string vs. the body.
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
    _encode_body_methods = set(['PATCH', 'POST', 'PUT', 'TRACE'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # BUG FIX: previously raised `NotImplemented(...)` -- the
        # `NotImplemented` singleton is not callable, which produced a
        # confusing TypeError instead of the intended exception.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **urlopen_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example: ::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if encode_multipart:
            body, content_type = encode_multipart_formdata(fields or {},
                                    boundary=multipart_boundary)
        else:
            body, content_type = (urlencode(fields or {}),
                                    'application/x-www-form-urlencoded')

        if headers is None:
            headers = self.headers

        # Content-Type must reflect the chosen encoding; caller-supplied
        # headers may override everything else.
        headers_ = {'Content-Type': content_type}
        headers_.update(headers)

        return self.urlopen(method, url, body=body, headers=headers_,
                            **urlopen_kw)
# -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Matthias Kuhn
Email : matthias@opengis.ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = ':%H$'
import os
import sys
import difflib
import functools
import filecmp
from qgis.PyQt.QtCore import QVariant
from qgis.core import QgsApplication, QgsFeatureRequest, NULL
import unittest
# Get a backup, we will patch this one later
_TestCase = unittest.TestCase

# Raise unittest's diff-truncation limit so long assertion failure messages
# (e.g. WKT geometry dumps) are shown in full.
unittest.util._MAX_LENGTH = 2000
class TestCase(_TestCase):
    """unittest.TestCase subclass adding QGIS-specific comparison helpers.

    Provides layer, geometry, attribute, file and directory equality checks.
    The ``check*`` variants return a bool (or assert, depending on
    ``use_asserts``); the ``assert*`` variants always raise on mismatch.
    """

    def assertLayersEqual(self, layer_expected, layer_result, **kwargs):
        """
        :param layer_expected: The first layer to compare
        :param layer_result: The second layer to compare
        :param request: Optional, A feature request. This can be used to specify
                        an order by clause to make sure features are compared in
                        a given sequence if they don't match by default.
        :keyword compare: A map of comparison options. e.g.
                         { fields: { a: skip, b: { precision: 2 }, geometry: { precision: 5 } }
                         { fields: { __all__: cast( str ) } }
        :keyword pk: "Primary key" type field - used to match features
                     from the expected table to their corresponding features in the result table. If not specified
                     features are compared by their order in the layer (e.g. first feature compared with first feature,
                     etc)
        """
        # Delegate to the non-asserting implementation with asserts enabled.
        self.checkLayersEqual(layer_expected, layer_result, True, **kwargs)

    def checkLayersEqual(self, layer_expected, layer_result, use_asserts=False, **kwargs):
        """
        :param layer_expected: The first layer to compare
        :param layer_result: The second layer to compare
        :param use_asserts: If true, asserts are used to test conditions, if false, asserts
                            are not used and the function will only return False if the test fails
        :param request: Optional, A feature request. This can be used to specify
                        an order by clause to make sure features are compared in
                        a given sequence if they don't match by default.
        :keyword compare: A map of comparison options. e.g.
                         { fields: { a: skip, b: { precision: 2 }, geometry: { precision: 5 } }
                         { fields: { __all__: cast( str ) } }
        :keyword pk: "Primary key" type field - used to match features
                     from the expected table to their corresponding features in the result table. If not specified
                     features are compared by their order in the layer (e.g. first feature compared with first feature,
                     etc)
        """
        try:
            request = kwargs['request']
        except KeyError:
            request = QgsFeatureRequest()
        try:
            compare = kwargs['compare']
        except KeyError:
            compare = {}
        # Compare CRS
        if 'ignore_crs_check' not in compare or not compare['ignore_crs_check']:
            if use_asserts:
                _TestCase.assertEqual(self, layer_expected.dataProvider().crs().authid(), layer_result.dataProvider().crs().authid())
            elif not layer_expected.dataProvider().crs().authid() == layer_result.dataProvider().crs().authid():
                return False
        # Compare features
        if use_asserts:
            _TestCase.assertEqual(self, layer_expected.featureCount(), layer_result.featureCount())
        elif layer_expected.featureCount() != layer_result.featureCount():
            return False
        try:
            precision = compare['geometry']['precision']
        except KeyError:
            precision = 14
        try:
            topo_equal_check = compare['geometry']['topo_equal_check']
        except KeyError:
            topo_equal_check = False
        try:
            unordered = compare['unordered']
        except KeyError:
            unordered = False
        if unordered:
            # Unordered mode: greedily match each result feature against any
            # remaining expected feature (O(n^2), but order-independent).
            features_expected = [f for f in layer_expected.getFeatures(request)]
            for feat in layer_result.getFeatures(request):
                feat_expected_equal = None
                for feat_expected in features_expected:
                    if self.checkGeometriesEqual(feat.geometry(), feat_expected.geometry(),
                                                 feat.id(), feat_expected.id(),
                                                 False, precision, topo_equal_check) and \
                            self.checkAttributesEqual(feat, feat_expected, layer_expected.fields(), False, compare):
                        feat_expected_equal = feat_expected
                        break
                if feat_expected_equal is not None:
                    # Consume the matched expected feature so it can't match twice.
                    features_expected.remove(feat_expected_equal)
                else:
                    if use_asserts:
                        _TestCase.assertTrue(
                            self, False,
                            'Unexpected result feature: fid {}, geometry: {}, attributes: {}'.format(
                                feat.id(),
                                feat.geometry().constGet().asWkt(precision) if feat.geometry() else 'NULL',
                                feat.attributes())
                        )
                    else:
                        return False
            if len(features_expected) != 0:
                if use_asserts:
                    lst_missing = []
                    for feat in features_expected:
                        lst_missing.append('fid {}, geometry: {}, attributes: {}'.format(
                            feat.id(),
                            feat.geometry().constGet().asWkt(precision) if feat.geometry() else 'NULL',
                            feat.attributes())
                        )
                    _TestCase.assertTrue(self, False, 'Some expected features not found in results:\n' + '\n'.join(lst_missing))
                else:
                    return False
            return True

        # Ordered mode: sort both sides by pk (if given) or feature id, then
        # compare pairwise.
        def sort_by_pk_or_fid(f):
            if 'pk' in kwargs and kwargs['pk'] is not None:
                key = kwargs['pk']
                if isinstance(key, list) or isinstance(key, tuple):
                    return [f[k] for k in key]
                else:
                    return f[kwargs['pk']]
            else:
                return f.id()

        expected_features = sorted(layer_expected.getFeatures(request), key=sort_by_pk_or_fid)
        result_features = sorted(layer_result.getFeatures(request), key=sort_by_pk_or_fid)
        for feats in zip(expected_features, result_features):
            eq = self.checkGeometriesEqual(feats[0].geometry(),
                                           feats[1].geometry(),
                                           feats[0].id(),
                                           feats[1].id(),
                                           use_asserts, precision, topo_equal_check)
            if not eq and not use_asserts:
                return False
            eq = self.checkAttributesEqual(feats[0], feats[1], layer_expected.fields(), use_asserts, compare)
            if not eq and not use_asserts:
                return False
        return True

    def assertFilesEqual(self, filepath_expected, filepath_result):
        """Assert that two text files have identical content (unified diff shown on failure)."""
        with open(filepath_expected, 'r') as file_expected:
            with open(filepath_result, 'r') as file_result:
                diff = difflib.unified_diff(
                    file_expected.readlines(),
                    file_result.readlines(),
                    fromfile='expected',
                    tofile='result',
                )
                diff = list(diff)
                self.assertEqual(0, len(diff), ''.join(diff))

    def assertDirectoriesEqual(self, dirpath_expected, dirpath_result):
        """ Checks whether both directories have the same content (recursively) and raises an assertion error if not. """
        dc = filecmp.dircmp(dirpath_expected, dirpath_result)
        dc.report_full_closure()

        def _check_dirs_equal_recursive(dcmp):
            # No file may exist on only one side, and no common file may differ.
            self.assertEqual(dcmp.left_only, [])
            self.assertEqual(dcmp.right_only, [])
            self.assertEqual(dcmp.diff_files, [])
            for sub_dcmp in dcmp.subdirs.values():
                _check_dirs_equal_recursive(sub_dcmp)
        _check_dirs_equal_recursive(dc)

    def assertGeometriesEqual(self, geom0, geom1, geom0_id='geometry 1', geom1_id='geometry 2', precision=14, topo_equal_check=False):
        """Assert that two geometries are equal (see checkGeometriesEqual)."""
        self.checkGeometriesEqual(geom0, geom1, geom0_id, geom1_id, use_asserts=True, precision=precision, topo_equal_check=topo_equal_check)

    def checkGeometriesEqual(self, geom0, geom1, geom0_id, geom1_id, use_asserts=False, precision=14, topo_equal_check=False):
        """ Checks whether two geometries are the same - using either a strict check of coordinates (up to given precision)
        or by using topological equality (where e.g. a polygon with clockwise is equal to a polygon with counter-clockwise
        order of vertices)

        .. versionadded:: 3.2
        """
        if not geom0.isNull() and not geom1.isNull():
            if topo_equal_check:
                equal = geom0.isGeosEqual(geom1)
            else:
                # Strict comparison via WKT rounded to `precision` decimals.
                equal = geom0.constGet().asWkt(precision) == geom1.constGet().asWkt(precision)
        elif geom0.isNull() and geom1.isNull():
            # Two null geometries count as equal.
            equal = True
        else:
            equal = False

        if use_asserts:
            _TestCase.assertTrue(
                self,
                equal,
                'Features (Expected fid: {}, Result fid: {}) differ in geometry: \n\n Expected geometry:\n {}\n\n Result geometry:\n {}'.format(
                    geom0_id,
                    geom1_id,
                    geom0.constGet().asWkt(precision) if not geom0.isNull() else 'NULL',
                    geom1.constGet().asWkt(precision) if not geom1.isNull() else 'NULL'
                )
            )
        else:
            return equal

    def checkAttributesEqual(self, feat0, feat1, fields_expected, use_asserts, compare):
        """ Checks whether attributes of two features are the same

        .. versionadded:: 3.2
        """
        for attr_expected, field_expected in zip(feat0.attributes(), fields_expected.toList()):
            try:
                cmp = compare['fields'][field_expected.name()]
            except KeyError:
                try:
                    # "__all__" is a wildcard comparison rule for every field.
                    cmp = compare['fields']['__all__']
                except KeyError:
                    cmp = {}

            # Skip field
            if 'skip' in cmp:
                continue

            if use_asserts:
                _TestCase.assertIn(
                    self,
                    field_expected.name().lower(),
                    [name.lower() for name in feat1.fields().names()])

            attr_result = feat1[field_expected.name()]
            # NOTE(review): field_result is looked up in fields_expected, not
            # in feat1.fields() -- verify this is intended; it is only used
            # for the failure message below.
            field_result = [fld for fld in fields_expected.toList() if fld.name() == field_expected.name()][0]

            # Cast field to a given type
            if 'cast' in cmp:
                if cmp['cast'] == 'int':
                    attr_expected = int(attr_expected) if attr_expected else None
                    attr_result = int(attr_result) if attr_result else None
                if cmp['cast'] == 'float':
                    attr_expected = float(attr_expected) if attr_expected else None
                    attr_result = float(attr_result) if attr_result else None
                if cmp['cast'] == 'str':
                    attr_expected = str(attr_expected) if attr_expected else None
                    attr_result = str(attr_result) if attr_result else None

            # Round field (only numeric so it works with __all__)
            if 'precision' in cmp and field_expected.type() in [QVariant.Int, QVariant.Double, QVariant.LongLong]:
                if not attr_expected == NULL:
                    attr_expected = round(attr_expected, cmp['precision'])
                if not attr_result == NULL:
                    attr_result = round(attr_result, cmp['precision'])

            if use_asserts:
                _TestCase.assertEqual(
                    self,
                    attr_expected,
                    attr_result,
                    'Features {}/{} differ in attributes\n\n * Field expected: {} ({})\n * result : {} ({})\n\n * Expected: {} != Result : {}'.format(
                        feat0.id(),
                        feat1.id(),
                        field_expected.name(),
                        field_expected.typeName(),
                        field_result.name(),
                        field_result.typeName(),
                        repr(attr_expected),
                        repr(attr_result)
                    )
                )
            elif attr_expected != attr_result:
                return False

        return True
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
pass
def expectedFailure(*args):
    """
    Decorate a unittest function as an expectedFailure. A function
    flagged as expectedFailure succeeds if it raises an exception; if it
    does not raise, an `_UnexpectedSuccess` exception is thrown.

        @expectedFailure
        def my_test(self):
            self.assertTrue(False)

    A condition can also be supplied so the failure is only expected in
    certain situations:

        @expectedFailure(time.localtime().tm_year < 2002)
        def my_test(self):
            self.assertTrue(qgisIsInvented())
    """
    def _swallow_failure(func):
        # Wrap `func` so that any exception counts as success and a clean
        # run raises _UnexpectedSuccess.
        @functools.wraps(func)
        def wrapper(*call_args, **call_kwargs):
            try:
                func(*call_args, **call_kwargs)
            except Exception:
                pass
            else:
                raise _UnexpectedSuccess
        return wrapper

    if callable(args[0]):
        # Bare usage: @expectedFailure applied directly to the function.
        return _swallow_failure(args[0])

    # Parameterized usage: @expectedFailure(condition)
    condition = args[0]

    def realExpectedFailure(func):
        if condition:
            return _swallow_failure(func)

        @functools.wraps(func)
        def passthrough(*call_args, **call_kwargs):
            # Condition not met: run the test normally.
            func(*call_args, **call_kwargs)
        return passthrough

    return realExpectedFailure
# Patch unittest
# Replace the stock TestCase and expectedFailure so test modules that simply
# `import unittest` pick up the QGIS-aware versions defined above.
unittest.TestCase = TestCase
unittest.expectedFailure = expectedFailure
def start_app(cleanup=True):
    """
    Will start a QgsApplication and call all initialization code like
    registering the providers and other infrastructure. It will not load
    any plugins.

    You can always get the reference to a running app by calling
    `QgsApplication.instance()`.

    The initialization will only happen once, so it is safe to call this
    method repeatedly.

    Parameters
    ----------

    cleanup: Do cleanup on exit. Defaults to true.

    Returns
    -------
    QgsApplication

    A QgsApplication singleton
    """
    global QGISAPP

    try:
        QGISAPP
    except NameError:
        myGuiFlag = True  # All tests will run qgis in gui mode

        # BUG FIX: was a bare `except:` (which also swallows SystemExit and
        # KeyboardInterrupt); a missing sys.argv raises AttributeError.
        try:
            sys.argv
        except AttributeError:
            sys.argv = ['']

        # In python3 we need to convert to a bytes object (or should
        # QgsApplication accept a QString instead of const char* ?)
        try:
            argvb = list(map(os.fsencode, sys.argv))
        except AttributeError:
            argvb = sys.argv

        # Note: QGIS_PREFIX_PATH is evaluated in QgsApplication -
        # no need to mess with it here.
        QGISAPP = QgsApplication(argvb, myGuiFlag)

        QGISAPP.initQgis()
        print(QGISAPP.showSettings())

        def debug_log_message(message, tag, level):
            # Mirror QGIS message log output to stdout for test debugging.
            print('{}({}): {}'.format(tag, level, message))

        QgsApplication.instance().messageLog().messageReceived.connect(debug_log_message)

        if cleanup:
            import atexit

            @atexit.register
            def exitQgis():
                QGISAPP.exitQgis()

    return QGISAPP
def stop_app():
    """Shut down QGIS and discard the global QgsApplication singleton."""
    global QGISAPP

    QGISAPP.exitQgis()
    del QGISAPP
"""Runtime projections to provide template/var-visible views of objects that are not natively allowed in Ansible's type system."""
from __future__ import annotations
import dataclasses
import typing as t
from ansible.module_utils._internal import _traceback, _event_utils, _messages
from ansible.parsing.vault import EncryptedString, VaultHelper
from ansible.utils.display import Display
from ._jinja_common import VaultExceptionMarker
from .._errors import _captured, _error_factory
from .. import _event_formatting
# Module-level Display instance.
display = Display()
def plugin_info(value: _messages.PluginInfo) -> dict[str, str]:
    """Render PluginInfo as a dictionary."""
    as_mapping = dataclasses.asdict(value)
    return as_mapping
def plugin_type(value: _messages.PluginType) -> str:
    """Render PluginType as a string."""
    rendered = value.value
    return rendered
def error_summary(value: _messages.ErrorSummary) -> str:
    """Render ErrorSummary as a formatted traceback for backward-compatibility with pre-2.19 TaskResult.exception."""
    traceback_wanted = _traceback._is_traceback_enabled(_traceback.TracebackEvent.ERROR)

    if not traceback_wanted:
        return '(traceback unavailable)'

    return _event_formatting.format_event_traceback(value.event)
def warning_summary(value: _messages.WarningSummary) -> str:
    """Render WarningSummary as a simple message string for backward-compatibility with pre-2.19 TaskResult.warnings."""
    brief_message = _event_utils.format_event_brief_message(value.event)
    return brief_message
def deprecation_summary(value: _messages.DeprecationSummary) -> dict[str, t.Any]:
    """Render DeprecationSummary as dict values for backward-compatibility with pre-2.19 TaskResult.deprecations."""
    rendered = _event_utils.deprecation_as_dict(value)
    # Preserve the deprecator in the legacy dict shape as well.
    rendered['deprecator'] = value.deprecator
    return rendered
def encrypted_string(value: EncryptedString) -> str | VaultExceptionMarker:
    """Decrypt an encrypted string and return its value, or a VaultExceptionMarker if decryption fails."""
    try:
        plaintext = value._decrypt()
    except Exception as exc:
        # Surface the decryption failure as a marker instead of raising, so
        # templating can continue and report the problem lazily.
        failure_event = _error_factory.ControllerEventFactory.from_exception(
            exc, _traceback.is_traceback_enabled(_traceback.TracebackEvent.ERROR))

        return VaultExceptionMarker(
            ciphertext=VaultHelper.get_ciphertext(value, with_tags=True),
            event=failure_event,
        )

    return plaintext
# Maps each internal type to the callable that produces its template-visible
# projection.
_type_transform_mapping: dict[type, t.Callable[[t.Any], t.Any]] = {
    _captured.CapturedErrorSummary: error_summary,
    _messages.PluginInfo: plugin_info,
    _messages.PluginType: plugin_type,
    _messages.ErrorSummary: error_summary,
    _messages.WarningSummary: warning_summary,
    _messages.DeprecationSummary: deprecation_summary,
    EncryptedString: encrypted_string,
}
"""This mapping is consulted by `Templar.template` to provide custom views of some objects."""
"""Support for BT Smart Hub (Sometimes referred to as BT Home Hub 6)."""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST
_LOGGER = logging.getLogger(__name__)

# Default router address used when no `host` is configured.
CONF_DEFAULT_IP = '192.168.1.254'

# Extend the shared device_tracker schema with an optional host override.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=CONF_DEFAULT_IP): cv.string,
})
def get_scanner(hass, config):
    """Return a BT Smart Hub scanner if successful."""
    hub_scanner = BTSmartHubScanner(config[DOMAIN])
    if hub_scanner.success_init:
        return hub_scanner
    return None
class BTSmartHubScanner(DeviceScanner):
    """This class queries a BT Smart Hub for connected devices."""

    def __init__(self, config):
        """Initialise the scanner and probe the router once to verify access."""
        _LOGGER.debug("Initialising BT Smart Hub")
        self.host = config[CONF_HOST]
        # CONSISTENCY FIX: was initialised to a dict ({}), but every other
        # use of last_results treats it as a list of device dicts.
        self.last_results = []
        self.success_init = False

        # Test the router is accessible before reporting success.
        data = self.get_bt_smarthub_data()
        if data:
            self.success_init = True
        else:
            _LOGGER.info("Failed to connect to %s", self.host)

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return [client['mac'] for client in self.last_results]

    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        if not self.last_results:
            return None
        # First client whose MAC matches, or None.
        return next(
            (client['host'] for client in self.last_results
             if client['mac'] == device),
            None)

    def _update_info(self):
        """Ensure the information from the BT Smart Hub is up to date.

        Keeps the previous results when the router cannot be reached.
        """
        if not self.success_init:
            return

        _LOGGER.info("Scanning")
        data = self.get_bt_smarthub_data()
        if not data:
            _LOGGER.warning("Error scanning devices")
            return

        self.last_results = list(data.values())

    def get_bt_smarthub_data(self):
        """Retrieve data from BT Smart Hub and return parsed result."""
        import btsmarthub_devicelist

        # Request data from bt smarthub into a list of dicts.
        data = btsmarthub_devicelist.get_devicelist(
            router_ip=self.host, only_active_devices=True)

        # Renaming keys from parsed result; entries missing any expected
        # field are skipped.
        devices = {}
        for device in data:
            try:
                devices[device['UserHostName']] = {
                    'ip': device['IPAddress'],
                    'mac': device['PhysAddress'],
                    'host': device['UserHostName'],
                    'status': device['Active']
                }
            except KeyError:
                pass
        return devices
# -*- coding: utf-8 -*-
"""
MiniTwit Tests
~~~~~~~~~~~~~~
Tests the MiniTwit application.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import minitwit
import tempfile
import pytest
@pytest.fixture
def client(request):
    """Provide a Flask test client backed by a fresh temporary database."""
    db_fd, minitwit.app.config['DATABASE'] = tempfile.mkstemp()
    test_client = minitwit.app.test_client()
    with minitwit.app.app_context():
        minitwit.init_db()

    def cleanup():
        """Get rid of the database again after each test."""
        os.close(db_fd)
        os.unlink(minitwit.app.config['DATABASE'])

    request.addfinalizer(cleanup)
    return test_client
def register(client, username, password, password2=None, email=None):
    """Helper: submit the registration form for *username*."""
    password2 = password if password2 is None else password2
    email = username + '@example.com' if email is None else email
    form = {
        'username': username,
        'password': password,
        'password2': password2,
        'email': email,
    }
    return client.post('/register', data=form, follow_redirects=True)
def login(client, username, password):
    """Helper: log *username* in through the login form."""
    credentials = {'username': username, 'password': password}
    return client.post('/login', data=credentials, follow_redirects=True)
def register_and_login(client, username, password):
    """Helper: register *username* and immediately log in as that user."""
    register(client, username, password)
    return login(client, username, password)
def logout(client):
    """Helper: log the current user out."""
    return client.get('/logout', follow_redirects=True)
def add_message(client, text):
    """Helper: post *text* as a message and check it was recorded."""
    resp = client.post('/add_message', data={'text': text},
                       follow_redirects=True)
    if text:
        assert b'Your message was recorded' in resp.data
    return resp
def test_register(client):
    """Exercise registration, including every validation failure message."""
    resp = register(client, 'user1', 'default')
    assert b'You were successfully registered ' \
           b'and can login now' in resp.data
    # duplicate username
    resp = register(client, 'user1', 'default')
    assert b'The username is already taken' in resp.data
    # empty username
    resp = register(client, '', 'default')
    assert b'You have to enter a username' in resp.data
    # empty password
    resp = register(client, 'meh', '')
    assert b'You have to enter a password' in resp.data
    # mismatched password confirmation
    resp = register(client, 'meh', 'x', 'y')
    assert b'The two passwords do not match' in resp.data
    # malformed email address
    resp = register(client, 'meh', 'foo', email='broken')
    assert b'You have to enter a valid email address' in resp.data
def test_login_logout(client):
    """Log in, log out, then check both login failure modes."""
    resp = register_and_login(client, 'user1', 'default')
    assert b'You were logged in' in resp.data
    resp = logout(client)
    assert b'You were logged out' in resp.data
    # wrong password for an existing user
    resp = login(client, 'user1', 'wrongpassword')
    assert b'Invalid password' in resp.data
    # unknown user
    resp = login(client, 'user2', 'wrongpassword')
    assert b'Invalid username' in resp.data
def test_message_recording(client):
    """Recorded messages show up on the timeline."""
    register_and_login(client, 'foo', 'default')
    add_message(client, 'test message 1')
    add_message(client, '<test message 2>')
    resp = client.get('/')
    assert b'test message 1' in resp.data
    # NOTE(review): if the templates autoescape, the markup would render as
    # '&lt;test message 2&gt;' — confirm this literal matches the app output.
    assert b'<test message 2>' in resp.data
def test_timelines(client):
    """Follow/unfollow behaviour on public, personal and per-user pages."""
    register_and_login(client, 'foo', 'default')
    add_message(client, 'the message by foo')
    logout(client)
    register_and_login(client, 'bar', 'default')
    add_message(client, 'the message by bar')
    # the public timeline carries both users' messages
    resp = client.get('/public')
    assert b'the message by foo' in resp.data
    assert b'the message by bar' in resp.data
    # bar's timeline should just show bar's message
    resp = client.get('/')
    assert b'the message by foo' not in resp.data
    assert b'the message by bar' in resp.data
    # now let's follow foo
    resp = client.get('/foo/follow', follow_redirects=True)
    assert b'You are now following "foo"' in resp.data
    # we should now see foo's message
    resp = client.get('/')
    assert b'the message by foo' in resp.data
    assert b'the message by bar' in resp.data
    # but on the user's page we only want the user's message
    resp = client.get('/bar')
    assert b'the message by foo' not in resp.data
    assert b'the message by bar' in resp.data
    resp = client.get('/foo')
    assert b'the message by foo' in resp.data
    assert b'the message by bar' not in resp.data
    # now unfollow and check if that worked
    resp = client.get('/foo/unfollow', follow_redirects=True)
    assert b'You are no longer following "foo"' in resp.data
    resp = client.get('/')
    assert b'the message by foo' not in resp.data
    assert b'the message by bar' in resp.data
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.compat import iteritems, OrderedDict, deprecated
from openpyxl.styles.differential import DifferentialStyle
from .rule import Rule
def unpack_rules(cfRules):
    """Yield (range_string, index, priority) for every rule in *cfRules*."""
    for range_string, rule_list in cfRules.items():
        for position, rule in enumerate(rule_list):
            yield (range_string, position, rule.priority)
class ConditionalFormatting(object):
    """Collection of conditional formatting rules, keyed by cell range."""

    def __init__(self):
        self.cf_rules = OrderedDict()  # range string -> list of Rule objects
        self.max_priority = 0          # highest priority handed out so far

    def add(self, range_string, cfRule):
        """Add a rule such as ColorScaleRule, FormulaRule or CellIsRule.

        The priority is assigned automatically (next unused value).
        """
        if not isinstance(cfRule, Rule):
            raise ValueError("Only instances of openpyxl.formatting.rule.Rule may be added")
        self.max_priority += 1
        cfRule.priority = self.max_priority
        self.cf_rules.setdefault(range_string, []).append(cfRule)

    def _fix_priorities(self):
        """Renumber every rule's priority into a dense 1..N sequence."""
        ordered = sorted(unpack_rules(self.cf_rules), key=lambda item: item[2])
        for new_priority, (range_string, rule_index, _) in enumerate(ordered, 1):
            self.cf_rules[range_string][rule_index].priority = new_priority
        self.max_priority = len(ordered)

    @deprecated("Always use Rule objects")
    def update(self, cfRules):
        pass

    @deprecated("Conditionl Formats are saved automatically")
    def setDxfStyles(self, wb):
        pass
# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
import random
import re
import subprocess
import sys
import xmlrunner
import unittest
from optparse import OptionParser
random.seed()

# Two CLI generations are tested: 1 = legacy verbs, 2 = new init/start CLI.
# verbs contains verb to version (denote old cli version 1)
volt_support_version = [1, 2]
volt_verbs = {'create': 1,
              'recover': 1,
              'rejoin': 1,
              'add': 1,
              'init': 2,
              'start': 2}
# Maps each python CLI verb to the action name expected on the java command line.
volt_verbs_mapping = {'create': 'create',
                      'recover': 'recover',
                      'rejoin': 'live rejoin',
                      'add': 'add',
                      'init': 'initialize',
                      'start': 'probe'}
# create all the options
class Opt:
    """One command-line option.

    pyname   -- option name on the python (voltdb wrapper) side
    javaname -- corresponding option name on the java command line
    datatype -- type of the option's value, or None for a bare flag
    ver      -- CLI generation the option belongs to (1 = legacy, 2 = new)
    """

    def __init__(self, pyname, javaname, datatype, ver):
        self.pyname, self.javaname = pyname, javaname
        self.datatype, self.ver = datatype, ver
# ---- Option instances ---------------------------------------------------
# for oldcli (ver=1)
admin = Opt('admin', 'adminport', str, 1)
client = Opt('client', 'port', str, 1)
externalinterface = Opt('externalinterface', 'externalinterface', str, 1)
http = Opt('http', 'httpport', str, 1)
internal = Opt('internal', 'internalport', str, 1)
internalinterface = Opt('internalinterface', 'internalinterface', str, 1)
publicinterface = Opt('publicinterface', 'publicinterface', str, 1)
replication = Opt('replication', 'replicationport', str, 1)
zookeeper = Opt('zookeeper', 'zkport', str, 1)
deployment = Opt('deployment', 'deployment', str, 1)
force = Opt('force', 'force', None, 1)
placementgroup = Opt('placement-group', 'placementgroup', str, 1)
host = Opt('host', 'host', str, 1)
licensefile = Opt('license', 'license', str, 1)
pause = Opt('pause', 'paused', None, 1)
# 'replica' should be immediately after verb
replica = Opt('replica', 'replica', None, 1)
# 'blocking' is only for rejoin, does not have corresponding java optional
# name; it changes verb 'live rejoin' to 'rejoin'
blocking = Opt('blocking', '', None, 1)
# for newcli only (ver=2)
mesh = Opt('host', 'mesh', str, 2)
config = Opt('config', 'deployment', str, 2)
voltdbroot = Opt('dir', 'voltdbroot', str, 2)
hostcount = Opt('count', 'hostcount', int, 2)
add = Opt('add', 'enableadd', None, 2)
# negative opt: deliberately unsupported, used to test error output
unknown = Opt('unknown', None, None, 0)

# Optional options accepted by each verb.
volt_opts = {'create': [admin, client, externalinterface, http, internal,
                        internalinterface, publicinterface, replication,
                        zookeeper, deployment, force, placementgroup, host,
                        licensefile, pause, replica],
             'recover': [admin, client, externalinterface, http, internal,
                         internalinterface, publicinterface, replication,
                         zookeeper, deployment, placementgroup, host,
                         licensefile, pause, replica],
             'rejoin': [admin, client, externalinterface, http, internal,
                        internalinterface, publicinterface, replication,
                        zookeeper, deployment, placementgroup, licensefile],
             'add': [admin, client, externalinterface, http, internal,
                     internalinterface, publicinterface, replication,
                     zookeeper, deployment, placementgroup, licensefile],
             'init': [config, voltdbroot, force],
             'start': [admin, client, externalinterface, http, internal,
                       internalinterface, publicinterface, replication,
                       zookeeper, hostcount, voltdbroot, placementgroup,
                       mesh, licensefile, pause, replica, add]
             }
# Options that must always appear for a verb.
volt_opts_mandatory = {'create': [],
                       'recover': [],
                       'rejoin': [host],
                       'add': [host],
                       'init': [],
                       'start': []
                       }
volt_opts_negative = [unknown]
# additional output cli (extra usage text printed for some verbs)
volt_verbs_output = {'create': ' [ CATALOG ]',
                     'recover': '',
                     'rejoin': '',
                     'add': '',
                     'init': '',
                     'start': ''
                     }
# some verbs will generate default opts to java command line
volt_opts_default = {
    'create': {placementgroup.javaname: '0', host.javaname: 'localhost:3021'},
    'recover': {placementgroup.javaname: '0', host.javaname: 'localhost:3021'},
    'rejoin': {placementgroup.javaname: '0'},
    'add': {placementgroup.javaname: '0'},
    'init': {},
    'start': {placementgroup.javaname: '0', mesh.javaname: "\"\""}
}
# regular expression for pre-processing the actual output before comparison:
# strips the java launcher boilerplate up to 'org.voltdb.VoltDB'
ignore = "^(Exec:|Run:) (?P<java_path>.+?)(java) (?P<java_opts>.+?) (-classpath) (?P<classpath>.+?) (org.voltdb.VoltDB)";
ignore_re = re.compile(ignore, re.X | re.M)
# override of environments (used by test_java_opts_override)
volt_override = {'VOLTDB_HEAPMAX': '3072',
                 'VOLTDB_OPTS': '-XX:disableGCHeuristics'}
# regular expression matching option names in --help output
option_name = "--([\-a-z]+)"
option_name_re = re.compile(option_name)
# python-only options to ignore when checking coverage; also skip 'blocking'
option_ignore = ['version', 'help', 'verbose', 'background', 'ignore', 'blocking']
class TestsContainer(unittest.TestCase):
    # Test methods are attached to this class dynamically by run_unit_test()
    # via setattr; longMessage keeps both the standard and custom messages.
    longMessage = True
def make_test_function(haddiff, description):
    """Build a TestCase method that fails (with *description*) when a diff
    was detected for the corresponding CLI invocation."""
    def _test(self):
        self.assertFalse(haddiff, description)
    return _test
def run_unit_test(verb, opts, expected_opts, reportout, expectedOut=None, expectedErr=None):
    """Run one voltdb CLI invocation, diff it against expectations, and
    register the outcome as a dynamically named test on TestsContainer.

    Returns True when a difference (i.e. a failure) was detected."""
    stdout, stderr = run_voltcli(verb, opts, reportout)
    haddiff, description = compare_result(stdout, stderr, volt_verbs_mapping[verb], expected_opts, reportout,
                                          expectedOut, expectedErr)
    # Attach the result as 'test: <verb> <opts...>' so unittest picks it up.
    setattr(TestsContainer, 'test: {0}'.format(verb + " " + " ".join(opts)), make_test_function(haddiff, description))
    return haddiff
# Execute the command.
def run_voltcli(verb, opts, reportout=None, cmd=['../../bin/voltdb'], mode=['--dry-run'], environ=None, cwd=None):
    """Execute the voltdb wrapper script and capture its output.

    NOTE(review): 'cmd' and 'mode' are mutable default arguments; they are
    only read here, never mutated, so the sharing is benign — but tuples
    would be safer.

    Returns (stdout, stderr); stderr is always None because stderr is merged
    into stdout below (stderr=subprocess.STDOUT)."""
    command = cmd + [verb] + mode + opts
    if reportout:
        reportout.write("Test python cli:\n\t" + " ".join([verb] + opts) + "\n")
    proc = subprocess.Popen(command,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            cwd=cwd,
                            env=environ)
    stdout, stderr = proc.communicate()
    return stdout, stderr
def compare_result(stdout, stderr, verb, opts, reportout, expectedOut=None, expectedErr=None):
    """Compare one CLI invocation's output against expectations.

    Exactly one comparison mode is used, in this order of precedence:
    exact stdout match (expectedOut), exact stderr match (expectedErr), or
    structural comparison of the generated java command line against the
    java-side verb and the *opts* dict ({javaname: value-or-None}).

    Returns (haddiff, description): haddiff is True on mismatch and the
    description is also appended to *reportout*.

    BUG FIXES vs. the original:
    - 'expectedOut + + "..."' (unary plus applied to a str) raised TypeError
      whenever the expectedOut-mismatch branch ran; the stray '+' is removed.
    - output_str.lstrip(verb) strips a *character set*, not the prefix
      string, and could eat leading characters of the first option (notably
      for multi-word verbs like "live rejoin"); replaced with slicing.
    - "doest not match" message typo corrected.
    """
    output_str = sanitize(stdout).strip()
    description = "Generate java command line:\n\t" + output_str + "\nTest Passed!\n\n"
    if expectedOut:
        haddiff = False
        if expectedOut != stdout:
            description = "Generate stdout:\n" + stdout + "\n" + "does not match expected:\n" + expectedOut + "\nTest Failed!\n\n"
            haddiff = True
        else:
            description = "Generate expected stdout:\n" + stdout + "Test Passed!\n\n"
        reportout.write(description)
        return haddiff, description
    if expectedErr:
        haddiff = False
        if stderr != expectedErr:
            haddiff = True
            description = "Generate stderr:\n" + stderr + "\n" + "does not match expected:\n" + expectedErr + "\nTest Failed!\n\n"
        else:
            description = "Generate expected stderr:\n" + stderr + "Test Passed!\n\n"
        reportout.write(description)
        return haddiff, description
    # Structural mode: the sanitized java command line must start with the verb.
    if output_str.find(verb) != 0:
        description = "Generate java command line:\n\t" + output_str + "\n" + "does not contain expected verb:\n" + verb + "\nTest Failed!\n\n"
        reportout.write(description)
        return True, description
    # Compare the remaining tokens as a set (order on the java line is free).
    output_tokens = output_str[len(verb):].split()
    expected_tokens = []
    for k, v in opts.items():
        if v:
            expected_tokens.extend([k, v])
        else:
            expected_tokens.append(k)
    if set(output_tokens) != set(expected_tokens):
        description = "Generate java command line:\n\t" + output_str + "\n" + "does not match expected options:\n" + " ".join(
            expected_tokens) + "\nTest Failed!\n\n"
        reportout.write(description)
        return True, description
    reportout.write(description)
    return False, description
def sanitize_replace(match):
    """re.sub callback used by sanitize().

    If the pattern has no capture groups the whole match is dropped;
    otherwise only the captured sub-spans are cut out of the matched text,
    preserving the literal parts of the pattern."""
    # If `ignore` pattern does not contain subgroups, remove
    # the whole match.
    if not match.re.groups:
        return ""
    # Otherwise, remove subgroups.
    spans = []
    group_start = match.start()
    for idx in range(match.re.groups):
        start, end = match.span(idx + 1)
        if start < end:
            # make offsets relative to the start of the whole match
            start -= group_start
            end -= group_start
            spans.append((end, start))
    # cut from the rightmost span leftwards so earlier offsets stay valid
    spans.sort()
    spans.reverse()
    text = match.group()
    last_cut = len(text)
    for end, start in spans:
        # clamp in case spans overlap a region already removed
        end = min(end, last_cut)
        if start >= end:
            continue
        text = text[:start] + text[end:]
        last_cut = start
    return text
def sanitize(text):
    """Strip the java launcher boilerplate (paths, JVM opts, classpath)
    matched by the module-level 'ignore' regex from *text*."""
    # NOTE(review): 'ignore' is a module-level regex *string*, so the
    # 'ignore is True' branch can never be taken as written — dead code
    # presumably inherited from a template where 'ignore' could be a bool.
    if ignore is True:
        return ""
    if not ignore:
        return text
    text = ignore_re.sub(sanitize_replace, text)
    return text
def gen_config(mandatory_opts, all_ops, count, expected_opts=None):
    """Build a CLI option list plus the expected java-side option dict.

    mandatory_opts -- Opt objects that must always be present
    all_ops        -- pool of optional Opt objects to sample from
    count          -- how many optional Opts to pick at random
    expected_opts  -- optional starting dict of expected java options
                      (e.g. a verb's defaults); a fresh dict is created
                      when omitted

    Returns (opts, expected_opts) where opts is the python argument list.

    BUG FIX: 'expected_opts' previously defaulted to a mutable {} that was
    mutated in place, so call sites omitting the argument (the negative-opt
    case in do_main) accumulated state across invocations.
    """
    if expected_opts is None:
        expected_opts = {}
    opts = []
    value = 1  # pseudo optional value assigned to value-taking options
    for opt in mandatory_opts + random.sample(all_ops, count):
        if not opt.datatype:
            # flag-style option: no value on either side
            arg = '--' + opt.pyname
            expected_opts[opt.javaname] = None
        else:
            arg = '--' + opt.pyname + '=' + str(value)
            expected_opts[opt.javaname] = str(value)
        value += 1
        opts.append(arg)
    return opts, expected_opts
# Test JAVA HEAP (VOLTDB_HEAPMAX) and Java Runtime Options(VOLTDB_OTPS) can be override
def test_java_opts_override(verb = 'start', reportout = None):
    """Verify that the VOLTDB_HEAPMAX and VOLTDB_OPTS environment variables
    are reflected in the generated JVM options.

    Returns True when an override did NOT take effect (i.e. a failure).
    NOTE(review): 'reportout' defaults to None but is written to
    unconditionally below, so callers must always pass a writable file.
    """
    haddiffs = False
    override_env = dict(os.environ.copy(), **volt_override)
    stdout, _ = run_voltcli(verb, [], environ=override_env)
    # Pull the JVM options section out of the launcher line.
    matched_java_opts = ignore_re.match(stdout).group('java_opts')
    reportout.write("Given: " + " ".join([k + '=' + v for k, v in volt_override.items()]) + "\n" +
                    "Got JVM Options: " + matched_java_opts + "\n")
    if 'VOLTDB_HEAPMAX' in volt_override:
        # VOLTDB_HEAPMAX should surface as -Xmx<value>m
        if '-Xmx{}m'.format(volt_override['VOLTDB_HEAPMAX']) in matched_java_opts:
            reportout.write("VOLTDB_HEAPMAX override sucessfully!\n")
        else:
            reportout.write("VOLTDB_HEAPMAX override failed!\n")
            haddiffs = True
    if 'VOLTDB_OPTS' in volt_override:
        # VOLTDB_OPTS should be passed through verbatim
        if volt_override['VOLTDB_OPTS'] in matched_java_opts:
            reportout.write("VOLTDB_OPTS override sucessfully!\n\n")
        else:
            reportout.write("VOLTDB_OPTS override failed!\n\n")
            haddiffs = True
    return haddiffs
def do_main():
    """Entry point: for every supported verb, check option coverage against
    --help, generate minimal/single/random/negative option configurations,
    and run the dynamically built unittest suite (XML results land in
    test-reports/). Python 2 only (print statements below)."""
    parser = OptionParser()
    parser.add_option("-o", "--report_file", dest="report_file",
                      default="./voltverbstest.report",
                      help="report output file")
    (options, args) = parser.parse_args()
    # generate output report: plain text
    reportout = open(options.report_file, 'w+')
    # test override of environment
    haddiffs = test_java_opts_override(reportout=reportout)
    try:
        for verb, version in volt_verbs.items():
            if not (version in volt_support_version):
                continue
            # test verb coverage: every option listed by --help must be
            # covered by our Opt tables (minus python-only options)
            stdout, stderr = run_voltcli(verb, [], mode=['--help'])
            # parse_help_message(stdout)
            available_opts = option_name_re.findall(stdout)
            covered_opts = [opt.pyname for opt in volt_opts_mandatory[verb] + volt_opts[verb]]
            untested_opts = set(available_opts) - set(option_ignore) - set(covered_opts)
            if untested_opts:
                description = "Uncovered option(s) for " + verb + " : [" + " ".join(untested_opts) + "]\n"
                reportout.write(description)
                haddiffs = True
            # generate the test cases
            ## generate minimal config (mandatory options only)
            opts, expected_opts = gen_config(volt_opts_mandatory[verb], volt_opts[verb], 0,
                                             volt_opts_default[verb].copy())
            haddiffs = run_unit_test(verb, opts, expected_opts, reportout) or haddiffs
            ## generate configs that each contain a single optional opt
            for opt in volt_opts[verb]:
                opts, expected_opts = gen_config(volt_opts_mandatory[verb] + [opt], [], 0,
                                                 volt_opts_default[verb].copy())
                haddiffs = run_unit_test(verb, opts, expected_opts, reportout) or haddiffs
            ## generate config containing a random subset of opts
            opts, expected_opts = gen_config(volt_opts_mandatory[verb], volt_opts[verb],
                                             random.randint(0, len(volt_opts[verb])), volt_opts_default[verb].copy())
            haddiffs = run_unit_test(verb, opts, expected_opts, reportout) or haddiffs
            ## generate config containing a negative (unsupported) opt and
            ## check the exact usage/error text
            opts, expected_opts = gen_config(volt_opts_mandatory[verb], volt_opts_negative, 1)
            haddiffs = run_unit_test(verb, opts, expected_opts, reportout,
                                     expectedOut="""Usage: voltdb {} [ OPTIONS ... ]{}\n\nvoltdb: error: no such option: --{}\n""".
                                     format(verb, volt_verbs_output[verb], volt_opts_negative[0].pyname)) or haddiffs
        unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
        # unittest.main(verbosity=2)
    finally:
        print "Summary report written to file://" + os.path.abspath(options.report_file)
        if haddiffs:
            sys.exit("One or more voltverbstest script failures or errors was detected.")
        else:
            print "All verb test covered and passed!"
# Script entry point.
if __name__ == "__main__":
    do_main()
# Copyright (C) 2013-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class TestResult(object):
    """Base class to record and report test results.

    Method record is to record the results of test case, and report
    method is to report the recorded results by a given reporter.
    """
    def record(self, parameter, result):
        # Subclasses must store *result* under *parameter*.
        raise NotImplementedError("Abstract Method:record.")

    def report(self, reporter, name):
        """Report the test results by reporter."""
        raise NotImplementedError("Abstract Method:report.")
class SingleStatisticTestResult(TestResult):
    """Test results for a test case tracking a single statistic.

    Results are accumulated per parameter and reported in sorted
    parameter order.
    """

    def __init__(self):
        super(SingleStatisticTestResult, self).__init__()
        self.results = dict()  # parameter -> list of recorded results

    def record(self, parameter, result):
        # Create the result list on first use, then append.
        self.results.setdefault(parameter, []).append(result)

    def report(self, reporter, name):
        """Report the test results by reporter, sorted by parameter."""
        reporter.start()
        for parameter in sorted(self.results.keys()):
            reporter.report(name, parameter, self.results[parameter])
        reporter.end()
class ResultFactory(object):
    """A factory to create an instance of TestResult."""

    def create_result(self):
        """Create an instance of TestResult."""
        raise NotImplementedError("Abstract Method:create_result.")
class SingleStatisticResultFactory(ResultFactory):
    """A factory to create an instance of SingleStatisticTestResult."""

    def create_result(self):
        # Each call returns a fresh, empty result container.
        return SingleStatisticTestResult()
################################################################
#
# menu.py provides terminal control capabilities to browse
# through a user-defined menu.
# It employs the Python curses library.
# An alternative (text-based) menu is also possible if the
# --no-curses is given in the command line options.
#
# Last modified:
#
# Tuesday June 22, 2010 by A. Mignone (mignone@ph.unito.it)
#
################################################################
import os, sys, traceback, time, string
have_curses = 1
for x in sys.argv: # avoid curses library with the --no-curses option.
if (x == "--no-curses"):
have_curses = 0
if (have_curses == 1):
import curses, curses.textpad
#####################################################
#
# The class gb contains global variables and
# pointers to ease up accessibility in coding the
# functions of this module
#
#####################################################
class gb:
    # Module-wide shared state: the curses window handle plus menu geometry,
    # kept as class attributes for easy access from every function here.
    scrn = None  # a pointer to the curses window object (None until init)
    rbeg = 6     # starting row of the menu body
    cbeg = 1     # starting column
    row = 0      # the currently highlighted row
    ncol = 1     # number of columns in the menu (1 or 2)
    csep = 30    # column separation for two-column menus
    title = 'No Title'
    subtitle = ''
    init = 0     # 0 means curses.initscr() has not been called yet
#####################################################
#
# Sets the title (but does not print it)
# that will be used by ShowMenu
#
#####################################################
def SetTitle(title, subtitle=''):
    """Store the title/subtitle that the next ShowMenu() call will display."""
    gb.title, gb.subtitle = title, subtitle
#####################################################
#
# Print a message on the screen
#
#####################################################
def Print (message, sleep=0.7,row=1):
    """Display *message* for *sleep* seconds, via curses when active,
    otherwise via the plain-terminal fallback."""
    for x in sys.argv:  # avoid curses library with the --no-curses option.
        # NOTE(review): the fallback only triggers while curses is not yet
        # initialized (gb.init == 0); the commented alternative below would
        # also cover that case without --no-curses — confirm intent.
        # if (x == "--no-curses" or gb.init == 0):
        if (x == "--no-curses" and gb.init == 0):
            Print_no_curses(message,sleep,row)
            return
    if (gb.scrn == None): return  # need this when using test_pluto.py script
    if (row == 1): gb.scrn.erase()  # row 1 starts a fresh screen
    gb.scrn.addstr(row,1,message, curses.A_BOLD)
    gb.scrn.refresh()
    time.sleep(sleep)
#####################################################
#
# Prompt a message, wait for any key to be pressed
#
#####################################################
def Prompt(message):
    """Show *message* and block until any key is pressed."""
    # Fall back to the plain-terminal implementation under --no-curses.
    for arg in sys.argv:
        if arg == "--no-curses":
            Prompt_no_curses(message)
            return
    gb.scrn.erase()
    gb.scrn.addstr(1, 1, message, curses.A_BOLD)
    gb.scrn.refresh()
    gb.scrn.getch()
#####################################################
#
# Show menu using entries (1st column) and
# default (2nd column).
# row is an optional argument giving the line
# to be highlighted (default is first line)
#
#####################################################
def ShowMenu(entries, default, row=0):
    """Redraw the whole menu: title, optional subtitle, all entries (and
    their defaults in two-column mode), then highlight the current row.

    When *row* is 0 or gb.row is out of range, the highlight resets to the
    first entry; otherwise the existing gb.row is kept."""
    # display title
    gb.scrn.clear()
    gb.scrn.addstr(0,0,">> "+gb.title+" <<", curses.A_BOLD)
    gb.rbeg = 3
    if (len(gb.subtitle) > 1):
        # subtitle takes a line, so the menu body starts lower
        gb.scrn.addstr(2,0, gb.subtitle, curses.A_UNDERLINE)
        gb.rbeg = 6
    lastrow = gb.rbeg
    for ln in entries[0:len(entries)]:
        indx = entries.index(ln)
        gb.scrn.addstr(lastrow, gb.cbeg, ln)
        if (gb.ncol == 2):
            gb.scrn.addstr(lastrow, gb.cbeg + gb.csep, default[indx])
        lastrow += 1
    if (row == 0 or gb.row < gb.rbeg or gb.row > lastrow):
        gb.row = gb.rbeg  # initial position
    # re-draw the highlighted line with reverse/underline attributes
    n = gb.row - gb.rbeg
    gb.scrn.addstr(gb.row, gb.cbeg , entries[n], curses.A_REVERSE)
    if (gb.ncol == 2): gb.scrn.addstr(gb.row, gb.cbeg+gb.csep, default[n], curses.A_UNDERLINE)
    gb.scrn.refresh()
#####################################################
#
# Allow the cursor to move up and down in the list
#
#####################################################
def UpDown(entries, default, inc):
    """Move the highlighted row up (inc=-1) or down (inc=+1) one line,
    ignoring moves past either end of the menu."""
    tmp = gb.row + inc
    # ignore attempts to go off the edge of menu
    if tmp >= gb.rbeg and tmp < (gb.rbeg + len(entries)):
        # unhighlight the current line by rewriting it in default attributes
        gb.scrn.addstr(gb.row, gb.cbeg , entries[gb.row-gb.rbeg])
        if (gb.ncol == 2): gb.scrn.addstr(gb.row, gb.cbeg + gb.csep, default[gb.row-gb.rbeg])
        # highlight the previous/next line
        gb.row = tmp
        c1 = entries[gb.row-gb.rbeg]
        if (gb.ncol == 2): c2 = default[gb.row-gb.rbeg]
        gb.scrn.addstr(gb.row, gb.cbeg , c1, curses.A_REVERSE)
        if (gb.ncol == 2): gb.scrn.addstr(gb.row, gb.cbeg + gb.csep, c2, curses.A_UNDERLINE)
        gb.scrn.refresh()
#####################################################
#
# Allow left/right keys to switch options in the
# second column and change default values
#
#####################################################
def LeftRight(entries, default, options, inc):
    """Cycle the highlighted row's default value through its option list,
    wrapping around at either end, and redraw that row."""
    i = gb.row - gb.rbeg
    choices = options[i]
    idx = choices.index(default[i])
    if inc > 0:
        idx += 1
    elif inc < 0:
        idx -= 1
    # wrap around at either end of the option list
    if idx < 0:
        idx = len(choices) - 1
    elif idx == len(choices):
        idx = 0
    default[i] = choices[idx]
    gb.scrn.addstr(gb.row, gb.cbeg, entries[i], curses.A_REVERSE)
    gb.scrn.addstr(gb.row, gb.cbeg + gb.csep, default[i], curses.A_UNDERLINE)
    gb.scrn.clrtoeol()
    gb.scrn.refresh()
#####################################################
#
# Browse a menu with entries (1st column) and
# default (2nd column, optional)
# Note: with Python > 2.5 we had some troubles
# initializing curses more than once.
# For this reason we prefer to initialize
# curses only at the beginning.
#
#####################################################
def Browse(entries, default=[], options=[]):
    """Interactive menu loop; returns the selected entry string on Enter.

    Keys: up/down (or u/d) move, left/right (or l/r) cycle the second
    column's value, Enter selects, 'q' quits the whole program.
    NOTE(review): 'default' and 'options' are mutable default arguments;
    they are read-only when the defaults are used, so sharing is benign."""
    gb.ncol = 1
    if (len(default) > 0): gb.ncol = 2
    for x in sys.argv:  # avoid curses library with the --no-curses option.
        if (x == "--no-curses"):
            return Browse_no_curses(entries, default, options)
    #
    # window setup will be done just once.
    #
    if (gb.init == 0):
        gb.scrn = curses.initscr()
        curses.noecho()
        curses.cbreak()
        gb.scrn.keypad(1)
        gb.init = 1
    ShowMenu(entries, default)
    while True:
        # get user command
        c = gb.scrn.getch()
        try: cc = chr(c)
        except: cc = 0
        if (c == 10):  # Enter (newline): accept the highlighted entry
            # RestoreScreen()
            return entries[gb.row-gb.rbeg]
        elif (cc == 'q'):
            RestoreScreen()
            # curses.reset_shell_mode()
            sys.exit()
        elif (cc == 'u' or c == curses.KEY_UP): UpDown(entries, default, -1)
        elif (cc == 'd' or c == curses.KEY_DOWN): UpDown(entries, default, 1)
        elif (gb.ncol > 1):
            # left/right only make sense for two-column menus
            if (cc == 'r' or c == curses.KEY_RIGHT):
                LeftRight(entries, default, options, 1)
            elif (cc == 'l' or c == curses.KEY_LEFT):
                LeftRight(entries, default, options, -1)
#####################################################
#
# Similar to Browse, but allow the user to directly
# input the default values by a reading a string
#
#####################################################
def Insert(entries, default):
    """Like Browse(), but the second column holds free text: right-arrow
    (or 'r') prompts for a replacement value for the highlighted row and
    stores it into *default* in place. Enter accepts, 'q' quits."""
    gb.ncol = 2
    for x in sys.argv:  # avoid curses library with the --no-curses option.
        if (x == "--no-curses"):
            return Insert_no_curses(entries, default)
    #
    # window setup will be done just once.
    #
    if (gb.init == 0):
        gb.scrn = curses.initscr()
        curses.noecho()
        curses.cbreak()
        gb.scrn.keypad(1)
        gb.init = 1
    ShowMenu(entries, default)
    while True:
        c = gb.scrn.getch()  # get user command
        try: cc = chr(c)
        except: cc = 0
        if (c == 10):  # Enter: accept the current values
            return
        elif (cc == 'q'):
            RestoreScreen()
            sys.exit()
        elif (cc == 'u' or c == curses.KEY_UP): UpDown(entries, default, -1)
        elif (cc == 'd' or c == curses.KEY_DOWN): UpDown(entries, default, 1)
        elif (cc == 'r' or c == curses.KEY_RIGHT):
            # echo mode while reading the replacement string
            curses.echo()
            gb.scrn.addstr(gb.row,gb.cbeg+gb.csep,'                ')
            gb.scrn.addstr(gb.row,gb.cbeg+gb.csep,'NAME or VALUE > ',curses.A_UNDERLINE)
            new_name = gb.scrn.getstr()
            i = gb.row-gb.rbeg
            default.pop(i)
            default.insert(i,new_name)
            curses.noecho()
            gb.scrn.clrtoeol()
            ShowMenu(entries,default, gb.row)
#####################################################
#
# Restore screen back to shell functionality.
# Note that RestoreScreen should be followed by
# sys.exit() in order to avoid troubleshooting
# observed with Python > 2.5
#
#####################################################
def RestoreScreen():
    """Undo the curses terminal changes; no-op when --no-curses was given.

    Callers are expected to follow this with sys.exit() (see the note at
    the original definition about Python > 2.5 re-initialization issues)."""
    if "--no-curses" in sys.argv:
        return
    curses.reset_shell_mode()
    curses.nocbreak()
    gb.scrn.keypad(0)
    curses.echo()
    curses.endwin()
# NOTE(review): dead code — __name__ is never '__browse__' (it is the module
# name on import, '__main__' on direct execution), and 'browse' is not
# defined in this module (the function is 'Browse'). Presumably a leftover;
# confirm before removing or repairing.
if __name__ == '__browse__':
    try:
        browse()
    except:
        RestoreScreen()
        # print error message re exception
        traceback.print_exc()
#####################################################
#
# Return 1 if curses have been activated
#
#####################################################
def CursesIsActive():
    """Return gb.init: nonzero once curses has been initialized."""
    return gb.init
#####################################################
#
# The next set of functions replicate the previous
# ones without using curses library.
# They are intended to provide a simpler way select
# options through a terminal-based replacement.
#
#####################################################
def Print_no_curses(message, sleep, row):
    """Plain-terminal replacement for Print(): echo *message* and pause
    for *sleep* seconds.

    The *row* argument is accepted for interface parity with Print() but
    is ignored here.

    FIX: removed a dead 'global xglb' declaration — that name is never
    defined or used anywhere in this module.
    """
    print(message)
    time.sleep(sleep)
######################################################
def Prompt_no_curses (message):
    """Plain-terminal replacement for Prompt(): clear the screen, show
    *message*, and block until the user presses Enter.

    NOTE(review): raw_input() is Python 2 only.
    """
    os.system("clear")
    print(message)
    q = raw_input()
######################################################
def Browse_no_curses(entries, default, options):
    """Plain-terminal replacement for Browse(): a numbered-list prompt loop.

    Pressing Enter on the choice prompt ends the loop. In single-column
    menus a valid number returns that entry; in two-column menus it then
    asks which option value to assign to the chosen row (in place).

    NOTE(review): Python 2 module (raw_input); the multi-argument
    print(...) calls below therefore print tuples — confirm the intended
    output format before modernizing.
    """
    q = "c"  # anything non-empty so the loop runs at least once
    while (q != ''):
        os.system("clear")
        print(">> ",gb.title+"\n")
        # numbered menu, with the current default value in column two
        for x in entries:
            i = entries.index(x)
            if (len(default) > 0):
                print(str(i).rjust(2),') ',x.ljust(28), default[i])
            else:
                print(str(i).rjust(2),') ',x.ljust(28))
        print (" ")
        q = raw_input(">> choice ? ")
        if (q == ''):
            print("Enter")
        else:
            try:
                q = int(q)
                # single-column menu: a valid number selects and returns
                if (len(default) == 0): return entries[q]
            except:
                continue
            # two-column menu: list the available values for the chosen row
            opt_list = ''
            for x in options[q]:
                i = options[q].index(x)
                opt_list += repr(i)+") "+x+" "
            print("\n"+entries[q]+": ",opt_list)
            c = raw_input(">> choice ["+default[q]+"] ? ")
            try:
                c = int(c)
            except:
                continue
            default[q] = options[q][c]
    return
######################################################
def Insert_no_curses(entries, names):
    """Plain-terminal replacement for Insert(): pick a row by number, then
    type a replacement value for it. *names* is modified in place; Enter on
    the choice prompt ends the loop.

    NOTE(review): Python 2 module (raw_input); multi-argument print(...)
    calls print tuples.
    """
    q = "c"  # anything non-empty so the loop runs at least once
    while (q != ''):
        os.system("clear")
        print (">> ",gb.title+"\n")
        for x in entries:
            i = entries.index(x)
            print(str(i).rjust(2),') ',names[i].ljust(28))
        print (" ")
        q = raw_input(">> choice ? ")
        if (q == ''):
            print("Enter")
        else:
            try:
                q = int(q)
            except:
                continue
            newname = raw_input(">> new name ? ")
            names[q] = newname
    return
#
# SPDX-License-Identifier: ISC
#
# Author: Ulf Magnusson
# https://github.com/ulfalizer/Kconfiglib
# This is Kconfiglib, a Python library for scripting, debugging, and extracting
# information from Kconfig-based configuration systems. To view the
# documentation, run
#
# $ pydoc kconfiglib
#
# or, if you prefer HTML,
#
# $ pydoc -w kconfiglib
#
# The examples/ subdirectory contains examples, to be run with e.g.
#
# $ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
#
# Look in testsuite.py for the test suite.
"""
Kconfiglib is a Python library for scripting and extracting information from
Kconfig-based configuration systems. Features include the following:
- Symbol values and properties can be looked up and values assigned
programmatically.
- .config files can be read and written.
- Expressions can be evaluated in the context of a Kconfig configuration.
- Relations between symbols can be quickly determined, such as finding all
symbols that reference a particular symbol.
- Highly compatible with the scripts/kconfig/*conf utilities. The test suite
automatically compares outputs between Kconfiglib and the C implementation
for a large number of cases.
For the Linux kernel, scripts are run using
$ make scriptconfig [ARCH=<arch>] SCRIPT=<path to script> [SCRIPT_ARG=<arg>]
Using the 'scriptconfig' target ensures that required environment variables
(SRCARCH, ARCH, srctree, KERNELVERSION, etc.) are set up correctly.
Scripts receive the name of the Kconfig file to load in sys.argv[1]. As of
Linux 4.1.0-rc5, this is always "Kconfig" from the kernel top-level directory.
If an argument is provided with SCRIPT_ARG, it appears as sys.argv[2].
To get an interactive Python prompt with Kconfiglib preloaded and a Config
object 'c' created, run
$ make iscriptconfig [ARCH=<arch>]
Kconfiglib supports both Python 2 and Python 3. For (i)scriptconfig, the Python
interpreter to use can be passed in PYTHONCMD, which defaults to 'python'. PyPy
works well too, and might give a nice speedup for long-running jobs.
The examples/ directory contains short example scripts, which can be run with
e.g.
$ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
or
$ make scriptconfig SCRIPT=Kconfiglib/examples/help_grep.py SCRIPT_ARG=kernel
testsuite.py contains the test suite. See the top of the script for how to run
it.
Credits: Written by Ulf "Ulfalizer" Magnusson
Send bug reports, suggestions and other feedback to ulfalizer a.t Google's
email service. Don't wrestle with internal APIs. Tell me what you need and I
might add it in a safe way as a client API instead."""
import os
import platform
import re
import sys
# File layout:
#
# Public classes
# Public functions
# Internal classes
# Internal functions
# Internal global constants
# Line length: 79 columns
#
# Public classes
#
class Config(object):
"""Represents a Kconfig configuration, e.g. for i386 or ARM. This is the
set of symbols and other items appearing in the configuration together with
their values. Creating any number of Config objects -- including for
different architectures -- is safe; Kconfiglib has no global state."""
#
# Public interface
#
def __init__(self, filename="Kconfig", base_dir=None, print_warnings=True,
             print_undef_assign=False):
    """Creates a new Config object, representing a Kconfig configuration.
    Raises Kconfig_Syntax_Error on syntax errors.

    filename (default: "Kconfig"): The base Kconfig file of the
       configuration. For the Linux kernel, you'll probably want "Kconfig"
       from the top-level directory, as environment variables will make
       sure the right Kconfig is included from there
       (arch/<architecture>/Kconfig). If you are using Kconfiglib via 'make
       scriptconfig', the filename of the base Kconfig file will be in
       sys.argv[1].

    base_dir (default: None): The base directory relative to which 'source'
       statements within Kconfig files will work. For the Linux kernel this
       should be the top-level directory of the kernel tree. $-references
       to existing environment variables will be expanded.

       If None (the default), the environment variable 'srctree' will be
       used if set, and the current directory otherwise. 'srctree' is set
       by the Linux makefiles to the top-level kernel directory. A default
       of "." would not work with an alternative build directory.

    print_warnings (default: True): Set to True if warnings related to this
       configuration should be printed to stderr. This can be changed later
       with Config.set_print_warnings(). It is provided as a constructor
       argument since warnings might be generated during parsing.

    print_undef_assign (default: False): Set to True if informational
       messages related to assignments to undefined symbols should be
       printed to stderr for this configuration. Can be changed later with
       Config.set_print_undef_assign()."""

    # The set of all symbols, indexed by name (a string)
    self.syms = {}
    # Python 2/3 compatibility hack. This is the only one needed.
    # (Python 2 dicts grow a lazy itervalues(); Python 3 values() is a view.)
    self.syms_iter = self.syms.values if sys.version_info[0] >= 3 else \
                     self.syms.itervalues

    # The set of all defined symbols in the configuration in the order they
    # appear in the Kconfig files. This excludes the special symbols n, m,
    # and y as well as symbols that are referenced but never defined.
    self.kconfig_syms = []

    # The set of all named choices (yes, choices can have names), indexed
    # by name (a string)
    self.named_choices = {}

    # Lists containing all choices, menus and comments in the configuration
    self.choices = []
    self.menus = []
    self.comments = []

    def register_special_symbol(type_, name, val):
        # Helper: build an always-defined Symbol with a fixed cached value
        # and register it in self.syms.
        sym = Symbol()
        sym.is_special_ = True
        sym.is_defined_ = True
        sym.config = self
        sym.name = name
        sym.type = type_
        sym.cached_val = val
        self.syms[name] = sym
        return sym

    # The special symbols n, m and y, used as shorthand for "n", "m" and
    # "y"
    self.n = register_special_symbol(TRISTATE, "n", "n")
    self.m = register_special_symbol(TRISTATE, "m", "m")
    self.y = register_special_symbol(TRISTATE, "y", "y")
    # DEFCONFIG_LIST uses this
    register_special_symbol(STRING, "UNAME_RELEASE", platform.uname()[2])

    # The symbol with "option defconfig_list" set, containing a list of
    # default .config files
    self.defconfig_sym = None

    # See Symbol.get_(src)arch()
    self.arch = os.environ.get("ARCH")
    self.srcarch = os.environ.get("SRCARCH")

    # If you set CONFIG_ in the environment, Kconfig will prefix all symbols
    # with its value when saving the configuration, instead of using the default, "CONFIG_".
    self.config_prefix = os.environ.get("CONFIG_")
    if self.config_prefix is None:
        self.config_prefix = "CONFIG_"

    # See Config.__init__(). We need this for get_defconfig_filename().
    self.srctree = os.environ.get("srctree")
    if self.srctree is None:
        self.srctree = "."

    self.filename = filename
    self.base_dir = self.srctree if base_dir is None else \
                    os.path.expandvars(base_dir)

    # The 'mainmenu' text
    self.mainmenu_text = None

    # The filename of the most recently loaded .config file
    self.config_filename = None
    # The textual header of the most recently loaded .config, uncommented
    self.config_header = None

    self.print_warnings = print_warnings
    self.print_undef_assign = print_undef_assign
    # Warnings collected by the most recent load_config() call
    self._warnings = []

    # For parsing routines that stop when finding a line belonging to a
    # different construct, these holds that line and the tokenized version
    # of that line. The purpose is to avoid having to re-tokenize the line,
    # which is inefficient and causes problems when recording references to
    # symbols.
    self.end_line = None
    self.end_line_tokens = None

    # See the comment in _parse_expr().
    self._cur_item = None
    self._line = None
    self._filename = None
    self._linenr = None
    self._transform_m = None

    # Parse the Kconfig files
    self.top_block = []
    self._parse_file(filename, None, None, None, self.top_block)

    # Build Symbol.dep for all symbols
    self._build_dep()
def get_arch(self):
    """Return the value that the environment variable ARCH held when this
    Config was created, or None if ARCH was unset. For the Linux kernel
    this is the architecture being built for, e.g. "i386" or "mips"."""
    return self.arch
def get_srcarch(self):
    """Return the value that the environment variable SRCARCH held when
    this Config was created, or None if SRCARCH was unset. For the kernel
    this names the arch/ subdirectory with architecture-specific code."""
    return self.srcarch
def get_srctree(self):
    """Return the value that the environment variable srctree held when
    this Config was created, or None if srctree was undefined. srctree
    points at the source directory when building in a separate directory."""
    return self.srctree
def get_base_dir(self):
    """Return the base directory that 'source' statements are resolved
    against; see the base_dir argument of Config.__init__()."""
    return self.base_dir
def get_kconfig_filename(self):
    """Return the filename of the base Kconfig file this configuration
    was loaded from."""
    return self.filename
def get_config_filename(self):
    """Return the filename of the most recently loaded .config file, or
    None if no configuration has been loaded yet."""
    return self.config_filename
def get_config_header(self):
    """Return the uncommented textual header of the .config most recently
    loaded with load_config(), or None if no .config has been loaded or
    the most recently loaded one had no header.

    The header consists of all leading lines that start with "#" and are
    not of the form "# CONFIG_FOO is not set"."""
    return self.config_header
def get_mainmenu_text(self):
    """Return the 'mainmenu' text with $-references to symbols expanded to
    symbol values, or None if the configuration has no 'mainmenu'
    statement."""
    if self.mainmenu_text is None:
        return None
    return self._expand_sym_refs(self.mainmenu_text)
def get_defconfig_filename(self):
    """Return the name of the defconfig file: the first existing file in
    the list given on the symbol carrying 'option defconfig_list'.
    $-references to symbols are expanded ("$FOO bar" -> "foo bar" if FOO
    has the value "foo"). Returns None if there is no defconfig file.
    Setting 'option defconfig_list' on multiple symbols currently results
    in undefined behavior.

    If the environment variable 'srctree' was set when the Config was
    created, the lookup first happens relative to that directory and only
    then in the current directory; see Config.__init__().

    WARNING: scripts/kconfig/Makefile sometimes passes
    --defconfig=<defconfig> to the C implementation (e.g. for
    'make defconfig'), which overrides the 'option defconfig_list' symbol,
    so this function's result might not match what 'make defconfig' would
    actually use."""
    if self.defconfig_sym is None:
        return None
    for fname, cond_expr in self.defconfig_sym.def_exprs:
        if self._eval_expr(cond_expr) != "y":
            continue
        fname = self._expand_sym_refs(fname)
        # Look under $srctree first. os.path.join() is deliberately not
        # used: an absolute 'fname' would make it discard $srctree.
        under_srctree = os.path.normpath(self.srctree + "/" + fname)
        if os.path.exists(under_srctree):
            return under_srctree
        if os.path.exists(fname):
            return fname
    return None
def get_symbol(self, name):
    """Return the symbol named 'name', or None if no such symbol appears
    in the configuration. The shorthand conf[name] does the same but
    raises KeyError for missing symbols."""
    try:
        return self.syms[name]
    except KeyError:
        return None
def __getitem__(self, name):
    """Return the symbol named 'name'; raise KeyError if the symbol is not
    part of the configuration."""
    return self.syms[name]
def get_symbols(self, all_symbols=True):
    """Return a list of symbols from the configuration.

    all_symbols (default: True): With True, every symbol -- including
      special and merely-referenced (undefined) symbols -- is included, in
      an undefined order. With False, only symbols actually defined in the
      Kconfig files are returned, in definition order; iterating the
      Config object itself ('for sym in config:') is equivalent to
      get_symbols(False)."""
    if all_symbols:
        return list(self.syms.values())
    return self.kconfig_syms
def __iter__(self):
    """Iterate over all defined symbols, in Kconfig definition order:

      for sym in conf:
          ...

    Referenced-but-undefined symbols and the special symbols n, m, and y
    are excluded; use config.get_symbols() to include them."""
    return iter(self.kconfig_syms)
def get_choices(self):
    """Return every choice statement in the configuration, in the order
    they appear in the Kconfig files."""
    return self.choices
def get_menus(self):
    """Return every menu in the configuration, in the order they appear in
    the Kconfig files."""
    return self.menus
def get_comments(self):
    """Return every comment in the configuration, in the order they appear
    in the Kconfig files."""
    return self.comments
def get_top_level_items(self):
    """Return the items (symbols, menus, choices, and comments) at the top
    level of the configuration -- i.e. everything not nested inside a menu
    or choice -- in configuration order."""
    return self.top_block
def load_config(self, filename, replace=True):
    """Loads symbol values from a file in the familiar .config format.
    Equivalent to calling Symbol.set_user_value() to set each of the
    values.

    "# CONFIG_FOO is not set" within a .config file is treated specially
    and sets the user value of FOO to 'n'. The C implementation works the
    same way.

    filename: The .config file to load. $-references to existing
      environment variables will be expanded. For scripts to work even when
      an alternative build directory is used with the Linux kernel, you
      need to refer to the top-level kernel directory with "$srctree".

    replace (default: True): True if the configuration should replace the
      old configuration; False if it should add to it.

    Returns a list of warnings (hopefully empty)."""
    self._warnings = []
    # Regular expressions for parsing .config files. The prefix is escaped
    # with re.escape() so that a symbol prefix containing regex
    # metacharacters (e.g. set via the CONFIG_ environment variable) is
    # matched literally instead of being interpreted as a pattern.
    _set_re_match = re.compile(
        r"{}(\w+)=(.*)".format(re.escape(self.config_prefix))).match
    _unset_re_match = re.compile(
        r"# {}(\w+) is not set".format(re.escape(self.config_prefix))).match

    # Put this first so that a missing file doesn't screw up our state
    filename = os.path.expandvars(filename)
    line_feeder = _FileFeed(filename)

    self.config_filename = filename

    #
    # Read header
    #

    def is_header_line(line):
        # A header line is a comment that is not "# CONFIG_FOO is not set"
        return line is not None and line.startswith("#") and \
               not _unset_re_match(line)

    self.config_header = None
    line = line_feeder.peek_next()
    if is_header_line(line):
        self.config_header = ""
        while is_header_line(line_feeder.peek_next()):
            # Strip the leading "#" but keep the rest of the line
            self.config_header += line_feeder.get_next()[1:]
        # Remove trailing newline
        if self.config_header.endswith("\n"):
            self.config_header = self.config_header[:-1]

    #
    # Read assignments. Hotspot for some workloads.
    #

    def warn_override(filename, linenr, name, old_user_val, new_user_val):
        self._warn('overriding the value of {0}. '
                   'Old value: "{1}", new value: "{2}".'
                   .format(name, old_user_val, new_user_val),
                   filename, linenr)

    # Invalidate everything to keep things simple. It might be possible to
    # improve performance for the case where multiple configurations are
    # loaded by only invalidating a symbol (and its dependent symbols) if
    # the new user value differs from the old. One complication would be
    # that symbols not mentioned in the .config must lose their user value
    # when replace = True, which is the usual case.
    if replace:
        self.unset_user_values()
    else:
        self._invalidate_all()

    while 1:
        line = line_feeder.get_next()
        if line is None:
            return self._warnings

        line = line.rstrip()

        set_match = _set_re_match(line)
        if set_match:
            name, val = set_match.groups()

            if val.startswith('"'):
                if len(val) < 2 or val[-1] != '"':
                    _parse_error(line, "malformed string literal",
                                 line_feeder.filename, line_feeder.linenr)
                # Strip quotes and remove escapings. The unescaping
                # procedure should be safe since " can only appear as \"
                # inside the string.
                val = val[1:-1].replace('\\"', '"').replace("\\\\", "\\")

            if name in self.syms:
                sym = self.syms[name]
                if sym.user_val is not None:
                    warn_override(line_feeder.filename, line_feeder.linenr,
                                  name, sym.user_val, val)

                if sym.is_choice_sym:
                    user_mode = sym.parent.user_mode
                    if user_mode is not None and user_mode != val:
                        self._warn("assignment to {0} changes mode of "
                                   'containing choice from "{1}" to "{2}".'
                                   .format(name, val, user_mode),
                                   line_feeder.filename,
                                   line_feeder.linenr)

                sym._set_user_value_no_invalidate(val, True)
            else:
                if self.print_undef_assign:
                    _stderr_msg('note: attempt to assign the value "{0}" '
                                "to the undefined symbol {1}."
                                .format(val, name),
                                line_feeder.filename, line_feeder.linenr)
        else:
            unset_match = _unset_re_match(line)
            if unset_match:
                name = unset_match.group(1)
                if name in self.syms:
                    sym = self.syms[name]
                    if sym.user_val is not None:
                        warn_override(line_feeder.filename,
                                      line_feeder.linenr,
                                      name, sym.user_val, "n")

                    sym._set_user_value_no_invalidate("n", True)
def write_config(self, filename, header=None):
    """Write out symbol values in the familiar .config format. Kconfiglib
    keeps the output identical to what the C implementation would produce,
    down to whitespace, which eases testing.

    filename: Destination filename for the configuration.

    header (default: None): Text placed at the top of the file with every
      line commented out automatically; None writes no header."""
    # Reset the per-symbol "written already" bookkeeping used while
    # emitting the configuration.
    for sym in self.syms_iter():
        sym.already_written = False

    with open(filename, "w") as f:
        if header is not None:
            f.write(_comment(header) + "\n")

        out_lines = []
        _make_block_conf(self.top_block, out_lines.append)
        f.write("\n".join(out_lines) + "\n")
def eval(self, s):
    """Return the tristate value of the expression 's' (given as a string)
    evaluated in the context of the configuration. Raises
    Kconfig_Syntax_Error on syntax errors in 's'.

    Example: with FOO and BAR tristate symbols where at least one is "y",
    config.eval("y && (FOO || BAR)") => "y"

    The result is always a tristate value; use Symbol.get_value() for
    non-bool/non-tristate symbols. Evaluation matches conditional
    expressions in the configuration and the C implementation: "m" and m
    are rewritten to '"m" && MODULES' and 'm && MODULES', and "m" is
    promoted to "y" when running without modules. Syntax checking is
    somewhat lax, mirroring the C implementation."""
    tokens = self._tokenize(s, True)
    # No enclosing symbol/choice; 's' doubles as the "line" for messages
    parsed = self._parse_expr(tokens, None, s)
    return self._eval_expr(parsed)
def unset_user_values(self):
    """Reset the values of every symbol, as if neither
    Config.load_config() nor Symbol.set_user_value() had ever been
    called."""
    for symbol in self.syms_iter():
        symbol._unset_user_value_no_recursive_invalidate()
def set_print_warnings(self, print_warnings):
    """Control whether warnings for this configuration (e.g. attempts to
    assign illegal values via Symbol.set_user_value()) go to stderr.

    print_warnings: True to print warnings."""
    self.print_warnings = print_warnings
def set_print_undef_assign(self, print_undef_assign):
    """Control whether informational messages about assignments to
    undefined symbols are printed to stderr for this configuration.

    print_undef_assign: True to print such messages."""
    self.print_undef_assign = print_undef_assign
def __str__(self):
    """Return a human-readable summary of the Config's settings and
    state."""
    arch = "(not set)" if self.arch is None else self.arch
    srcarch = "(not set)" if self.srcarch is None else self.srcarch
    loaded = "(no .config loaded)" if self.config_filename is None else \
             self.config_filename
    return _lines("Configuration",
                  "File : " + self.filename,
                  "Base directory : " + self.base_dir,
                  "Value of $ARCH at creation time : " + arch,
                  "Value of $SRCARCH at creation time : " + srcarch,
                  "Source tree (derived from $srctree;",
                  "defaults to '.' if $srctree isn't set) : " +
                  self.srctree,
                  "Most recently loaded .config : " + loaded,
                  "Print warnings : " +
                  BOOL_STR[self.print_warnings],
                  "Print assignments to undefined symbols : " +
                  BOOL_STR[self.print_undef_assign])
#
# Private methods
#
#
# Kconfig parsing
#
def _parse_file(self, filename, parent, deps, visible_if_deps, block):
    """Parse the Kconfig file 'filename' and append the Items in it (and
    in any file it sources) to the list 'block'. See _parse_block() for
    the meaning of the parameters."""
    feed = _FileFeed(filename)
    self._parse_block(feed, None, parent, deps, visible_if_deps, block)
def _parse_block(self, line_feeder, end_marker, parent, deps,
                 visible_if_deps, block):
    """Parses a block, which is the contents of either a file or an if,
    menu, or choice statement. Appends the Items to the list passed in the
    'block' parameter.

    line_feeder: A _FileFeed instance feeding lines from a file. The
      Kconfig language is line-based in practice.

    end_marker: The token that ends the block, e.g. T_ENDIF ("endif") for
      ifs. None for files.

    parent: The enclosing menu or choice, or None if we're at the top
      level.

    deps: Dependencies from enclosing menus, choices and ifs.

    visible_if_deps (default: None): 'visible if' dependencies from
      enclosing menus.

    block: The list to add items to."""
    while 1:
        # Do we already have a tokenized line that we determined wasn't
        # part of whatever we were parsing earlier? See comment in
        # Config.__init__().
        if self.end_line is not None:
            line = self.end_line
            tokens = self.end_line_tokens
            tokens.unget_all()

            self.end_line = None
            self.end_line_tokens = None
        else:
            line = line_feeder.get_next()
            if line is None:
                # End of input: only legal when parsing a file (no
                # explicit end marker expected)
                if end_marker is not None:
                    raise Kconfig_Syntax_Error("Unexpected end of file {0}"
                                               .format(line_feeder.filename))
                return

            tokens = self._tokenize(line, False, line_feeder.filename,
                                    line_feeder.linenr)

        t0 = tokens.get_next()
        if t0 is None:
            # Blank or comment-only line
            continue

        # Cases are ordered roughly by frequency, which speeds things up a
        # bit

        if t0 == T_CONFIG or t0 == T_MENUCONFIG:
            # The tokenizer will automatically allocate a new Symbol object
            # for any new names it encounters, so we don't need to worry
            # about that here.
            sym = tokens.get_next()

            # Symbols defined in multiple places get the parent of their
            # first definition. However, for symbols whose parents are
            # choice statements, the choice statement takes precedence.
            if not sym.is_defined_ or isinstance(parent, Choice):
                sym.parent = parent
            sym.is_defined_ = True

            self._parse_properties(line_feeder, sym, deps, visible_if_deps)
            self.kconfig_syms.append(sym)
            block.append(sym)

        elif t0 == T_SOURCE:
            kconfig_file = tokens.get_next()
            exp_kconfig_file = self._expand_sym_refs(kconfig_file)
            f = os.path.join(self.base_dir, exp_kconfig_file)
            if not os.path.exists(f):
                raise IOError('{0}:{1}: sourced file "{2}" (expands to '
                              '"{3}") not found. Perhaps base_dir '
                              '(argument to Config.__init__(), currently '
                              '"{4}") is set to the wrong value.'
                              .format(line_feeder.filename,
                                      line_feeder.linenr,
                                      kconfig_file, exp_kconfig_file,
                                      self.base_dir))
            # Add items to the same block
            self._parse_file(f, parent, deps, visible_if_deps, block)

        elif t0 == end_marker:
            # We have reached the end of the block
            return

        elif t0 == T_IF:
            # If statements are treated as syntactic sugar for adding
            # dependencies to enclosed items and do not have an explicit
            # object representation.
            dep_expr = self._parse_expr(tokens, None, line,
                                        line_feeder.filename,
                                        line_feeder.linenr)
            # Add items to the same block
            self._parse_block(line_feeder, T_ENDIF, parent,
                              _make_and(dep_expr, deps),
                              visible_if_deps, block)

        elif t0 == T_COMMENT:
            comment = Comment()
            comment.config = self
            comment.parent = parent
            comment.filename = line_feeder.filename
            comment.linenr = line_feeder.linenr
            comment.text = tokens.get_next()

            self._parse_properties(line_feeder, comment, deps,
                                   visible_if_deps)
            self.comments.append(comment)
            block.append(comment)

        elif t0 == T_MENU:
            menu = Menu()
            menu.config = self
            menu.parent = parent
            menu.filename = line_feeder.filename
            menu.linenr = line_feeder.linenr
            menu.title = tokens.get_next()

            self._parse_properties(line_feeder, menu, deps,
                                   visible_if_deps)

            # This needs to go before _parse_block() so that we get the
            # proper menu ordering in the case of nested functions
            self.menus.append(menu)
            # Parse contents and put Items in menu.block
            self._parse_block(line_feeder, T_ENDMENU, menu, menu.dep_expr,
                              _make_and(visible_if_deps,
                                        menu.visible_if_expr),
                              menu.block)

            block.append(menu)

        elif t0 == T_CHOICE:
            name = tokens.get_next()
            if name is None:
                choice = Choice()
                self.choices.append(choice)
            else:
                # Named choice -- named choices may be defined in several
                # places and are merged by name
                choice = self.named_choices.get(name)
                if choice is None:
                    choice = Choice()
                    choice.name = name
                    self.named_choices[name] = choice
                    self.choices.append(choice)

            choice.config = self
            choice.parent = parent

            choice.def_locations.append((line_feeder.filename,
                                         line_feeder.linenr))

            self._parse_properties(line_feeder, choice, deps,
                                   visible_if_deps)

            # Parse contents and put Items in choice.block
            self._parse_block(line_feeder, T_ENDCHOICE, choice, deps,
                              visible_if_deps, choice.block)

            choice._determine_actual_symbols()

            # If no type is specified for the choice, its type is that of
            # the first choice item with a specified type
            if choice.type == UNKNOWN:
                for item in choice.actual_symbols:
                    if item.type != UNKNOWN:
                        choice.type = item.type
                        break

            # Each choice item of UNKNOWN type gets the type of the choice
            for item in choice.actual_symbols:
                if item.type == UNKNOWN:
                    item.type = choice.type

            block.append(choice)

        elif t0 == T_MAINMENU:
            text = tokens.get_next()
            if self.mainmenu_text is not None:
                self._warn("overriding 'mainmenu' text. "
                           'Old value: "{0}", new value: "{1}".'
                           .format(self.mainmenu_text, text),
                           line_feeder.filename, line_feeder.linenr)
            self.mainmenu_text = text

        else:
            _parse_error(line, "unrecognized construct",
                         line_feeder.filename, line_feeder.linenr)
def _parse_properties(self, line_feeder, stmt, deps, visible_if_deps):
    """Parsing of properties for symbols, menus, choices, and comments.
    Takes care of propagating dependencies from enclosing menus and ifs.

    line_feeder: _FileFeed to read property lines from.
    stmt: The Symbol, Choice, Menu, or Comment the properties belong to.
    deps: Dependencies inherited from enclosing menus, choices and ifs.
    visible_if_deps: 'visible if' dependencies from enclosing menus."""

    def parse_val_and_cond(tokens, line, filename, linenr):
        """Parses '<expr1> if <expr2>' constructs, where the 'if' part is
        optional. Returns a tuple containing the parsed expressions, with
        None as the second element if the 'if' part is missing."""
        return (self._parse_expr(tokens, stmt, line, filename, linenr,
                                 False),
                self._parse_expr(tokens, stmt, line, filename, linenr)
                if tokens.check(T_IF) else None)

    # In case the symbol is defined in multiple locations, we need to
    # remember what prompts, defaults, selects, and implies are new for
    # this definition, as "depends on" should only apply to the local
    # definition.
    new_prompt = None
    new_def_exprs = []
    new_selects = []
    new_implies = []

    # Dependencies from 'depends on' statements
    depends_on_expr = None

    while 1:
        line = line_feeder.get_next()
        if line is None:
            break

        filename = line_feeder.filename
        linenr = line_feeder.linenr

        tokens = self._tokenize(line, False, filename, linenr)

        t0 = tokens.get_next()
        if t0 is None:
            # Blank or comment-only line
            continue

        # Cases are ordered roughly by frequency, which speeds things up a
        # bit

        if t0 == T_DEPENDS:
            if not tokens.check(T_ON):
                _parse_error(line, 'expected "on" after "depends"',
                             filename, linenr)

            parsed_deps = self._parse_expr(tokens, stmt, line, filename,
                                           linenr)

            if isinstance(stmt, (Menu, Comment)):
                stmt.orig_deps = _make_and(stmt.orig_deps, parsed_deps)
            else:
                depends_on_expr = _make_and(depends_on_expr, parsed_deps)

        elif t0 == T_HELP:
            # Find first non-blank (not all-space) line and get its
            # indentation
            line = line_feeder.next_nonblank()
            if line is None:
                stmt.help = ""
                break
            indent = _indentation(line)
            if indent == 0:
                # If the first non-empty lines has zero indent, there is no
                # help text
                stmt.help = ""
                line_feeder.unget()
                break

            # The help text goes on till the first non-empty line with less
            # indent
            help_lines = [_deindent(line, indent)]
            while 1:
                line = line_feeder.get_next()
                if line is None or \
                   (not line.isspace() and _indentation(line) < indent):
                    stmt.help = "".join(help_lines)
                    break
                help_lines.append(_deindent(line, indent))

            if line is None:
                break

            line_feeder.unget()

        elif t0 == T_SELECT:
            target = tokens.get_next()

            stmt.referenced_syms.add(target)
            stmt.selected_syms.add(target)

            new_selects.append(
                (target,
                 self._parse_expr(tokens, stmt, line, filename, linenr)
                 if tokens.check(T_IF) else None))

        elif t0 == T_IMPLY:
            target = tokens.get_next()

            stmt.referenced_syms.add(target)
            stmt.implied_syms.add(target)

            new_implies.append(
                (target,
                 self._parse_expr(tokens, stmt, line, filename, linenr)
                 if tokens.check(T_IF) else None))

        elif t0 in (T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING):
            stmt.type = TOKEN_TO_TYPE[t0]
            # A prompt string may follow the type keyword
            if tokens.peek_next() is not None:
                new_prompt = parse_val_and_cond(tokens, line, filename,
                                                linenr)

        elif t0 == T_DEFAULT:
            new_def_exprs.append(parse_val_and_cond(tokens, line, filename,
                                                    linenr))

        elif t0 == T_DEF_BOOL:
            # "def_bool" is shorthand for "bool" plus "default"
            stmt.type = BOOL
            if tokens.peek_next() is not None:
                new_def_exprs.append(parse_val_and_cond(tokens, line,
                                                        filename, linenr))

        elif t0 == T_PROMPT:
            # 'prompt' properties override each other within a single
            # definition of a symbol, but additional prompts can be added
            # by defining the symbol multiple times; hence 'new_prompt'
            # instead of 'prompt'.
            new_prompt = parse_val_and_cond(tokens, line, filename, linenr)

        elif t0 == T_RANGE:
            low = tokens.get_next()
            high = tokens.get_next()
            stmt.referenced_syms.add(low)
            stmt.referenced_syms.add(high)

            stmt.ranges.append(
                (low, high,
                 self._parse_expr(tokens, stmt, line, filename, linenr)
                 if tokens.check(T_IF) else None))

        elif t0 == T_DEF_TRISTATE:
            # "def_tristate" is shorthand for "tristate" plus "default"
            stmt.type = TRISTATE
            if tokens.peek_next() is not None:
                new_def_exprs.append(parse_val_and_cond(tokens, line,
                                                        filename, linenr))

        elif t0 == T_OPTION:
            if tokens.check(T_ENV) and tokens.check(T_EQUAL):
                env_var = tokens.get_next()

                stmt.is_special_ = True
                stmt.is_from_env = True

                if env_var not in os.environ:
                    self._warn("The symbol {0} references the "
                               "non-existent environment variable {1} and "
                               "will get the empty string as its value. "
                               "If you're using Kconfiglib via "
                               "'make (i)scriptconfig', it should have "
                               "set up the environment correctly for you. "
                               "If you still got this message, that "
                               "might be an error, and you should email "
                               "ulfalizer a.t Google's email service."""
                               .format(stmt.name, env_var),
                               filename, linenr)

                    stmt.cached_val = ""
                else:
                    stmt.cached_val = os.environ[env_var]

            elif tokens.check(T_DEFCONFIG_LIST):
                self.defconfig_sym = stmt

            elif tokens.check(T_MODULES):
                # To reduce warning spam, only warn if 'option modules' is
                # set on some symbol that isn't MODULES, which should be
                # safe. I haven't run into any projects that make use
                # modules besides the kernel yet, and there it's likely to
                # keep being called "MODULES".
                if stmt.name != "MODULES":
                    self._warn("the 'modules' option is not supported. "
                               "Let me know if this is a problem for you; "
                               "it shouldn't be that hard to implement. "
                               "(Note that modules are still supported -- "
                               "Kconfiglib just assumes the symbol name "
                               "MODULES, like older versions of the C "
                               "implementation did when 'option modules' "
                               "wasn't used.)",
                               filename, linenr)

            elif tokens.check(T_ALLNOCONFIG_Y):
                if not isinstance(stmt, Symbol):
                    _parse_error(line,
                                 "the 'allnoconfig_y' option is only "
                                 "valid for symbols",
                                 filename, linenr)
                stmt.allnoconfig_y = True

            else:
                _parse_error(line, "unrecognized option", filename, linenr)

        elif t0 == T_VISIBLE:
            if not tokens.check(T_IF):
                _parse_error(line, 'expected "if" after "visible"',
                             filename, linenr)
            if not isinstance(stmt, Menu):
                _parse_error(line,
                             "'visible if' is only valid for menus",
                             filename, linenr)

            parsed_deps = self._parse_expr(tokens, stmt, line, filename,
                                           linenr)
            stmt.visible_if_expr = _make_and(stmt.visible_if_expr,
                                             parsed_deps)

        elif t0 == T_OPTIONAL:
            if not isinstance(stmt, Choice):
                _parse_error(line,
                             '"optional" is only valid for choices',
                             filename,
                             linenr)
            stmt.optional = True

        else:
            # The line belongs to the next construct, not to this item's
            # properties. See comment in Config.__init__()
            self.end_line = line
            self.end_line_tokens = tokens
            break

    # Done parsing properties. Now propagate 'depends on' and enclosing
    # menu/if dependencies to expressions.

    # The set of symbols referenced directly by the statement plus all
    # symbols referenced by enclosing menus and ifs
    stmt.all_referenced_syms = stmt.referenced_syms | _get_expr_syms(deps)

    # Save original dependencies from enclosing menus and ifs
    stmt.deps_from_containing = deps

    if isinstance(stmt, (Menu, Comment)):
        stmt.dep_expr = _make_and(stmt.orig_deps, deps)
    else:
        # Symbol or Choice

        # See comment for 'menu_dep'
        stmt.menu_dep = _make_and(deps, depends_on_expr)

        # Propagate dependencies to prompts

        if new_prompt is not None:
            prompt, cond_expr = new_prompt
            # Propagate 'visible if' dependencies from menus and local
            # 'depends on' dependencies
            cond_expr = _make_and(_make_and(cond_expr, visible_if_deps),
                                  depends_on_expr)
            # Save original
            stmt.orig_prompts.append((prompt, cond_expr))
            # Finalize with dependencies from enclosing menus and ifs
            stmt.prompts.append((prompt, _make_and(cond_expr, deps)))

        # Propagate dependencies to defaults

        # Propagate 'depends on' dependencies
        new_def_exprs = [(val_expr, _make_and(cond_expr, depends_on_expr))
                         for val_expr, cond_expr in new_def_exprs]
        # Save original
        stmt.orig_def_exprs.extend(new_def_exprs)
        # Finalize with dependencies from enclosing menus and ifs
        stmt.def_exprs.extend([(val_expr, _make_and(cond_expr, deps))
                               for val_expr, cond_expr in new_def_exprs])

        # Propagate dependencies to selects and implies

        # Only symbols can select and imply
        if isinstance(stmt, Symbol):
            # Propagate 'depends on' dependencies
            new_selects = [(target, _make_and(cond_expr, depends_on_expr))
                           for target, cond_expr in new_selects]
            new_implies = [(target, _make_and(cond_expr, depends_on_expr))
                           for target, cond_expr in new_implies]
            # Save original
            stmt.orig_selects.extend(new_selects)
            stmt.orig_implies.extend(new_implies)
            # Finalize with dependencies from enclosing menus and ifs
            for target, cond in new_selects:
                target.rev_dep = \
                    _make_or(target.rev_dep,
                             _make_and(stmt, _make_and(cond, deps)))
            for target, cond in new_implies:
                target.weak_rev_dep = \
                    _make_or(target.weak_rev_dep,
                             _make_and(stmt, _make_and(cond, deps)))
    def _parse_expr(self, feed, cur_item, line, filename=None, linenr=None,
                    transform_m=True):
        """Parses an expression from the tokens in 'feed' using a simple
        top-down approach. The result has the form
        '(<operator>, [<parsed operands>])', where <operator> is e.g.
        kconfiglib.AND. If there is only one operand (i.e., no && or ||), then
        the operand is returned directly. This also goes for subexpressions.
        feed: _Feed instance containing the tokens for the expression.
        cur_item: The item (Symbol, Choice, Menu, or Comment) currently being
           parsed, or None if we're not parsing an item. Used for recording
           references to symbols.
        line: The line containing the expression being parsed.
        filename (default: None): The file containing the expression.
        linenr (default: None): The line number containing the expression.
        transform_m (default: True): Determines if 'm' should be rewritten to
           'm && MODULES' -- see parse_val_and_cond().
        Expression grammar, in decreasing order of precedence:
        <expr> -> <symbol>
                  <symbol> '=' <symbol>
                  <symbol> '!=' <symbol>
                  '(' <expr> ')'
                  '!' <expr>
                  <expr> '&&' <expr>
                  <expr> '||' <expr>"""
        # Use instance variables to avoid having to pass these as arguments
        # through the top-down parser in _parse_expr_rec(), which is tedious
        # and obfuscates the code. A profiler run shows no noticeable
        # performance difference.
        self._cur_item = cur_item
        self._transform_m = transform_m
        self._line = line
        self._filename = filename
        self._linenr = linenr
        return self._parse_expr_rec(feed)
def _parse_expr_rec(self, feed):
or_term = self._parse_or_term(feed)
if not feed.check(T_OR):
# Common case -- no need for an OR node since it's just a single
# operand
return or_term
or_terms = [or_term, self._parse_or_term(feed)]
while feed.check(T_OR):
or_terms.append(self._parse_or_term(feed))
return (OR, or_terms)
def _parse_or_term(self, feed):
and_term = self._parse_factor(feed)
if not feed.check(T_AND):
# Common case -- no need for an AND node since it's just a single
# operand
return and_term
and_terms = [and_term, self._parse_factor(feed)]
while feed.check(T_AND):
and_terms.append(self._parse_factor(feed))
return (AND, and_terms)
    def _parse_factor(self, feed):
        """Parses the highest-precedence part of an expression: a symbol or
        string (optionally the left side of an (in)equality), a negation, or
        a parenthesized subexpression."""
        token = feed.get_next()
        if isinstance(token, (Symbol, str)):
            # Record the reference for dependency tracking, when we're inside
            # an item
            if self._cur_item is not None and isinstance(token, Symbol):
                self._cur_item.referenced_syms.add(token)
            next_token = feed.peek_next()
            # For conditional expressions ('depends on <expr>',
            # '... if <expr>', # etc.), "m" and m are rewritten to
            # "m" && MODULES.
            if next_token != T_EQUAL and next_token != T_UNEQUAL:
                if self._transform_m and (token is self.m or token == "m"):
                    return (AND, ["m", self._sym_lookup("MODULES")])
                return token
            # (In)equality: consume the operator and the right-hand operand
            relation = EQUAL if (feed.get_next() == T_EQUAL) else UNEQUAL
            token_2 = feed.get_next()
            if self._cur_item is not None and isinstance(token_2, Symbol):
                self._cur_item.referenced_syms.add(token_2)
            return (relation, token, token_2)
        if token == T_NOT:
            # '!' binds tightly: negate a single factor
            return (NOT, self._parse_factor(feed))
        if token == T_OPEN_PAREN:
            expr_parse = self._parse_expr_rec(feed)
            if not feed.check(T_CLOSE_PAREN):
                _parse_error(self._line, "missing end parenthesis",
                             self._filename, self._linenr)
            return expr_parse
        _parse_error(self._line, "malformed expression", self._filename,
                     self._linenr)
    def _tokenize(self, s, for_eval, filename=None, linenr=None):
        """Returns a _Feed instance containing tokens derived from the string
        's'. Registers any new symbols encountered (via _sym_lookup()).
        (I experimented with a pure regular expression implementation, but it
        came out slower, less readable, and wouldn't have been as flexible.)
        for_eval: True when parsing an expression for a call to Config.eval(),
           in which case we should not treat the first token specially nor
           register new symbols.
        filename (default: None): Source file, used for error reporting.
        linenr (default: None): Source line number, used for error
           reporting."""
        s = s.strip()
        # Ignore blank lines and comment-only lines
        if s == "" or s[0] == "#":
            return _Feed([])
        if for_eval:
            previous = None # The previous token seen
            tokens = []
            i = 0 # The current index in the string being tokenized
        else:
            # The initial word on a line is parsed specially. Let
            # command_chars = [A-Za-z0-9_]. Then
            #  - leading non-command_chars characters are ignored, and
            #  - the first token consists the following one or more
            #    command_chars characters.
            # This is why things like "----help--" are accepted.
            initial_token_match = _initial_token_re_match(s)
            if initial_token_match is None:
                return _Feed([])
            keyword = _get_keyword(initial_token_match.group(1))
            if keyword == T_HELP:
                # Avoid junk after "help", e.g. "---", being registered as a
                # symbol
                return _Feed([T_HELP])
            if keyword is None:
                # We expect a keyword as the first token
                _tokenization_error(s, filename, linenr)
            previous = keyword
            tokens = [keyword]
            # The current index in the string being tokenized
            i = initial_token_match.end()
        # _tokenize() is a hotspot during parsing, and this speeds things up a
        # bit
        strlen = len(s)
        append = tokens.append
        # Main tokenization loop. (Handles tokens past the first one.)
        while i < strlen:
            # Test for an identifier/keyword preceded by whitespace first; this
            # is the most common case.
            id_keyword_match = _id_keyword_re_match(s, i)
            if id_keyword_match:
                # We have an identifier or keyword. The above also stripped any
                # whitespace for us.
                name = id_keyword_match.group(1)
                # Jump past it
                i = id_keyword_match.end()
                keyword = _get_keyword(name)
                if keyword is not None:
                    # It's a keyword
                    append(keyword)
                elif previous in STRING_LEX:
                    # What would ordinarily be considered an identifier is
                    # treated as a string after certain tokens
                    append(name)
                else:
                    # It's a symbol name. _sym_lookup() will take care of
                    # allocating a new Symbol instance if it's the first time
                    # we see it.
                    sym = self._sym_lookup(name, for_eval)
                    if previous == T_CONFIG or previous == T_MENUCONFIG:
                        # If the previous token is T_(MENU)CONFIG
                        # ("(menu)config"), we're tokenizing the first line of
                        # a symbol definition, and should remember this as a
                        # location where the symbol is defined
                        sym.def_locations.append((filename, linenr))
                    else:
                        # Otherwise, it's a reference to the symbol
                        sym.ref_locations.append((filename, linenr))
                    append(sym)
            else:
                # Not an identifier/keyword -- skip whitespace, then handle
                # operators, string literals, and comments by first character
                while i < strlen and s[i].isspace():
                    i += 1
                if i == strlen:
                    break
                c = s[i]
                i += 1
                # String literal (constant symbol)
                if c == '"' or c == "'":
                    if "\\" in s:
                        # Slow path: This could probably be sped up, but it's a
                        # very unusual case anyway.
                        quote = c
                        val = ""
                        while 1:
                            if i >= len(s):
                                _tokenization_error(s, filename, linenr)
                            c = s[i]
                            if c == quote:
                                break
                            if c == "\\":
                                if i + 1 >= len(s):
                                    _tokenization_error(s, filename, linenr)
                                val += s[i + 1]
                                i += 2
                            else:
                                val += c
                                i += 1
                        i += 1
                        append(val)
                    else:
                        # Fast path: If the string contains no backslashes
                        # (almost always) we can simply look for the matching
                        # quote.
                        end = s.find(c, i)
                        if end == -1:
                            _tokenization_error(s, filename, linenr)
                        append(s[i:end])
                        i = end + 1
                elif c == "&":
                    # Invalid characters are ignored
                    if i >= len(s) or s[i] != "&": continue
                    append(T_AND)
                    i += 1
                elif c == "|":
                    # Invalid characters are ignored
                    if i >= len(s) or s[i] != "|": continue
                    append(T_OR)
                    i += 1
                elif c == "!":
                    # "!=" or plain "!"
                    if i < len(s) and s[i] == "=":
                        append(T_UNEQUAL)
                        i += 1
                    else:
                        append(T_NOT)
                elif c == "=": append(T_EQUAL)
                elif c == "(": append(T_OPEN_PAREN)
                elif c == ")": append(T_CLOSE_PAREN)
                elif c == "#": break # Comment
                else: continue # Invalid characters are ignored
            previous = tokens[-1]
        return _Feed(tokens)
def _sym_lookup(self, name, for_eval=False):
"""Fetches the symbol 'name' from the symbol table, creating and
registering it if it does not exist. If 'for_eval' is True, the symbol
won't be added to the symbol table if it does not exist -- this is for
Config.eval()."""
if name in self.syms:
return self.syms[name]
new_sym = Symbol()
new_sym.config = self
new_sym.name = name
if for_eval:
self._warn("no symbol {0} in configuration".format(name))
else:
self.syms[name] = new_sym
return new_sym
#
# Expression evaluation
#
def _eval_expr(self, expr):
"""Evaluates an expression to "n", "m", or "y"."""
# Handles e.g. an "x if y" condition where the "if y" part is missing.
if expr is None:
return "y"
res = self._eval_expr_rec(expr)
if res == "m":
# Promote "m" to "y" if we're running without modules.
#
# Internally, "m" is often rewritten to "m" && MODULES by both the
# C implementation and Kconfiglib, which takes care of cases where
# "m" should be demoted to "n" instead.
modules_sym = self.syms.get("MODULES")
if modules_sym is None or modules_sym.get_value() != "y":
return "y"
return res
def _eval_expr_rec(self, expr):
if isinstance(expr, Symbol):
# Non-bool/tristate symbols are always "n" in a tristate sense,
# regardless of their value
if expr.type != BOOL and expr.type != TRISTATE:
return "n"
return expr.get_value()
if isinstance(expr, str):
return expr if (expr == "y" or expr == "m") else "n"
# Ordered by frequency
if expr[0] == AND:
res = "y"
for subexpr in expr[1]:
ev = self._eval_expr_rec(subexpr)
# Return immediately upon discovering an "n" term
if ev == "n":
return "n"
if ev == "m":
res = "m"
# 'res' is either "m" or "y" here; we already handled the
# short-circuiting "n" case in the loop.
return res
if expr[0] == NOT:
ev = self._eval_expr_rec(expr[1])
if ev == "y":
return "n"
return "y" if (ev == "n") else "m"
if expr[0] == OR:
res = "n"
for subexpr in expr[1]:
ev = self._eval_expr_rec(subexpr)
# Return immediately upon discovering a "y" term
if ev == "y":
return "y"
if ev == "m":
res = "m"
# 'res' is either "n" or "m" here; we already handled the
# short-circuiting "y" case in the loop.
return res
if expr[0] == EQUAL:
return "y" if (_str_val(expr[1]) == _str_val(expr[2])) else "n"
if expr[0] == UNEQUAL:
return "y" if (_str_val(expr[1]) != _str_val(expr[2])) else "n"
_internal_error("Internal error while evaluating expression: "
"unknown operation {0}.".format(expr[0]))
def _eval_min(self, e1, e2):
"""Returns the minimum value of the two expressions. Equates None with
'y'."""
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_less(e1_eval, e2_eval) else e2_eval
def _eval_max(self, e1, e2):
"""Returns the maximum value of the two expressions. Equates None with
'y'."""
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_greater(e1_eval, e2_eval) else e2_eval
#
# Dependency tracking (for caching and invalidation)
#
    def _build_dep(self):
        """Populates the Symbol.dep sets, linking the symbol to the symbols
        that immediately depend on it in the sense that changing the value of
        the symbol might affect the values of those other symbols. This is used
        for caching/invalidation purposes. The calculated sets might be larger
        than necessary as we don't do any complicated analysis of the
        expressions."""
        # Adds 'sym' as a directly dependent symbol to all symbols that appear
        # in the expression 'e'
        def add_expr_deps(e, sym):
            for s in _get_expr_syms(e):
                s.dep.add(sym)
        # The directly dependent symbols of a symbol are:
        #  - Any symbols whose prompts, default values, rev_dep (select
        #    condition), weak_rev_dep (imply condition) or ranges depend on the
        #    symbol
        #  - Any symbols that belong to the same choice statement as the symbol
        #    (these won't be included in 'dep' as that makes the dependency
        #    graph unwieldy, but Symbol._get_dependent() will include them)
        #  - Any symbols in a choice statement that depends on the symbol
        for sym in self.syms_iter():
            # Prompt conditions
            for _, e in sym.prompts:
                add_expr_deps(e, sym)
            # Default values and their conditions
            for v, e in sym.def_exprs:
                add_expr_deps(v, sym)
                add_expr_deps(e, sym)
            # Select/imply conditions
            add_expr_deps(sym.rev_dep, sym)
            add_expr_deps(sym.weak_rev_dep, sym)
            # Range bounds and their conditions
            for l, u, e in sym.ranges:
                add_expr_deps(l, sym)
                add_expr_deps(u, sym)
                add_expr_deps(e, sym)
            # A choice's prompts/defaults depend on its choice symbols
            if sym.is_choice_sym:
                choice = sym.parent
                for _, e in choice.prompts:
                    add_expr_deps(e, sym)
                for _, e in choice.def_exprs:
                    add_expr_deps(e, sym)
def _eq_to_sym(self, eq):
"""_expr_depends_on() helper. For (in)equalities of the form sym = y/m
or sym != n, returns sym. For other (in)equalities, returns None."""
relation, left, right = eq
def transform_y_m_n(item):
if item is self.y: return "y"
if item is self.m: return "m"
if item is self.n: return "n"
return item
left = transform_y_m_n(left)
right = transform_y_m_n(right)
# Make sure the symbol (if any) appears to the left
if not isinstance(left, Symbol):
left, right = right, left
if not isinstance(left, Symbol):
return None
if (relation == EQUAL and (right == "y" or right == "m")) or \
(relation == UNEQUAL and right == "n"):
return left
return None
def _expr_depends_on(self, expr, sym):
"""Reimplementation of expr_depends_symbol() from mconf.c. Used to
determine if a submenu should be implicitly created, which influences
what items inside choice statements are considered choice items."""
if expr is None:
return False
def rec(expr):
if isinstance(expr, str):
return False
if isinstance(expr, Symbol):
return expr is sym
if expr[0] in (EQUAL, UNEQUAL):
return self._eq_to_sym(expr) is sym
if expr[0] == AND:
for and_expr in expr[1]:
if rec(and_expr):
return True
return False
return rec(expr)
def _invalidate_all(self):
for sym in self.syms_iter():
sym._invalidate()
#
# Printing and misc.
#
def _expand_sym_refs(self, s):
"""Expands $-references to symbols in 's' to symbol values, or to the
empty string for undefined symbols."""
while 1:
sym_ref_match = _sym_ref_re_search(s)
if sym_ref_match is None:
return s
sym_name = sym_ref_match.group(0)[1:]
sym = self.syms.get(sym_name)
expansion = "" if sym is None else sym.get_value()
s = s[:sym_ref_match.start()] + \
expansion + \
s[sym_ref_match.end():]
def _expr_val_str(self, expr, no_value_str="(none)",
get_val_instead_of_eval=False):
"""Printing helper. Returns a string with 'expr' and its value.
no_value_str: String to return when 'expr' is missing (None).
get_val_instead_of_eval: Assume 'expr' is a symbol or string (constant
symbol) and get its value directly instead of evaluating it to a
tristate value."""
if expr is None:
return no_value_str
if get_val_instead_of_eval:
if isinstance(expr, str):
return _expr_to_str(expr)
val = expr.get_value()
else:
val = self._eval_expr(expr)
return "{0} (value: {1})".format(_expr_to_str(expr), _expr_to_str(val))
    def _get_sym_or_choice_str(self, sc):
        """Symbols and choices have many properties in common, so we factor out
        common __str__() stuff here. "sc" is short for "symbol or choice"."""
        # As we deal a lot with string representations here, use some
        # convenient shorthand:
        s = _expr_to_str
        #
        # Common symbol/choice properties
        #
        user_val_str = "(no user value)" if sc.user_val is None else \
                       s(sc.user_val)
        # Build prompts string
        if not sc.prompts:
            prompts_str = " (no prompts)"
        else:
            prompts_str_rows = []
            for prompt, cond_expr in sc.orig_prompts:
                prompts_str_rows.append(
                    ' "{0}"'.format(prompt) if cond_expr is None else
                    ' "{0}" if {1}'.format(prompt,
                                           self._expr_val_str(cond_expr)))
            prompts_str = "\n".join(prompts_str_rows)
        # Build locations string
        locations_str = "(no locations)" if not sc.def_locations else \
                        " ".join(["{0}:{1}".format(filename, linenr) for
                                  filename, linenr in sc.def_locations])
        # Build additional-dependencies-from-menus-and-ifs string
        additional_deps_str = " " + \
          self._expr_val_str(sc.deps_from_containing,
                             "(no additional dependencies)")
        #
        # Symbol-specific stuff
        #
        if isinstance(sc, Symbol):
            # Build ranges string
            # NOTE(review): this inner isinstance() check is redundant -- we
            # are already inside an 'isinstance(sc, Symbol)' branch
            if isinstance(sc, Symbol):
                if not sc.ranges:
                    ranges_str = " (no ranges)"
                else:
                    ranges_str_rows = []
                    for l, u, cond_expr in sc.ranges:
                        ranges_str_rows.append(
                            " [{0}, {1}]".format(s(l), s(u))
                            if cond_expr is None else
                            " [{0}, {1}] if {2}"
                            .format(s(l), s(u), self._expr_val_str(cond_expr)))
                    ranges_str = "\n".join(ranges_str_rows)
            # Build default values string
            if not sc.def_exprs:
                defaults_str = " (no default values)"
            else:
                defaults_str_rows = []
                for val_expr, cond_expr in sc.orig_def_exprs:
                    row_str = " " + self._expr_val_str(val_expr, "(none)",
                                                       sc.type == STRING)
                    defaults_str_rows.append(row_str)
                    defaults_str_rows.append(" Condition: " +
                                             self._expr_val_str(cond_expr))
                defaults_str = "\n".join(defaults_str_rows)
            # Build selects string
            if not sc.orig_selects:
                selects_str = " (no selects)"
            else:
                selects_str_rows = []
                for target, cond_expr in sc.orig_selects:
                    selects_str_rows.append(
                        " {0}".format(target.name) if cond_expr is None else
                        " {0} if {1}".format(target.name,
                                             self._expr_val_str(cond_expr)))
                selects_str = "\n".join(selects_str_rows)
            # Build implies string
            if not sc.orig_implies:
                implies_str = " (no implies)"
            else:
                implies_str_rows = []
                for target, cond_expr in sc.orig_implies:
                    implies_str_rows.append(
                        " {0}".format(target.name) if cond_expr is None else
                        " {0} if {1}".format(target.name,
                                             self._expr_val_str(cond_expr)))
                implies_str = "\n".join(implies_str_rows)
            res = _lines("Symbol " +
                         ("(no name)" if sc.name is None else sc.name),
                         "Type           : " + TYPENAME[sc.type],
                         "Value          : " + s(sc.get_value()),
                         "User value     : " + user_val_str,
                         "Visibility     : " + s(_get_visibility(sc)),
                         "Is choice item : " + BOOL_STR[sc.is_choice_sym],
                         "Is defined     : " + BOOL_STR[sc.is_defined_],
                         "Is from env.   : " + BOOL_STR[sc.is_from_env],
                         "Is special     : " + BOOL_STR[sc.is_special_] + "\n")
            if sc.ranges:
                res += _lines("Ranges:", ranges_str + "\n")
            res += _lines("Prompts:",
                          prompts_str,
                          "Default values:",
                          defaults_str,
                          "Selects:",
                          selects_str,
                          "Implies:",
                          implies_str,
                          "Reverse (select-related) dependencies:",
                          " (no reverse dependencies)"
                          if sc.rev_dep == "n"
                          else " " + self._expr_val_str(sc.rev_dep),
                          "Weak reverse (imply-related) dependencies:",
                          " (no weak reverse dependencies)"
                          if sc.weak_rev_dep == "n"
                          else " " + self._expr_val_str(sc.weak_rev_dep),
                          "Additional dependencies from enclosing menus "
                          "and ifs:",
                          additional_deps_str,
                          "Locations: " + locations_str)
            return res
        #
        # Choice-specific stuff
        #
        # Build selected symbol string
        sel = sc.get_selection()
        sel_str = "(no selection)" if sel is None else sel.name
        # Build default values string
        if not sc.def_exprs:
            defaults_str = " (no default values)"
        else:
            defaults_str_rows = []
            for sym, cond_expr in sc.orig_def_exprs:
                defaults_str_rows.append(
                    " {0}".format(sym.name) if cond_expr is None else
                    " {0} if {1}".format(sym.name,
                                         self._expr_val_str(cond_expr)))
            defaults_str = "\n".join(defaults_str_rows)
        # Build contained symbols string
        names = [sym.name for sym in sc.actual_symbols]
        syms_string = " ".join(names) if names else "(empty)"
        return _lines("Choice",
                      "Name (for named choices): " +
                      ("(no name)" if sc.name is None else sc.name),
                      "Type            : " + TYPENAME[sc.type],
                      "Selected symbol : " + sel_str,
                      "User value      : " + user_val_str,
                      "Mode            : " + s(sc.get_mode()),
                      "Visibility      : " + s(_get_visibility(sc)),
                      "Optional        : " + BOOL_STR[sc.optional],
                      "Prompts:",
                      prompts_str,
                      "Defaults:",
                      defaults_str,
                      "Choice symbols:",
                      " " + syms_string,
                      "Additional dependencies from enclosing menus and "
                      "ifs:",
                      additional_deps_str,
                      "Locations: " + locations_str)
def _warn(self, msg, filename=None, linenr=None):
"""For printing warnings to stderr."""
msg = _build_msg("warning: " + msg, filename, linenr)
if self.print_warnings:
sys.stderr.write(msg + "\n")
self._warnings.append(msg)
class Item(object):
    """Base class for symbols and other Kconfig constructs. Subclasses are
    Symbol, Choice, Menu, and Comment."""

    def is_symbol(self):
        """Returns True if the item is a symbol. Short for
        isinstance(item, kconfiglib.Symbol)."""
        return isinstance(self, Symbol)

    def is_choice(self):
        """Returns True if the item is a choice. Short for
        isinstance(item, kconfiglib.Choice)."""
        return isinstance(self, Choice)

    def is_menu(self):
        """Returns True if the item is a menu. Short for
        isinstance(item, kconfiglib.Menu)."""
        return isinstance(self, Menu)

    def is_comment(self):
        """Returns True if the item is a comment. Short for
        isinstance(item, kconfiglib.Comment)."""
        return isinstance(self, Comment)
class Symbol(Item):
"""Represents a configuration symbol - e.g. FOO for
config FOO
..."""
#
# Public interface
#
def get_config(self):
"""Returns the Config instance this symbol is from."""
return self.config
def get_name(self):
"""Returns the name of the symbol."""
return self.name
def get_type(self):
"""Returns the type of the symbol: one of UNKNOWN, BOOL, TRISTATE,
STRING, HEX, or INT. These are defined at the top level of the module,
so you'd do something like
if sym.get_type() == kconfiglib.STRING:
..."""
return self.type
def get_prompts(self):
"""Returns a list of prompts defined for the symbol, in the order they
appear in the configuration files. Returns the empty list for symbols
with no prompt.
This list will have a single entry for the vast majority of symbols
having prompts, but having multiple prompts for a single symbol is
possible through having multiple 'config' entries for it."""
return [prompt for prompt, _ in self.orig_prompts]
def get_help(self):
"""Returns the help text of the symbol, or None if the symbol has no
help text."""
return self.help
def get_parent(self):
"""Returns the menu or choice statement that contains the symbol, or
None if the symbol is at the top level. Note that if statements are
treated as syntactic and do not have an explicit class
representation."""
return self.parent
def get_def_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the symbol is defined. For
the vast majority of symbols this list will only contain one element.
For the following Kconfig, FOO would get two entries: the lines marked
with *.
config FOO *
bool "foo prompt 1"
config FOO *
bool "foo prompt 2"
"""
return self.def_locations
def get_ref_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the symbol is referenced in
the configuration. For example, the lines marked by * would be included
for FOO below:
config A
bool
default BAR || FOO *
config B
tristate
depends on FOO *
default m if FOO *
if FOO *
config A
bool "A"
endif
config FOO (definition not included)
bool
"""
return self.ref_locations
    def get_value(self):
        """Calculate and return the value of the symbol, as one of the
        tristate strings "n"/"m"/"y" for bool/tristate symbols and a plain
        string for string/int/hex symbols. The result is cached until the
        symbol (or something it depends on) is invalidated. See also
        Symbol.set_user_value()."""
        if self.cached_val is not None:
            return self.cached_val
        # As a quirk of Kconfig, undefined symbols get their name as their
        # value. This is why things like "FOO = bar" work for seeing if FOO has
        # the value "bar".
        if self.type == UNKNOWN:
            self.cached_val = self.name
            return self.name
        new_val = DEFAULT_VALUE[self.type]
        vis = _get_visibility(self)
        # This is easiest to calculate together with the value
        self.write_to_conf = False
        if self.type == BOOL or self.type == TRISTATE:
            # The visibility and mode (modules-only or single-selection) of
            # choice items will be taken into account in _get_visibility()
            if self.is_choice_sym:
                if vis != "n":
                    choice = self.parent
                    mode = choice.get_mode()
                    self.write_to_conf = (mode != "n")
                    if mode == "y":
                        # In "y" mode exactly one choice symbol is "y"
                        new_val = "y" if choice.get_selection() is self \
                                  else "n"
                    elif mode == "m":
                        if self.user_val == "m" or self.user_val == "y":
                            new_val = "m"
            else:
                # If the symbol is visible and has a user value, use that.
                # Otherwise, look at defaults and weak reverse dependencies
                # (implies).
                use_defaults_and_weak_rev_deps = True
                if vis != "n":
                    self.write_to_conf = True
                    if self.user_val is not None:
                        # User value is clamped by the visibility
                        new_val = self.config._eval_min(self.user_val, vis)
                        use_defaults_and_weak_rev_deps = False
                if use_defaults_and_weak_rev_deps:
                    # First matching (non-"n" condition) default wins
                    for val_expr, cond_expr in self.def_exprs:
                        cond_eval = self.config._eval_expr(cond_expr)
                        if cond_eval != "n":
                            self.write_to_conf = True
                            new_val = self.config._eval_min(val_expr,
                                                            cond_eval)
                            break
                    # 'imply' can raise the value further
                    weak_rev_dep_val = \
                        self.config._eval_expr(self.weak_rev_dep)
                    if weak_rev_dep_val != "n":
                        self.write_to_conf = True
                        new_val = self.config._eval_max(new_val,
                                                        weak_rev_dep_val)
                # Reverse (select-related) dependencies take precedence
                rev_dep_val = self.config._eval_expr(self.rev_dep)
                if rev_dep_val != "n":
                    self.write_to_conf = True
                    new_val = self.config._eval_max(new_val, rev_dep_val)
            # We need to promote "m" to "y" in two circumstances:
            #  1) If our type is boolean
            #  2) If our weak_rev_dep (from IMPLY) is "y"
            if new_val == "m" and \
               (self.type == BOOL or
                self.config._eval_expr(self.weak_rev_dep) == "y"):
                new_val = "y"
        elif self.type == INT or self.type == HEX:
            has_active_range = False
            low = None
            high = None
            use_defaults = True
            base = 16 if self.type == HEX else 10
            # First range whose condition isn't "n" is the active one
            for l, h, cond_expr in self.ranges:
                if self.config._eval_expr(cond_expr) != "n":
                    has_active_range = True
                    low_str = _str_val(l)
                    high_str = _str_val(h)
                    # Malformed bounds fall back to 0
                    low = int(low_str, base) if \
                      _is_base_n(low_str, base) else 0
                    high = int(high_str, base) if \
                      _is_base_n(high_str, base) else 0
                    break
            if vis != "n":
                self.write_to_conf = True
                if self.user_val is not None and \
                   _is_base_n(self.user_val, base) and \
                   (not has_active_range or
                    low <= int(self.user_val, base) <= high):
                    # If the user value is OK, it is stored in exactly the same
                    # form as specified in the assignment (with or without
                    # "0x", etc).
                    use_defaults = False
                    new_val = self.user_val
            if use_defaults:
                for val_expr, cond_expr in self.def_exprs:
                    if self.config._eval_expr(cond_expr) != "n":
                        self.write_to_conf = True
                        # If the default value is OK, it is stored in exactly
                        # the same form as specified. Otherwise, it is clamped
                        # to the range, and the output has "0x" as appropriate
                        # for the type.
                        new_val = _str_val(val_expr)
                        if _is_base_n(new_val, base):
                            new_val_num = int(new_val, base)
                            if has_active_range:
                                clamped_val = None
                                if new_val_num < low:
                                    clamped_val = low
                                elif new_val_num > high:
                                    clamped_val = high
                                if clamped_val is not None:
                                    new_val = (hex(clamped_val) if \
                                      self.type == HEX else str(clamped_val))
                        break
                else: # For the for loop
                    # If no user value or default kicks in but the hex/int has
                    # an active range, then the low end of the range is used,
                    # provided it's > 0, with "0x" prepended as appropriate.
                    if has_active_range and low > 0:
                        new_val = (hex(low) if self.type == HEX else str(low))
        elif self.type == STRING:
            use_defaults = True
            if vis != "n":
                self.write_to_conf = True
                if self.user_val is not None:
                    new_val = self.user_val
                    use_defaults = False
            if use_defaults:
                # First matching default wins
                for val_expr, cond_expr in self.def_exprs:
                    if self.config._eval_expr(cond_expr) != "n":
                        self.write_to_conf = True
                        new_val = _str_val(val_expr)
                        break
        self.cached_val = new_val
        return new_val
def get_user_value(self):
"""Returns the value assigned to the symbol in a .config or via
Symbol.set_user_value() (provided the value was valid for the type of
the symbol). Returns None in case of no user value."""
return self.user_val
def get_upper_bound(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns None.
Otherwise, returns the highest value the symbol can be set to with
Symbol.set_user_value() (that will not be truncated): one of "m" or
"y", arranged from lowest to highest. This corresponds to the highest
value the symbol could be given in e.g. the 'make menuconfig'
interface.
See also the tri_less*() and tri_greater*() functions, which could come
in handy."""
if self.type != BOOL and self.type != TRISTATE:
return None
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y", pinning it
if rev_dep == "m" and self.type == BOOL:
return None
vis = _get_visibility(self)
if TRI_TO_INT[vis] > TRI_TO_INT[rev_dep]:
return vis
return None
def get_lower_bound(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns None.
Otherwise, returns the lowest value the symbol can be set to with
Symbol.set_user_value() (that will not be truncated): one of "n" or
"m", arranged from lowest to highest. This corresponds to the lowest
value the symbol could be given in e.g. the 'make menuconfig'
interface.
See also the tri_less*() and tri_greater*() functions, which could come
in handy."""
if self.type != BOOL and self.type != TRISTATE:
return None
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y", pinning it
if rev_dep == "m" and self.type == BOOL:
return None
if TRI_TO_INT[_get_visibility(self)] > TRI_TO_INT[rev_dep]:
return rev_dep
return None
def get_assignable_values(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns the empty list.
Otherwise, returns a list containing the user values that can be
assigned to the symbol (that won't be truncated). Usage example:
if "m" in sym.get_assignable_values():
sym.set_user_value("m")
This is basically a more convenient interface to
get_lower/upper_bound() when wanting to test if a particular tristate
value can be assigned."""
if self.type != BOOL and self.type != TRISTATE:
return []
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y", pinning it
if rev_dep == "m" and self.type == BOOL:
return []
res = ["n", "m", "y"][TRI_TO_INT[rev_dep] :
TRI_TO_INT[_get_visibility(self)] + 1]
return res if len(res) > 1 else []
def get_visibility(self):
"""Returns the visibility of the symbol: one of "n", "m" or "y". For
bool and tristate symbols, this is an upper bound on the value users
can set for the symbol. For other types of symbols, a visibility of "n"
means the user value will be ignored. A visibility of "n" corresponds
to not being visible in the 'make *config' interfaces.
Example (assuming we're running with modules enabled -- i.e., MODULES
set to 'y'):
# Assume this has been assigned 'n'
config N_SYM
tristate "N_SYM"
# Assume this has been assigned 'm'
config M_SYM
tristate "M_SYM"
# Has visibility 'n'
config A
tristate "A"
depends on N_SYM
# Has visibility 'm'
config B
tristate "B"
depends on M_SYM
# Has visibility 'y'
config C
tristate "C"
# Has no prompt, and hence visibility 'n'
config D
tristate
Having visibility be tri-valued ensures that e.g. a symbol cannot be
set to "y" by the user if it depends on a symbol with value "m", which
wouldn't be safe.
You should probably look at get_lower/upper_bound(),
get_assignable_values() and is_modifiable() before using this."""
return _get_visibility(self)
def get_referenced_symbols(self, refs_from_enclosing=False):
"""Returns the set() of all symbols referenced by this symbol. For
example, the symbol defined by
config FOO
bool
prompt "foo" if A && B
default C if D
depends on E
select F if G
references the symbols A through G.
refs_from_enclosing (default: False): If True, the symbols referenced
by enclosing menus and ifs will be included in the result."""
return self.all_referenced_syms if refs_from_enclosing else \
self.referenced_syms
def get_selected_symbols(self):
"""Returns the set() of all symbols X for which this symbol has a
'select X' or 'select X if Y' (regardless of whether Y is satisfied or
not). This is a subset of the symbols returned by
get_referenced_symbols()."""
return self.selected_syms
def get_implied_symbols(self):
"""Returns the set() of all symbols X for which this symbol has an
'imply X' or 'imply X if Y' (regardless of whether Y is satisfied or
not). This is a subset of the symbols returned by
get_referenced_symbols()."""
return self.implied_syms
def set_user_value(self, v):
"""Sets the user value of the symbol.
Equal in effect to assigning the value to the symbol within a .config
file. Use get_lower/upper_bound() or get_assignable_values() to find
the range of currently assignable values for bool and tristate symbols;
setting values outside this range will cause the user value to differ
from the result of Symbol.get_value() (be truncated). Values that are
invalid for the type (such as a_bool.set_user_value("foo")) are
ignored, and a warning is emitted if an attempt is made to assign such
a value.
For any type of symbol, is_modifiable() can be used to check if a user
value will currently have any effect on the symbol, as determined by
its visibility and range of assignable values. Any value that is valid
for the type (bool, tristate, etc.) will end up being reflected in
get_user_value() though, and might have an effect later if conditions
change. To get rid of the user value, use unset_user_value().
Any symbols dependent on the symbol are (recursively) invalidated, so
things will just work with regards to dependencies.
v: The user value to give to the symbol."""
self._set_user_value_no_invalidate(v, False)
# There might be something more efficient you could do here, but play
# it safe.
if self.name == "MODULES":
self.config._invalidate_all()
return
self._invalidate()
self._invalidate_dependent()
def unset_user_value(self):
"""Resets the user value of the symbol, as if the symbol had never
gotten a user value via Config.load_config() or
Symbol.set_user_value()."""
self._unset_user_value_no_recursive_invalidate()
self._invalidate_dependent()
def is_modifiable(self):
"""Returns True if the value of the symbol could be modified by calling
Symbol.set_user_value().
For bools and tristates, this corresponds to the symbol being visible
in the 'make menuconfig' interface and not already being pinned to a
specific value (e.g. because it is selected by another symbol).
For strings and numbers, this corresponds to just being visible. (See
Symbol.get_visibility().)"""
if self.is_special_:
return False
if self.type == BOOL or self.type == TRISTATE:
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y", pinning it
if rev_dep == "m" and self.type == BOOL:
return False
return TRI_TO_INT[_get_visibility(self)] > TRI_TO_INT[rev_dep]
return _get_visibility(self) != "n"
def is_defined(self):
"""Returns False if the symbol is referred to in the Kconfig but never
actually defined."""
return self.is_defined_
def is_special(self):
"""Returns True if the symbol is one of the special symbols n, m, y, or
UNAME_RELEASE, or gets its value from the environment."""
return self.is_special_
def is_from_environment(self):
"""Returns True if the symbol gets its value from the environment."""
return self.is_from_env
def has_ranges(self):
"""Returns True if the symbol is of type INT or HEX and has ranges that
limit what values it can take on."""
return bool(self.ranges)
def is_choice_symbol(self):
"""Returns True if the symbol is in a choice statement and is an actual
choice symbol (see Choice.get_symbols())."""
return self.is_choice_sym
def is_choice_selection(self):
"""Returns True if the symbol is contained in a choice statement and is
the selected item. Equivalent to
sym.is_choice_symbol() and sym.get_parent().get_selection() is sym"""
return self.is_choice_sym and self.parent.get_selection() is self
def is_allnoconfig_y(self):
"""Returns True if the symbol has the 'allnoconfig_y' option set."""
return self.allnoconfig_y
    def __str__(self):
        """Returns a string containing various information about the symbol."""
        # Delegates to the Config, which pretty-prints symbols and choices
        # uniformly.
        return self.config._get_sym_or_choice_str(self)
#
# Private methods
#
    def __init__(self):
        """Symbol constructor -- not intended to be called directly by
        Kconfiglib clients. Instances are created and populated by the
        Kconfig parser; all attributes start out empty/None here."""
        self.name = None
        self.type = UNKNOWN
        self.prompts = []
        self.def_exprs = []  # 'default' properties
        self.ranges = []  # 'range' properties (for int and hex)
        self.help = None  # Help text
        self.rev_dep = "n"  # Reverse (select-related) dependencies
        self.weak_rev_dep = "n"  # Weak reverse (imply-related) dependencies
        self.config = None
        self.parent = None
        self.user_val = None  # Value set by user
        # The prompt, default value, select, and imply conditions without any
        # dependencies from menus and ifs propagated to them
        self.orig_prompts = []
        self.orig_def_exprs = []
        self.orig_selects = []
        self.orig_implies = []
        # Dependencies inherited from containing menus and ifs
        self.deps_from_containing = None
        # The set of symbols referenced by this symbol (see
        # get_referenced_symbols())
        self.referenced_syms = set()
        # The set of symbols selected by this symbol (see
        # get_selected_symbols())
        self.selected_syms = set()
        # The set of symbols implied by this symbol (see get_implied_symbols())
        self.implied_syms = set()
        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and ifs
        self.all_referenced_syms = set()
        # This records only dependencies from enclosing ifs and menus together
        # with local 'depends on' dependencies. Needed when determining actual
        # choice items (hrrrr...). See Choice._determine_actual_symbols().
        self.menu_dep = None
        # See Symbol.get_ref/def_locations().
        self.def_locations = []
        self.ref_locations = []
        # Populated in Config._build_dep() after parsing. Links the symbol to
        # the symbols that immediately depend on it (in a caching/invalidation
        # sense). The total set of dependent symbols for the symbol (the
        # transitive closure) is calculated on an as-needed basis in
        # _get_dependent().
        self.dep = set()
        # Cached values
        # Caches the calculated value
        self.cached_val = None
        # Caches the visibility, which acts as an upper bound on the value
        self.cached_visibility = None
        # Caches the total list of dependent symbols. Calculated in
        # _get_dependent().
        self.cached_deps = None
        # Flags
        # Does the symbol have an entry in the Kconfig file? The trailing
        # underscore avoids a collision with is_defined().
        self.is_defined_ = False
        # Should the symbol get an entry in .config?
        self.write_to_conf = False
        # Set to true when _make_conf() is called on a symbol, so that symbols
        # defined in multiple locations only get one .config entry. We need to
        # reset it prior to writing out a new .config.
        self.already_written = False
        # This is set to True for "actual" choice symbols; see
        # Choice._determine_actual_symbols().
        self.is_choice_sym = False
        # Does the symbol get its value in some special way, e.g. from the
        # environment or by being one of the special symbols n, m, and y? If
        # so, the value is stored in self.cached_val, which is never
        # invalidated. The trailing underscore avoids a collision with
        # is_special().
        self.is_special_ = False
        # Does the symbol get its value from the environment?
        self.is_from_env = False
        # Does the symbol have the 'allnoconfig_y' option set?
        self.allnoconfig_y = False
def _invalidate(self):
if self.is_special_:
return
if self.is_choice_sym:
self.parent._invalidate()
self.cached_val = None
self.cached_visibility = None
def _invalidate_dependent(self):
for sym in self._get_dependent():
sym._invalidate()
    def _set_user_value_no_invalidate(self, v, suppress_load_warnings):
        """Like set_user_value(), but does not invalidate any symbols.
        suppress_load_warnings: some warnings are annoying when loading a
        .config that can be helpful when manually invoking set_user_value().
        This flag is set to True to suppress such warnings.
        Perhaps this could be made optional for load_config() instead."""
        # Assignments to special symbols (env-derived, or n/m/y) are ignored
        # with a warning
        if self.is_special_:
            if self.is_from_env:
                self.config._warn('attempt to assign the value "{0}" to the '
                                  'symbol {1}, which gets its value from the '
                                  'environment. Assignment ignored.'
                                  .format(v, self.name))
            else:
                self.config._warn('attempt to assign the value "{0}" to the '
                                  'special symbol {1}. Assignment ignored.'
                                  .format(v, self.name))
            return
        # Assignments to undefined (only referenced) symbols are ignored too
        if not self.is_defined_:
            filename, linenr = self.ref_locations[0]
            if self.config.print_undef_assign:
                _stderr_msg('note: attempt to assign the value "{0}" to {1}, '
                            "which is referenced at {2}:{3} but never "
                            "defined. Assignment ignored."
                            .format(v, self.name, filename, linenr))
            return
        # Check if the value is valid for our type
        if not ((self.type == BOOL and (v == "y" or v == "n")) or
                (self.type == TRISTATE and (v == "y" or v == "m" or
                                            v == "n")) or
                (self.type == STRING) or
                (self.type == INT and _is_base_n(v, 10)) or
                (self.type == HEX and _is_base_n(v, 16))):
            self.config._warn('the value "{0}" is invalid for {1}, which has '
                              "type {2}. Assignment ignored."
                              .format(v, self.name, TYPENAME[self.type]))
            return
        if not self.prompts and not suppress_load_warnings:
            self.config._warn('assigning "{0}" to the symbol {1} which '
                              'lacks prompts and thus has visibility "n". '
                              'The assignment will have no effect.'
                              .format(v, self.name))
        self.user_val = v
        # Setting a choice symbol also updates the mode/selection of the
        # enclosing choice: "y" selects this symbol, "m" only sets the mode
        if self.is_choice_sym and (self.type == BOOL or self.type == TRISTATE):
            choice = self.parent
            if v == "y":
                choice.user_val = self
                choice.user_mode = "y"
            elif v == "m":
                choice.user_val = None
                choice.user_mode = "m"
def _unset_user_value_no_recursive_invalidate(self):
self._invalidate()
self.user_val = None
if self.is_choice_sym:
self.parent._unset_user_value()
    def _make_conf(self, append_fn):
        """Appends the .config line(s) for this symbol via append_fn. Emits
        nothing for symbols already written (multi-location definitions) or
        whose value should not appear in .config."""
        if self.already_written:
            return
        self.already_written = True
        # Note: write_to_conf is determined in get_value()
        val = self.get_value()
        if not self.write_to_conf:
            return
        if self.type == BOOL or self.type == TRISTATE:
            # "n"-valued symbols appear as a "# ... is not set" comment
            append_fn("{0}{1}={2}".format(self.config.config_prefix, self.name, val)
                      if val == "y" or val == "m" else
                      "# {0}{1} is not set".format(self.config.config_prefix, self.name))
        elif self.type == INT or self.type == HEX:
            append_fn("{0}{1}={2}".format(self.config.config_prefix, self.name, val))
        elif self.type == STRING:
            # Escape \ and "
            append_fn('{0}{1}="{2}"'
                      .format(self.config.config_prefix, self.name,
                              val.replace("\\", "\\\\").replace('"', '\\"')))
        else:
            _internal_error("Internal error while creating .config: unknown "
                            'type "{0}".'.format(self.type))
    def _get_dependent(self):
        """Returns the set of symbols that should be invalidated if the value
        of the symbol changes, because they might be affected by the change.
        Note that this is an internal API -- it's probably of limited
        usefulness to clients."""
        if self.cached_deps is not None:
            return self.cached_deps
        # Start from the direct dependents and add their (transitive)
        # dependents recursively
        res = set(self.dep)
        for s in self.dep:
            res |= s._get_dependent()
        if self.is_choice_sym:
            # Choice symbols also depend (recursively) on their siblings. The
            # siblings are not included in 'dep' to avoid dependency loops.
            for sibling in self.parent.actual_symbols:
                if sibling is not self:
                    res.add(sibling)
                    res |= sibling.dep
                    for s in sibling.dep:
                        res |= s._get_dependent()
        # Cache the transitive closure for later calls
        self.cached_deps = res
        return res
def _has_auto_menu_dep_on(self, on):
"""See Choice._determine_actual_symbols()."""
if not isinstance(self.parent, Choice):
_internal_error("Attempt to determine auto menu dependency for "
"symbol ouside of choice.")
if not self.prompts:
# If we have no prompt, use the menu dependencies instead (what was
# specified with 'depends on')
return self.menu_dep is not None and \
self.config._expr_depends_on(self.menu_dep, on)
for _, cond_expr in self.prompts:
if self.config._expr_depends_on(cond_expr, on):
return True
return False
class Menu(Item):
    """Represents a menu statement."""
    #
    # Public interface
    #
    def get_config(self):
        """Return the Config instance this menu is from."""
        return self.config
    def get_title(self):
        """Returns the title text of the menu."""
        return self.title
    def get_parent(self):
        """Returns the menu or choice statement that contains the menu, or
        None if the menu is at the top level. Note that if statements are
        treated as syntactic sugar and do not have an explicit class
        representation."""
        return self.parent
    def get_location(self):
        """Returns the location of the menu as a (filename, linenr) tuple,
        where filename is a string and linenr an int."""
        return (self.filename, self.linenr)
    def get_items(self, recursive=False):
        """Returns a list containing the items (symbols, menus, choice
        statements and comments) in the menu, in the same order that the
        items appear within the menu.
        recursive (default: False): True if items contained in items within the
        menu should be included recursively (preorder)."""
        if not recursive:
            return self.block
        res = []
        for item in self.block:
            res.append(item)
            if isinstance(item, Menu):
                res.extend(item.get_items(True))
            elif isinstance(item, Choice):
                # Choices cannot nest further menus, so no recursion flag
                res.extend(item.get_items())
        return res
    def get_symbols(self, recursive=False):
        """Returns a list containing the symbols in the menu, in the same order
        that they appear within the menu.
        recursive (default: False): True if symbols contained in items within
        the menu should be included recursively."""
        return [item for item in self.get_items(recursive) if
                isinstance(item, Symbol)]
    def get_visibility(self):
        """Returns the visibility of the menu. This also affects the visibility
        of subitems. See also Symbol.get_visibility()."""
        return self.config._eval_expr(self.dep_expr)
    def get_visible_if_visibility(self):
        """Returns the visibility the menu gets from its 'visible if'
        condition. "y" if the menu has no 'visible if' condition."""
        return self.config._eval_expr(self.visible_if_expr)
    def get_referenced_symbols(self, refs_from_enclosing=False):
        """See Symbol.get_referenced_symbols()."""
        return self.all_referenced_syms if refs_from_enclosing else \
               self.referenced_syms
    def __str__(self):
        """Returns a string containing various information about the menu."""
        depends_on_str = self.config._expr_val_str(self.orig_deps,
                                                   "(no dependencies)")
        visible_if_str = self.config._expr_val_str(self.visible_if_expr,
                                                   "(no dependencies)")
        additional_deps_str = " " + \
          self.config._expr_val_str(self.deps_from_containing,
                                    "(no additional dependencies)")
        return _lines("Menu",
                      "Title                     : " + self.title,
                      "'depends on' dependencies : " + depends_on_str,
                      "'visible if' dependencies : " + visible_if_str,
                      "Additional dependencies from enclosing menus and "
                      "ifs:",
                      additional_deps_str,
                      "Location: {0}:{1}".format(self.filename, self.linenr))
    #
    # Private methods
    #
    def __init__(self):
        """Menu constructor -- not intended to be called directly by
        Kconfiglib clients."""
        self.title = None
        self.dep_expr = None
        self.visible_if_expr = None
        self.block = []  # List of contained items
        self.config = None
        self.parent = None
        # Dependency expression without dependencies from enclosing menus and
        # ifs propagated
        self.orig_deps = None
        # Dependencies inherited from containing menus and ifs
        self.deps_from_containing = None
        # The set of symbols referenced by this menu (see
        # get_referenced_symbols())
        self.referenced_syms = set()
        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and ifs
        self.all_referenced_syms = None
        self.filename = None
        self.linenr = None
    def _make_conf(self, append_fn):
        # Visible menus get a "#\n# <title>\n#" header in the .config
        if self.config._eval_expr(self.dep_expr) != "n" and \
           self.config._eval_expr(self.visible_if_expr) != "n":
            append_fn("\n#\n# {0}\n#".format(self.title))
        _make_block_conf(self.block, append_fn)
class Choice(Item):
    """Represents a choice statement. A choice can be in one of three modes:
    "n" - The choice is not visible and no symbols can be selected.
    "m" - Any number of symbols can be set to "m". The rest will be "n". This
          is safe since potentially conflicting options don't actually get
          compiled into the kernel simultaneously with "m".
    "y" - One symbol will be "y" while the rest are "n".
    Only tristate choices can be in "m" mode, and the visibility of the choice
    is an upper bound on the mode, so that e.g. a choice that depends on a
    symbol with value "m" will be in "m" mode.
    The mode changes automatically when a value is assigned to a symbol within
    the choice.
    See Symbol.get_visibility() too."""
    #
    # Public interface
    #
    def get_config(self):
        """Returns the Config instance this choice is from."""
        return self.config
    def get_name(self):
        """For named choices, returns the name. Returns None for unnamed
        choices. No named choices appear anywhere in the kernel Kconfig files
        as of Linux 3.7.0-rc8."""
        return self.name
    def get_type(self):
        """Returns the type of the choice. See Symbol.get_type()."""
        return self.type
    def get_prompts(self):
        """Returns a list of prompts defined for the choice, in the order they
        appear in the configuration files. Returns the empty list for choices
        with no prompt.
        This list will have a single entry for the vast majority of choices
        having prompts, but having multiple prompts for a single choice is
        possible through having multiple 'choice' entries for it (though I'm
        not sure if that ever happens in practice)."""
        return [prompt for prompt, _ in self.orig_prompts]
    def get_help(self):
        """Returns the help text of the choice, or None if the choice has no
        help text."""
        return self.help
    def get_parent(self):
        """Returns the menu or choice statement that contains the choice, or
        None if the choice is at the top level. Note that if statements are
        treated as syntactic sugar and do not have an explicit class
        representation."""
        return self.parent
    def get_def_locations(self):
        """Returns a list of (filename, linenr) tuples, where filename (string)
        and linenr (int) represent a location where the choice is defined. For
        the vast majority of choices (all of them as of Linux 3.7.0-rc8) this
        list will only contain one element, but its possible for named choices
        to be defined in multiple locations."""
        return self.def_locations
    def get_selection(self):
        """Returns the symbol selected (either by the user or through
        defaults), or None if either no symbol is selected or the mode is not
        "y"."""
        # NO_SELECTION is a sentinel: None in the cache means "not computed"
        if self.cached_selection is not None:
            if self.cached_selection == NO_SELECTION:
                return None
            return self.cached_selection
        if self.get_mode() != "y":
            return self._cache_ret(None)
        # User choice available?
        if self.user_val is not None and _get_visibility(self.user_val) == "y":
            return self._cache_ret(self.user_val)
        if self.optional:
            return self._cache_ret(None)
        return self._cache_ret(self.get_selection_from_defaults())
    def get_selection_from_defaults(self):
        """Like Choice.get_selection(), but acts as if no symbol has been
        selected by the user and no 'optional' flag is in effect."""
        if not self.actual_symbols:
            return None
        # First 'default' whose condition holds wins; otherwise fall back on
        # the first choice symbol
        for symbol, cond_expr in self.def_exprs:
            if self.config._eval_expr(cond_expr) != "n":
                chosen_symbol = symbol
                break
        else:
            chosen_symbol = self.actual_symbols[0]
        # Is the chosen symbol visible?
        if _get_visibility(chosen_symbol) != "n":
            return chosen_symbol
        # Otherwise, pick the first visible symbol
        for sym in self.actual_symbols:
            if _get_visibility(sym) != "n":
                return sym
        return None
    def get_user_selection(self):
        """If the choice is in "y" mode and has a user-selected symbol, returns
        that symbol. Otherwise, returns None."""
        return self.user_val
    def get_items(self):
        """Gets all items contained in the choice in the same order as within
        the configuration ("items" instead of "symbols" since choices and
        comments might appear within choices. This only happens in one place as
        of Linux 3.7.0-rc8, in drivers/usb/gadget/Kconfig)."""
        return self.block
    def get_symbols(self):
        """Returns a list containing the choice's symbols.
        A quirk (perhaps a bug) of Kconfig is that you can put items within a
        choice that will not be considered members of the choice insofar as
        selection is concerned. This happens for example if one symbol within a
        choice 'depends on' the symbol preceding it, or if you put non-symbol
        items within choices.
        As of Linux 3.7.0-rc8, this seems to be used intentionally in one
        place: drivers/usb/gadget/Kconfig.
        This function returns the "proper" symbols of the choice in the order
        they appear in the choice, excluding such items. If you want all items
        in the choice, use get_items()."""
        return self.actual_symbols
    def get_referenced_symbols(self, refs_from_enclosing=False):
        """See Symbol.get_referenced_symbols()."""
        return self.all_referenced_syms if refs_from_enclosing else \
               self.referenced_syms
    def get_visibility(self):
        """Returns the visibility of the choice statement: one of "n", "m" or
        "y". This acts as an upper limit on the mode of the choice (though bool
        choices can only have the mode "y"). See the class documentation for an
        explanation of modes."""
        return _get_visibility(self)
    def get_mode(self):
        """Returns the mode of the choice. See the class documentation for
        an explanation of modes."""
        minimum_mode = "n" if self.optional else "m"
        mode = self.user_mode if self.user_mode is not None else minimum_mode
        # Visibility is an upper bound on the mode
        mode = self.config._eval_min(mode, _get_visibility(self))
        # Promote "m" to "y" for boolean choices
        if mode == "m" and self.type == BOOL:
            return "y"
        return mode
    def is_optional(self):
        """Returns True if the choice has the 'optional' flag set (and so will
        default to "n" mode)."""
        return self.optional
    def __str__(self):
        """Returns a string containing various information about the choice
        statement."""
        return self.config._get_sym_or_choice_str(self)
    #
    # Private methods
    #
    def __init__(self):
        """Choice constructor -- not intended to be called directly by
        Kconfiglib clients."""
        self.name = None  # Yes, choices can be named
        self.type = UNKNOWN
        self.prompts = []
        self.def_exprs = []  # 'default' properties
        self.help = None  # Help text
        self.block = []  # List of contained items
        self.config = None
        self.parent = None
        self.user_val = None
        self.user_mode = None
        # We need to filter out symbols that appear within the choice block but
        # are not considered choice items (see
        # Choice._determine_actual_symbols()) This list holds the "actual"
        # choice items.
        self.actual_symbols = []
        # The prompts and default values without any dependencies from
        # enclosing menus and ifs propagated
        self.orig_prompts = []
        self.orig_def_exprs = []
        # Dependencies inherited from containing menus and ifs
        self.deps_from_containing = None
        # The set of symbols referenced by this choice (see
        # get_referenced_symbols())
        self.referenced_syms = set()
        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and ifs
        self.all_referenced_syms = set()
        # See Choice.get_def_locations()
        self.def_locations = []
        # Cached values
        self.cached_selection = None
        self.cached_visibility = None
        self.optional = False
    def _determine_actual_symbols(self):
        """If a symbol's visibility depends on the preceding symbol within a
        choice, it is no longer viewed as a choice item. (This is quite
        possibly a bug, but some things consciously use it... ugh. It stems
        from automatic submenu creation.) In addition, it's possible to have
        choices and comments within choices, and those shouldn't be considered
        choice items either. Only drivers/usb/gadget/Kconfig seems to depend on
        any of this. This method computes the "actual" items in the choice and
        sets the is_choice_sym flag on them (retrieved via is_choice_symbol()).
        Don't let this scare you: an earlier version simply checked for a
        sequence of symbols where all symbols after the first appeared in the
        'depends on' expression of the first, and that worked fine. The added
        complexity is to be future-proof in the event that
        drivers/usb/gadget/Kconfig turns even more sinister. It might very well
        be overkilling things (especially if that file is refactored ;)."""
        # Items might depend on each other in a tree structure, so we need a
        # stack to keep track of the current tentative parent
        stack = []
        for item in self.block:
            if not isinstance(item, Symbol):
                stack = []
                continue
            while stack:
                if item._has_auto_menu_dep_on(stack[-1]):
                    # The item should not be viewed as a choice item, so don't
                    # set item.is_choice_sym
                    stack.append(item)
                    break
                else:
                    stack.pop()
            else:
                item.is_choice_sym = True
                self.actual_symbols.append(item)
                stack.append(item)
    def _cache_ret(self, selection):
        # As None is used to indicate the lack of a cached value we can't use
        # that to cache the fact that the choice has no selection. Instead, we
        # use the symbolic constant NO_SELECTION.
        if selection is None:
            self.cached_selection = NO_SELECTION
        else:
            self.cached_selection = selection
        return selection
    def _invalidate(self):
        self.cached_selection = None
        self.cached_visibility = None
    def _unset_user_value(self):
        self._invalidate()
        self.user_val = None
        self.user_mode = None
    def _make_conf(self, append_fn):
        _make_block_conf(self.block, append_fn)
class Comment(Item):
    """Represents a comment statement."""
    #
    # Public interface
    #
    def get_config(self):
        """Returns the Config instance this comment is from."""
        return self.config
    def get_text(self):
        """Returns the text of the comment."""
        return self.text
    def get_parent(self):
        """Returns the menu or choice statement that contains the comment, or
        None if the comment is at the top level. Note that if statements are
        treated as syntactic sugar and do not have an explicit class
        representation."""
        return self.parent
    def get_location(self):
        """Returns the location of the comment as a (filename, linenr) tuple,
        where filename is a string and linenr an int."""
        return (self.filename, self.linenr)
    def get_visibility(self):
        """Returns the visibility of the comment. See also
        Symbol.get_visibility()."""
        return self.config._eval_expr(self.dep_expr)
    def get_referenced_symbols(self, refs_from_enclosing=False):
        """See Symbol.get_referenced_symbols()."""
        return self.all_referenced_syms if refs_from_enclosing else \
               self.referenced_syms
    def __str__(self):
        """Returns a string containing various information about the
        comment."""
        dep_str = self.config._expr_val_str(self.orig_deps,
                                            "(no dependencies)")
        additional_deps_str = " " + \
          self.config._expr_val_str(self.deps_from_containing,
                                    "(no additional dependencies)")
        return _lines("Comment",
                      "Text: " + str(self.text),
                      "Dependencies: " + dep_str,
                      "Additional dependencies from enclosing menus and "
                      "ifs:",
                      additional_deps_str,
                      "Location: {0}:{1}".format(self.filename, self.linenr))
    #
    # Private methods
    #
    def __init__(self):
        """Comment constructor -- not intended to be called directly by
        Kconfiglib clients."""
        self.text = None
        self.dep_expr = None
        self.config = None
        self.parent = None
        # Dependency expression without dependencies from enclosing menus and
        # ifs propagated
        self.orig_deps = None
        # Dependencies inherited from containing menus and ifs
        self.deps_from_containing = None
        # The set of symbols referenced by this comment (see
        # get_referenced_symbols())
        self.referenced_syms = set()
        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and ifs
        self.all_referenced_syms = None
        self.filename = None
        self.linenr = None
    def _make_conf(self, append_fn):
        # Visible comments get a "#\n# <text>\n#" block in the .config
        if self.config._eval_expr(self.dep_expr) != "n":
            append_fn("\n#\n# {0}\n#".format(self.text))
class Kconfig_Syntax_Error(Exception):
    """Exception raised for syntax errors encountered while parsing a Kconfig
    file."""
    pass
class Internal_Error(Exception):
    """Exception raised for internal errors (bugs in Kconfiglib itself)."""
    pass
#
# Public functions
#
def tri_less(v1, v2):
    """True if tristate v1 orders strictly below tristate v2, using the
    ordering "n" < "m" < "y"."""
    a, b = TRI_TO_INT[v1], TRI_TO_INT[v2]
    return a < b
def tri_less_eq(v1, v2):
    """True if tristate v1 orders below or equal to tristate v2, using the
    ordering "n" < "m" < "y"."""
    a, b = TRI_TO_INT[v1], TRI_TO_INT[v2]
    return a <= b
def tri_greater(v1, v2):
    """True if tristate v1 orders strictly above tristate v2, using the
    ordering "n" < "m" < "y"."""
    a, b = TRI_TO_INT[v1], TRI_TO_INT[v2]
    return a > b
def tri_greater_eq(v1, v2):
    """True if tristate v1 orders above or equal to tristate v2, using the
    ordering "n" < "m" < "y"."""
    a, b = TRI_TO_INT[v1], TRI_TO_INT[v2]
    return a >= b
#
# Internal classes
#
class _Feed(object):
"""Class for working with sequences in a stream-like fashion; handy for
tokens."""
# This would be more helpful on the item classes, but would remove some
# flexibility
__slots__ = ['items', 'length', 'i']
def __init__(self, items):
self.items = items
self.length = len(self.items)
self.i = 0
def get_next(self):
if self.i >= self.length:
return None
item = self.items[self.i]
self.i += 1
return item
def peek_next(self):
return None if self.i >= self.length else self.items[self.i]
def check(self, token):
"""Check if the next token is 'token'. If so, remove it from the token
feed and return True. Otherwise, leave it in and return False."""
if self.i < self.length and self.items[self.i] == token:
self.i += 1
return True
return False
def unget_all(self):
self.i = 0
class _FileFeed(object):
    """Feeds lines from a file. Keeps track of the filename and current line
    number. Joins any line ending in \\ with the following line. We need to be
    careful to get the line number right in the presence of continuation
    lines."""
    __slots__ = ['filename', 'lines', 'length', 'linenr']
    def __init__(self, filename):
        self.filename = _clean_up_path(filename)
        with open(filename, "r") as f:
            # No interleaving of I/O and processing yet. Don't know if it would
            # help.
            self.lines = f.readlines()
        self.length = len(self.lines)
        self.linenr = 0
    def get_next(self):
        """Returns the next (continuation-joined) line, advancing linenr past
        every physical line consumed. Returns None at end of file."""
        if self.linenr >= self.length:
            return None
        line = self.lines[self.linenr]
        self.linenr += 1
        # Splice "...\<newline>" continuations onto the current line
        while line.endswith("\\\n"):
            line = line[:-2] + self.lines[self.linenr]
            self.linenr += 1
        return line
    def peek_next(self):
        """Like get_next(), but does not advance the feed."""
        linenr = self.linenr
        if linenr >= self.length:
            return None
        line = self.lines[linenr]
        while line.endswith("\\\n"):
            linenr += 1
            line = line[:-2] + self.lines[linenr]
        return line
    def unget(self):
        """Pushes back the most recently fetched logical line, stepping over
        all of its continuation lines."""
        self.linenr -= 1
        while self.lines[self.linenr].endswith("\\\n"):
            self.linenr -= 1
    def next_nonblank(self):
        """Removes lines up to and including the next non-blank (not all-space)
        line and returns it. Returns None if there are no more non-blank
        lines."""
        while 1:
            line = self.get_next()
            if line is None or not line.isspace():
                return line
#
# Internal functions
#
def _get_visibility(sc):
    """Symbols and Choices have a "visibility" that acts as an upper bound on
    the values a user can set for them, corresponding to the visibility in e.g.
    'make menuconfig'. This function calculates the visibility for the Symbol
    or Choice 'sc' -- the logic is nearly identical."""
    if sc.cached_visibility is None:
        # Visibility is the maximum of all prompt conditions
        vis = "n"
        for _, cond_expr in sc.prompts:
            vis = sc.config._eval_max(vis, cond_expr)
        if isinstance(sc, Symbol) and sc.is_choice_sym:
            if sc.type == TRISTATE and vis == "m" and \
               sc.parent.get_mode() == "y":
                # Choice symbols with visibility "m" are not visible if the
                # choice has mode "y"
                vis = "n"
            else:
                # A choice symbol can never be more visible than its choice
                vis = sc.config._eval_min(vis, _get_visibility(sc.parent))
        # Promote "m" to "y" if we're dealing with a non-tristate
        if vis == "m" and sc.type != TRISTATE:
            vis = "y"
        sc.cached_visibility = vis
    return sc.cached_visibility
def _make_and(e1, e2):
    """Builds an AND (&&) expression node from e1 and e2, with trivial
    simplification. A None operand counts as 'y'.
    Note: returns None if e1 == e2 == None."""
    if e1 is None or e1 == "y":
        return e2
    if e2 is None or e2 == "y":
        return e1
    # Merge argument lists where possible to keep the tree flat
    e1_is_and = isinstance(e1, tuple) and e1[0] == AND
    e2_is_and = isinstance(e2, tuple) and e2[0] == AND
    if e1_is_and and e2_is_and:
        return (AND, e1[1] + e2[1])
    if e1_is_and:
        return (AND, e1[1] + [e2])
    if e2_is_and:
        return (AND, e2[1] + [e1])
    return (AND, [e1, e2])
def _make_or(e1, e2):
    """Builds an OR (||) expression node from e1 and e2, with trivial
    simplification and None avoidance. A None operand counts as 'y', which is
    usually what we want, but needs to be kept in mind."""
    # Any 'y' (or None, which equates to 'y') makes the whole thing 'y'
    if e1 is None or e2 is None or e1 == "y" or e2 == "y":
        return "y"
    # NOTE: only e1 is simplified away when "n"; e2 == "n" is kept as in the
    # original tree shape
    if e1 == "n":
        return e2
    # Merge argument lists where possible to keep the tree flat
    e1_is_or = isinstance(e1, tuple) and e1[0] == OR
    e2_is_or = isinstance(e2, tuple) and e2[0] == OR
    if e1_is_or and e2_is_or:
        return (OR, e1[1] + e2[1])
    if e1_is_or:
        return (OR, e1[1] + [e2])
    if e2_is_or:
        return (OR, e2[1] + [e1])
    return (OR, [e1, e2])
def _get_expr_syms_rec(expr, res):
    """_get_expr_syms() helper. Recurses through expressions, adding every
    Symbol encountered to the set 'res'."""
    if isinstance(expr, Symbol):
        res.add(expr)
    elif isinstance(expr, str):
        # Constant (quoted string) -- contributes no symbols
        return
    elif expr[0] == AND or expr[0] == OR:
        for term in expr[1]:
            _get_expr_syms_rec(term, res)
    elif expr[0] == NOT:
        _get_expr_syms_rec(expr[1], res)
    elif expr[0] == EQUAL or expr[0] == UNEQUAL:
        # Comparison operands are leaves: either Symbols or constant strings
        if isinstance(expr[1], Symbol):
            res.add(expr[1])
        if isinstance(expr[2], Symbol):
            res.add(expr[2])
    else:
        _internal_error("Internal error while fetching symbols from an "
                        "expression with token stream {0}.".format(expr))
def _get_expr_syms(expr):
"""Returns the set() of symbols appearing in expr."""
res = set()
if expr is not None:
_get_expr_syms_rec(expr, res)
return res
def _str_val(obj):
"""Returns the value of obj as a string. If obj is not a string (constant
symbol), it must be a Symbol."""
return obj if isinstance(obj, str) else obj.get_value()
def _make_block_conf(block, append_fn):
"""Returns a list of .config strings for a block (list) of items."""
# Collect the substrings in a list and later use join() instead of += to
# build the final .config contents. With older Python versions, this yields
# linear instead of quadratic complexity.
for item in block:
item._make_conf(append_fn)
def _sym_str_string(sym_or_str):
if isinstance(sym_or_str, str):
return '"' + sym_or_str + '"'
return sym_or_str.name
def _intersperse(lst, op):
    """_expr_to_str() helper. Gets the string representation of each expression
    in lst and produces a list where op has been inserted between the
    elements."""
    if not lst:
        return ""
    res = []
    def handle_sub_expr(expr):
        # Parenthesize a sub-expression only when its operator binds more
        # loosely than 'op' (leaves and comparisons never need parens)
        no_parens = isinstance(expr, (str, Symbol)) or \
                    expr[0] in (EQUAL, UNEQUAL) or \
                    PRECEDENCE[op] <= PRECEDENCE[expr[0]]
        if not no_parens:
            res.append("(")
        res.extend(_expr_to_str_rec(expr))
        if not no_parens:
            res.append(")")
    op_str = OP_TO_STR[op]
    handle_sub_expr(lst[0])
    for expr in lst[1:]:
        res.append(op_str)
        handle_sub_expr(expr)
    return res
def _expr_to_str_rec(expr):
    """Returns a list of string fragments representing 'expr'; joined by
    _expr_to_str()."""
    if expr is None:
        return [""]
    if isinstance(expr, (Symbol, str)):
        # Leaf node: a symbol or a constant string
        return [_sym_str_string(expr)]
    rel = expr[0]
    if rel in (AND, OR):
        return _intersperse(expr[1], rel)
    if rel in (EQUAL, UNEQUAL):
        return [_sym_str_string(expr[1]),
                OP_TO_STR[rel],
                _sym_str_string(expr[2])]
    if rel == NOT:
        res = ["!"]
        # Parenthesize unless the operand is a leaf
        if isinstance(expr[1], (str, Symbol)):
            res.extend(_expr_to_str_rec(expr[1]))
        else:
            res.append("(")
            res.extend(_expr_to_str_rec(expr[1]))
            res.append(")")
        return res
def _expr_to_str(expr):
    """Returns the string representation of the expression 'expr'."""
    fragments = _expr_to_str_rec(expr)
    return "".join(fragments)
def _indentation(line):
"""Returns the length of the line's leading whitespace, treating tab stops
as being spaced 8 characters apart."""
line = line.expandtabs()
return len(line) - len(line.lstrip())
def _deindent(line, indent):
"""Deindent 'line' by 'indent' spaces."""
line = line.expandtabs()
if len(line) <= indent:
return line
return line[indent:]
def _is_base_n(s, n):
try:
int(s, n)
return True
except ValueError:
return False
def _lines(*args):
"""Returns a string consisting of all arguments, with newlines inserted
between them."""
return "\n".join(args)
def _comment(s):
"""Returns a new string with "#" inserted before each line in 's'."""
if not s:
return "#"
res = "".join(["#" + line for line in s.splitlines(True)])
if s.endswith("\n"):
return res + "#"
return res
def _clean_up_path(path):
"""Strips an initial "./" and any trailing slashes from 'path'."""
if path.startswith("./"):
path = path[2:]
return path.rstrip("/")
def _build_msg(msg, filename, linenr):
if filename is not None:
msg = "{0}:{1}: ".format(_clean_up_path(filename), linenr) + msg
return msg
def _stderr_msg(msg, filename, linenr):
    """Prints a location-annotated message to stderr."""
    sys.stderr.write("{0}\n".format(_build_msg(msg, filename, linenr)))
def _tokenization_error(s, filename, linenr):
    """Raises Kconfig_Syntax_Error for a line that could not be tokenized."""
    prefix = "" if filename is None else "{0}:{1}: ".format(filename, linenr)
    raise Kconfig_Syntax_Error(
        "{0}Couldn't tokenize '{1}'".format(prefix, s.strip()))
def _parse_error(s, msg, filename, linenr):
    """Raises Kconfig_Syntax_Error for a line that could not be parsed.
    'msg' optionally gives a more specific reason."""
    prefix = "" if filename is None else "{0}:{1}: ".format(filename, linenr)
    suffix = "." if msg is None else ": " + msg
    raise Kconfig_Syntax_Error(
        "{0}Couldn't parse '{1}'{2}".format(prefix, s.strip(), suffix))
def _internal_error(msg):
    """Raises Internal_Error, appending a plea to report the problem."""
    raise Internal_Error(
        msg +
        "\nSorry! You may want to send an email to ulfalizer a.t "
        "Google's email service to tell me about this. Include the message "
        "above and the stack trace and describe what you were doing.")
#
# Internal global constants
#
# Tokens (40 distinct token types, numbered 0..39)
(T_AND, T_OR, T_NOT,
 T_OPEN_PAREN, T_CLOSE_PAREN,
 T_EQUAL, T_UNEQUAL,
 T_MAINMENU, T_MENU, T_ENDMENU,
 T_SOURCE, T_CHOICE, T_ENDCHOICE,
 T_COMMENT, T_CONFIG, T_MENUCONFIG,
 T_HELP, T_IF, T_ENDIF, T_DEPENDS, T_ON,
 T_OPTIONAL, T_PROMPT, T_DEFAULT,
 T_BOOL, T_TRISTATE, T_HEX, T_INT, T_STRING,
 T_DEF_BOOL, T_DEF_TRISTATE,
 T_SELECT, T_IMPLY, T_RANGE, T_OPTION, T_ALLNOCONFIG_Y, T_ENV,
 T_DEFCONFIG_LIST, T_MODULES, T_VISIBLE) = range(40)
# The leading underscore before the function assignments below prevent pydoc
# from listing them. The constants could be hidden too, but they're fairly
# obviously internal anyway, so don't bother spamming the code.
# Keyword to token map. Note that the get() method is assigned directly as a
# small optimization.
_get_keyword = \
    {"mainmenu": T_MAINMENU, "menu": T_MENU, "endmenu": T_ENDMENU,
     "endif": T_ENDIF, "endchoice": T_ENDCHOICE, "source": T_SOURCE,
     "choice": T_CHOICE, "config": T_CONFIG, "comment": T_COMMENT,
     "menuconfig": T_MENUCONFIG, "help": T_HELP, "if": T_IF,
     "depends": T_DEPENDS, "on": T_ON, "optional": T_OPTIONAL,
     "prompt": T_PROMPT, "default": T_DEFAULT, "bool": T_BOOL, "boolean": T_BOOL,
     "tristate": T_TRISTATE, "int": T_INT, "hex": T_HEX, "def_bool": T_DEF_BOOL,
     "def_tristate": T_DEF_TRISTATE, "string": T_STRING, "select": T_SELECT,
     "imply" : T_IMPLY, "range": T_RANGE, "option": T_OPTION,
     "allnoconfig_y": T_ALLNOCONFIG_Y, "env": T_ENV,
     "defconfig_list": T_DEFCONFIG_LIST, "modules": T_MODULES,
     "visible": T_VISIBLE}.get
# Strings to use for True and False
BOOL_STR = {False: "false", True: "true"}
# Tokens after which identifier-like lexemes are treated as strings. T_CHOICE
# is included to avoid symbols being registered for named choices.
STRING_LEX = frozenset((T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING, T_CHOICE,
                        T_PROMPT, T_MENU, T_COMMENT, T_SOURCE, T_MAINMENU))
# Matches the initial token on a line; see _tokenize(). Also eats trailing
# whitespace as an optimization.
_initial_token_re_match = re.compile(r"[^\w]*(\w+)\s*").match
# Matches an identifier/keyword optionally preceded by whitespace. Also eats
# trailing whitespace as an optimization.
_id_keyword_re_match = re.compile(r"\s*([\w./-]+)\s*").match
# Regular expression for finding $-references to symbols in strings
_sym_ref_re_search = re.compile(r"\$[A-Za-z0-9_]+").search
# Integers representing symbol types
UNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(6)
# Strings to use for types
TYPENAME = {UNKNOWN: "unknown", BOOL: "bool", TRISTATE: "tristate",
            STRING: "string", HEX: "hex", INT: "int"}
# Token to type mapping
TOKEN_TO_TYPE = {T_BOOL: BOOL, T_TRISTATE: TRISTATE, T_STRING: STRING,
                 T_INT: INT, T_HEX: HEX}
# Default values for symbols of different types (the value the symbol gets if
# it is not assigned a user value and none of its 'default' clauses kick in)
DEFAULT_VALUE = {BOOL: "n", TRISTATE: "n", STRING: "", INT: "", HEX: ""}
# Indicates that no item is selected in a choice statement
NO_SELECTION = 0
# Integers representing expression types (first element of expression tuples)
AND, OR, NOT, EQUAL, UNEQUAL = range(5)
# Map from tristate values to integers ("n" < "m" < "y" ordering)
TRI_TO_INT = {"n": 0, "m": 1, "y": 2}
# Printing-related stuff
# Operator-to-string map used when pretty-printing expressions
OP_TO_STR = {AND: " && ", OR: " || ", EQUAL: " = ", UNEQUAL: " != "}
# Relative binding strength of operators; higher binds tighter (used by
# _intersperse() to decide where parentheses are needed)
PRECEDENCE = {OR: 0, AND: 1, NOT: 2}
'''
Created on Aug 26, 2014
@author: moloyc
'''
import sqlalchemy
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.orm import exc
import util
import logging
import contextlib
from model import Base, Device, InterfaceDefinition, LeafSetting
from common import SingletonBase
# Logger name shared by the DAO classes below; the logger instance itself is
# created lazily in AbstractDao.__init__ (note the 'global logger' there).
moduleName = 'dao'
logger = None  # populated by AbstractDao.__init__
class AbstractDao(SingletonBase):
    """Singleton data-access layer around a SQLAlchemy engine.

    Concrete subclasses supply the database URL by overriding _getDbUrl().
    Only the sqlite and mysql dialects are supported. Callers obtain
    sessions through the getReadSession()/getReadWriteSession() context
    managers, which take care of commit/rollback and always dispose of the
    scoped session on exit.
    """
    def __init__(self):
        global logger
        logger = logging.getLogger(moduleName)
        # Echo generated SQL only when debug logging is enabled
        debugSql = logger.isEnabledFor(logging.DEBUG)
        self.__engine = None
        self.__sessionFactory = None
        dbUrl = self._getDbUrl()
        if 'sqlite:' in dbUrl:
            self.__engine = sqlalchemy.create_engine(dbUrl, echo = debugSql)
        elif 'mysql:' in dbUrl:
            # pool_recycle keeps pooled connections younger than MySQL's
            # idle timeout; READ COMMITTED isolation as before
            self.__engine = sqlalchemy.create_engine(dbUrl, echo = debugSql,
                    pool_recycle = 7200, isolation_level = "READ COMMITTED")
        else:
            logger.error('Unsupported DB dialect: %s' % dbUrl)
            raise ValueError('Unsupported DB dialect: %s' % dbUrl)
        Base.metadata.create_all(self.__engine)
        self.__sessionFactory = sessionmaker(bind=self.__engine)
        logger.debug('Dao is initialized with Engine')
    def __del__(self):
        # Guard each attribute separately: __init__ can fail after the
        # engine is created but before the session factory exists, which
        # previously made this raise on a None session factory.
        if self.__sessionFactory:
            self.__sessionFactory.close_all()
        if self.__engine:
            self.__engine.dispose()
    @contextlib.contextmanager
    def getReadSession(self):
        """Context manager yielding a scoped session for read-only use.

        Errors raised inside the 'with' block are logged and re-raised;
        the session is always removed on exit.
        """
        # Bind 'session' before entering the try block so the finally
        # clause can never hit an unbound local (the original created the
        # session inside the try, masking creation failures).
        session = scoped_session(self.__sessionFactory)
        try:
            yield session
        except Exception as ex:
            logger.error(ex)
            raise
        finally:
            session.remove()
    @contextlib.contextmanager
    def getReadWriteSession(self):
        """Context manager yielding a scoped session that is committed on
        success and rolled back (error logged and re-raised) on failure.
        The session is always removed on exit."""
        session = scoped_session(self.__sessionFactory)
        try:
            yield session
            session.commit()
        except Exception as ex:
            session.rollback()
            logger.error(ex)
            raise
        finally:
            session.remove()
    def _getRawSession(self):
        """Returns a scoped session; the caller must remove() it itself."""
        return scoped_session(self.__sessionFactory)
    def _getDbUrl(self):
        """Subclasses must return the SQLAlchemy database URL."""
        raise NotImplementedError
    def createObjects(self, session, objects):
        """Stages 'objects' for insertion; commit happens via the session
        context manager."""
        session.add_all(objects)
    def createObjectsAndCommitNow(self, session, objects):
        """Inserts and commits immediately. Deliberately best-effort: on
        failure the error is logged and the transaction rolled back, but
        the exception is not propagated."""
        try:
            session.add_all(objects)
            session.commit()
        except Exception as ex:
            logger.error(ex)
            session.rollback()
    def deleteObject(self, session, obj):
        """Stages a single object for deletion."""
        session.delete(obj)
    def deleteObjects(self, session, objects):
        """Stages every object in 'objects' for deletion."""
        for obj in objects:
            session.delete(obj)
    def updateObjects(self, session, objects):
        """Merges (upserts) every object in 'objects' into the session."""
        for obj in objects:
            session.merge(obj)
    def updateObjectsAndCommitNow(self, session, objects):
        """Merges and commits immediately. Best-effort like
        createObjectsAndCommitNow(): errors are logged, rolled back and
        swallowed."""
        try:
            for obj in objects:
                session.merge(obj)
            session.commit()
        except Exception as ex:
            logger.error(ex)
            session.rollback()
    def getAll(self, session, objectType):
        """Returns all rows of 'objectType' ordered by name."""
        return session.query(objectType).order_by(objectType.name).all()
    def getObjectById(self, session, objectType, id):
        """Returns the row with primary key 'id'; raises NoResultFound
        when missing."""
        return session.query(objectType).filter_by(id = id).one()
    def getUniqueObjectByName(self, session, objectType, name):
        """Returns the single row named 'name', or None when there is no
        such row or the name is ambiguous (both cases are logged)."""
        try:
            return session.query(objectType).filter_by(name = name).one()
        except (exc.NoResultFound, exc.MultipleResultsFound) as ex:
            logger.info(str(ex))
    def getObjectsByName(self, session, objectType, name):
        """Returns all rows named 'name' (possibly an empty list)."""
        return session.query(objectType).filter_by(name = name).all()
    def getIfdByDeviceNamePortName(self, session, deviceName, portName):
        """Returns the InterfaceDefinition for (deviceName, portName), or
        None (logged) when the device or port cannot be resolved uniquely."""
        try:
            device = session.query(Device).filter_by(name = deviceName).one()
            return session.query(InterfaceDefinition)\
                .filter_by(device_id = device.id)\
                .filter_by(name = portName).one()
        except (exc.NoResultFound, exc.MultipleResultsFound) as ex:
            logger.info(str(ex))
    def getLeafSetting(self, session, podId, deviceFamily):
        """Returns the LeafSetting for (podId, deviceFamily), or None
        (logged) when absent."""
        try:
            return session.query(LeafSetting).filter_by(pod_id = podId)\
                .filter_by(deviceFamily = deviceFamily).one()
        except (exc.NoResultFound) as ex:
            logger.info(str(ex))
    def getConnectedInterconnectIFDsFilterFakeOnes(self, session, device):
        '''
        Get interconnect IFDs except following ..
        1. no peer configured
        2. port name is uplink-* for device with known family
        '''
        interconnectPorts = session.query(InterfaceDefinition)\
            .filter(InterfaceDefinition.device_id == device.id)\
            .filter(InterfaceDefinition.peer != None)\
            .order_by(InterfaceDefinition.sequenceNum).all()
        # "uplink-*" ports are placeholders created before the real device
        # family is known; drop them once the family has been resolved
        return [port for port in interconnectPorts
                if not (device.family != 'unknown' and 'uplink-' in port.name)]
class Dao(AbstractDao):
    # Default DAO: takes the database URL from the application-level
    # configuration via the util helper.
    def _getDbUrl(self):
        return util.getDbUrl()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.