code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.autoconfigureprocessor;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Test substitute for Spring Boot's {@code @AutoConfigureOrder} annotation,
 * letting the annotation-processor tests run without a dependency on the
 * real annotation.
 *
 * @author Phillip Webb
 */
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.FIELD })
public @interface TestAutoConfigureOrder {

	/**
	 * The order value; defaults to lowest precedence.
	 * @return the order
	 */
	int value() default Integer.MAX_VALUE;

}
/*
MIT License http://www.opensource.org/licenses/mit-license.php
*/
"use strict";
const RuntimeGlobals = require("../RuntimeGlobals");
const Template = require("../Template");
const HelperRuntimeModule = require("./HelperRuntimeModule");
/** @typedef {import("../Compilation")} Compilation */
class DefinePropertyGettersRuntimeModule extends HelperRuntimeModule {
	constructor() {
		super("define property getters");
	}

	/**
	 * Builds the runtime snippet that installs getter-based (harmony) exports
	 * onto an exports object, skipping keys that already exist there.
	 * @returns {string | null} runtime code
	 */
	generate() {
		const compilation = /** @type {Compilation} */ (this.compilation);
		const { runtimeTemplate } = compilation;
		const hasOwn = RuntimeGlobals.hasOwnProperty;
		const loopBody = [
			"for(var key in definition) {",
			Template.indent([
				`if(${hasOwn}(definition, key) && !${hasOwn}(exports, key)) {`,
				Template.indent([
					"Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });"
				]),
				"}"
			]),
			"}"
		];
		const fnExpr = runtimeTemplate.basicFunction("exports, definition", loopBody);
		return Template.asString([
			"// define getter functions for harmony exports",
			`${RuntimeGlobals.definePropertyGetters} = ${fnExpr};`
		]);
	}
}

module.exports = DefinePropertyGettersRuntimeModule;
from contextlib import suppress
from docutils import nodes
from docutils.parsers.rst import Directive
from sklearn.utils import all_estimators
from sklearn.utils._test_common.instance_generator import _construct_instances
from sklearn.utils._testing import SkipTest
class AllowNanEstimators(Directive):
    """Sphinx directive that renders a bullet list of every scikit-learn
    estimator whose tags declare support for NaN input, grouped by
    estimator type."""

    @staticmethod
    def make_paragraph_for_estimator_type(estimator_type):
        # Intro bullet: bold caption with the estimator type rendered as a
        # literal.
        intro = nodes.list_item()
        intro += nodes.strong(text="Estimators that allow NaN values for type ")
        intro += nodes.literal(text=f"{estimator_type}")
        intro += nodes.strong(text=":\n")
        exists = False
        lst = nodes.bullet_list()
        for name, est_class in all_estimators(type_filter=estimator_type):
            # Estimators whose instance construction is not supported raise
            # SkipTest; those are silently left out of the list.
            with suppress(SkipTest):
                # Here we generate the text only for one instance. This directive
                # should not be used for meta-estimators where tags depend on the
                # sub-estimator.
                est = next(_construct_instances(est_class))
                if est.__sklearn_tags__().input_tags.allow_nan:
                    # Link each estimator name to its generated API page.
                    module_name = ".".join(est_class.__module__.split(".")[:2])
                    class_title = f"{est_class.__name__}"
                    class_url = f"./generated/{module_name}.{class_title}.html"
                    item = nodes.list_item()
                    para = nodes.paragraph()
                    para += nodes.reference(
                        class_title, text=class_title, internal=False, refuri=class_url
                    )
                    exists = True
                    item += para
                    lst += item
        intro += lst
        # Return None when no estimator of this type supports NaN, so the
        # caller can omit the section entirely.
        return [intro] if exists else None

    def run(self):
        lst = nodes.bullet_list()
        for i in ["cluster", "regressor", "classifier", "transformer"]:
            item = self.make_paragraph_for_estimator_type(i)
            if item is not None:
                lst += item
        return [lst]
def setup(app):
    """Sphinx extension entry point: register the ``allow_nan_estimators``
    directive and declare the extension parallel-safe."""
    app.add_directive("allow_nan_estimators", AllowNanEstimators)
    metadata = {
        "version": "0.1",
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
    return metadata
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from ..compat import socketserver
from ..compat import SimpleHTTPServer
from ..base import cv2
import time
import socket
import re
import threading
__all__ = [
'JpegStreamHandler', 'JpegStreamer', 'JpegTCPServer', 'VideoStream'
]
_jpeg_streamers = {}
class JpegStreamHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """
    Handles requests to the threaded HTTP server.
    Once initialized, any request to this port will receive
    a multipart/replace jpeg.
    """

    def do_GET(self):
        # BUG FIX: BaseHTTPRequestHandler dispatches GET requests to a method
        # named ``do_GET``; the previous name ``get`` was never called, so
        # every request was answered with "501 Unsupported method".
        global _jpeg_streamers
        if self.path == '/' or not self.path:
            # Serve a minimal HTML page whose background is the MJPEG stream.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            # NOTE(review): writing str to wfile only works on Python 2; on
            # Python 3 these payloads must be bytes -- confirm which runtime
            # the ..compat shims target.
            self.wfile.write("""
            <html>
            <head>
            <style type=text/css>
                body {
                    background-image: url(/stream);
                    background-repeat: no-repeat;
                    background-position: center top;
                    background-attachment: fixed;
                    height: 100%;
                }
            </style>
            </head>
            <body>
            &nbsp;
            </body>
            </html>
            """)
            return
        elif self.path == '/stream':
            self.send_response(200)
            self.send_header('Connection', 'close')
            self.send_header('Max-Age', '0')
            self.send_header('Expires', '0')
            self.send_header('Cache-Control', 'no-cache, private')
            self.send_header('Pragma', 'no-cache')
            self.send_header('Content-Type',
                             'multipart/x-mixed-replace; boundary=--BOUNDARYSTRING')
            self.end_headers()
            host, port = self.server.socket.getsockname()[:2]
            count = 0
            timeout = 0.75
            last_time_served = 0
            while True:
                streamer = _jpeg_streamers[port]
                # BUG FIX: JpegStreamer exposes ``refresh_time`` and
                # ``sleep_time``; the old code read the non-existent
                # ``refreshtime``/``sleeptime`` attributes, raising
                # AttributeError on the first iteration.
                if (streamer.refresh_time > last_time_served or
                        time.time() - timeout > last_time_served):
                    try:
                        # NOTE(review): ``jpgdata`` is never defined on
                        # JpegStreamer (``frame_buffer`` points back at the
                        # streamer itself) -- confirm which buffer
                        # Image.save() actually fills before relying on this
                        # endpoint.
                        jpg = streamer.jpgdata.getvalue()
                        self.wfile.write('--BOUNDARYSTRING\r\n')
                        self.send_header('Content-type', 'image/jpeg')
                        self.send_header('Content-Length', str(len(jpg)))
                        self.end_headers()
                        self.wfile.write(jpg + '\r\n')
                        last_time_served = time.time()
                    except (socket.error, IOError):
                        # Client went away; stop streaming to this socket.
                        return
                count += 1
                time.sleep(streamer.sleep_time)
class JpegTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server for the MJPEG stream: each client connection is
    handled in its own daemon thread, and the listening address can be
    rebound immediately on restart."""
    allow_reuse_address = True
    daemon_threads = True
# factory class for jpeg tcp server.
class JpegStreamer(object):
    """
    Allow user to stream a jpeg encoded file to a HTTP port. Any
    updates to the jpeg file will automatically be pushed to the
    browser via multipart/replace content type.
    initialization:
    js = JpegStreamer()
    update:
    img.save(js)
    open a browser and display:
    import webbrowser
    webbrowser.open(js.url)
    Note 3 optional parameters on the constructor:
    - port (default 8080) which sets the TCP port you need to connect to
    - sleep time (default 0.1) how often to update. Above 1 second seems
      to cause dropped connections in Google chrome Once initialized,
      the buffer and sleeptime can be modified and will function
      properly -- port will not.
    """
    # Class-level defaults; real values are assigned per instance in __init__.
    server = ''
    host = ''
    port = ''
    sleep_time = ''
    frame_buffer = ''
    counter = 0
    refresh_time = 0

    def __init__(self, host_port=8080, sleeptime=0.1):
        """Start a daemon HTTP server for this streamer.

        :param host_port: int port, "host:port" string, or (host, port) tuple
        :param sleeptime: delay between frame pushes, in seconds
        """
        global _jpeg_streamers
        # Accept an int port, a "host:port" string, or a (host, port) tuple.
        if isinstance(host_port, int):
            self.port = host_port
            self.host = 'localhost'
        elif isinstance(host_port, str) and re.search(':', host_port):
            self.host, self.port = host_port.split(':')
            self.port = int(self.port)
        elif isinstance(host_port, tuple):
            self.host, self.port = host_port
        else:
            self.port = 8080
            self.host = 'localhost'
        self.sleep_time = sleeptime
        # BUG FIX: TCPServer expects a (host, port) address pair; the
        # original passed (self.host, self.host), so binding always failed.
        self.server = JpegTCPServer((self.host, self.port), JpegStreamHandler)
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        # Register by port so JpegStreamHandler can find this streamer.
        _jpeg_streamers[self.port] = self
        self.server_thread.daemon = True
        self.server_thread.start()
        self.frame_buffer = self

    def url(self):
        """
        Returns the JpegStreams Webbrowser-appropriate URL, if not provided
        in the constructor, it defaults to "http://localhost:8080"
        :return: url
        """
        return 'http://' + self.host + ':' + str(self.port) + '/'

    def stream_url(self):
        """
        Returns the URL of the MJPEG stream. If host and port are not set in
        the constructor, defaults to "http://localhost:8080/stream/"
        :return: url
        """
        return self.url() + 'stream'
class VideoStream(object):
    """
    Allows user save video files in different formats.
    You can initialize it by specifying the file you want to output::
        vs = VideoStream("hello.avi")
    You can also specify a framerate, and if you want to "fill" in
    missed frames. So if you want to record a real time video you may
    want to do this::
        # note these are default values
        vs = VideoStream("myvideo.avi", 25, True)
    Where if you want to do a stop-motion animation, you would want to
    turn fill off::
        vs_animation = VideoStream("cartoon.avi", 15, False)
    If you select a fill, the VideoStream will do its best to stay
    close to "real time" by duplicating frames or dropping frames
    when the clock doesn't sync up with the file writes.
    You can save a frame to the video by using the Image.save() function::
        my_camera.getImage().save(vs)
    """
    # Class-level defaults; overwritten per instance.
    fps = 25
    filename = ''
    writer = ''
    fourcc = ''
    frame_fill = True
    video_time = 0.0
    start_time = 0.0
    frame_count = 0
    last_frame = None

    def __init__(self, filename, fps=25, frame_fill=True):
        """Create a video stream.

        :param filename: output video file path
        :param fps: target frames per second
        :param frame_fill: duplicate/drop frames to track real time
        """
        self.filename = filename
        self.fps = fps
        self.frame_fill = frame_fill
        self.fourcc = cv2.VideoWriter_fourcc('I', 'Y', 'U', 'V')

    def init_writer(self, size):
        """Lazily create the cv2.VideoWriter once the frame size is known.

        :param size: (width, height) of the frames to be written
        """
        self.writer = cv2.VideoWriter(self.filename, self.fourcc, self.fps,
                                      size, 1)
        self.video_time = 0.0
        self.start_time = time.time()

    def write_frame(self, img):
        """
        Write a frame to the display object. this is automatically called
        by image.save() but you can use this function to save just the
        bitmap as well so image markup is not implicit,typically you use
        image.save() but this allows for more finer control
        Args:
            img (Image, array like): the image to be write
        Returns:
            None
        """
        if not self.writer:
            self.init_writer(img.size)
            self.last_frame = img
        frame_time = 1.0 / float(self.fps)
        target_time = self.start_time + frame_time * self.frame_count
        real_time = time.time()
        if self.frame_fill:
            # see if we need to do anything to adjust to real time
            if target_time > real_time + frame_time:
                # if we're more than one frame ahead,
                # save the last_frame, but don't write to video out
                self.last_frame = img
                return
            elif target_time < real_time - frame_time:
                # we're at least one frame behind
                frames_behind = int((real_time - target_time) * self.fps) + 1
                # BUG FIX: this module does `from __future__ import division`,
                # so '/' yields a float and range() would raise TypeError;
                # floor division keeps the count an int.
                last_frames = frames_behind // 2
                # Backfill half with the previous frame, half with the new one.
                for _ in range(last_frames):
                    self.frame_count += 1
                    self.writer.write(self.last_frame.narray)
                frames = frames_behind - last_frames
                for _ in range(frames):
                    self.frame_count += 1
                    self.writer.write(img.narray)
            else:
                self.frame_count += 1
                self.writer.write(img.narray)
        else:
            # Stop-motion mode: every submitted frame is written exactly once.
            self.frame_count += 1
            self.writer.write(img.narray)
        self.last_frame = img
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes as attrs
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import portsecurity as psec
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class PortSecurityBinding(model_base.BASEV2):
    """Per-port port-security flag, keyed on the port id; rows are removed
    automatically when the port is deleted (ON DELETE CASCADE)."""
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Add a relationship to the Port model in order to be able to
    # instruct SQLAlchemy to eagerly load the port security binding
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("port_security", uselist=False,
                            cascade='delete', lazy='joined'))
class NetworkSecurityBinding(model_base.BASEV2):
    """Per-network default port-security flag, keyed on the network id; rows
    are removed automatically when the network is deleted."""
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Add a relationship to the Port model in order to be able to instruct
    # SQLAlchemy to eagerly load default port security setting for ports
    # on this network
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref("port_security", uselist=False,
                            cascade='delete', lazy='joined'))
class PortSecurityDbMixin(object):
    """Mixin class to add port security."""

    def _process_network_port_security_create(
        self, context, network_req, network_res):
        """Persist the network-level port-security flag and echo it into the
        response dict."""
        with context.session.begin(subtransactions=True):
            db = NetworkSecurityBinding(
                network_id=network_res['id'],
                port_security_enabled=network_req[psec.PORTSECURITY])
            context.session.add(db)
        network_res[psec.PORTSECURITY] = network_req[psec.PORTSECURITY]
        return self._make_network_port_security_dict(db)

    def _process_port_port_security_create(
        self, context, port_req, port_res):
        """Persist the port-level port-security flag and echo it into the
        response dict."""
        with context.session.begin(subtransactions=True):
            db = PortSecurityBinding(
                port_id=port_res['id'],
                port_security_enabled=port_req[psec.PORTSECURITY])
            context.session.add(db)
        port_res[psec.PORTSECURITY] = port_req[psec.PORTSECURITY]
        return self._make_port_security_dict(db)

    def _extend_port_security_dict(self, response_data, db_data):
        # Only expose the attribute when the plugin advertises the extension.
        if ('port-security' in
                getattr(self, 'supported_extension_aliases', [])):
            psec_value = db_data['port_security'][psec.PORTSECURITY]
            response_data[psec.PORTSECURITY] = psec_value

    def _get_network_security_binding(self, context, network_id):
        """Return port_security_enabled for a network.

        :raises psec.PortSecurityBindingNotFound: if no binding exists.
        """
        try:
            query = self._model_query(context, NetworkSecurityBinding)
            binding = query.filter(
                NetworkSecurityBinding.network_id == network_id).one()
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
        return binding[psec.PORTSECURITY]

    def _get_port_security_binding(self, context, port_id):
        """Return port_security_enabled for a port.

        :raises psec.PortSecurityBindingNotFound: if no binding exists.
        """
        try:
            query = self._model_query(context, PortSecurityBinding)
            binding = query.filter(
                PortSecurityBinding.port_id == port_id).one()
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
        return binding[psec.PORTSECURITY]

    def _process_port_port_security_update(
        self, context, port_req, port_res):
        """Update the port binding when the request carries the attribute;
        a request without it is a no-op."""
        if psec.PORTSECURITY in port_req:
            port_security_enabled = port_req[psec.PORTSECURITY]
        else:
            return
        try:
            query = self._model_query(context, PortSecurityBinding)
            port_id = port_res['id']
            binding = query.filter(
                PortSecurityBinding.port_id == port_id).one()
            binding.port_security_enabled = port_security_enabled
            port_res[psec.PORTSECURITY] = port_security_enabled
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()

    def _process_network_port_security_update(
        self, context, network_req, network_res):
        """Update the network binding when the request carries the attribute;
        a request without it is a no-op."""
        if psec.PORTSECURITY in network_req:
            port_security_enabled = network_req[psec.PORTSECURITY]
        else:
            return
        try:
            query = self._model_query(context, NetworkSecurityBinding)
            network_id = network_res['id']
            binding = query.filter(
                NetworkSecurityBinding.network_id == network_id).one()
            binding.port_security_enabled = port_security_enabled
            network_res[psec.PORTSECURITY] = port_security_enabled
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()

    def _make_network_port_security_dict(self, port_security, fields=None):
        res = {'network_id': port_security['network_id'],
               psec.PORTSECURITY: port_security[psec.PORTSECURITY]}
        return self._fields(res, fields)

    def _determine_port_security_and_has_ip(self, context, port):
        """Returns a tuple of booleans (port_security_enabled, has_ip).
        Port_security is the value associated with the port if one is present
        otherwise the value associated with the network is returned. has_ip is
        if the port is associated with an ip or not.
        """
        has_ip = self._ip_on_port(port)
        # we don't apply security groups for dhcp, router
        if (port.get('device_owner') and
                port['device_owner'].startswith('network:')):
            return (False, has_ip)
        if (psec.PORTSECURITY in port and
                isinstance(port[psec.PORTSECURITY], bool)):
            port_security_enabled = port[psec.PORTSECURITY]
        # If port has an ip and security_groups are passed in
        # conveniently set port_security_enabled to true this way
        # user doesn't also have to pass in port_security_enabled=True
        # when creating ports.
        # BUG FIX: is_attr_set must be given the attribute value, not the
        # literal string 'security_groups' (which is always "set"); the old
        # code enabled port security for every port with an IP.
        elif has_ip and attrs.is_attr_set(port.get('security_groups')):
            port_security_enabled = True
        else:
            port_security_enabled = self._get_network_security_binding(
                context, port['network_id'])
        return (port_security_enabled, has_ip)

    def _make_port_security_dict(self, port, fields=None):
        res = {'port_id': port['port_id'],
               psec.PORTSECURITY: port[psec.PORTSECURITY]}
        return self._fields(res, fields)

    def _ip_on_port(self, port):
        # True when the port dict carries at least one fixed IP.
        return bool(port.get('fixed_ips'))
# Register dict extend functions for ports and networks
for _resource in (attrs.NETWORKS, attrs.PORTS):
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        _resource, ['_extend_port_security_dict'])
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, David Passante <@dpassante>
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_disk_offering
description:
- Create and delete disk offerings for guest VMs.
- Update display_text or display_offering of existing disk offering.
short_description: Manages disk offerings on Apache CloudStack based clouds.
version_added: "2.7"
author:
- "David Passante (@dpassante)"
- "René Moser (@resmo)"
options:
disk_size:
description:
- Size of the disk offering in GB (1GB = 1,073,741,824 bytes).
bytes_read_rate:
description:
- Bytes read rate of the disk offering.
bytes_write_rate:
description:
- Bytes write rate of the disk offering.
display_text:
description:
- Display text of the disk offering.
- If not set, C(name) will be used as C(display_text) while creating.
domain:
description:
- Domain the disk offering is related to.
- Public for all domains and subdomains if not set.
hypervisor_snapshot_reserve:
description:
- Hypervisor snapshot reserve space as a percent of a volume.
- Only for managed storage using Xen or VMware.
customized:
description:
- Whether disk offering iops is custom or not.
type: bool
default: false
iops_read_rate:
description:
- IO requests read rate of the disk offering.
iops_write_rate:
description:
- IO requests write rate of the disk offering.
iops_max:
description:
- Max. iops of the disk offering.
iops_min:
description:
- Min. iops of the disk offering.
name:
description:
- Name of the disk offering.
required: true
provisioning_type:
description:
- Provisioning type used to create volumes.
choices:
- thin
- sparse
- fat
state:
description:
- State of the disk offering.
choices:
- present
- absent
default: present
storage_type:
description:
- The storage type of the disk offering.
choices:
- local
- shared
storage_tags:
description:
- The storage tags for this disk offering.
aliases:
- storage_tag
display_offering:
description:
- An optional field, whether to display the offering to the end user or not.
type: bool
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Create a disk offering with local storage
local_action:
module: cs_disk_offering
name: small
display_text: Small 10GB
disk_size: 10
storage_type: local
- name: Create or update a disk offering with shared storage
local_action:
module: cs_disk_offering
name: small
display_text: Small 10GB
disk_size: 10
storage_type: shared
storage_tags: SAN01
- name: Remove a disk offering
local_action:
module: cs_disk_offering
name: small
state: absent
'''
RETURN = '''
---
id:
description: UUID of the disk offering
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
disk_size:
description: Size of the disk offering in GB
returned: success
type: int
sample: 10
iops_max:
description: Max iops of the disk offering
returned: success
type: int
sample: 1000
iops_min:
description: Min iops of the disk offering
returned: success
type: int
sample: 500
bytes_read_rate:
description: Bytes read rate of the disk offering
returned: success
type: int
sample: 1000
bytes_write_rate:
description: Bytes write rate of the disk offering
returned: success
type: int
sample: 1000
iops_read_rate:
description: IO requests per second read rate of the disk offering
returned: success
type: int
sample: 1000
iops_write_rate:
description: IO requests per second write rate of the disk offering
returned: success
type: int
sample: 1000
created:
description: Date the offering was created
returned: success
type: string
sample: 2017-11-19T10:48:59+0000
display_text:
description: Display text of the offering
returned: success
type: string
sample: Small 10GB
domain:
description: Domain the offering is into
returned: success
type: string
sample: ROOT
storage_tags:
description: List of storage tags
returned: success
type: list
sample: [ 'eco' ]
customized:
description: Whether the offering uses custom IOPS or not
returned: success
type: bool
sample: false
name:
description: Name of the system offering
returned: success
type: string
sample: Micro
provisioning_type:
description: Provisioning type used to create volumes
returned: success
type: string
sample: thin
storage_type:
description: Storage type used to create volumes
returned: success
type: string
sample: shared
display_offering:
description: Whether to display the offering to the end user or not.
returned: success
type: bool
sample: false
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackDiskOffering(AnsibleCloudStack):
    """CloudStack disk-offering management: lookup, create, update, delete."""

    def __init__(self, module):
        super(AnsibleCloudStackDiskOffering, self).__init__(module)
        # Map CloudStack API result keys to the snake_case keys the module
        # returns to Ansible.
        self.returns = {
            'disksize': 'disk_size',
            'diskBytesReadRate': 'bytes_read_rate',
            'diskBytesWriteRate': 'bytes_write_rate',
            'diskIopsReadRate': 'iops_read_rate',
            'diskIopsWriteRate': 'iops_write_rate',
            'maxiops': 'iops_max',
            'miniops': 'iops_min',
            'hypervisorsnapshotreserve': 'hypervisor_snapshot_reserve',
            'customized': 'customized',
            'provisioningtype': 'provisioning_type',
            'storagetype': 'storage_type',
            'tags': 'storage_tags',
            'displayoffering': 'display_offering',
        }
        # Cache for the offering looked up by get_disk_offering().
        self.disk_offering = None

    def get_disk_offering(self):
        # Look up the offering by exact name within the requested domain.
        args = {
            'name': self.module.params.get('name'),
            'domainid': self.get_domain(key='id'),
        }
        disk_offerings = self.query_api('listDiskOfferings', **args)
        if disk_offerings:
            for disk_offer in disk_offerings['diskoffering']:
                if args['name'] == disk_offer['name']:
                    self.disk_offering = disk_offer
        return self.disk_offering

    def present_disk_offering(self):
        # Create the offering if missing, otherwise update mutable fields.
        disk_offering = self.get_disk_offering()
        if not disk_offering:
            disk_offering = self._create_offering(disk_offering)
        else:
            disk_offering = self._update_offering(disk_offering)
        return disk_offering

    def absent_disk_offering(self):
        # Delete the offering when it exists; respects check mode.
        disk_offering = self.get_disk_offering()
        if disk_offering:
            self.result['changed'] = True
            if not self.module.check_mode:
                args = {
                    'id': disk_offering['id'],
                }
                self.query_api('deleteDiskOffering', **args)
        return disk_offering

    def _create_offering(self, disk_offering):
        # Creation is always a change; the API call is skipped in check mode.
        self.result['changed'] = True
        args = {
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'disksize': self.module.params.get('disk_size'),
            'bytesreadrate': self.module.params.get('bytes_read_rate'),
            'byteswriterate': self.module.params.get('bytes_write_rate'),
            'customized': self.module.params.get('customized'),
            'domainid': self.get_domain(key='id'),
            'hypervisorsnapshotreserve': self.module.params.get('hypervisor_snapshot_reserve'),
            'iopsreadrate': self.module.params.get('iops_read_rate'),
            'iopswriterate': self.module.params.get('iops_write_rate'),
            'maxiops': self.module.params.get('iops_max'),
            'miniops': self.module.params.get('iops_min'),
            'provisioningtype': self.module.params.get('provisioning_type'),
            # NOTE(review): 'disk_offering_details' is not declared in
            # main()'s argument_spec, so this is always None -- confirm
            # whether the option was meant to be exposed.
            'diskofferingdetails': self.module.params.get('disk_offering_details'),
            'storagetype': self.module.params.get('storage_type'),
            'tags': self.module.params.get('storage_tags'),
            'displayoffering': self.module.params.get('display_offering'),
        }
        if not self.module.check_mode:
            res = self.query_api('createDiskOffering', **args)
            disk_offering = res['diskoffering']
        return disk_offering

    def _update_offering(self, disk_offering):
        # Only display_text / display_offering are updated on an existing
        # offering; other properties are immutable in the API.
        args = {
            'id': disk_offering['id'],
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'displayoffering': self.module.params.get('display_offering'),
        }
        if self.has_changed(args, disk_offering):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('updateDiskOffering', **args)
                disk_offering = res['diskoffering']
        return disk_offering

    def get_result(self, disk_offering):
        super(AnsibleCloudStackDiskOffering, self).get_result(disk_offering)
        if disk_offering:
            # Prevent confusion, the api returns a tags key for storage tags.
            if 'tags' in disk_offering:
                # NOTE(review): the `or` fallback is dead code -- str.split()
                # always returns a non-empty list, even for ''.
                self.result['storage_tags'] = disk_offering['tags'].split(',') or [disk_offering['tags']]
            if 'tags' in self.result:
                del self.result['tags']
        return self.result
def main():
    """Ansible module entry point: build the argument spec, dispatch on
    ``state`` and exit with the resulting disk-offering facts."""
    argument_spec = cs_argument_spec()
    argument_spec.update(
        name=dict(required=True),
        display_text=dict(),
        domain=dict(),
        disk_size=dict(type='int'),
        display_offering=dict(type='bool'),
        hypervisor_snapshot_reserve=dict(type='int'),
        bytes_read_rate=dict(type='int'),
        bytes_write_rate=dict(type='int'),
        customized=dict(type='bool'),
        iops_read_rate=dict(type='int'),
        iops_write_rate=dict(type='int'),
        iops_max=dict(type='int'),
        iops_min=dict(type='int'),
        provisioning_type=dict(choices=['thin', 'sparse', 'fat']),
        storage_type=dict(choices=['local', 'shared']),
        storage_tags=dict(type='list', aliases=['storage_tag']),
        state=dict(choices=['present', 'absent'], default='present'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True,
    )

    acs_do = AnsibleCloudStackDiskOffering(module)
    if module.params.get('state') == 'absent':
        disk_offering = acs_do.absent_disk_offering()
    else:
        disk_offering = acs_do.present_disk_offering()

    module.exit_json(**acs_do.get_result(disk_offering))


if __name__ == '__main__':
    main()
#!/usr/bin/env python
#
# Unit tests for the multiprocessing package
#
import unittest
import Queue
import time
import sys
import os
import gc
import signal
import array
import socket
import random
import logging
import errno
from test import test_support
from StringIO import StringIO
_multiprocessing = test_support.import_module('_multiprocessing')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
# Work around broken sem_open implementations
test_support.import_module('multiprocessing.synchronize')
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = True
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Python 2 alias: plain str is the byte-string type here.
latin = str
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False     # making true makes tests take a lot longer
                          # and can sometimes cause some non-serious
                          # failures because some calls block a bit
                          # longer than expected
if CHECK_TIMINGS:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
# Some platforms ship semaphores that cannot report their current value;
# tests that need get_value() check this flag first.
HAVE_GETVALUE = not getattr(_multiprocessing,
                            'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
# Highest file descriptor number; fall back to 256 where sysconf is missing.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except:
    MAXFD = 256
#
# Some tests require ctypes
#
try:
    from ctypes import Structure, c_int, c_double
except ImportError:
    # ctypes unavailable: placeholders keep class definitions importable;
    # the ctypes-dependent tests are skipped elsewhere.
    Structure = object
    c_int = c_double = None
def check_enough_semaphores():
    """Check that the system supports enough semaphores to run the test."""
    # POSIX guarantees at least this many semaphores.
    nsems_min = 256
    try:
        nsems = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    # -1 means "no limit"; anything at or above the POSIX minimum is fine.
    if nsems != -1 and nsems < nsems_min:
        raise unittest.SkipTest("The OS doesn't support enough semaphores "
                                "to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
    """Callable proxy that records how long the wrapped function took.

    After each call, ``elapsed`` holds the wall-clock duration (seconds)
    of the most recent invocation, even if the call raised.
    """

    def __init__(self, func):
        self.func = func
        self.elapsed = None

    def __call__(self, *args, **kwds):
        started = time.time()
        try:
            return self.func(*args, **kwds)
        finally:
            self.elapsed = time.time() - started
#
# Base class for test cases
#
class BaseTestCase(object):
    """Shared helpers mixed into the concrete TestCase classes; the unittest
    assert* methods come from the TestCase half of the final class."""

    # Which flavours (process/manager/thread) this suite applies to.
    ALLOWED_TYPES = ('processes', 'manager', 'threads')

    def assertTimingAlmostEqual(self, a, b):
        # Timing checks are flaky; only enforced when CHECK_TIMINGS is on.
        if CHECK_TIMINGS:
            self.assertAlmostEqual(a, b, 1)

    def assertReturnsIfImplemented(self, value, func, *args):
        # Some platforms raise NotImplementedError (e.g. broken
        # sem_getvalue); treat that as a pass.
        try:
            res = func(*args)
        except NotImplementedError:
            pass
        else:
            return self.assertEqual(value, res)

    # For the sanity of Windows users, rather than crashing or freezing in
    # multiple ways.
    def __reduce__(self, *args):
        raise NotImplementedError("shouldn't try to pickle a test case")

    __reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
    """Return the current value of the semaphore-like object *self*.

    Tries, in order: the object's own ``get_value()`` method, the
    name-mangled ``_Semaphore__value`` used by ``threading.Semaphore``,
    then a plain ``_value`` attribute. Raises NotImplementedError when
    none of these exists.
    """
    try:
        return self.get_value()
    except AttributeError:
        pass
    try:
        return self._Semaphore__value
    except AttributeError:
        pass
    try:
        return self._value
    except AttributeError:
        raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
    """Tests for Process objects; mixed into concrete TestCase classes that
    supply self.Process, self.Queue, self.current_process, etc. for each
    flavour listed in ALLOWED_TYPES."""

    ALLOWED_TYPES = ('processes', 'threads')

    def test_current(self):
        # current_process() identity/authkey only make sense for processes.
        if self.TYPE == 'threads':
            return
        current = self.current_process()
        authkey = current.authkey
        self.assertTrue(current.is_alive())
        self.assertTrue(not current.daemon)
        self.assertIsInstance(authkey, bytes)
        self.assertTrue(len(authkey) > 0)
        self.assertEqual(current.ident, os.getpid())
        self.assertEqual(current.exitcode, None)

    @classmethod
    def _test(cls, q, *args, **kwds):
        # Child-side helper: report args/kwargs and identity back via queue.
        current = cls.current_process()
        q.put(args)
        q.put(kwds)
        q.put(current.name)
        if cls.TYPE != 'threads':
            q.put(bytes(current.authkey))
            q.put(current.pid)

    def test_process(self):
        q = self.Queue(1)
        e = self.Event()
        args = (q, 1, 2)
        kwargs = {'hello':23, 'bye':2.54}
        name = 'SomeProcess'
        p = self.Process(
            target=self._test, args=args, kwargs=kwargs, name=name
            )
        p.daemon = True
        current = self.current_process()
        if self.TYPE != 'threads':
            self.assertEqual(p.authkey, current.authkey)
        # Pre-start state.
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.daemon, True)
        self.assertNotIn(p, self.active_children())
        self.assertTrue(type(self.active_children()) is list)
        self.assertEqual(p.exitcode, None)
        p.start()
        # Running state.
        self.assertEqual(p.exitcode, None)
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        # args[1:] because args[0] is the queue itself.
        self.assertEqual(q.get(), args[1:])
        self.assertEqual(q.get(), kwargs)
        self.assertEqual(q.get(), p.name)
        if self.TYPE != 'threads':
            self.assertEqual(q.get(), current.authkey)
            self.assertEqual(q.get(), p.pid)
        p.join()
        # Post-exit state.
        self.assertEqual(p.exitcode, 0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())

    @classmethod
    def _test_terminate(cls):
        # Child-side helper: sleep long enough to be terminated mid-run.
        time.sleep(1000)

    def test_terminate(self):
        # Threads cannot be terminated externally.
        if self.TYPE == 'threads':
            return
        p = self.Process(target=self._test_terminate)
        p.daemon = True
        p.start()
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        self.assertEqual(p.exitcode, None)
        p.terminate()
        # join() after terminate() should return immediately.
        join = TimingWrapper(p.join)
        self.assertEqual(join(), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())
        p.join()
        # XXX sometimes get p.exitcode == 0 on Windows ...
        #self.assertEqual(p.exitcode, -signal.SIGTERM)

    def test_cpu_count(self):
        try:
            cpus = multiprocessing.cpu_count()
        except NotImplementedError:
            cpus = 1
        self.assertTrue(type(cpus) is int)
        self.assertTrue(cpus >= 1)

    def test_active_children(self):
        self.assertEqual(type(self.active_children()), list)
        p = self.Process(target=time.sleep, args=(DELTA,))
        self.assertNotIn(p, self.active_children())
        p.daemon = True
        p.start()
        self.assertIn(p, self.active_children())
        p.join()
        self.assertNotIn(p, self.active_children())

    @classmethod
    def _test_recursion(cls, wconn, id):
        # Child-side helper: each process reports its path-id, then spawns
        # two children until depth 2 is reached.
        from multiprocessing import forking
        wconn.send(id)
        if len(id) < 2:
            for i in range(2):
                p = cls.Process(
                    target=cls._test_recursion, args=(wconn, id+[i])
                    )
                p.start()
                p.join()

    def test_recursion(self):
        rconn, wconn = self.Pipe(duplex=False)
        self._test_recursion(wconn, [])
        time.sleep(DELTA)
        result = []
        while rconn.poll():
            result.append(rconn.recv())
        # Depth-first order of the binary spawn tree.
        expected = [
            [],
            [0],
            [0, 0],
            [0, 1],
            [1],
            [1, 0],
            [1, 1]
            ]
        self.assertEqual(result, expected)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
    """Check that a user-defined subclass of Process works end to end."""

    ALLOWED_TYPES = ('processes',)

    def test_subclassing(self):
        # Round-trip two strings through the worker, then shut it down.
        worker = _UpperCaser()
        worker.daemon = True
        worker.start()
        self.assertEqual(worker.submit('hello'), 'HELLO')
        self.assertEqual(worker.submit('world'), 'WORLD')
        worker.stop()
        worker.join()
#
#
#
def queue_empty(q):
    """Portable emptiness check: some proxy queues lack empty()."""
    return q.empty() if hasattr(q, 'empty') else q.qsize() == 0
def queue_full(q, maxsize):
    """Portable fullness check: some proxy queues lack full()."""
    return q.full() if hasattr(q, 'full') else q.qsize() == maxsize
class _TestQueue(BaseTestCase):
    """Queue/JoinableQueue semantics: put/get calling conventions,
    blocking with timeouts, qsize, forking with an active feeder
    thread, and task_done()/join()."""

    @classmethod
    def _test_put(cls, queue, child_can_start, parent_can_continue):
        # Child: drain the six items the parent put, then let it go on.
        child_can_start.wait()
        for i in range(6):
            queue.get()
        parent_can_continue.set()

    def test_put(self):
        MAXSIZE = 6
        queue = self.Queue(maxsize=MAXSIZE)
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()

        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        # Exercise every put() calling convention.
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)

        # the values may be in buffer but not yet in pipe so sleep a bit
        time.sleep(DELTA)

        self.assertEqual(queue_empty(queue), False)
        self.assertEqual(queue_full(queue, MAXSIZE), True)

        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)

        # Non-blocking puts on a full queue must fail immediately ...
        self.assertRaises(Queue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(Queue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(Queue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)

        # ... while blocking puts honour their timeout.
        self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)

        self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)

        child_can_start.set()
        parent_can_continue.wait()

        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        proc.join()

    @classmethod
    def _test_get(cls, queue, child_can_start, parent_can_continue):
        # Child: feed four items for the parent to consume.
        child_can_start.wait()
        #queue.put(1)
        queue.put(2)
        queue.put(3)
        queue.put(4)
        queue.put(5)
        parent_can_continue.set()

    def test_get(self):
        queue = self.Queue()
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_get,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()

        self.assertEqual(queue_empty(queue), True)

        child_can_start.set()
        parent_can_continue.wait()

        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)

        # Hangs unexpectedly, remove for now
        #self.assertEqual(queue.get(), 1)
        # Exercise every get() calling convention.
        self.assertEqual(queue.get(True, None), 2)
        self.assertEqual(queue.get(True), 3)
        self.assertEqual(queue.get(timeout=1), 4)
        self.assertEqual(queue.get_nowait(), 5)

        self.assertEqual(queue_empty(queue), True)

        get = TimingWrapper(queue.get)
        get_nowait = TimingWrapper(queue.get_nowait)

        # Non-blocking gets on an empty queue must fail immediately ...
        self.assertRaises(Queue.Empty, get, False)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(Queue.Empty, get, False, None)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(Queue.Empty, get_nowait)
        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)

        # ... while blocking gets honour their timeout.
        self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

        self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)

        proc.join()

    @classmethod
    def _test_fork(cls, queue):
        for i in range(10, 20):
            queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shutdown until the feeder thread has finished
        # pushing items onto the pipe.

    def test_fork(self):
        # Old versions of Queue would fail to create a new feeder
        # thread for a forked process if the original process had its
        # own feeder thread.  This test checks that this no longer
        # happens.

        queue = self.Queue()

        # put items on queue so that main process starts a feeder thread
        for i in range(10):
            queue.put(i)

        # wait to make sure thread starts before we fork a new process
        time.sleep(DELTA)

        # fork process
        p = self.Process(target=self._test_fork, args=(queue,))
        p.daemon = True
        p.start()

        # check that all expected items are in the queue
        for i in range(20):
            self.assertEqual(queue.get(), i)
        self.assertRaises(Queue.Empty, queue.get, False)

        p.join()

    def test_qsize(self):
        q = self.Queue()
        try:
            self.assertEqual(q.qsize(), 0)
        except NotImplementedError:
            # qsize() is unimplemented on some platforms (e.g. OS X).
            return
        q.put(1)
        self.assertEqual(q.qsize(), 1)
        q.put(5)
        self.assertEqual(q.qsize(), 2)
        q.get()
        self.assertEqual(q.qsize(), 1)
        q.get()
        self.assertEqual(q.qsize(), 0)

    @classmethod
    def _test_task_done(cls, q):
        # Worker: consume until the None sentinel, acknowledging each item.
        for obj in iter(q.get, None):
            time.sleep(DELTA)
            q.task_done()

    def test_task_done(self):
        queue = self.JoinableQueue()

        if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
            self.skipTest("requires 'queue.task_done()' method")

        workers = [self.Process(target=self._test_task_done, args=(queue,))
                   for i in xrange(4)]

        for p in workers:
            p.daemon = True
            p.start()

        for i in xrange(10):
            queue.put(i)

        # join() must block until every item has been task_done()'d.
        queue.join()

        # One sentinel per worker, then reap them.
        for p in workers:
            queue.put(None)

        for p in workers:
            p.join()
#
#
#
class _TestLock(BaseTestCase):
    """Lock and RLock acquire/release semantics."""

    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # A plain Lock is not reentrant.
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        # Releasing an unlocked lock is an error.
        self.assertRaises((ValueError, threading.ThreadError), lock.release)

    def test_rlock(self):
        lock = self.RLock()
        # An RLock may be acquired repeatedly by the same owner ...
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        # ... and must be released the same number of times.
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)

    def test_lock_context(self):
        # Locks must also work as context managers.
        with self.Lock():
            pass
class _TestSemaphore(BaseTestCase):
    """Semaphore/BoundedSemaphore counting and timeout behaviour."""

    def _test_semaphore(self, sem):
        # Walk the counter (initially 2) down to zero and back up,
        # checking the value wherever the implementation exposes it.
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        # Non-blocking acquire on an exhausted semaphore fails.
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        # A plain semaphore may be released beyond its initial value.
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)

    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)

    def test_timeout(self):
        if self.TYPE != 'processes':
            return

        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)

        # Non-blocking acquire returns immediately regardless of any
        # timeout argument ...
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)

        # ... while blocking acquire waits for the full timeout.
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)

        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
    """Condition variable notify/notify_all/timeout behaviour, with a
    mixture of process and thread waiters."""

    @classmethod
    def f(cls, cond, sleeping, woken, timeout=None):
        # Waiter: signal that we are about to sleep, wait on the
        # condition, then signal that we woke up.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()

    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            try:
                sleepers = (cond._sleeping_count.get_value() -
                            cond._woken_count.get_value())
                self.assertEqual(sleepers, 0)
                self.assertEqual(cond._wait_semaphore.get_value(), 0)
            except NotImplementedError:
                pass

    def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # One process waiter and one thread waiter.
        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()

        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()

        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()

        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)

        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()

        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)
        p.join()

    def test_notify_all(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.daemon = True
            p.start()

            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.daemon = True
            t.start()

        # wait for them all to sleep
        for i in xrange(6):
            sleeping.acquire()

        # check they have all timed out
        for i in xrange(6):
            woken.acquire()
        self.assertReturnsIfImplemented(0, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

        # start some more threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()

            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()

        # wait for them to all sleep
        for i in xrange(6):
            sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake them all up
        cond.acquire()
        cond.notify_all()
        cond.release()

        # check they have all woken
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(6, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

    def test_timeout(self):
        # wait() with a timeout must return after roughly that long.
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        self.assertEqual(res, None)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
    """Event set/clear/wait semantics, including wait() return values."""

    @classmethod
    def _test_event(cls, event):
        # Child: set the event after a delay so the parent's final
        # wait() has to block.
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        # Blocks until the child sets the event.
        self.assertEqual(wait(), True)
#
#
#
class _TestValue(BaseTestCase):
    """Shared ctypes scalars (Value/RawValue)."""

    ALLOWED_TYPES = ('processes',)

    # (typecode, initial value, value written by the child)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('c', latin('x'), latin('y'))
        ]

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _test(cls, values):
        # Child: overwrite each shared value with the third element.
        for sv, cv in zip(values, cls.codes_values):
            sv.value = cv[2]

    def test_value(self, raw=False):
        if raw:
            values = [self.RawValue(code, value)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value)
                      for code, value, _ in self.codes_values]

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])

        # Writes made by the child must be visible to the parent.
        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])

    def test_rawvalue(self):
        self.test_value(raw=True)

    def test_getobj_getlock(self):
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        # lock=None means "create a fresh lock" (same as the default).
        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        # An explicit lock object must be used as-is.
        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw wrapper without lock accessors.
        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
    """Shared ctypes arrays (Array/RawArray)."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def f(cls, seq):
        # In-place prefix sum; used to check child writes are shared.
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        # Apply the same transformation locally and in a child; the
        # shared array and local list must agree afterwards.
        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), range(10))
            del arr

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_accepts_long(self):
        # The size argument may be a long, not just an int.
        arr = self.Array('i', 10L)
        self.assertEqual(len(arr), 10)
        raw_arr = self.RawArray('i', 10L)
        self.assertEqual(len(raw_arr), 10)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        arr1 = self.Array('i', range(10))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        # lock=None means "create a fresh lock" (same as the default).
        arr2 = self.Array('i', range(10), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        # An explicit lock object must be used as-is.
        lock = self.Lock()
        arr3 = self.Array('i', range(10), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw wrapper without lock accessors.
        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
    """Manager-proxied list, dict and Namespace containers."""

    ALLOWED_TYPES = ('manager',)

    def test_list(self):
        a = self.list(range(10))
        self.assertEqual(a[:], range(10))

        b = self.list()
        self.assertEqual(b[:], [])

        b.extend(range(5))
        self.assertEqual(b[:], range(5))

        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2,3,4])

        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])

        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])

        self.assertEqual(a[:], range(10))

        # A proxied list built from other proxies copies their contents.
        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            e[:],
            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )

        f = self.list([a])
        a.append('hello')
        # f stores a copy taken at construction time plus the mutation
        # made through the proxy `a`.
        self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])

    def test_dict(self):
        d = self.dict()
        indices = range(65, 70)
        for i in indices:
            d[i] = chr(i)
        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
        self.assertEqual(sorted(d.keys()), indices)
        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])

    def test_namespace(self):
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        n._hidden = 'hidden'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        del n.job
        # Underscore-prefixed attributes are excluded from the repr.
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
    """Return x squared after sleeping `wait` seconds.

    Used as a pool task whose duration can be controlled by the caller.
    """
    time.sleep(wait)
    return x * x
class _TestPool(BaseTestCase):
    """Pool: apply/map and their async, iterator and chunked variants."""

    def test_apply(self):
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))

    def test_map(self):
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
        self.assertEqual(pmap(sqr, range(100), chunksize=20),
                         map(sqr, range(100)))

    def test_map_chunksize(self):
        # map_async() on an empty iterable must still complete.
        try:
            self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
        except multiprocessing.TimeoutError:
            self.fail("pool.map_async with chunksize stalled on null list")

    def test_async(self):
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

    def test_async_timeout(self):
        # The task outlasts the get() timeout, so get() must raise.
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
        get = TimingWrapper(res.get)
        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)

    def test_imap(self):
        it = self.pool.imap(sqr, range(10))
        self.assertEqual(list(it), map(sqr, range(10)))

        it = self.pool.imap(sqr, range(10))
        for i in range(10):
            self.assertEqual(it.next(), i*i)
        self.assertRaises(StopIteration, it.next)

        # Results must still arrive in order when chunked.
        it = self.pool.imap(sqr, range(1000), chunksize=100)
        for i in range(1000):
            self.assertEqual(it.next(), i*i)
        self.assertRaises(StopIteration, it.next)

    def test_imap_unordered(self):
        # Order is not guaranteed, so compare sorted results.
        it = self.pool.imap_unordered(sqr, range(1000))
        self.assertEqual(sorted(it), map(sqr, range(1000)))

        it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
        self.assertEqual(sorted(it), map(sqr, range(1000)))

    def test_make_pool(self):
        # A pool must have a strictly positive number of workers.
        self.assertRaises(ValueError, multiprocessing.Pool, -1)
        self.assertRaises(ValueError, multiprocessing.Pool, 0)
        p = multiprocessing.Pool(3)
        self.assertEqual(3, len(p._pool))
        p.close()
        p.join()

    def test_terminate(self):
        if self.TYPE == 'manager':
            # On Unix a forked process increfs each shared object to
            # which its parent process held a reference.  If the
            # forked process gets terminated then there is likely to
            # be a reference leak.  So to prevent
            # _TestZZZNumberOfObjects from failing we skip this test
            # when using a manager.
            return

        result = self.pool.map_async(
            time.sleep, [0.1 for i in range(10000)], chunksize=1
            )
        self.pool.terminate()
        join = TimingWrapper(self.pool.join)
        join()
        # terminate() must not wait for outstanding work to finish.
        self.assertTrue(join.elapsed < 0.2)
class _TestPoolWorkerLifetime(BaseTestCase):
    """Pool with maxtasksperchild: workers must be replaced after
    serving their quota of tasks."""

    ALLOWED_TYPES = ('processes', )

    def test_pool_worker_lifetime(self):
        p = multiprocessing.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
        # Refill the pool
        p._repopulate_pool()
        # Wait until all workers are alive
        # (countdown * DELTA = 5 seconds max startup process time)
        countdown = 50
        while countdown and not all(w.is_alive() for w in p._pool):
            countdown -= 1
            time.sleep(DELTA)
        finalworkerpids = [w.pid for w in p._pool]
        # All pids should be assigned.  See issue #7805.
        self.assertNotIn(None, origworkerpids)
        self.assertNotIn(None, finalworkerpids)
        # Finally, check that the worker pids have changed
        self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
        p.close()
        p.join()

    def test_pool_worker_lifetime_early_close(self):
        # Issue #10332: closing a pool whose workers have limited lifetimes
        # before all the tasks completed would make join() hang.
        p = multiprocessing.Pool(3, maxtasksperchild=1)
        results = []
        for i in range(6):
            results.append(p.apply_async(sqr, (i, 0.3)))
        p.close()
        p.join()
        # check the results
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
debug_info = self.manager._debug_info()
if refs != EXPECTED_NUMBER:
print self.manager._debug_info()
print debug_info
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
    """Fixture exposed through MyManager.

    f() and _h() identify themselves by name; g() always fails so
    remote error propagation can be tested.  The underscore prefix on
    _h makes it private by convention, i.e. hidden unless a manager
    exposes it explicitly.
    """

    def f(self):
        return 'f()'

    def _h(self):
        return '_h()'

    def g(self):
        raise ValueError
def baz():
    """Generator yielding the first ten perfect squares (0..81)."""
    for n in range(10):
        yield n * n
class IteratorProxy(BaseProxy):
    """Proxy type for generators: forwards both the Python 2 (`next`)
    and Python 3 (`__next__`) iterator protocols to the referent."""

    _exposed_ = ('next', '__next__')

    def __iter__(self):
        # The proxy is its own iterator.
        return self

    def next(self):
        return self._callmethod('next')

    def __next__(self):
        return self._callmethod('__next__')
# Custom manager subclass; the Foo/Bar/baz shared types are registered
# on it immediately below.
class MyManager(BaseManager):
    pass
# Register the fixtures: 'Foo' exposes FooBar with the default rules,
# 'Bar' explicitly exposes f and the private _h, and 'baz' uses a
# custom proxy type so the generator can be iterated remotely.
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
    """End-to-end check of the customized manager defined above."""

    ALLOWED_TYPES = ('manager',)

    def test_mymanager(self):
        manager = MyManager()
        manager.start()

        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()

        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]

        # Default exposure gives the public methods only; Bar's
        # explicit list exposes f and _h but not g.
        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])

        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        # _h is not exposed on Foo, so even _callmethod must fail.
        self.assertRaises(RemoteError, foo._callmethod, '_h')

        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')

        # The custom proxy makes the remote generator iterable.
        self.assertEqual(list(baz), [i*i for i in range(10)])

        manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
# Queue served by the manager server process; get_queue is registered
# below as the accessor for it.
_queue = Queue.Queue()
def get_queue():
    return _queue
class QueueManager(BaseManager):
    '''manager class used by server process'''
# Server side: serves the module-level _queue via get_queue().
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
    '''manager class which specifies the same interface as QueueManager'''
# Client side: declares the method without a callable so calls are
# resolved by the remote server.
QueueManager2.register('get_queue')

# Use xmlrpclib instead of pickle so serialization limits can be
# exercised in _TestRemoteManager.
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
    """Connecting to a manager server from another process, using
    xmlrpclib for serialization."""

    ALLOWED_TYPES = ('manager',)

    @classmethod
    def _putter(cls, address, authkey):
        # Child: connect as a client and push one item onto the queue.
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER
            )
        manager.connect()
        queue = manager.get_queue()
        queue.put(('hello world', None, True, 2.25))

    def test_remote(self):
        authkey = os.urandom(32)

        # Port 0: let the OS pick a free port.
        manager = QueueManager(
            address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
            )
        manager.start()

        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()

        manager2 = QueueManager2(
            address=manager.address, authkey=authkey, serializer=SERIALIZER
            )
        manager2.connect()
        queue = manager2.get_queue()

        # Note that xmlrpclib will deserialize object as a list not a tuple
        self.assertEqual(queue.get(), ['hello world', None, True, 2.25])

        # Because we are using xmlrpclib for serialization instead of
        # pickle this will cause a serialization error.
        self.assertRaises(Exception, queue.put, time.sleep)

        # Make queue finalizer run before the server is stopped
        del queue
        manager.shutdown()
class _TestManagerRestart(BaseTestCase):
    """A manager must be restartable on the same address immediately
    after shutdown (the listener socket must not linger)."""

    @classmethod
    def _putter(cls, address, authkey):
        # Child: connect and push one item.
        manager = QueueManager(
            address=address, authkey=authkey, serializer=SERIALIZER)
        manager.connect()
        queue = manager.get_queue()
        queue.put('hello world')

    def test_rapid_restart(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=('localhost', 0), authkey=authkey, serializer=SERIALIZER)
        srvr = manager.get_server()
        addr = srvr.address
        # Close the connection.Listener socket which gets opened as a part
        # of manager.get_server(). It's not needed for the test.
        srvr.listener.close()
        manager.start()

        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()
        queue = manager.get_queue()
        self.assertEqual(queue.get(), 'hello world')
        del queue
        manager.shutdown()

        # Rebinding the same address must succeed right away.
        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER)
        manager.start()
        manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
    def test_connection(self):
        """Full workout of a duplex connection against an echo child:
        object and byte transfer, recv_bytes_into, poll timeouts, EOF."""
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()

        seq = [1, 2.25, None]
        msg = latin('hello world')
        longmsg = msg * 10
        arr = array.array('i', range(4))

        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)

        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)

        self.assertEqual(conn.send_bytes(msg), None)
        self.assertEqual(conn.recv_bytes(), msg)

        if self.TYPE == 'processes':
            # recv_bytes_into a large-enough buffer, without and then
            # with an offset.
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            # An undersized buffer raises BufferTooShort carrying the
            # complete message in e.args.
            buffer = bytearray(latin(' ' * 40))
            self.assertEqual(conn.send_bytes(longmsg), None)
            try:
                res = conn.recv_bytes_into(buffer)
            except multiprocessing.BufferTooShort, e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)

        poll = TimingWrapper(conn.poll)

        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)

        conn.send(None)

        # Data is pending, so poll() must return at once.
        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(conn.recv(), None)

        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16Mb
        conn.send_bytes(really_big_msg)
        self.assertEqual(conn.recv_bytes(), really_big_msg)

        conn.send_bytes(SENTINEL)        # tell child to quit
        child_conn.close()

        if self.TYPE == 'processes':
            self.assertEqual(conn.readable, True)
            self.assertEqual(conn.writable, True)
            # Once the child closed its end, reads hit EOF.
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recv_bytes)

        p.join()
    def test_duplex_false(self):
        """A duplex=False pipe yields a read-only and a write-only end."""
        reader, writer = self.Pipe(duplex=False)
        self.assertEqual(writer.send(1), None)
        self.assertEqual(reader.recv(), 1)
        if self.TYPE == 'processes':
            self.assertEqual(reader.readable, True)
            self.assertEqual(reader.writable, False)
            self.assertEqual(writer.readable, False)
            self.assertEqual(writer.writable, True)
            # Using an end in the wrong direction is an IOError.
            self.assertRaises(IOError, reader.send, 2)
            self.assertRaises(IOError, writer.recv)
            self.assertRaises(IOError, writer.poll)
    def test_spawn_close(self):
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned.  On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()    # this might complete before child initializes

        # The echo round-trip must still work.
        msg = latin('hello')
        conn.send_bytes(msg)
        self.assertEqual(conn.recv_bytes(), msg)

        conn.send_bytes(SENTINEL)
        conn.close()
        p.join()
    def test_sendbytes(self):
        """send_bytes() offset/size arguments and their validation."""
        if self.TYPE != 'processes':
            return

        msg = latin('abcdefghijklmnopqrstuvwxyz')
        a, b = self.Pipe()

        a.send_bytes(msg)
        self.assertEqual(b.recv_bytes(), msg)

        # offset only
        a.send_bytes(msg, 5)
        self.assertEqual(b.recv_bytes(), msg[5:])

        # offset and size
        a.send_bytes(msg, 7, 8)
        self.assertEqual(b.recv_bytes(), msg[7:7+8])

        # offset at the very end sends an empty message
        a.send_bytes(msg, 26)
        self.assertEqual(b.recv_bytes(), latin(''))

        a.send_bytes(msg, 26, 0)
        self.assertEqual(b.recv_bytes(), latin(''))

        # out-of-range offsets and sizes are rejected
        self.assertRaises(ValueError, a.send_bytes, msg, 27)

        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)

        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)

        self.assertRaises(ValueError, a.send_bytes, msg, -1)

        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
    @classmethod
    def _writefd(cls, conn, data, create_dummy_fds=False):
        # Child: receive a file descriptor over `conn` and write `data`
        # to it.  With create_dummy_fds, first fill every fd slot below
        # 256 so the received fd is forced above that range (see
        # test_large_fd_transfer).
        if create_dummy_fds:
            for i in range(0, 256):
                if not cls._is_fd_assigned(i):
                    os.dup2(conn.fileno(), i)
        fd = reduction.recv_handle(conn)
        if msvcrt:
            # On Windows the received handle must be converted to a
            # C-runtime file descriptor before os.write can use it.
            fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
        os.write(fd, data)
        os.close(fd)
    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    def test_fd_transfer(self):
        """A file descriptor sent to a child must be usable there."""
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
        p.daemon = True
        p.start()
        with open(test_support.TESTFN, "wb") as f:
            fd = f.fileno()
            if msvcrt:
                fd = msvcrt.get_osfhandle(fd)
            reduction.send_handle(conn, fd, p.pid)
        p.join()
        # The child wrote through the transferred descriptor.
        with open(test_support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"foo")
    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32",
                     "test semantics don't make sense on Windows")
    @unittest.skipIf(MAXFD <= 256,
                     "largest assignable fd number is too small")
    @unittest.skipUnless(hasattr(os, "dup2"),
                         "test needs os.dup2()")
    def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657)
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        # create_dummy_fds=True makes the child fill fds 0-255 so its
        # received descriptor is also forced above 256.
        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        with open(test_support.TESTFN, "wb") as f:
            fd = f.fileno()
            # Duplicate the file's fd onto the first unassigned number >= 256.
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
        p.join()
        with open(test_support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
    def test_missing_fd_transfer(self):
        # Check that exception is raised when received data is not
        # accompanied by a file descriptor in ancillary data.
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
        p.daemon = True
        p.start()
        # The child wrote a plain byte, so recv_handle() must fail.
        self.assertRaises(RuntimeError, reduction.recv_handle, conn)
        p.join()
class _TestListenerClient(BaseTestCase):
    """Round-trip test for Listener/Client over every supported family."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        # Child side: connect back to the listener and send a greeting.
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
    """White-box tests for the block allocator in multiprocessing.heap."""

    ALLOWED_TYPES = ('processes',)

    def test_heap(self):
        iterations = 5000
        maxblocks = 50
        blocks = []

        # create and destroy lots of blocks of different sizes
        for i in xrange(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = multiprocessing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]

        # get the heap object
        heap = multiprocessing.heap.BufferWrapper._heap

        # verify the state of the heap
        # Gather every free and occupied block as
        # (arena index, start, stop, length, state) tuples.
        all = []
        occupied = 0
        heap._lock.acquire()
        self.addCleanup(heap._lock.release)
        for L in heap._len_to_seq.values():
            for arena, start, stop in L:
                all.append((heap._arenas.index(arena), start, stop,
                            stop-start, 'free'))
        for arena, start, stop in heap._allocated_blocks:
            all.append((heap._arenas.index(arena), start, stop,
                        stop-start, 'occupied'))
            occupied += (stop-start)

        all.sort()

        # Consecutive blocks in the same arena must tile it exactly:
        # each block ends where the next one begins.
        for i in range(len(all)-1):
            (arena, start, stop) = all[i][:3]
            (narena, nstart, nstop) = all[i+1][:3]
            self.assertTrue((arena != narena and nstart == 0) or
                            (stop == nstart))

    def test_free_from_gc(self):
        # Check that freeing of blocks by the garbage collector doesn't deadlock
        # (issue #12352).
        # Make sure the GC is enabled, and set lower collection thresholds to
        # make collections more frequent (and increase the probability of
        # deadlock).
        if not gc.isenabled():
            gc.enable()
            self.addCleanup(gc.disable)
        thresholds = gc.get_threshold()
        self.addCleanup(gc.set_threshold, *thresholds)
        gc.set_threshold(10)

        # perform numerous block allocations, with cyclic references to make
        # sure objects are collected asynchronously by the gc
        for i in range(5000):
            a = multiprocessing.heap.BufferWrapper(1)
            b = multiprocessing.heap.BufferWrapper(1)
            # circular references
            a.buddy = b
            b.buddy = a
#
#
#
class _Foo(Structure):
    # Small ctypes structure used by the shared-ctypes tests below.
    _fields_ = [
        ('x', c_int),
        ('y', c_double)
        ]
class _TestSharedCTypes(BaseTestCase):
    """Tests for the multiprocessing.sharedctypes Value/Array wrappers."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _double(cls, x, y, foo, arr, string):
        # Child process: double every shared value in place.
        x.value *= 2
        y.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    def test_sharedctypes(self, lock=False):
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', range(10), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        # Every mutation made in the child must be visible here.
        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))

    def test_synchronize(self):
        # Same as test_sharedctypes, but with lock-protected wrappers.
        self.test_sharedctypes(lock=True)

    def test_copy(self):
        foo = _Foo(2, 5.0)
        bar = copy(foo)
        # The copy must be independent of the original.
        foo.x = 0
        foo.y = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
    """Tests for util.Finalize callback registration and exit ordering."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def _test_finalize(cls, conn):
        class Foo(object):
            pass

        a = Foo()
        util.Finalize(a, conn.send, args=('a',))
        del a           # triggers callback for a

        b = Foo()
        close_b = util.Finalize(b, conn.send, args=('b',))
        close_b()       # triggers callback for b
        close_b()       # does nothing because callback has already been called
        del b           # does nothing because callback has already been called

        c = Foo()
        util.Finalize(c, conn.send, args=('c',))

        d10 = Foo()
        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)

        d01 = Foo()
        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)

        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)

        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)

        # call multiprocessing's cleanup function then exit process without
        # garbage collecting locals
        util._exit_function()
        conn.close()
        os._exit(0)

    def test_finalize(self):
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.daemon = True
        p.start()
        p.join()

        result = [obj for obj in iter(conn.recv, 'STOP')]
        # Per the expected list: higher exitpriority runs first; equal
        # priorities run in reverse registration order (d03, d02, d01).
        # 'c' never appears because its finalizer has no exitpriority and
        # the locals are not garbage collected before os._exit().
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
    """Verify that every name listed in a module's __all__ really exists."""

    ALLOWED_TYPES = ('processes',)

    def test_import(self):
        modules = [
            'multiprocessing', 'multiprocessing.connection',
            'multiprocessing.heap', 'multiprocessing.managers',
            'multiprocessing.pool', 'multiprocessing.process',
            'multiprocessing.synchronize', 'multiprocessing.util'
            ]
        if HAS_REDUCTION:
            modules.append('multiprocessing.reduction')
        if c_int is not None:
            # sharedctypes needs _ctypes; c_int is None when it is missing.
            modules.append('multiprocessing.sharedctypes')
        for name in modules:
            __import__(name)
            mod = sys.modules[name]
            exported = getattr(mod, '__all__', ())
            for attr in exported:
                msg = '%r does not have attribute %r' % (mod, attr)
                self.assertTrue(hasattr(mod, attr), msg)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
    """Smoke tests for multiprocessing's logger (output is not checked)."""

    ALLOWED_TYPES = ('processes',)

    def test_enable_logging(self):
        logger = multiprocessing.get_logger()
        logger.setLevel(util.SUBWARNING)
        self.assertTrue(logger is not None)
        logger.debug('this will not be printed')
        logger.info('nor will this')
        # Restore the level the rest of the suite expects.
        logger.setLevel(LOG_LEVEL)

    @classmethod
    def _test_level(cls, conn):
        # Child: report the effective level of the multiprocessing logger.
        logger = multiprocessing.get_logger()
        conn.send(logger.getEffectiveLevel())

    def test_level(self):
        LEVEL1 = 32
        LEVEL2 = 37

        logger = multiprocessing.get_logger()
        root_logger = logging.getLogger()
        root_level = root_logger.level

        reader, writer = multiprocessing.Pipe(duplex=False)

        # An explicit level on the multiprocessing logger is seen by the child.
        logger.setLevel(LEVEL1)
        p = self.Process(target=self._test_level, args=(writer,))
        p.daemon = True
        p.start()
        self.assertEqual(LEVEL1, reader.recv())

        # With NOTSET, the effective level falls back to the root logger's.
        logger.setLevel(logging.NOTSET)
        root_logger.setLevel(LEVEL2)
        p = self.Process(target=self._test_level, args=(writer,))
        p.daemon = True
        p.start()
        self.assertEqual(LEVEL2, reader.recv())

        # Restore both loggers so later tests are unaffected.
        root_logger.setLevel(root_level)
        logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
    """Issue 3321: Connection must verify its handle up front."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        # 44977608 is an arbitrary number very unlikely to be an open fd.
        conn = _multiprocessing.Connection(44977608)
        self.assertRaises(IOError, conn.poll)
        # Negative handles must be rejected at construction time.
        self.assertRaises(IOError, _multiprocessing.Connection, -1)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
    """Fetch `names` from `Source`, wrapping plain functions as staticmethods.

    The returned dict is meant to be merged into a class namespace via
    locals().update(), so functions must not become bound methods there.
    """
    func_type = type(get_attributes)
    result = {}
    for attr_name in names:
        attr = getattr(Source, attr_name)
        if type(attr) == func_type:
            attr = staticmethod(attr)
        result[attr_name] = attr
    return result
def create_test_cases(Mixin, type):
    """Build concrete TestCase classes from every _Test* base in this module.

    For each base whose ALLOWED_TYPES includes `type`, a new class named
    'With<Type><Base>' is created mixing in unittest.TestCase and `Mixin`.
    Returns a dict mapping the new names to the new classes.
    """
    result = {}
    module_globals = globals()
    capitalized = type.capitalize()

    for candidate_name in list(module_globals.keys()):
        if not candidate_name.startswith('_Test'):
            continue
        base = module_globals[candidate_name]
        if type not in base.ALLOWED_TYPES:
            continue
        newname = 'With' + capitalized + candidate_name[1:]
        class Temp(base, unittest.TestCase, Mixin):
            pass
        Temp.__name__ = newname
        Temp.__module__ = Mixin.__module__
        result[newname] = Temp

    return result
#
# Create test cases
#
class ProcessesMixin(object):
    # Mixin that runs the _Test* cases against real OS processes.
    TYPE = 'processes'
    Process = multiprocessing.Process
    # Re-export the multiprocessing API as attributes of the mixin.
    locals().update(get_attributes(multiprocessing, (
        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
        'Condition', 'Event', 'Value', 'Array', 'RawValue',
        'RawArray', 'current_process', 'active_children', 'Pipe',
        'connection', 'JoinableQueue'
        )))

testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
    # Mixin that runs the _Test* cases against manager proxies.
    TYPE = 'manager'
    Process = multiprocessing.Process
    # Bare instance: __init__/start are deferred until test_main().
    manager = object.__new__(multiprocessing.managers.SyncManager)
    locals().update(get_attributes(manager, (
        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
        'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
        'Namespace', 'JoinableQueue'
        )))

testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
    # Mixin that runs the _Test* cases against the thread-based dummy API.
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    locals().update(get_attributes(multiprocessing.dummy, (
        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
        'Condition', 'Event', 'Value', 'Array', 'current_process',
        'active_children', 'Pipe', 'connection', 'dict', 'list',
        'Namespace', 'JoinableQueue'
        )))

testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
    """Tests for the connection authentication handshake."""
    # TODO: add more tests for deliver/answer challenge.

    def test_deliver_challenge_auth_failure(self):
        class _FakeConnection(object):
            # Always answers the challenge with a bogus digest.
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.deliver_challenge,
                          _FakeConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        class _FakeConnection(object):
            # First recv yields a valid CHALLENGE header, the second a
            # bogus welcome message, so authentication must fail.
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                elif self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.answer_challenge,
                          _FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
    """Worker initializer for the issue-5585 tests: bump ns.test by one."""
    ns.test = ns.test + 1
class TestInitializers(unittest.TestCase):
    """Issue 5585: initializer support in Manager.start() and Pool()."""

    def setUp(self):
        self.mgr = multiprocessing.Manager()
        # Shared namespace the initializer mutates from the worker side.
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()

    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        m.shutdown()

    def test_pool_initializer(self):
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _ThisSubProcess(q):
    # Grandchild process: attempt a non-blocking get; the queue being
    # empty is the expected, harmless outcome.
    try:
        item = q.get(block=False)
    except Queue.Empty:
        pass
def _TestProcess(q):
    # Child process: spawn a grandchild with a fresh queue of its own.
    # NOTE(review): the `q` argument is unused in the body; it appears to
    # exist only so a Queue gets pickled into this process (issues
    # 5155/5313/5331) -- confirm.
    queue = multiprocessing.Queue()
    subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,))
    subProc.daemon = True
    subProc.start()
    subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
    # Run inside a child process: creating and using a Pool there must
    # not hang or crash (see TestStdinBadfiledescriptor below).
    pool = multiprocessing.Pool(processes=4)
    x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
    """Issues 5155, 5313, 5331: using multiprocessing from a process
    whose stdin has been replaced or closed."""

    def test_queue_in_process(self):
        # A child process must be able to create and use its own Queue.
        queue = multiprocessing.Queue()
        proc = multiprocessing.Process(target=_TestProcess, args=(queue,))
        proc.start()
        proc.join()

    def test_pool_in_process(self):
        # A child process must be able to create and use its own Pool.
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()

    def test_flushing(self):
        sio = StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): this Process is intentionally never started; its
        # construction alone appears to be the point -- confirm.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        # Use a real unittest assertion instead of a bare `assert` so the
        # check still runs under `python -O`.
        self.assertEqual(sio.getvalue(), 'foo')

testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
                   TestStdinBadfiledescriptor]
#
#
#
def test_main(run=None):
    """Create the shared fixtures, run the whole suite, then tear down.

    `run` is a callable taking a TestSuite; it defaults to
    test.test_support.run_unittest.
    """
    if sys.platform.startswith("linux"):
        try:
            lock = multiprocessing.RLock()
        except OSError:
            raise unittest.SkipTest("OSError raises on RLock creation, see issue 3111!")

    check_enough_semaphores()

    if run is None:
        from test.test_support import run_unittest as run

    util.get_temp_dir()     # creates temp directory for use by all processes

    multiprocessing.get_logger().setLevel(LOG_LEVEL)

    # One shared pool per flavour, plus the manager, reused by all tests.
    ProcessesMixin.pool = multiprocessing.Pool(4)
    ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
    ManagerMixin.manager.__init__()
    ManagerMixin.manager.start()
    ManagerMixin.pool = ManagerMixin.manager.Pool(4)

    testcases = (
        sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
        sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
        sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
        testcases_other
        )

    loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
    suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
    # (ncoghlan): Whether or not sys.exc_clear is executed by the threading
    # module during these tests is at least platform dependent and possibly
    # non-deterministic on any given platform. So we don't mind if the listed
    # warnings aren't actually raised.
    with test_support.check_py3k_warnings(
            (".+__(get|set)slice__ has been removed", DeprecationWarning),
            (r"sys.exc_clear\(\) not supported", DeprecationWarning),
            quiet=True):
        run(suite)

    # Tear down pools first, then the manager that owns one of them.
    ThreadsMixin.pool.terminate()
    ProcessesMixin.pool.terminate()
    ManagerMixin.pool.terminate()
    ManagerMixin.manager.shutdown()

    del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
    """Entry point: run the suite under a verbose text runner."""
    runner = unittest.TextTestRunner(verbosity=2)
    test_main(runner.run)

if __name__ == '__main__':
    main()
# Triage issues
Triage helps ensure that GitHub issues resolve quickly by:
- Ensuring the issue's intent and purpose are conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took.
- Giving a contributor the information they need before they commit to resolving an issue.
- Lowering the issue count by preventing duplicate issues.
- Streamlining the development process by preventing duplicate discussions.
This document gives you some ideas on what you can do to help. For more information, read more about [how the core Grafana team triage issues](/contribute/ISSUE_TRIAGE.md).
## Improve issues
Improve issues by suggesting improvements to the title and description of the issue. If you think an issue has formatting issues, bad language, or grammatical errors, post a comment to let the author and maintainers know.
## Report resolved issues
If you think an issue has been resolved, or is no longer relevant, suggest that we close it. Add a comment on the issue in which you explain the reason that it should be closed. Make sure to include any related issues and pull requests.
## Investigate issues
Investigate issues that we haven't been able to reproduce yet. In some cases, there are many combinations of panels, dashboards, and data sources that make it difficult for us to reproduce certain issues. Help us by adding more information.
## Vote on issues
Use [GitHub reactions](https://github.blog/news-insights/product-news/add-reactions-to-pull-requests-issues-and-comments/) to let us know what's important to you. Vote on bugs if you've experienced the same problem. **Don't vote, or react, by commenting on the issue.**
Read more about [how we prioritize issues](/contribute/ISSUE_TRIAGE.md#4-prioritization-of-issues).
## Report duplicates
If you find two issues that describe the same thing, add a comment in one of the issues. In the comment, include a reference (`#<issue number>`) to the duplicate. Explain why you think the issue is duplicated. | unknown | github | https://github.com/grafana/grafana | contribute/triage-issues.md |
A borrowed variable was used by a closure.
Erroneous code example:
```compile_fail,E0500
fn you_know_nothing(jon_snow: &mut i32) {
let nights_watch = &jon_snow;
let starks = || {
*jon_snow = 3; // error: closure requires unique access to `jon_snow`
// but it is already borrowed
};
println!("{}", nights_watch);
}
```
Here, `jon_snow` is already borrowed by the `nights_watch` reference, so it
cannot be uniquely borrowed by the `starks` closure at the same time. To fix
this issue, you can create the closure after the last use of the borrow:
```
fn you_know_nothing(jon_snow: &mut i32) {
let nights_watch = &jon_snow;
println!("{}", nights_watch);
let starks = || {
*jon_snow = 3;
};
}
```
Or, if the type implements the `Clone` trait, you can clone it between
closures:
```
fn you_know_nothing(jon_snow: &mut i32) {
let mut jon_copy = jon_snow.clone();
let starks = || {
*jon_snow = 3;
};
println!("{}", jon_copy);
}
``` | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0500.md |
"""
Module that holds all archiving functions
"""
import os
import shutil
import logging
import sys
import numpy as np
import subprocess
import factor._logging
import factor.parset
import factor.directions
from factor.lib.direction import Direction
from lofarpipe.support.data_map import DataMap
from factor.scripts import sort_times_into_freqGroups
from lofarpipe.support.utilities import create_directory
import glob
import pickle
log = logging.getLogger('factor:archive')
def check_existing_files(mapfile):
    """
    Checks if files in input mapfile exist

    Parameters
    ----------
    mapfile : str
        Filename of mapfile to check

    Returns
    -------
    file : list
        List of files named in the mapfile (empty if the mapfile could
        not be read)
    """
    all_exist = True
    all_files = []
    log.info('Checking for existing files...')
    try:
        datamap = DataMap.load(mapfile)
        for item in datamap:
            # Handle case in which item.file is a Python list.
            # startswith/endswith (instead of indexing) avoids an
            # IndexError on an empty entry; strip each name so that
            # '[a, b]'-style lists do not yield names with leading spaces.
            if item.file.startswith('[') and item.file.endswith(']'):
                files = [f.strip() for f in item.file.strip('[]').split(',')]
            else:
                files = [item.file]
            for f in files:
                if not os.path.exists(f):
                    all_exist = False
            all_files.extend(files)
        if all_exist:
            log.info('...all files exist')
        else:
            log.warning('...one or more files not found')
        return all_files
    except IOError:
        # The mapfile itself is missing or unreadable
        return []
def load_directions(parset_file):
    """
    Return directions for a run

    Parameters
    ----------
    parset_file : str
        Filename of the Factor parset for the run

    Returns
    -------
    direction_list : list
        Direction objects that have saved state from a previous run
    parset : dict
        The parsed parset
    """
    # Read parset
    orig_dir = os.path.abspath('.')
    parset = factor.parset.parset_read(parset_file, use_log_file=False)
    # parset_read() may change the cwd; restore it
    os.chdir(orig_dir)

    # Load directions. First check for user-supplied directions file then for
    # Factor-generated file from a previous run
    direction_list = []
    dir_parset = parset['direction_specific']
    if 'directions_file' in dir_parset:
        directions = factor.directions.directions_read(dir_parset['directions_file'],
            parset['dir_working'])
    elif os.path.exists(os.path.join(parset['dir_working'], 'factor_directions.txt')):
        directions = factor.directions.directions_read(os.path.join(parset['dir_working'],
            'factor_directions.txt'), parset['dir_working'])
    else:
        log.error('No directions found. Please run this tool after '
            'the directions have been defined')
        sys.exit(1)

    # Add the target to the directions list if desired
    target_ra = dir_parset['target_ra']
    target_dec = dir_parset['target_dec']
    target_radius_arcmin = dir_parset['target_radius_arcmin']
    target_has_own_facet = dir_parset['target_has_own_facet']
    if target_has_own_facet:
        if target_ra is not None and target_dec is not None and target_radius_arcmin is not None:
            # Make target object
            target = Direction('target', target_ra, target_dec,
                factor_working_dir=parset['dir_working'])

            # Add target to directions list
            directions.append(target)
        else:
            log.critical('target_has_own_facet = True, but target RA, Dec, or radius not found in parset')
            sys.exit(1)

    # Keep only the directions that saved state during a previous run
    for direction in directions:
        has_state = direction.load_state()
        if has_state:
            direction_list.append(direction)

    return direction_list, parset
def copy(path_from, dir_to, clobber, use_symlinks=False):
    """
    Copy a file or directory

    Parameters
    ----------
    path_from : str
        Input file or directory
    dir_to : str
        Output directory
    clobber : bool
        Clobber existing file or directory?
    use_symlinks : bool, optional
        Use symlinks instead of copying files?
    """
    if not os.path.exists(path_from):
        log.warning('{} not found. Please check the '
            'working directory'.format(path_from))
        return
    path_to = os.path.join(dir_to, os.path.basename(path_from))
    if os.path.exists(path_to):
        if not clobber:
            log.warning(' Destination "{}" exists and clobber = False. '
                'Skipping it...'.format(path_to))
            return
    else:
        # Destination does not exist yet; make sure its parent directory does
        create_directory(dir_to)

    if use_symlinks:
        # Replace any existing destination with a fresh symlink.
        # NOTE(review): unlike the rsync branch, the rm return code is not
        # checked here -- confirm this best-effort removal is intended.
        if os.path.exists(path_to):
            p = subprocess.Popen('rm -rf {0}'.format(path_to), shell=True,
                stdout=subprocess.PIPE)
            r = p.communicate()
        os.symlink(path_from, path_to)
    else:
        # NOTE(review): paths are interpolated into a shell command; paths
        # containing spaces or shell metacharacters would break -- confirm
        # inputs are controlled.
        p = subprocess.Popen('rsync -a {0} {1}'.format(path_from, dir_to),
            shell=True, stdout=subprocess.PIPE)
        r = p.communicate()
        if p.returncode != 0:
            log.critical('rsync exited abnormally when attempting to archive {}'.format(path_from))
            sys.exit(1)
def dppp_concat(mslist, msout):
    """
    Run DPPP to concat a list of files

    Parameters
    ----------
    mslist : str
        List of input ms files, given as a string (e.g., '[ms1,ms2,...]')
    msout : str
        Filename of output ms file
    """
    # Call DPPP
    # msin.missingdata=true / msin.orderms=false: tolerate gaps and keep the
    # given ordering of the input measurement sets.
    p = subprocess.Popen("DPPP msin={0} steps=[] msout={1} msin.missingdata=true "
        "msin.orderms=false".format(mslist, msout), shell=True, stdout=subprocess.PIPE)
    r = p.communicate()
    if p.returncode != 0:
        log.critical('DPPP exited abnormally when attempting to concat {}'.format(mslist))
        sys.exit(1)
def archive(parset_file, directions, dir_output, full=False, archive_subdata=False,
    archive_state=False, archive_misc=True, archive_images=True,
    archive_inst=False, archive_pipestate=False, archive_models=False,
    archive_plots=True, clobber=False):
    """
    Archives data from a Factor run

    Parameters
    ----------
    parset_file : str
        Filename of Factor parset for run of interest
    directions : list of str
        List of direction names for which to archive the calibrated data
    dir_output : str
        Name of output directory where archived data will be stored
    full : bool, optional
        Make a full archive suitable for resuming?
    archive_subdata : bool, optional
        Archive the subtracted data MS files?
    archive_state : bool, optional
        Archive the state files?
    archive_misc : bool, optional
        Archive miscellaneous files?
    archive_images : bool, optional
        Archive the facet and field images?
    archive_inst : bool, optional
        Archive the instrument tables?
    archive_pipestate : bool, optional
        Archive the pipeline state files?
    archive_models : bool, optional
        Archive the sky models?
    archive_plots : bool, optional
        Archive the selfcal plots?
    clobber : bool, optional
        Clobber existing files in output directory?
    """
    # Read in parset and get directions
    all_directions, parset = load_directions(parset_file)
    if len(all_directions) == 0:
        log.error('No directions found in Factor working directory. Please check '
            'the parset')
        sys.exit(1)
    all_names = [d.name for d in all_directions]
    if len(directions) != 0:
        if directions[0].lower() == 'all':
            directions = all_names
        for dname in directions:
            if dname not in all_names:
                log.warning('Direction {} not found. Skipping it...'.format(dname))

    if full:
        # Archive everything
        archive_subdata = True
        archive_state = True
        archive_misc = True
        archive_images = True
        archive_inst = True
        archive_pipestate = True
        archive_models = True
        archive_plots = True

    working_dir = all_directions[0].working_dir

    if archive_subdata:
        log.info('Archiving subtracted data files...')
        chunks_dir = os.path.join(working_dir, 'chunks')
        copy(chunks_dir, dir_output, clobber)

    if archive_state:
        log.info('Archiving state files...')
        state_dir = os.path.join(working_dir, 'state')
        copy(state_dir, dir_output, clobber)

    if archive_misc:
        log.info('Archiving miscellaneous files...')
        misc_dir = os.path.join(dir_output, 'misc')
        if 'directions_file' in parset['direction_specific']:
            directions_file = parset['direction_specific']['directions_file']
        else:
            directions_file = os.path.join(working_dir, 'factor_directions.txt')
        file_list = [directions_file,
            parset_file,
            '{}/factor.log'.format(working_dir),
            '{}/regions/facets_ds9.reg'.format(working_dir),
            '{}/regions/calimages_ds9.reg'.format(working_dir)]
        for f in file_list:
            copy(f, misc_dir, clobber)

    if archive_images:
        log.info('Archiving field images...')
        file_list = glob.glob(os.path.join(working_dir, 'results',
            'field*', 'field', '*.fits'))
        if len(file_list) == 0:
            log.warning('No field images found.')
        else:
            for i, f in enumerate(file_list):
                log.info(' Archiving image {0} of {1}...'.format(i+1, len(file_list)))
                subdir = f.split('/')[-3]
                image_dir = os.path.join(dir_output, 'images', 'field', subdir)
                copy(f, image_dir, clobber)

    if archive_models:
        log.info('Archiving direction-independent sky models...')
        band_state_files = glob.glob(os.path.join(working_dir, 'state',
            'Band_*'))
        file_list = []
        band_list = []
        for bf in band_state_files:
            # Best effort: skip state files that cannot be unpickled or
            # that lack the expected keys (but never swallow SystemExit /
            # KeyboardInterrupt as the old bare `except:` did)
            try:
                with open(bf, 'r') as f:
                    b = pickle.load(f)
                    file_list.append(b['skymodel_dirindep'])
                    band_list.append(b['name'])
            except Exception:
                pass
        for i, f in enumerate(file_list):
            skymodel_dir = os.path.join(dir_output, 'chunks', band_list[i])
            log.info(' Copying sky model file {0} of {1}...'.format(i+1, len(file_list)))
            copy(f, skymodel_dir, clobber)

    for d in all_directions:
        if archive_images:
            log.info('Archiving facet images for direction {}...'.format(d.name))
            file_list = glob.glob(os.path.join(working_dir, 'results',
                'facetimage*', d.name, '*full2*image.fits'))
            if len(file_list) == 0:
                log.warning('No facet images found for direction {}.'.format(d.name))
            else:
                for i, f in enumerate(file_list):
                    subdir = f.split('/')[-3]
                    image_dir = os.path.join(dir_output, 'images', d.name, subdir)
                    copy(f, image_dir, clobber)

        if archive_models:
            log.info('Archiving sky models for direction {}...'.format(d.name))
            if hasattr(d, 'sourcedb_new_facet_sources'):
                file_list = check_existing_files(d.sourcedb_new_facet_sources)
            else:
                file_list = []
            if len(file_list) == 0:
                log.warning('No sky models found for direction {}.'.format(d.name))
            else:
                sourcedb_dir = os.path.join(dir_output, 'sky_models', d.name)
                for i, f in enumerate(file_list):
                    log.info(' Copying sky model file {0} of {1}...'.format(i+1, len(file_list)))
                    copy(f, sourcedb_dir, clobber)

        if archive_inst:
            log.info('Archiving instrument tables for direction {}...'.format(d.name))
            # Start from a clean list: previously this section reused the
            # sky-model file list and appended a *nested* list to it, which
            # broke the copy loop below.
            file_list = []
            if hasattr(d, 'preapply_h5parm_mapfile'):
                # NOTE(review): the attribute checked by hasattr and the one
                # read used to differ ('preapply_h5parm_mapfile' vs
                # 'preapply_parmdb_mapfile'), which would raise
                # AttributeError; both now use the h5parm name -- confirm
                # against the Direction class.
                file_list.extend(check_existing_files(d.preapply_h5parm_mapfile))
            if len(file_list) == 0:
                log.warning('No h5parms found for direction {}.'.format(d.name))
            else:
                inst_table_dir = os.path.join(dir_output, 'h5parms', d.name)
                for i, f in enumerate(file_list):
                    log.info(' Copying h5parm file {0} of {1}...'.format(i+1, len(file_list)))
                    copy(f, inst_table_dir, clobber)

        if archive_plots:
            log.info('Archiving plots for direction {}...'.format(d.name))
            file_list = glob.glob(os.path.join(working_dir, 'results', 'facetselfcal', d.name, '*png'))
            if len(file_list) == 0:
                file_list = glob.glob(os.path.join(working_dir, 'results', 'facetpeel', d.name, '*png'))
                if len(file_list) == 0:
                    file_list = glob.glob(os.path.join(working_dir, 'results', 'outlierpeel', d.name, '*png'))
            if len(file_list) == 0:
                log.warning('No plots found for direction {}.'.format(d.name))
            else:
                plot_dir = os.path.join(dir_output, 'plots', d.name)
                for i, f in enumerate(file_list):
                    copy(f, plot_dir, clobber)

        if archive_pipestate:
            log.info('Archiving pipeline state files for direction {}...'.format(d.name))
            # Try the self-cal operations in order of preference
            file_list = glob.glob(os.path.join(working_dir, 'results', 'facetselfcal', d.name, 'mapfiles', '*'))
            op_name = 'facetselfcal'
            if len(file_list) == 0:
                file_list = glob.glob(os.path.join(working_dir, 'results', 'facetpeel', d.name, 'mapfiles', '*'))
                op_name = 'facetpeel'
                if len(file_list) == 0:
                    file_list = glob.glob(os.path.join(working_dir, 'results', 'outlierpeel', d.name, 'mapfiles', '*'))
                    op_name = 'outlierpeel'
            if len(file_list) == 0:
                log.warning('No pipeline state files found for direction {}.'.format(d.name))
            else:
                mapfile_dir = os.path.join(dir_output, 'pipeline_state', d.name, op_name)
                for f in file_list:
                    copy(f, mapfile_dir, clobber)

            # Also archive "final_image" mapfile for facetimage (needed for mosaicking)
            file_list = glob.glob(os.path.join(working_dir, 'results',
                'facetimage*', d.name, 'mapfiles', 'final_image.mapfile'))
            if len(file_list) > 0:
                for i, f in enumerate(file_list):
                    subdir = f.split('/')[-4]
                    mapfile_dir = os.path.join(dir_output, 'pipeline_state', d.name, subdir)
                    copy(f, mapfile_dir, clobber)

        if d.name in directions:
            log.info('Archiving calibrated data for direction {}...'.format(d.name))
            if hasattr(d, 'image_data_mapfile'):
                file_list = check_existing_files(d.image_data_mapfile)
            else:
                file_list = []
            if len(file_list) == 0:
                log.warning('No data found for direction {}. Skipping it...'.format(d.name))
                continue

            # Make the output directory
            cal_data_dir = os.path.join(dir_output, 'calibrated_data', d.name)
            create_directory(cal_data_dir)

            # Sort the files into time chunks
            data_mapfile = d.name+'_calibrated_data.mapfile'
            sort_times_into_freqGroups.main(file_list, filename=data_mapfile,
                mapfile_dir=cal_data_dir)

            # Read the new, grouped file lists
            datamap = DataMap.load(os.path.join(cal_data_dir, data_mapfile))

            # Run DPPP to concatenate each time chunk in frequency
            nchunks = len(datamap)
            for i, item in enumerate(datamap):
                log.info(' Concatenating files for time chunk {0} of {1}...'.format(i+1, nchunks))
                outfile = os.path.join(cal_data_dir, '{0}_calibrated_data_chunk{1}.ms'.format(d.name, i))
                if os.path.exists(outfile):
                    if not clobber:
                        # (typo fix: "chuck" -> "chunk")
                        log.warning(' Output file for this chunk exists and clobber = False. Skipping it...')
                        continue
                    else:
                        os.system('rm -rf {0}'.format(outfile))
                dppp_concat(item.file, outfile)

            # Clean up
            os.system('rm -f {0}'.format(os.path.join(cal_data_dir, data_mapfile)))
            os.system('rm -f {0}_groups'.format(os.path.join(cal_data_dir, data_mapfile)))

    log.info('Archiving complete.')
"""
Given a graph which consists of several edges connecting the nodes in it.
It is required to find a subgraph of the given graph with the following properties:
The subgraph contains all the nodes present in the original graph.
The subgraph is of minimum overall weight (sum of all edges) among all such subgraphs.
It is also required that there is exactly one, exclusive path between any two nodes of the subgraph.
One specific node is fixed as the starting point of finding the subgraph.
Find the total weight of such a subgraph (sum of all edges in the subgraph)
Input Format
First line has two integers
N, denoting the number of nodes in the graph and
M, denoting the number of edges in the graph.
The next M lines
each consist of three space separated integers x y r, where
x and y denote the two nodes between which the undirected edge exists,
r denotes the length of edge between the corresponding nodes.
The last line has an integer S, denoting the starting node.
NOTE: If there are edges between the same pair of nodes with different weights,
they are to be considered as is, like multiple edges.
Ref: https://www.hackerrank.com/challenges/primsmstsub
"""
from heapq import *
import unittest
import sys
def prims_mst(graph, start_node):
    """Return the total weight of a minimum spanning tree of ``graph``.

    Implements Prim's algorithm with a binary heap of candidate edges
    (lazy deletion: stale heap entries for already-visited nodes are
    simply skipped when popped).

    Args:
        graph: Mapping of node -> list of ``(weight, neighbour)`` tuples.
            Parallel edges between the same pair of nodes are allowed.
        start_node: Node from which the tree is grown.

    Returns:
        Sum of edge weights of the MST spanning every node reachable
        from ``start_node``.
    """
    visited = set()
    to_visit = [(0, start_node)]
    total_weight = 0
    while to_visit:
        weight, node = heappop(to_visit)
        if node in visited:
            # Stale entry: this node was already added via a cheaper edge.
            continue
        # Bug fix vs. the original: the old code tested `if not current_node`,
        # which terminated the whole algorithm as soon as node 0 (falsy) was
        # extracted. Driving the loop off heap emptiness avoids any
        # truthiness test on node ids.
        visited.add(node)
        total_weight += weight
        # .get() tolerates nodes with no adjacency entry (the original
        # indexed graph[node] and could raise KeyError).
        for edge in graph.get(node, []):
            if edge[1] not in visited:
                heappush(to_visit, edge)
    return total_weight
class MyTestCases(unittest.TestCase):
    """Unit tests for prims_mst()."""

    def test_primps_mst(self):
        # Undirected weighted graph expressed as adjacency lists of
        # (weight, neighbour) pairs; its minimum spanning tree weighs 15.
        adjacency = {
            1: [(3, 2), (4, 3)],
            2: [(5, 3), (3, 1), (6, 4), (2, 5)],
            3: [(7, 5), (5, 2), (4, 1)],
            4: [(6, 2)],
            5: [(2, 2), (7, 3)],
        }
        self.assertEqual(prims_mst(adjacency, 1), 15)
if __name__ == '__main__':
    # Input format: first line "N M", then M lines "x y r" describing an
    # undirected edge of weight r, and a final line with the start node S.
    node_count, edge_count = map(int, sys.stdin.readline().split())
    graph = {node: [] for node in range(node_count + 1)}
    for _ in range(edge_count):
        x, y, weight = map(int, sys.stdin.readline().split())
        # Undirected edge: record it in both adjacency lists.
        graph[x].append((weight, y))
        graph[y].append((weight, x))
    start_node = int(sys.stdin.readline())
    print(prims_mst(graph, start_node))
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import unittest
from webkitpy.common.checkout.changelog_unittest import ChangeLogTest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.preparechangelog import PrepareChangeLog
class PrepareChangeLogTest(ChangeLogTest):
    """Tests the PrepareChangeLog tool step."""

    def test_ensure_bug_url(self):
        """The step should replace the 'OOPS!' placeholder in a fresh
        ChangeLog entry with the bug title and URL taken from state."""
        capture = OutputCapture()
        step = PrepareChangeLog(MockTool(), MockOptions())
        changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
        changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
        state = {
            "bug_title": "Example title",
            "bug_id": 1234,
            "changelogs": [changelog_path],
        }
        capture.assert_outputs(self, step.run, [state])
        actual_contents = self._read_file_contents(changelog_path, "utf-8")
        expected_message = "Example title\n http://example.com/1234"
        expected_contents = changelog_contents.replace("Need a short description and bug URL (OOPS!)", expected_message)
        # Remove the temp file before asserting so it is cleaned up even on failure.
        os.remove(changelog_path)
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(actual_contents, expected_contents)
<?php
namespace Illuminate\Support;
use ArgumentCountError;
use ArrayAccess;
use Closure;
use Illuminate\Contracts\Support\Arrayable;
use Illuminate\Contracts\Support\Jsonable;
use Illuminate\Support\Traits\Macroable;
use InvalidArgumentException;
use JsonSerializable;
use Random\Randomizer;
use Traversable;
use WeakMap;
class Arr
{
use Macroable;
/**
* Determine whether the given value is array accessible.
*
* @param mixed $value
* @return bool
*/
public static function accessible($value)
{
return is_array($value) || $value instanceof ArrayAccess;
}
/**
* Determine whether the given value is arrayable.
*
* @param mixed $value
* @return ($value is array
* ? true
* : ($value is \Illuminate\Contracts\Support\Arrayable
* ? true
* : ($value is \Traversable
* ? true
* : ($value is \Illuminate\Contracts\Support\Jsonable
* ? true
* : ($value is \JsonSerializable ? true : false)
* )
* )
* )
* )
*/
public static function arrayable($value)
{
return is_array($value)
|| $value instanceof Arrayable
|| $value instanceof Traversable
|| $value instanceof Jsonable
|| $value instanceof JsonSerializable;
}
/**
* Add an element to an array using "dot" notation if it doesn't exist.
*
* @param array $array
* @param string|int|float $key
* @param mixed $value
* @return array
*/
public static function add($array, $key, $value)
{
if (is_null(static::get($array, $key))) {
static::set($array, $key, $value);
}
return $array;
}
/**
* Get an array item from an array using "dot" notation.
*
* @throws \InvalidArgumentException
*/
public static function array(ArrayAccess|array $array, string|int|null $key, ?array $default = null): array
{
$value = Arr::get($array, $key, $default);
if (! is_array($value)) {
throw new InvalidArgumentException(
sprintf('Array value for key [%s] must be an array, %s found.', $key, gettype($value))
);
}
return $value;
}
/**
* Get a boolean item from an array using "dot" notation.
*
* @throws \InvalidArgumentException
*/
public static function boolean(ArrayAccess|array $array, string|int|null $key, ?bool $default = null): bool
{
$value = Arr::get($array, $key, $default);
if (! is_bool($value)) {
throw new InvalidArgumentException(
sprintf('Array value for key [%s] must be a boolean, %s found.', $key, gettype($value))
);
}
return $value;
}
/**
* Collapse an array of arrays into a single array.
*
* @param iterable $array
* @return array
*/
public static function collapse($array)
{
$results = [];
foreach ($array as $values) {
if ($values instanceof Collection) {
$results[] = $values->all();
} elseif (is_array($values)) {
$results[] = $values;
}
}
return array_merge([], ...$results);
}
/**
* Cross join the given arrays, returning all possible permutations.
*
* @template TValue
*
* @param iterable<TValue> ...$arrays
* @return array<int, array<array-key, TValue>>
*/
public static function crossJoin(...$arrays)
{
$results = [[]];
foreach ($arrays as $index => $array) {
$append = [];
foreach ($results as $product) {
foreach ($array as $item) {
$product[$index] = $item;
$append[] = $product;
}
}
$results = $append;
}
return $results;
}
/**
* Divide an array into two arrays. One with keys and the other with values.
*
* @template TKey of array-key
* @template TValue
*
* @param array<TKey, TValue> $array
* @return array{TKey[], TValue[]}
*/
public static function divide($array)
{
return [array_keys($array), array_values($array)];
}
/**
* Flatten a multi-dimensional associative array with dots.
*
* @param iterable $array
* @param string $prepend
* @return array
*/
public static function dot($array, $prepend = '')
{
$results = [];
$flatten = function ($data, $prefix) use (&$results, &$flatten): void {
foreach ($data as $key => $value) {
$newKey = $prefix.$key;
if (is_array($value) && ! empty($value)) {
$flatten($value, $newKey.'.');
} else {
$results[$newKey] = $value;
}
}
};
$flatten($array, $prepend);
// Destroy self-referencing closure to avoid memory leak...
$flatten = null;
return $results;
}
/**
* Convert a flatten "dot" notation array into an expanded array.
*
* @param iterable $array
* @return array
*/
public static function undot($array)
{
$results = [];
foreach ($array as $key => $value) {
static::set($results, $key, $value);
}
return $results;
}
/**
* Get all of the given array except for a specified array of keys.
*
* @param array $array
* @param array|string|int|float $keys
* @return array
*/
public static function except($array, $keys)
{
static::forget($array, $keys);
return $array;
}
/**
* Get all of the given array except for a specified array of values.
*
* @param array $array
* @param mixed $values
* @param bool $strict
* @return array
*/
public static function exceptValues($array, $values, $strict = false)
{
$values = (array) $values;
return array_filter($array, function ($value) use ($values, $strict) {
return ! in_array($value, $values, $strict);
});
}
/**
* Determine if the given key exists in the provided array.
*
* @param \ArrayAccess|array $array
* @param string|int|float $key
* @return bool
*/
public static function exists($array, $key)
{
if ($array instanceof Enumerable) {
return $array->has($key);
}
if ($array instanceof ArrayAccess) {
return $array->offsetExists($key);
}
if (is_float($key) || is_null($key)) {
$key = (string) $key;
}
return array_key_exists($key, $array);
}
/**
* Return the first element in an iterable passing a given truth test.
*
* @template TKey
* @template TValue
* @template TFirstDefault
*
* @param iterable<TKey, TValue> $array
* @param (callable(TValue, TKey): bool)|null $callback
* @param TFirstDefault|(\Closure(): TFirstDefault) $default
* @return TValue|TFirstDefault
*/
public static function first($array, ?callable $callback = null, $default = null)
{
if (is_null($callback)) {
if (empty($array)) {
return value($default);
}
if (is_array($array)) {
return array_first($array);
}
foreach ($array as $item) {
return $item;
}
return value($default);
}
$array = static::from($array);
$key = array_find_key($array, $callback);
return $key !== null ? $array[$key] : value($default);
}
/**
* Return the last element in an array passing a given truth test.
*
* @template TKey
* @template TValue
* @template TLastDefault
*
* @param iterable<TKey, TValue> $array
* @param (callable(TValue, TKey): bool)|null $callback
* @param TLastDefault|(\Closure(): TLastDefault) $default
* @return TValue|TLastDefault
*/
public static function last($array, ?callable $callback = null, $default = null)
{
if (is_null($callback)) {
return empty($array) ? value($default) : array_last($array);
}
return static::first(array_reverse($array, true), $callback, $default);
}
/**
* Take the first or last {$limit} items from an array.
*
* @param array $array
* @param int $limit
* @return array
*/
public static function take($array, $limit)
{
if ($limit < 0) {
return array_slice($array, $limit, abs($limit));
}
return array_slice($array, 0, $limit);
}
/**
* Flatten a multi-dimensional array into a single level.
*
* @param iterable $array
* @param int $depth
* @return array
*/
public static function flatten($array, $depth = INF)
{
$result = [];
foreach ($array as $item) {
$item = $item instanceof Collection ? $item->all() : $item;
if (! is_array($item)) {
$result[] = $item;
} else {
$values = $depth === 1
? array_values($item)
: static::flatten($item, $depth - 1);
foreach ($values as $value) {
$result[] = $value;
}
}
}
return $result;
}
/**
* Get a float item from an array using "dot" notation.
*
* @throws \InvalidArgumentException
*/
public static function float(ArrayAccess|array $array, string|int|null $key, ?float $default = null): float
{
$value = Arr::get($array, $key, $default);
if (! is_float($value)) {
throw new InvalidArgumentException(
sprintf('Array value for key [%s] must be a float, %s found.', $key, gettype($value))
);
}
return $value;
}
/**
* Remove one or many array items from a given array using "dot" notation.
*
* @param array $array
* @param array|string|int|float $keys
* @return void
*/
public static function forget(&$array, $keys)
{
$original = &$array;
$keys = (array) $keys;
if (count($keys) === 0) {
return;
}
foreach ($keys as $key) {
// if the exact key exists in the top-level, remove it
if (static::exists($array, $key)) {
unset($array[$key]);
continue;
}
$parts = explode('.', $key);
// clean up before each pass
$array = &$original;
while (count($parts) > 1) {
$part = array_shift($parts);
if (isset($array[$part]) && static::accessible($array[$part])) {
$array = &$array[$part];
} else {
continue 2;
}
}
unset($array[array_shift($parts)]);
}
}
/**
* Get the underlying array of items from the given argument.
*
* @template TKey of array-key = array-key
* @template TValue = mixed
*
* @param array<TKey, TValue>|Enumerable<TKey, TValue>|Arrayable<TKey, TValue>|WeakMap<object, TValue>|Traversable<TKey, TValue>|Jsonable|JsonSerializable|object $items
* @return ($items is WeakMap ? list<TValue> : array<TKey, TValue>)
*
* @throws \InvalidArgumentException
*/
public static function from($items)
{
return match (true) {
is_array($items) => $items,
$items instanceof Enumerable => $items->all(),
$items instanceof Arrayable => $items->toArray(),
$items instanceof WeakMap => iterator_to_array($items, false),
$items instanceof Traversable => iterator_to_array($items),
$items instanceof Jsonable => json_decode($items->toJson(), true),
$items instanceof JsonSerializable => (array) $items->jsonSerialize(),
is_object($items) => (array) $items,
default => throw new InvalidArgumentException('Items cannot be represented by a scalar value.'),
};
}
/**
* Get an item from an array using "dot" notation.
*
* @param \ArrayAccess|array $array
* @param string|int|null $key
* @param mixed $default
* @return mixed
*/
public static function get($array, $key, $default = null)
{
if (! static::accessible($array)) {
return value($default);
}
if (is_null($key)) {
return $array;
}
if (static::exists($array, $key)) {
return $array[$key];
}
if (! str_contains($key, '.')) {
return value($default);
}
foreach (explode('.', $key) as $segment) {
if (static::accessible($array) && static::exists($array, $segment)) {
$array = $array[$segment];
} else {
return value($default);
}
}
return $array;
}
/**
* Check if an item or items exist in an array using "dot" notation.
*
* @param \ArrayAccess|array $array
* @param string|array $keys
* @return bool
*/
public static function has($array, $keys)
{
$keys = (array) $keys;
if (! $array || $keys === []) {
return false;
}
foreach ($keys as $key) {
$subKeyArray = $array;
if (static::exists($array, $key)) {
continue;
}
foreach (explode('.', $key) as $segment) {
if (static::accessible($subKeyArray) && static::exists($subKeyArray, $segment)) {
$subKeyArray = $subKeyArray[$segment];
} else {
return false;
}
}
}
return true;
}
/**
* Determine if all keys exist in an array using "dot" notation.
*
* @param \ArrayAccess|array $array
* @param string|array $keys
* @return bool
*/
public static function hasAll($array, $keys)
{
$keys = (array) $keys;
if (! $array || $keys === []) {
return false;
}
foreach ($keys as $key) {
if (! static::has($array, $key)) {
return false;
}
}
return true;
}
/**
* Determine if any of the keys exist in an array using "dot" notation.
*
* @param \ArrayAccess|array $array
* @param string|array $keys
* @return bool
*/
public static function hasAny($array, $keys)
{
if (is_null($keys)) {
return false;
}
$keys = (array) $keys;
if (! $array) {
return false;
}
if ($keys === []) {
return false;
}
foreach ($keys as $key) {
if (static::has($array, $key)) {
return true;
}
}
return false;
}
/**
* Determine if all items pass the given truth test.
*
* @param iterable $array
* @param (callable(mixed, array-key): bool) $callback
* @return bool
*/
public static function every($array, callable $callback)
{
return array_all($array, $callback);
}
/**
* Determine if some items pass the given truth test.
*
* @param iterable $array
* @param (callable(mixed, array-key): bool) $callback
* @return bool
*/
public static function some($array, callable $callback)
{
return array_any($array, $callback);
}
/**
* Get an integer item from an array using "dot" notation.
*
* @throws \InvalidArgumentException
*/
public static function integer(ArrayAccess|array $array, string|int|null $key, ?int $default = null): int
{
$value = Arr::get($array, $key, $default);
if (! is_int($value)) {
throw new InvalidArgumentException(
sprintf('Array value for key [%s] must be an integer, %s found.', $key, gettype($value))
);
}
return $value;
}
/**
* Determines if an array is associative.
*
* An array is "associative" if it doesn't have sequential numerical keys beginning with zero.
*
* @param array $array
* @return ($array is list ? false : true)
*/
public static function isAssoc(array $array)
{
return ! array_is_list($array);
}
/**
* Determines if an array is a list.
*
* An array is a "list" if all array keys are sequential integers starting from 0 with no gaps in between.
*
* @param array $array
* @return ($array is list ? true : false)
*/
public static function isList($array)
{
return array_is_list($array);
}
/**
* Join all items using a string. The final items can use a separate glue string.
*
* @param array $array
* @param string $glue
* @param string $finalGlue
* @return string
*/
public static function join($array, $glue, $finalGlue = '')
{
if ($finalGlue === '') {
return implode($glue, $array);
}
if (count($array) === 0) {
return '';
}
if (count($array) === 1) {
return array_last($array);
}
$finalItem = array_pop($array);
return implode($glue, $array).$finalGlue.$finalItem;
}
/**
* Key an associative array by a field or using a callback.
*
* @param iterable $array
* @param callable|array|string $keyBy
* @return array
*/
public static function keyBy($array, $keyBy)
{
return (new Collection($array))->keyBy($keyBy)->all();
}
/**
* Prepend the key names of an associative array.
*
* @param array $array
* @param string $prependWith
* @return array
*/
public static function prependKeysWith($array, $prependWith)
{
return static::mapWithKeys($array, fn ($item, $key) => [$prependWith.$key => $item]);
}
/**
* Get a subset of the items from the given array.
*
* @param array $array
* @param array|string $keys
* @return array
*/
public static function only($array, $keys)
{
return array_intersect_key($array, array_flip((array) $keys));
}
/**
* Get a subset of the items from the given array by value.
*
* @param array $array
* @param mixed $values
* @param bool $strict
* @return array
*/
public static function onlyValues($array, $values, $strict = false)
{
$values = (array) $values;
return array_filter($array, function ($value) use ($values, $strict) {
return in_array($value, $values, $strict);
});
}
/**
* Select an array of values from an array.
*
* @param array $array
* @param array|string $keys
* @return array
*/
public static function select($array, $keys)
{
$keys = static::wrap($keys);
return static::map($array, function ($item) use ($keys) {
$result = [];
foreach ($keys as $key) {
if (Arr::accessible($item) && Arr::exists($item, $key)) {
$result[$key] = $item[$key];
} elseif (is_object($item) && isset($item->{$key})) {
$result[$key] = $item->{$key};
}
}
return $result;
});
}
/**
* Pluck an array of values from an array.
*
* @param iterable $array
* @param string|array|int|Closure|null $value
* @param string|array|Closure|null $key
* @return array
*/
public static function pluck($array, $value, $key = null)
{
$results = [];
[$value, $key] = static::explodePluckParameters($value, $key);
foreach ($array as $item) {
$itemValue = $value instanceof Closure
? $value($item)
: data_get($item, $value);
// If the key is "null", we will just append the value to the array and keep
// looping. Otherwise we will key the array using the value of the key we
// received from the developer. Then we'll return the final array form.
if (is_null($key)) {
$results[] = $itemValue;
} else {
$itemKey = $key instanceof Closure
? $key($item)
: data_get($item, $key);
if (is_object($itemKey) && method_exists($itemKey, '__toString')) {
$itemKey = (string) $itemKey;
}
$results[$itemKey] = $itemValue;
}
}
return $results;
}
/**
* Explode the "value" and "key" arguments passed to "pluck".
*
* @param Closure|array|string $value
* @param string|array|Closure|null $key
* @return array
*/
protected static function explodePluckParameters($value, $key)
{
$value = is_string($value) ? explode('.', $value) : $value;
$key = is_null($key) || is_array($key) || $key instanceof Closure ? $key : explode('.', $key);
return [$value, $key];
}
/**
* Run a map over each of the items in the array.
*
* @param array $array
* @param callable $callback
* @return array
*/
public static function map(array $array, callable $callback)
{
$keys = array_keys($array);
try {
$items = array_map($callback, $array, $keys);
} catch (ArgumentCountError) {
$items = array_map($callback, $array);
}
return array_combine($keys, $items);
}
/**
* Run an associative map over each of the items.
*
* The callback should return an associative array with a single key/value pair.
*
* @template TKey
* @template TValue
* @template TMapWithKeysKey of array-key
* @template TMapWithKeysValue
*
* @param array<TKey, TValue> $array
* @param callable(TValue, TKey): array<TMapWithKeysKey, TMapWithKeysValue> $callback
* @return array
*/
public static function mapWithKeys(array $array, callable $callback)
{
$result = [];
foreach ($array as $key => $value) {
$assoc = $callback($value, $key);
foreach ($assoc as $mapKey => $mapValue) {
$result[$mapKey] = $mapValue;
}
}
return $result;
}
/**
* Run a map over each nested chunk of items.
*
* @template TKey
* @template TValue
*
* @param array<TKey, array> $array
* @param callable(mixed...): TValue $callback
* @return array<TKey, TValue>
*/
public static function mapSpread(array $array, callable $callback)
{
return static::map($array, function ($chunk, $key) use ($callback) {
$chunk[] = $key;
return $callback(...$chunk);
});
}
/**
* Push an item onto the beginning of an array.
*
* @param array $array
* @param mixed $value
* @param mixed $key
* @return array
*/
public static function prepend($array, $value, $key = null)
{
if (func_num_args() == 2) {
array_unshift($array, $value);
} else {
$array = [$key => $value] + $array;
}
return $array;
}
/**
* Get a value from the array, and remove it.
*
* @param array $array
* @param string|int $key
* @param mixed $default
* @return mixed
*/
public static function pull(&$array, $key, $default = null)
{
$value = static::get($array, $key, $default);
static::forget($array, $key);
return $value;
}
/**
* Convert the array into a query string.
*
* @param array $array
* @return string
*/
public static function query($array)
{
return http_build_query($array, '', '&', PHP_QUERY_RFC3986);
}
    /**
     * Get one or a specified number of random values from an array.
     *
     * When $number is null a single value is returned; otherwise an array of
     * $number values is returned (keyed by the original keys only when
     * $preserveKeys is true).
     *
     * @param  array  $array
     * @param  int|null  $number
     * @param  bool  $preserveKeys
     * @return mixed
     *
     * @throws \InvalidArgumentException
     */
    public static function random($array, $number = null, $preserveKeys = false)
    {
        $requested = is_null($number) ? 1 : $number;
        $count = count($array);
        // Over-request is checked BEFORE the empty/zero short-circuit, so
        // asking for any item from an empty array throws rather than
        // silently returning null/[].
        if ($requested > $count) {
            throw new InvalidArgumentException(
                "You requested {$requested} items, but there are only {$count} items available."
            );
        }
        // Nothing to pick: null $number yields null, an explicit
        // non-positive $number yields an empty array.
        if (empty($array) || (! is_null($number) && $number <= 0)) {
            return is_null($number) ? null : [];
        }
        // Randomizer::pickArrayKeys selects distinct keys without replacement.
        $keys = (new Randomizer)->pickArrayKeys($array, $requested);
        if (is_null($number)) {
            return $array[$keys[0]];
        }
        $results = [];
        if ($preserveKeys) {
            foreach ($keys as $key) {
                $results[$key] = $array[$key];
            }
        } else {
            foreach ($keys as $key) {
                $results[] = $array[$key];
            }
        }
        return $results;
    }
/**
* Set an array item to a given value using "dot" notation.
*
* If no key is given to the method, the entire array will be replaced.
*
* @param array $array
* @param string|int|null $key
* @param mixed $value
* @return array
*/
public static function set(&$array, $key, $value)
{
if (is_null($key)) {
return $array = $value;
}
$keys = explode('.', $key);
foreach ($keys as $i => $key) {
if (count($keys) === 1) {
break;
}
unset($keys[$i]);
// If the key doesn't exist at this depth, we will just create an empty array
// to hold the next value, allowing us to create the arrays to hold final
// values at the correct depth. Then we'll keep digging into the array.
if (! isset($array[$key]) || ! is_array($array[$key])) {
$array[$key] = [];
}
$array = &$array[$key];
}
$array[array_shift($keys)] = $value;
return $array;
}
/**
* Push an item into an array using "dot" notation.
*
* @param \ArrayAccess|array $array
* @param string|int|null $key
* @param mixed $values
* @return array
*/
public static function push(ArrayAccess|array &$array, string|int|null $key, mixed ...$values): array
{
$target = static::array($array, $key, []);
array_push($target, ...$values);
return static::set($array, $key, $target);
}
/**
* Shuffle the given array and return the result.
*
* @param array $array
* @return array
*/
public static function shuffle($array)
{
return (new Randomizer)->shuffleArray($array);
}
/**
* Get the first item in the array, but only if exactly one item exists. Otherwise, throw an exception.
*
* @param array $array
* @param (callable(mixed, array-key): array)|null $callback
*
* @throws \Illuminate\Support\ItemNotFoundException
* @throws \Illuminate\Support\MultipleItemsFoundException
*/
public static function sole($array, ?callable $callback = null)
{
if ($callback) {
$array = static::where($array, $callback);
}
$count = count($array);
if ($count === 0) {
throw new ItemNotFoundException;
}
if ($count > 1) {
throw new MultipleItemsFoundException($count);
}
return static::first($array);
}
/**
* Sort the array using the given callback or "dot" notation.
*
* @template TKey of array-key
* @template TValue
*
* @param iterable<TKey, TValue> $array
* @param callable|string|null|array<int, (callable(TValue, TValue): -1|0|1)|array{string, 'asc'|'desc'}> $callback
* @return array<TKey, TValue>
*/
public static function sort($array, $callback = null)
{
return (new Collection($array))->sortBy($callback)->all();
}
/**
* Sort the array in descending order using the given callback or "dot" notation.
*
* @template TKey of array-key
* @template TValue
*
* @param iterable<TKey, TValue> $array
* @param callable|string|null|array<int, (callable(TValue, TValue): -1|0|1)|array{string, 'asc'|'desc'}> $callback
* @return array<TKey, TValue>
*/
public static function sortDesc($array, $callback = null)
{
return (new Collection($array))->sortByDesc($callback)->all();
}
/**
* Recursively sort an array by keys and values.
*
* @template TKey of array-key
* @template TValue
*
* @param array<TKey, TValue> $array
* @param int-mask-of<SORT_REGULAR|SORT_NUMERIC|SORT_STRING|SORT_LOCALE_STRING|SORT_NATURAL|SORT_FLAG_CASE> $options
* @param bool $descending
* @return array<TKey, TValue>
*/
public static function sortRecursive($array, $options = SORT_REGULAR, $descending = false)
{
foreach ($array as &$value) {
if (is_array($value)) {
$value = static::sortRecursive($value, $options, $descending);
}
}
if (! array_is_list($array)) {
$descending
? krsort($array, $options)
: ksort($array, $options);
} else {
$descending
? rsort($array, $options)
: sort($array, $options);
}
return $array;
}
    /**
     * Recursively sort an array by keys and values in descending order.
     *
     * @template TKey of array-key
     * @template TValue
     *
     * @param  array<TKey, TValue>  $array
     * @param  int-mask-of<SORT_REGULAR|SORT_NUMERIC|SORT_STRING|SORT_LOCALE_STRING|SORT_NATURAL|SORT_FLAG_CASE>  $options
     * @return array<TKey, TValue>
     */
    public static function sortRecursiveDesc($array, $options = SORT_REGULAR)
    {
        return static::sortRecursive($array, $options, true);
    }
/**
* Get a string item from an array using "dot" notation.
*
* @throws \InvalidArgumentException
*/
public static function string(ArrayAccess|array $array, string|int|null $key, ?string $default = null): string
{
$value = Arr::get($array, $key, $default);
if (! is_string($value)) {
throw new InvalidArgumentException(
sprintf('Array value for key [%s] must be a string, %s found.', $key, gettype($value))
);
}
return $value;
}
/**
* Conditionally compile classes from an array into a CSS class list.
*
* @param array<string, bool>|array<int, string|int>|string $array
* @return ($array is array<string, false> ? '' : ($array is '' ? '' : ($array is array{} ? '' : non-empty-string)))
*/
public static function toCssClasses($array)
{
$classList = static::wrap($array);
$classes = [];
foreach ($classList as $class => $constraint) {
if (is_numeric($class)) {
$classes[] = $constraint;
} elseif ($constraint) {
$classes[] = $class;
}
}
return implode(' ', $classes);
}
/**
* Conditionally compile styles from an array into a style list.
*
* @param array<string, bool>|array<int, string|int>|string $array
* @return ($array is array<string, false> ? '' : ($array is '' ? '' : ($array is array{} ? '' : non-empty-string)))
*/
public static function toCssStyles($array)
{
$styleList = static::wrap($array);
$styles = [];
foreach ($styleList as $class => $constraint) {
if (is_numeric($class)) {
$styles[] = Str::finish($constraint, ';');
} elseif ($constraint) {
$styles[] = Str::finish($class, ';');
}
}
return implode(' ', $styles);
}
/**
* Filter the array using the given callback.
*
* @template TKey of array-key
* @template TValue
*
* @param array<TKey, TValue> $array
* @param callable(TValue, TKey): bool $callback
* @return array<TKey, TValue>
*/
public static function where($array, callable $callback)
{
return array_filter($array, $callback, ARRAY_FILTER_USE_BOTH);
}
/**
* Filter the array using the negation of the given callback.
*
* @template TKey of array-key
* @template TValue
*
* @param array<TKey, TValue> $array
* @param callable(TValue, TKey): bool $callback
* @return array<TKey, TValue>
*/
public static function reject($array, callable $callback)
{
return static::where($array, fn ($value, $key) => ! $callback($value, $key));
}
/**
* Partition the array into two arrays using the given callback.
*
* @template TKey of array-key
* @template TValue of mixed
*
* @param iterable<TKey, TValue> $array
* @param callable(TValue, TKey): bool $callback
* @return array<int<0, 1>, array<TKey, TValue>>
*/
public static function partition($array, callable $callback)
{
$passed = [];
$failed = [];
foreach ($array as $key => $item) {
if ($callback($item, $key)) {
$passed[$key] = $item;
} else {
$failed[$key] = $item;
}
}
return [$passed, $failed];
}
/**
* Filter items where the value is not null.
*
* @param array $array
* @return array
*/
public static function whereNotNull($array)
{
return static::where($array, fn ($value) => ! is_null($value));
}
/**
* If the given value is not an array and not null, wrap it in one.
*
* @template TKey of array-key = array-key
* @template TValue
*
* @param array<TKey, TValue>|TValue|null $value
* @return ($value is null ? array{} : ($value is array ? array<TKey, TValue> : array{TValue}))
*/
public static function wrap($value)
{
if (is_null($value)) {
return [];
}
return is_array($value) ? $value : [$value];
}
} | php | github | https://github.com/laravel/framework | src/Illuminate/Collections/Arr.php |
from django.db import transaction
from django.test import TransactionTestCase, override_settings
from wagtail.core.models import Collection
from wagtail.images import get_image_model, signal_handlers
from wagtail.images.tests.utils import get_test_image_file
class TestFilesDeletedForDefaultModels(TransactionTestCase):
    '''
    Because we expect file deletion to only happen once a transaction is
    successfully committed, we must run these tests using TransactionTestCase
    per the following documentation:
    Django's TestCase class wraps each test in a transaction and rolls back that
    transaction after each test, in order to provide test isolation. This means
    that no transaction is ever actually committed, thus your on_commit()
    callbacks will never be run. If you need to test the results of an
    on_commit() callback, use a TransactionTestCase instead.
    https://docs.djangoproject.com/en/1.10/topics/db/transactions/#use-in-tests
    '''
    def setUp(self):
        # Required to create root collection because the TransactionTestCase
        # does not make initial data loaded in migrations available and
        # serialized_rollback=True causes other problems in the test suite.
        # ref: https://docs.djangoproject.com/en/1.10/topics/testing/overview/#rollback-emulation
        Collection.objects.get_or_create(
            name="Root",
            path='0001',
            depth=1,
            numchild=0,
        )
    def test_image_file_deleted_oncommit(self):
        # The image's storage file must survive deletion while the
        # transaction is still open, and disappear only after commit.
        with transaction.atomic():
            image = get_image_model().objects.create(title="Test Image", file=get_test_image_file())
            filename = image.file.name
            self.assertTrue(image.file.storage.exists(filename))
            image.delete()
            # Still inside the transaction: file deletion is deferred (via
            # an on_commit() hook), so the file must still exist here.
            self.assertTrue(image.file.storage.exists(filename))
        # The transaction has committed, so the deferred deletion has run.
        self.assertFalse(image.file.storage.exists(filename))
    def test_rendition_file_deleted_oncommit(self):
        # Same contract as above, but for a rendition's file.
        with transaction.atomic():
            image = get_image_model().objects.create(title="Test Image", file=get_test_image_file())
            rendition = image.get_rendition('original')
            filename = rendition.file.name
            self.assertTrue(rendition.file.storage.exists(filename))
            rendition.delete()
            # Deletion is deferred until commit, so the file is still there.
            self.assertTrue(rendition.file.storage.exists(filename))
        # After commit the on_commit() handler has removed the file.
        self.assertFalse(rendition.file.storage.exists(filename))
@override_settings(WAGTAILIMAGES_IMAGE_MODEL='tests.CustomImage')
class TestFilesDeletedForCustomModels(TestFilesDeletedForDefaultModels):
def setUp(self):
# Required to create root collection because the TransactionTestCase
# does not make initial data loaded in migrations available and
# serialized_rollback=True causes other problems in the test suite.
# ref: https://docs.djangoproject.com/en/1.10/topics/testing/overview/#rollback-emulation
Collection.objects.get_or_create(
name="Root",
path='0001',
depth=1,
numchild=0,
)
#: Sadly signal receivers only get connected when starting django.
#: We will re-attach them here to mimic the django startup behavior
#: and get the signals connected to our custom model..
signal_handlers.register_signal_handlers()
def test_image_model(self):
cls = get_image_model()
self.assertEqual('%s.%s' % (cls._meta.app_label, cls.__name__), 'tests.CustomImage') | unknown | codeparrot/codeparrot-clean | ||
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class purchase_requisition(osv.osv):
_name = "purchase.requisition"
_description = "Purchase Requisition"
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _get_po_line(self, cr, uid, ids, field_names, arg=None, context=None):
result = {}.fromkeys(ids, [])
for element in self.browse(cr, uid, ids, context=context):
for po in element.purchase_ids:
result[element.id] += [po_line.id for po_line in po.order_line]
return result
_columns = {
'name': fields.char('Call for Bids Reference', size=32, required=True),
'origin': fields.char('Source Document', size=32),
'ordering_date': fields.date('Scheduled Ordering Date'),
'date_end': fields.datetime('Bid Submission Deadline'),
'schedule_date': fields.date('Scheduled Date', select=True, help="The expected and scheduled date where all the products are received"),
'user_id': fields.many2one('res.users', 'Responsible'),
'exclusive': fields.selection([('exclusive', 'Select only one RFQ (exclusive)'), ('multiple', 'Select multiple RFQ')], 'Bid Selection Type', required=True, help="Select only one RFQ (exclusive): On the confirmation of a purchase order, it cancels the remaining purchase order.\nSelect multiple RFQ: It allows to have multiple purchase orders.On confirmation of a purchase order it does not cancel the remaining orders"""),
'description': fields.text('Description'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'purchase_ids': fields.one2many('purchase.order', 'requisition_id', 'Purchase Orders', states={'done': [('readonly', True)]}),
'po_line_ids': fields.function(_get_po_line, method=True, type='one2many', relation='purchase.order.line', string='Products by supplier'),
'line_ids': fields.one2many('purchase.requisition.line', 'requisition_id', 'Products to Purchase', states={'done': [('readonly', True)]}),
'procurement_id': fields.many2one('procurement.order', 'Procurement', ondelete='set null'),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
'state': fields.selection([('draft', 'Draft'), ('in_progress', 'Confirmed'), ('open', 'Bid Selection'), ('done', 'PO Created'), ('cancel', 'Cancelled')],
'Status', track_visibility='onchange', required=True),
'multiple_rfq_per_supplier': fields.boolean('Multiple RFQ per supplier'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', required=True),
}
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
return obj_data.get_object_reference(cr, uid, 'stock','picking_type_in')[1]
_defaults = {
'state': 'draft',
'exclusive': 'multiple',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition', context=c),
'user_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).id,
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'purchase.order.requisition'),
'picking_type_id': _get_picking_in,
}
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({
'state': 'draft',
'purchase_ids': [],
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order.requisition'),
})
return super(purchase_requisition, self).copy(cr, uid, id, default, context)
def tender_cancel(self, cr, uid, ids, context=None):
purchase_order_obj = self.pool.get('purchase.order')
#try to set all associated quotations to cancel state
purchase_ids = []
for tender in self.browse(cr, uid, ids, context=context):
for purchase_order in tender.purchase_ids:
purchase_order_obj.action_cancel(cr, uid, [purchase_order.id], context=context)
purchase_order_obj.message_post(cr, uid, [purchase_order.id], body=_('Cancelled by the tender associated to this quotation.'), context=context)
return self.write(cr, uid, ids, {'state': 'cancel'})
def tender_in_progress(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'in_progress'}, context=context)
def tender_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def tender_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'draft'})
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id])
self.create_workflow(cr, uid, [p_id])
return True
def tender_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def open_product_line(self, cr, uid, ids, context=None):
""" This opens product line view to view all lines from the different quotations, groupby default by product and partner to show comparaison
between supplier price
@return: the product line tree view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase_requisition', 'purchase_line_tree', context=context)
res['context'] = context
po_lines = self.browse(cr, uid, ids, context=context)[0].po_line_ids
res['context'] = {
'search_default_groupby_product': True,
'search_default_hide_cancelled': True,
}
res['domain'] = [('id', 'in', [line.id for line in po_lines])]
return res
def open_rfq(self, cr, uid, ids, context=None):
""" This opens rfq view to view all quotations associated to the call for bids
@return: the RFQ tree view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase', 'purchase_rfq', context=context)
res['context'] = context
po_ids = [po.id for po in self.browse(cr, uid, ids, context=context)[0].purchase_ids]
res['domain'] = [('id', 'in', po_ids)]
return res
def _prepare_purchase_order(self, cr, uid, requisition, supplier, context=None):
supplier_pricelist = supplier.property_product_pricelist_purchase and supplier.property_product_pricelist_purchase.id or False
picking_type_in = self.pool.get("purchase.order")._get_picking_in(cr, uid, context=context)
return {
'origin': requisition.name,
'date_order': requisition.date_end or fields.date.context_today(self, cr, uid, context=context),
'partner_id': supplier.id,
'pricelist_id': supplier_pricelist,
'location_id': requisition.picking_type_id.default_location_dest_id.id,
'company_id': requisition.company_id.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'requisition_id': requisition.id,
'notes': requisition.description,
'picking_type_id': picking_type_in,
}
def _prepare_purchase_order_line(self, cr, uid, requisition, requisition_line, purchase_id, supplier, context=None):
po_line_obj = self.pool.get('purchase.order.line')
product_uom = self.pool.get('product.uom')
product = requisition_line.product_id
default_uom_po_id = product.uom_po_id.id
date_order = requisition.ordering_date or fields.date.context_today(self, cr, uid, context=context)
qty = product_uom._compute_qty(cr, uid, requisition_line.product_uom_id.id, requisition_line.product_qty, default_uom_po_id)
supplier_pricelist = supplier.property_product_pricelist_purchase and supplier.property_product_pricelist_purchase.id or False
vals = po_line_obj.onchange_product_id(cr, uid, [], supplier_pricelist, product.id, qty, default_uom_po_id,
supplier.id, date_order=date_order, fiscal_position_id=supplier.property_account_position, date_planned=requisition_line.schedule_date,
name=False, price_unit=False, state='draft', context=context)['value']
vals.update({
'order_id': purchase_id,
'product_id': product.id,
'account_analytic_id': requisition_line.account_analytic_id.id,
})
return vals
def make_purchase_order(self, cr, uid, ids, partner_id, context=None):
"""
Create New RFQ for Supplier
"""
if context is None:
context = {}
assert partner_id, 'Supplier should be specified'
purchase_order = self.pool.get('purchase.order')
purchase_order_line = self.pool.get('purchase.order.line')
res_partner = self.pool.get('res.partner')
supplier = res_partner.browse(cr, uid, partner_id, context=context)
res = {}
for requisition in self.browse(cr, uid, ids, context=context):
if not requisition.multiple_rfq_per_supplier and supplier.id in filter(lambda x: x, [rfq.state != 'cancel' and rfq.partner_id.id or None for rfq in requisition.purchase_ids]):
raise osv.except_osv(_('Warning!'), _('You have already one %s purchase order for this partner, you must cancel this purchase order to create a new quotation.') % rfq.state)
context.update({'mail_create_nolog': True})
purchase_id = purchase_order.create(cr, uid, self._prepare_purchase_order(cr, uid, requisition, supplier, context=context), context=context)
purchase_order.message_post(cr, uid, [purchase_id], body=_("RFQ created"), context=context)
res[requisition.id] = purchase_id
for line in requisition.line_ids:
purchase_order_line.create(cr, uid, self._prepare_purchase_order_line(cr, uid, requisition, line, purchase_id, supplier, context=context), context=context)
return res
def check_valid_quotation(self, cr, uid, quotation, context=None):
"""
Check if a quotation has all his order lines bid in order to confirm it if its the case
return True if all order line have been selected during bidding process, else return False
args : 'quotation' must be a browse record
"""
for line in quotation.order_line:
if line.state != 'confirmed' or line.product_qty != line.quantity_bid:
return False
return True
def _prepare_po_from_tender(self, cr, uid, tender, context=None):
""" Prepare the values to write in the purchase order
created from a tender.
:param tender: the source tender from which we generate a purchase order
"""
return {'order_line': [],
'requisition_id': tender.id,
'origin': tender.name}
def _prepare_po_line_from_tender(self, cr, uid, tender, line, purchase_id, context=None):
""" Prepare the values to write in the purchase order line
created from a line of the tender.
:param tender: the source tender from which we generate a purchase order
:param line: the source tender's line from which we generate a line
:param purchase_id: the id of the new purchase
"""
return {'product_qty': line.quantity_bid,
'order_id': purchase_id}
def generate_po(self, cr, uid, ids, context=None):
"""
Generate all purchase order based on selected lines, should only be called on one tender at a time
"""
if context is None:
contex = {}
po = self.pool.get('purchase.order')
poline = self.pool.get('purchase.order.line')
id_per_supplier = {}
for tender in self.browse(cr, uid, ids, context=context):
if tender.state == 'done':
raise osv.except_osv(_('Warning!'), _('You have already generate the purchase order(s).'))
confirm = False
#check that we have at least confirm one line
for po_line in tender.po_line_ids:
if po_line.state == 'confirmed':
confirm = True
break
if not confirm:
raise osv.except_osv(_('Warning!'), _('You have no line selected for buying.'))
#check for complete RFQ
for quotation in tender.purchase_ids:
if (self.check_valid_quotation(cr, uid, quotation, context=context)):
#use workflow to set PO state to confirm
po.signal_purchase_confirm(cr, uid, [quotation.id])
#get other confirmed lines per supplier
for po_line in tender.po_line_ids:
#only take into account confirmed line that does not belong to already confirmed purchase order
if po_line.state == 'confirmed' and po_line.order_id.state in ['draft', 'sent', 'bid']:
if id_per_supplier.get(po_line.partner_id.id):
id_per_supplier[po_line.partner_id.id].append(po_line)
else:
id_per_supplier[po_line.partner_id.id] = [po_line]
#generate po based on supplier and cancel all previous RFQ
ctx = context.copy()
ctx['force_requisition_id'] = True
for supplier, product_line in id_per_supplier.items():
#copy a quotation for this supplier and change order_line then validate it
quotation_id = po.search(cr, uid, [('requisition_id', '=', tender.id), ('partner_id', '=', supplier)], limit=1)[0]
vals = self._prepare_po_from_tender(cr, uid, tender, context=context)
new_po = po.copy(cr, uid, quotation_id, default=vals, context=ctx)
#duplicate po_line and change product_qty if needed and associate them to newly created PO
for line in product_line:
vals = self._prepare_po_line_from_tender(cr, uid, tender, line, new_po, context=context)
poline.copy(cr, uid, line.id, default=vals, context=context)
#use workflow to set new PO state to confirm
po.signal_purchase_confirm(cr, uid, [new_po])
#cancel other orders
self.cancel_unconfirmed_quotations(cr, uid, tender, context=context)
#set tender to state done
self.signal_done(cr, uid, [tender.id])
return True
def cancel_unconfirmed_quotations(self, cr, uid, tender, context=None):
#cancel other orders
po = self.pool.get('purchase.order')
for quotation in tender.purchase_ids:
if quotation.state in ['draft', 'sent', 'bid']:
self.pool.get('purchase.order').signal_purchase_cancel(cr, uid, [quotation.id])
po.message_post(cr, uid, [quotation.id], body=_('Cancelled by the call for bids associated to this request for quotation.'), context=context)
return True
class purchase_requisition_line(osv.osv):
_name = "purchase.requisition.line"
_description = "Purchase Requisition Line"
_rec_name = 'product_id'
_columns = {
'product_id': fields.many2one('product.product', 'Product'),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'requisition_id': fields.many2one('purchase.requisition', 'Call for Bids', ondelete='cascade'),
'company_id': fields.related('requisition_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account',),
'schedule_date': fields.date('Scheduled Date'),
}
def onchange_product_id(self, cr, uid, ids, product_id, product_uom_id, parent_analytic_account, analytic_account, parent_date, date, context=None):
""" Changes UoM and name if product_id changes.
@param name: Name of the field
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
value = {'product_uom_id': ''}
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
value = {'product_uom_id': prod.uom_id.id, 'product_qty': 1.0}
if not analytic_account:
value.update({'account_analytic_id': parent_analytic_account})
if not date:
value.update({'schedule_date': parent_date})
return {'value': value}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition.line', context=c),
}
class purchase_order(osv.osv):
_inherit = "purchase.order"
_columns = {
'requisition_id': fields.many2one('purchase.requisition', 'Call for Bids'),
}
def wkf_confirm_order(self, cr, uid, ids, context=None):
res = super(purchase_order, self).wkf_confirm_order(cr, uid, ids, context=context)
proc_obj = self.pool.get('procurement.order')
for po in self.browse(cr, uid, ids, context=context):
if po.requisition_id and (po.requisition_id.exclusive == 'exclusive'):
for order in po.requisition_id.purchase_ids:
if order.id != po.id:
proc_ids = proc_obj.search(cr, uid, [('purchase_id', '=', order.id)])
if proc_ids and po.state == 'confirmed':
proc_obj.write(cr, uid, proc_ids, {'purchase_id': po.id})
self.signal_purchase_cancel(cr, uid, [order.id])
po.requisition_id.tender_done(context=context)
return res
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context = {}
if not context.get('force_requisition_id'):
default = default or {}
default.update({'requisition_id': False})
return super(purchase_order, self).copy(cr, uid, id, default=default, context=context)
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
stock_move_lines = super(purchase_order, self)._prepare_order_line_move(cr, uid, order, order_line, picking_id, group_id, context=context)
if order.requisition_id and order.requisition_id.procurement_id and order.requisition_id.procurement_id.move_dest_id:
for i in range(0, len(stock_move_lines)):
stock_move_lines[i]['move_dest_id'] = order.requisition_id.procurement_id.move_dest_id.id
return stock_move_lines
class purchase_order_line(osv.osv):
_inherit = 'purchase.order.line'
_columns = {
'quantity_bid': fields.float('Quantity Bid', digits_compute=dp.get_precision('Product Unit of Measure'), help="Technical field for not loosing the initial information about the quantity proposed in the bid"),
}
def action_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def action_confirm(self, cr, uid, ids, context=None):
super(purchase_order_line, self).action_confirm(cr, uid, ids, context=context)
for element in self.browse(cr, uid, ids, context=context):
if not element.quantity_bid:
self.write(cr, uid, ids, {'quantity_bid': element.product_qty}, context=context)
return True
def generate_po(self, cr, uid, tender_id, context=None):
#call generate_po from tender with active_id. Called from js widget
return self.pool.get('purchase.requisition').generate_po(cr, uid, [tender_id], context=context)
class product_template(osv.osv):
_inherit = 'product.template'
_columns = {
'purchase_requisition': fields.boolean('Call for Bids', help="Check this box to generate Call for Bids instead of generating requests for quotation from procurement.")
}
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'requisition_id': fields.many2one('purchase.requisition', 'Latest Requisition')
}
def _run(self, cr, uid, procurement, context=None):
requisition_obj = self.pool.get('purchase.requisition')
warehouse_obj = self.pool.get('stock.warehouse')
if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
warehouse_id = warehouse_obj.search(cr, uid, [('company_id', '=', procurement.company_id.id)], context=context)
requisition_id = requisition_obj.create(cr, uid, {
'origin': procurement.origin,
'date_end': procurement.date_planned,
'warehouse_id': warehouse_id and warehouse_id[0] or False,
'company_id': procurement.company_id.id,
'procurement_id': procurement.id,
'line_ids': [(0, 0, {
'product_id': procurement.product_id.id,
'product_uom_id': procurement.product_uom.id,
'product_qty': procurement.product_qty
})],
})
self.message_post(cr, uid, [procurement.id], body=_("Purchase Requisition created"), context=context)
return self.write(cr, uid, [procurement.id], {'requisition_id': requisition_id}, context=context)
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def _check(self, cr, uid, procurement, context=None):
requisition_obj = self.pool.get('purchase.requisition')
if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
if procurement.requisition_id.state == 'done':
if any([purchase.shipped for purchase in procurement.requisition_id.purchase_ids]):
return True
return False
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
import numpy as np
import pytest
import scipy.sparse as sp
from numpy.random import RandomState
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import linalg
from sklearn.datasets import make_classification
from sklearn.utils._testing import assert_allclose
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS
from sklearn.utils.sparsefuncs import (
_implicit_column_offset,
count_nonzero,
csc_median_axis_0,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_column,
inplace_swap_row,
mean_variance_axis,
min_max_axis,
sparse_matmul_to_dense,
)
from sklearn.utils.sparsefuncs_fast import (
assign_rows_csr,
csr_row_norms,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2,
)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_mean_variance_axis0(csc_container, csr_container, lil_container):
    """mean_variance_axis(axis=0) matches dense np.mean/np.var and rejects LIL.

    Also checks the output dtype contract: float inputs keep their dtype,
    integer inputs are promoted to float64.
    """
    dense, _ = make_classification(5, 4, random_state=0)
    # Introduce explicit zeros so the matrix is genuinely sparse.
    for row, col in ((0, 0), (2, 1), (4, 3)):
        dense[row, col] = 0
    sparse_lil = lil_container(dense)
    sparse_lil[1, 0] = 0
    dense[1, 0] = 0
    # LIL input is not supported and must raise.
    with pytest.raises(TypeError):
        mean_variance_axis(sparse_lil, axis=0)
    candidates = (csr_container(sparse_lil), csc_container(sparse_lil))
    dtype_pairs = [
        (np.float32, np.float32),
        (np.float64, np.float64),
        (np.int32, np.float64),
        (np.int64, np.float64),
    ]
    for input_dtype, output_dtype in dtype_pairs:
        reference = dense.astype(input_dtype)
        for candidate in candidates:
            means, variances = mean_variance_axis(
                candidate.astype(input_dtype), axis=0
            )
            assert means.dtype == output_dtype
            assert variances.dtype == output_dtype
            assert_array_almost_equal(means, np.mean(reference, axis=0))
            assert_array_almost_equal(variances, np.var(reference, axis=0))
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
def test_mean_variance_axis0_precision(dtype, sparse_constructor):
    """No catastrophic precision loss when the true variance is exactly 0.

    Non-regression test for gh-19766: a constant column (with some NaN
    entries to be ignored) must report a variance below machine epsilon.
    """
    rng = np.random.RandomState(0)
    data = np.full(fill_value=100.0, shape=(1000, 1), dtype=dtype)
    # Sprinkle in NaNs, which the computation must skip.
    nan_rows = rng.choice(np.arange(data.shape[0]), 10, replace=False)
    data[nan_rows, 0] = np.nan
    data = sparse_constructor(data)
    # Random positive sample weights.
    sample_weight = rng.rand(data.shape[0]).astype(dtype)
    _, variance = mean_variance_axis(data, weights=sample_weight, axis=0)
    assert variance < np.finfo(dtype).eps
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_mean_variance_axis1(csc_container, csr_container, lil_container):
    """mean_variance_axis(axis=1) matches dense np.mean/np.var and rejects LIL.

    Also checks the output dtype contract: float inputs keep their dtype,
    integer inputs are promoted to float64.
    """
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = lil_container(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    # LIL input is not supported and must raise.
    with pytest.raises(TypeError):
        mean_variance_axis(X_lil, axis=1)
    X_csr = csr_container(X_lil)
    X_csc = csc_container(X_lil)
    expected_dtypes = [
        (np.float32, np.float32),
        (np.float64, np.float64),
        (np.int32, np.float64),
        (np.int64, np.float64),
    ]
    for input_dtype, output_dtype in expected_dtypes:
        X_test = X.astype(input_dtype)
        for X_sparse in (X_csr, X_csc):
            X_sparse = X_sparse.astype(input_dtype)
            # Fix: the assertions previously used axis=0, duplicating
            # test_mean_variance_axis0 and leaving axis=1 untested.
            X_means, X_vars = mean_variance_axis(X_sparse, axis=1)
            assert X_means.dtype == output_dtype
            assert X_vars.dtype == output_dtype
            assert_array_almost_equal(X_means, np.mean(X_test, axis=1))
            assert_array_almost_equal(X_vars, np.var(X_test, axis=1))
# Each case supplies: Xw (data), X (the same data with rows/columns repeated
# according to `weights`), and the integer-like `weights` themselves, so that
# the weighted statistics of Xw must equal the unweighted statistics of X.
# NaN entries are expected to be ignored by the computation.
@pytest.mark.parametrize(
    ["Xw", "X", "weights"],
    [
        ([[0, 0, 1], [0, 2, 3]], [[0, 0, 1], [0, 2, 3]], [1, 1, 1]),
        ([[0, 0, 1], [0, 1, 1]], [[0, 0, 0, 1], [0, 1, 1, 1]], [1, 2, 1]),
        ([[0, 0, 1], [0, 1, 1]], [[0, 0, 1], [0, 1, 1]], None),
        (
            [[0, np.nan, 2], [0, np.nan, np.nan]],
            [[0, np.nan, 2], [0, np.nan, np.nan]],
            [1.0, 1.0, 1.0],
        ),
        (
            [[0, 0], [1, np.nan], [2, 0], [0, 3], [np.nan, np.nan], [np.nan, 2]],
            [
                [0, 0, 0],
                [1, 1, np.nan],
                [2, 2, 0],
                [0, 0, 3],
                [np.nan, np.nan, np.nan],
                [np.nan, np.nan, 2],
            ],
            [2.0, 1.0],
        ),
        (
            [[1, 0, 1], [0, 3, 1]],
            [[1, 0, 0, 0, 1], [0, 3, 3, 3, 1]],
            np.array([1, 3, 1]),
        ),
    ],
)
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_incr_mean_variance_axis_weighted_axis1(
    Xw, X, weights, sparse_constructor, dtype
):
    """Weighted incremental mean/variance along axis=1 equals the unweighted
    computation on the equivalent column-expanded data, over two update rounds.
    """
    axis = 1
    Xw_sparse = sparse_constructor(Xw).astype(dtype)
    X_sparse = sparse_constructor(X).astype(dtype)
    # Start from empty accumulators: one slot per row (axis=1 statistics).
    last_mean = np.zeros(np.shape(Xw)[0], dtype=dtype)
    last_var = np.zeros_like(last_mean, dtype=dtype)
    last_n = np.zeros_like(last_mean, dtype=np.int64)
    # First round: unweighted on the expanded data ...
    means0, vars0, n_incr0 = incr_mean_variance_axis(
        X=X_sparse,
        axis=axis,
        last_mean=last_mean,
        last_var=last_var,
        last_n=last_n,
        weights=None,
    )
    # ... versus weighted on the compact data; results must agree.
    means_w0, vars_w0, n_incr_w0 = incr_mean_variance_axis(
        X=Xw_sparse,
        axis=axis,
        last_mean=last_mean,
        last_var=last_var,
        last_n=last_n,
        weights=weights,
    )
    # Weighted outputs keep the input floating dtype (including the counts,
    # which become fractional under weighting).
    assert means_w0.dtype == dtype
    assert vars_w0.dtype == dtype
    assert n_incr_w0.dtype == dtype
    # Cross-check against the one-shot (non-incremental) implementation.
    means_simple, vars_simple = mean_variance_axis(X=X_sparse, axis=axis)
    assert_array_almost_equal(means0, means_w0)
    assert_array_almost_equal(means0, means_simple)
    assert_array_almost_equal(vars0, vars_w0)
    assert_array_almost_equal(vars0, vars_simple)
    assert_array_almost_equal(n_incr0, n_incr_w0)
    # check second round for incremental
    means1, vars1, n_incr1 = incr_mean_variance_axis(
        X=X_sparse,
        axis=axis,
        last_mean=means0,
        last_var=vars0,
        last_n=n_incr0,
        weights=None,
    )
    means_w1, vars_w1, n_incr_w1 = incr_mean_variance_axis(
        X=Xw_sparse,
        axis=axis,
        last_mean=means_w0,
        last_var=vars_w0,
        last_n=n_incr_w0,
        weights=weights,
    )
    assert_array_almost_equal(means1, means_w1)
    assert_array_almost_equal(vars1, vars_w1)
    assert_array_almost_equal(n_incr1, n_incr_w1)
    assert means_w1.dtype == dtype
    assert vars_w1.dtype == dtype
    assert n_incr_w1.dtype == dtype
@pytest.mark.parametrize(
    ["Xw", "X", "weights"],
    [
        ([[0, 0, 1], [0, 2, 3]], [[0, 0, 1], [0, 2, 3]], [1, 1]),
        ([[0, 0, 1], [0, 1, 1]], [[0, 0, 1], [0, 1, 1], [0, 1, 1]], [1, 2]),
        ([[0, 0, 1], [0, 1, 1]], [[0, 0, 1], [0, 1, 1]], None),
        (
            [[0, np.nan, 2], [0, np.nan, np.nan]],
            [[0, np.nan, 2], [0, np.nan, np.nan]],
            [1.0, 1.0],
        ),
        (
            [[0, 0, 1, np.nan, 2, 0], [0, 3, np.nan, np.nan, np.nan, 2]],
            [
                [0, 0, 1, np.nan, 2, 0],
                [0, 0, 1, np.nan, 2, 0],
                [0, 3, np.nan, np.nan, np.nan, 2],
            ],
            [2.0, 1.0],
        ),
        (
            [[1, 0, 1], [0, 0, 1]],
            [[1, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]],
            np.array([1, 3]),
        ),
    ],
)
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_incr_mean_variance_axis_weighted_axis0(
    Xw, X, weights, sparse_constructor, dtype
):
    """Weighted incremental mean/variance along axis=0 equals the unweighted
    computation on the equivalent row-expanded data, over two update rounds.

    Each case supplies Xw (data), X (the same data with rows repeated per
    `weights`), and the weights themselves; NaN entries are skipped.
    """
    axis = 0
    weighted_sparse = sparse_constructor(Xw).astype(dtype)
    expanded_sparse = sparse_constructor(X).astype(dtype)
    # Empty accumulators: one slot per column (axis=0 statistics).
    zero_mean = np.zeros(np.size(Xw, 1), dtype=dtype)
    zero_var = np.zeros_like(zero_mean)
    zero_n = np.zeros_like(zero_mean, dtype=np.int64)

    def update(matrix, mean, var, n, w):
        # One incremental round folding `matrix` into (mean, var, n).
        return incr_mean_variance_axis(
            X=matrix, axis=axis, last_mean=mean, last_var=var, last_n=n, weights=w
        )

    means0, vars0, n_incr0 = update(expanded_sparse, zero_mean, zero_var, zero_n, None)
    means_w0, vars_w0, n_incr_w0 = update(
        weighted_sparse, zero_mean, zero_var, zero_n, weights
    )
    # Weighted outputs keep the input floating dtype (including the counts,
    # which become fractional under weighting).
    assert means_w0.dtype == dtype
    assert vars_w0.dtype == dtype
    assert n_incr_w0.dtype == dtype
    # Cross-check against the one-shot (non-incremental) implementation.
    means_simple, vars_simple = mean_variance_axis(X=expanded_sparse, axis=axis)
    assert_array_almost_equal(means0, means_w0)
    assert_array_almost_equal(means0, means_simple)
    assert_array_almost_equal(vars0, vars_w0)
    assert_array_almost_equal(vars0, vars_simple)
    assert_array_almost_equal(n_incr0, n_incr_w0)
    # Second incremental round: fold the same batch in again and compare.
    means1, vars1, n_incr1 = update(expanded_sparse, means0, vars0, n_incr0, None)
    means_w1, vars_w1, n_incr_w1 = update(
        weighted_sparse, means_w0, vars_w0, n_incr_w0, weights
    )
    assert_array_almost_equal(means1, means_w1)
    assert_array_almost_equal(vars1, vars_w1)
    assert_array_almost_equal(n_incr1, n_incr_w1)
    assert means_w1.dtype == dtype
    assert vars_w1.dtype == dtype
    assert n_incr_w1.dtype == dtype
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_incr_mean_variance_axis(csc_container, csr_container, lil_container):
    """Check incr_mean_variance_axis against mean_variance_axis for both axes,
    covering error paths, a single-chunk input, and several input/output dtype
    combinations on CSR and CSC matrices."""
    for axis in [0, 1]:
        rng = np.random.RandomState(0)
        n_features = 50
        n_samples = 10
        # Build one data chunk per element along the non-reduced axis.
        if axis == 0:
            data_chunks = [rng.randint(0, 2, size=n_features) for i in range(n_samples)]
        else:
            data_chunks = [rng.randint(0, 2, size=n_samples) for i in range(n_features)]
        # default params for incr_mean_variance
        last_mean = np.zeros(n_features) if axis == 0 else np.zeros(n_samples)
        last_var = np.zeros_like(last_mean)
        last_n = np.zeros_like(last_mean, dtype=np.int64)
        # Test errors
        X = np.array(data_chunks[0])
        X = np.atleast_2d(X)
        X = X.T if axis == 1 else X
        X_lil = lil_container(X)
        X_csr = csr_container(X_lil)
        # Arguments of the wrong types must raise TypeError.
        with pytest.raises(TypeError):
            incr_mean_variance_axis(
                X=axis, axis=last_mean, last_mean=last_var, last_var=last_n
            )
        # LIL input is not supported.
        with pytest.raises(TypeError):
            incr_mean_variance_axis(
                X_lil, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
            )
        # Test _incr_mean_and_var with a 1 row input
        X_means, X_vars = mean_variance_axis(X_csr, axis)
        X_means_incr, X_vars_incr, n_incr = incr_mean_variance_axis(
            X_csr, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
        )
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        # X.shape[axis] picks # samples
        assert_array_equal(X.shape[axis], n_incr)
        # Same check on the CSC variant of the same data.
        X_csc = csc_container(X_lil)
        X_means, X_vars = mean_variance_axis(X_csc, axis)
        assert_array_almost_equal(X_means, X_means_incr)
        assert_array_almost_equal(X_vars, X_vars_incr)
        assert_array_equal(X.shape[axis], n_incr)
        # Test _incremental_mean_and_var with whole data
        X = np.vstack(data_chunks)
        X = X.T if axis == 1 else X
        X_lil = lil_container(X)
        X_csr = csr_container(X_lil)
        X_csc = csc_container(X_lil)
        # Integer inputs are expected to be promoted to float64 outputs.
        expected_dtypes = [
            (np.float32, np.float32),
            (np.float64, np.float64),
            (np.int32, np.float64),
            (np.int64, np.float64),
        ]
        for input_dtype, output_dtype in expected_dtypes:
            for X_sparse in (X_csr, X_csc):
                X_sparse = X_sparse.astype(input_dtype)
                last_mean = last_mean.astype(output_dtype)
                last_var = last_var.astype(output_dtype)
                X_means, X_vars = mean_variance_axis(X_sparse, axis)
                X_means_incr, X_vars_incr, n_incr = incr_mean_variance_axis(
                    X_sparse,
                    axis=axis,
                    last_mean=last_mean,
                    last_var=last_var,
                    last_n=last_n,
                )
                assert X_means_incr.dtype == output_dtype
                assert X_vars_incr.dtype == output_dtype
                assert_array_almost_equal(X_means, X_means_incr)
                assert_array_almost_equal(X_vars, X_vars_incr)
                assert_array_equal(X.shape[axis], n_incr)
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
def test_incr_mean_variance_axis_dim_mismatch(sparse_constructor):
    """Check that we raise proper error when axis=1 and the dimension mismatch.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/pull/18655
    """
    n_samples, n_features = 60, 4
    rng = np.random.RandomState(42)
    X = sparse_constructor(rng.rand(n_samples, n_features))
    # Statistics sized for axis=0 reductions: one entry per feature.
    stats = dict(
        last_mean=np.zeros(n_features),
        last_var=np.zeros(n_features),
        last_n=np.zeros(n_features, dtype=np.int64),
    )
    mean0, var0, _ = incr_mean_variance_axis(X, axis=0, **stats)
    X_dense = X.toarray()
    assert_allclose(np.mean(X_dense, axis=0), mean0)
    assert_allclose(np.var(X_dense, axis=0), var0)
    # axis=1 with feature-sized statistics must raise ValueError.
    with pytest.raises(ValueError):
        incr_mean_variance_axis(X, axis=1, **stats)
    # Inconsistent shapes among last_mean, last_var, last_n must raise too.
    stats["last_mean"] = stats["last_mean"][:-1]
    with pytest.raises(ValueError):
        incr_mean_variance_axis(X, axis=0, **stats)
@pytest.mark.parametrize(
    "X1, X2",
    [
        (
            sp.random(5, 2, density=0.8, format="csr", random_state=0),
            sp.random(13, 2, density=0.8, format="csr", random_state=0),
        ),
        (
            sp.random(5, 2, density=0.8, format="csr", random_state=0),
            sp.hstack(
                [
                    np.full((13, 1), fill_value=np.nan),
                    sp.random(13, 1, density=0.8, random_state=42),
                ],
                format="csr",
            ),
        ),
    ],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_incr_mean_variance_axis_equivalence_mean_variance(X1, X2, csr_container):
    """Two incremental updates must match a single pass over the stacked data."""
    # non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/16448
    # check that computing the incremental mean and variance is equivalent to
    # computing the mean and variance on the stacked dataset.
    X1 = csr_container(X1)
    X2 = csr_container(X2)
    axis = 0
    last_mean, last_var = np.zeros(X1.shape[1]), np.zeros(X1.shape[1])
    last_n = np.zeros(X1.shape[1], dtype=np.int64)
    updated_mean, updated_var, updated_n = incr_mean_variance_axis(
        X1, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
    )
    updated_mean, updated_var, updated_n = incr_mean_variance_axis(
        X2, axis=axis, last_mean=updated_mean, last_var=updated_var, last_n=updated_n
    )
    X = sp.vstack([X1, X2])
    # NaN entries are skipped, so compare against the nan-aware reductions.
    assert_allclose(updated_mean, np.nanmean(X.toarray(), axis=axis))
    assert_allclose(updated_var, np.nanvar(X.toarray(), axis=axis))
    assert_allclose(updated_n, np.count_nonzero(~np.isnan(X.toarray()), axis=0))
def test_incr_mean_variance_no_new_n():
    """Updating with a zero-row matrix must leave the statistics unchanged."""
    X1 = sp.random(5, 1, density=0.8, random_state=0).tocsr()
    X_empty = sp.random(0, 1, density=0.8, random_state=0).tocsr()
    n_features = X1.shape[1]
    mean, var, n = incr_mean_variance_axis(
        X1,
        axis=0,
        last_mean=np.zeros(n_features),
        last_var=np.zeros(n_features),
        last_n=np.zeros(n_features, dtype=np.int64),
    )
    # The empty chunk carries no samples, so it must be ignored entirely.
    new_mean, new_var, new_n = incr_mean_variance_axis(
        X_empty, axis=0, last_mean=mean, last_var=var, last_n=n
    )
    assert_allclose(new_mean, mean)
    assert_allclose(new_var, var)
    assert_allclose(new_n, n)
def test_incr_mean_variance_n_float():
    """A scalar ``last_n`` must be accepted and broadcast to one count per feature."""
    X = sp.random(5, 2, density=0.8, random_state=0).tocsr()
    n_features = X.shape[1]
    _, _, new_n = incr_mean_variance_axis(
        X,
        axis=0,
        last_mean=np.zeros(n_features),
        last_var=np.zeros(n_features),
        last_n=0,
    )
    # Every feature saw exactly X.shape[0] samples.
    assert_allclose(new_n, np.full(n_features, X.shape[0]))
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("sparse_constructor", CSC_CONTAINERS + CSR_CONTAINERS)
def test_incr_mean_variance_axis_ignore_nan(axis, sparse_constructor):
    """NaN entries must not change the incremental statistics."""
    old_means = np.array([535.0, 535.0, 535.0, 535.0])
    old_variances = np.array([4225.0, 4225.0, 4225.0, 4225.0])
    old_sample_count = np.array([2, 2, 2, 2], dtype=np.int64)
    X = sparse_constructor(
        np.array([[170, 170, 170, 170], [430, 430, 430, 430], [300, 300, 300, 300]])
    )
    # Each column of X_nan holds the same three values as X plus one NaN.
    X_nan = sparse_constructor(
        np.array(
            [
                [170, np.nan, 170, 170],
                [np.nan, 170, 430, 430],
                [430, 430, np.nan, 300],
                [300, 300, 300, np.nan],
            ]
        )
    )
    # we avoid creating specific data for axis 0 and 1: translating the data is
    # enough.
    if axis:
        X = X.T
        X_nan = X_nan.T
    # take a copy of the old statistics since they are modified in place.
    X_means, X_vars, X_sample_count = incr_mean_variance_axis(
        X,
        axis=axis,
        last_mean=old_means.copy(),
        last_var=old_variances.copy(),
        last_n=old_sample_count.copy(),
    )
    X_nan_means, X_nan_vars, X_nan_sample_count = incr_mean_variance_axis(
        X_nan,
        axis=axis,
        last_mean=old_means.copy(),
        last_var=old_variances.copy(),
        last_n=old_sample_count.copy(),
    )
    # With NaNs skipped, both inputs contribute identical statistics.
    assert_allclose(X_nan_means, X_means)
    assert_allclose(X_nan_vars, X_vars)
    assert_allclose(X_nan_sample_count, X_sample_count)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_mean_variance_illegal_axis(csr_container):
    """Axes outside {-2, -1, 0, 1} must be rejected with ValueError."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_csr = csr_container(X)
    invalid_axes = (-3, 2, -1)
    for bad_axis in invalid_axes:
        with pytest.raises(ValueError):
            mean_variance_axis(X_csr, axis=bad_axis)
    for bad_axis in invalid_axes:
        with pytest.raises(ValueError):
            incr_mean_variance_axis(
                X_csr, axis=bad_axis, last_mean=None, last_var=None, last_n=None
            )
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_densify_rows(csr_container):
    """assign_rows_csr must copy selected sparse rows into selected dense rows."""
    for dtype in (np.float32, np.float64):
        X = csr_container(
            [[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=dtype
        )
        X_rows = np.array([0, 2, 3], dtype=np.intp)
        out_rows = np.array([1, 3, 4], dtype=np.intp)
        out = np.ones((6, X.shape[1]), dtype=dtype)
        # Build the reference result with plain numpy fancy indexing.
        expected = np.ones_like(out)
        expected[out_rows] = X[X_rows, :].toarray()
        assign_rows_csr(X, X_rows, out_rows, out)
        assert_array_equal(out, expected)
def test_inplace_column_scale():
    """inplace_column_scale must match dense column scaling on CSR and CSC.

    The float64 and float32 paths were previously two duplicated copies of the
    same body; they are now a single loop over both dtypes.  LIL input must be
    rejected with a TypeError in both cases.
    """
    rng = np.random.RandomState(0)
    X64 = sp.random(100, 200, density=0.05)
    scale64 = rng.rand(200)
    for X, scale in (
        (X64, scale64),
        (X64.astype(np.float32), scale64.astype(np.float32)),
    ):
        Xr = X.tocsr()
        Xc = X.tocsc()
        XA = X.toarray()
        # Dense reference: broadcasting multiplies each column by its factor.
        XA *= scale
        inplace_column_scale(Xc, scale)
        inplace_column_scale(Xr, scale)
        assert_array_almost_equal(Xr.toarray(), Xc.toarray())
        assert_array_almost_equal(XA, Xc.toarray())
        assert_array_almost_equal(XA, Xr.toarray())
        # Only CSR/CSC are supported for in-place scaling.
        with pytest.raises(TypeError):
            inplace_column_scale(X.tolil(), scale)
def test_inplace_row_scale():
    """inplace_row_scale must match dense row scaling on CSR and CSC.

    Fixes two issues in the original test: the ``pytest.raises(TypeError)``
    checks called ``inplace_column_scale`` (a copy-paste slip), so the LIL
    rejection of ``inplace_row_scale`` itself was never exercised; and the
    float64/float32 bodies were duplicated verbatim, now a single dtype loop.
    """
    rng = np.random.RandomState(0)
    X64 = sp.random(100, 200, density=0.05)
    scale64 = rng.rand(100)
    for X, scale in (
        (X64, scale64),
        (X64.astype(np.float32), scale64.astype(np.float32)),
    ):
        Xr = X.tocsr()
        Xc = X.tocsc()
        XA = X.toarray()
        # Dense reference: one scale factor per row.
        XA *= scale.reshape(-1, 1)
        inplace_row_scale(Xc, scale)
        inplace_row_scale(Xr, scale)
        assert_array_almost_equal(Xr.toarray(), Xc.toarray())
        assert_array_almost_equal(XA, Xc.toarray())
        assert_array_almost_equal(XA, Xr.toarray())
        # Only CSR/CSC are supported for in-place scaling.
        with pytest.raises(TypeError):
            inplace_row_scale(X.tolil(), scale)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_inplace_swap_row(csc_container, csr_container):
    """inplace_swap_row must match a BLAS row swap on the dense array."""
    X = np.array(
        [[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64
    )
    X_csr = csr_container(X)
    X_csc = csc_container(X)
    # BLAS swap on the dense copy serves as the reference behaviour.
    swap = linalg.get_blas_funcs(("swap",), (X,))
    swap = swap[0]
    X[0], X[-1] = swap(X[0], X[-1])
    inplace_swap_row(X_csr, 0, -1)
    inplace_swap_row(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[2], X[3] = swap(X[2], X[3])
    inplace_swap_row(X_csr, 2, 3)
    inplace_swap_row(X_csc, 2, 3)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    # LIL input is not supported.
    with pytest.raises(TypeError):
        inplace_swap_row(X_csr.tolil())
    # Same checks with float32 data.
    X = np.array(
        [[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32
    )
    X_csr = csr_container(X)
    X_csc = csc_container(X)
    swap = linalg.get_blas_funcs(("swap",), (X,))
    swap = swap[0]
    X[0], X[-1] = swap(X[0], X[-1])
    inplace_swap_row(X_csr, 0, -1)
    inplace_swap_row(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[2], X[3] = swap(X[2], X[3])
    inplace_swap_row(X_csr, 2, 3)
    inplace_swap_row(X_csc, 2, 3)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    with pytest.raises(TypeError):
        inplace_swap_row(X_csr.tolil())
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_inplace_swap_column(csc_container, csr_container):
    """inplace_swap_column must match a BLAS column swap on the dense array."""
    X = np.array(
        [[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64
    )
    X_csr = csr_container(X)
    X_csc = csc_container(X)
    # BLAS swap on the dense copy serves as the reference behaviour.
    swap = linalg.get_blas_funcs(("swap",), (X,))
    swap = swap[0]
    X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
    inplace_swap_column(X_csr, 0, -1)
    inplace_swap_column(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
    inplace_swap_column(X_csr, 0, 1)
    inplace_swap_column(X_csc, 0, 1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    # LIL input is not supported.
    with pytest.raises(TypeError):
        inplace_swap_column(X_csr.tolil())
    # Same checks with float32 data.
    X = np.array(
        [[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32
    )
    X_csr = csr_container(X)
    X_csc = csc_container(X)
    swap = linalg.get_blas_funcs(("swap",), (X,))
    swap = swap[0]
    X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
    inplace_swap_column(X_csr, 0, -1)
    inplace_swap_column(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
    inplace_swap_column(X_csr, 0, 1)
    inplace_swap_column(X_csc, 0, 1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    with pytest.raises(TypeError):
        inplace_swap_column(X_csr.tolil())
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("axis", [0, 1, None])
@pytest.mark.parametrize("sparse_format", CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize(
    "missing_values, min_func, max_func, ignore_nan",
    [(0, np.min, np.max, False), (np.nan, np.nanmin, np.nanmax, True)],
)
@pytest.mark.parametrize("large_indices", [True, False])
def test_min_max(
    dtype,
    axis,
    sparse_format,
    missing_values,
    min_func,
    max_func,
    ignore_nan,
    large_indices,
):
    """min_max_axis must match the corresponding numpy reductions, with and
    without NaN handling and with 32- or 64-bit sparse indices."""
    X = np.array(
        [
            [0, 3, 0],
            [2, -1, missing_values],
            [0, 0, 0],
            [9, missing_values, 7],
            [4, 0, 5],
        ],
        dtype=dtype,
    )
    X_sparse = sparse_format(X)
    # Exercise the int64-index code path as well.
    if large_indices:
        X_sparse.indices = X_sparse.indices.astype("int64")
        X_sparse.indptr = X_sparse.indptr.astype("int64")
    mins_sparse, maxs_sparse = min_max_axis(X_sparse, axis=axis, ignore_nan=ignore_nan)
    assert_array_equal(mins_sparse, min_func(X, axis=axis))
    assert_array_equal(maxs_sparse, max_func(X, axis=axis))
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_min_max_axis_errors(csc_container, csr_container):
    """min_max_axis must reject unsupported formats and out-of-range axes."""
    X_dense = np.array(
        [[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64
    )
    X_csc = csc_container(X_dense)
    X_csr = csr_container(X_dense)
    # LIL matrices are not supported.
    with pytest.raises(TypeError):
        min_max_axis(X_csr.tolil(), axis=0)
    # An axis outside the valid range is invalid for either format.
    with pytest.raises(ValueError):
        min_max_axis(X_csr, axis=2)
    with pytest.raises(ValueError):
        min_max_axis(X_csc, axis=-3)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_count_nonzero(csc_container, csr_container):
    """count_nonzero must match dense counts, support sample weights, reject
    CSC input and invalid axes, and keep a consistent result dtype."""
    X = np.array(
        [[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64
    )
    X_csr = csr_container(X)
    X_csc = csc_container(X)
    X_nonzero = X != 0
    sample_weight = [0.5, 0.2, 0.3, 0.1, 0.1]
    # Weighted reference: each nonzero contributes its row's weight.
    X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
    for axis in [0, 1, -1, -2, None]:
        assert_array_almost_equal(
            count_nonzero(X_csr, axis=axis), X_nonzero.sum(axis=axis)
        )
        assert_array_almost_equal(
            count_nonzero(X_csr, axis=axis, sample_weight=sample_weight),
            X_nonzero_weighted.sum(axis=axis),
        )
    # CSC input is not supported.
    with pytest.raises(TypeError):
        count_nonzero(X_csc)
    with pytest.raises(ValueError):
        count_nonzero(X_csr, axis=2)
    assert count_nonzero(X_csr, axis=0).dtype == count_nonzero(X_csr, axis=1).dtype
    assert (
        count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype
        == count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype
    )
    # Check dtypes with large sparse matrices too
    # XXX: test fails on 32bit (Windows/Linux)
    try:
        X_csr.indices = X_csr.indices.astype(np.int64)
        X_csr.indptr = X_csr.indptr.astype(np.int64)
        assert count_nonzero(X_csr, axis=0).dtype == count_nonzero(X_csr, axis=1).dtype
        assert (
            count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype
            == count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype
        )
    except TypeError as e:
        # On 32-bit platforms int64 indices cannot be cast safely; accept only
        # that specific failure mode.
        assert "according to the rule 'safe'" in e.args[0] and np.intp().nbytes < 8, e
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_csc_row_median(csc_container, csr_container):
    # Test csc_row_median actually calculates the median.
    # Test that it gives the same output when X is dense.
    rng = np.random.RandomState(0)
    X = rng.rand(100, 50)
    dense_median = np.median(X, axis=0)
    csc = csc_container(X)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)
    # Test that it gives the same output when X is sparse
    X = rng.rand(51, 100)
    X[X < 0.7] = 0.0
    # Flip the sign of some rows so negatives are covered too.
    ind = rng.randint(0, 50, 10)
    X[ind] = -X[ind]
    csc = csc_container(X)
    dense_median = np.median(X, axis=0)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)
    # Test for toy data.
    X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
    csc = csc_container(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
    X = [[0, -2], [-1, -5], [1, -3]]
    csc = csc_container(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0.0, -3]))
    # Test that it raises an Error for non-csc matrices.
    with pytest.raises(TypeError):
        csc_median_axis_0(csr_container(X))
@pytest.mark.parametrize(
    "inplace_csr_row_normalize",
    (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2),
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_inplace_normalize(csr_container, inplace_csr_row_normalize):
    """After in-place L1/L2 row normalization, every row norm must be 1."""
    # spmatrix .sum(axis=1) returns a (n, 1) matrix; sparse arrays return a
    # 1d array, so the expected "ones" shape differs by container.
    if csr_container is sp.csr_matrix:
        ones = np.ones((10, 1))
    else:
        ones = np.ones(10)
    rs = RandomState(10)
    for dtype in (np.float64, np.float32):
        X = rs.randn(10, 5).astype(dtype)
        X_csr = csr_container(X)
        for index_dtype in [np.int32, np.int64]:
            # csr_matrix will use int32 indices by default,
            # up-casting those to int64 when necessary
            if index_dtype is np.int64:
                X_csr.indptr = X_csr.indptr.astype(index_dtype)
                X_csr.indices = X_csr.indices.astype(index_dtype)
            assert X_csr.indices.dtype == index_dtype
            assert X_csr.indptr.dtype == index_dtype
            inplace_csr_row_normalize(X_csr)
            assert X_csr.dtype == dtype
            # For L2, squaring the data turns unit L2 norms into unit L1 sums,
            # so the same absolute-sum check works for both norms.
            if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
                X_csr.data **= 2
            assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_csr_row_norms(dtype):
    """csr_row_norms must match scipy's squared row norms and keep X's dtype."""
    X = sp.random(100, 10, format="csr", dtype=dtype, random_state=42)
    expected = sp.linalg.norm(X, axis=1) ** 2
    norms = csr_row_norms(X)
    assert norms.dtype == dtype
    # float32 accumulation is less precise, hence the looser tolerance.
    tol = 1e-6 if dtype == np.float32 else 1e-7
    assert_allclose(norms, expected, rtol=tol)
@pytest.fixture(scope="module", params=CSR_CONTAINERS + CSC_CONTAINERS)
def centered_matrices(request):
    """Returns equivalent tuple[sp.linalg.LinearOperator, np.ndarray]."""
    container = request.param
    rng = np.random.default_rng(42)
    X_sparse = container(
        sp.random(500, 100, density=0.1, format="csr", random_state=rng)
    )
    X_dense = X_sparse.toarray()
    # Column means used as the implicit offset.
    column_means = np.asarray(X_sparse.mean(axis=0)).ravel()
    X_sparse_centered = _implicit_column_offset(X_sparse, column_means)
    X_dense_centered = X_dense - column_means
    return X_sparse_centered, X_dense_centered
def test_implicit_center_matmat(global_random_seed, centered_matrices):
    """matmat and @ on the implicit-offset operator must match dense matmul."""
    X_sparse_centered, X_dense_centered = centered_matrices
    rng = np.random.default_rng(global_random_seed)
    rhs = rng.standard_normal((X_dense_centered.shape[1], 50))
    expected = X_dense_centered @ rhs
    assert_allclose(expected, X_sparse_centered.matmat(rhs))
    assert_allclose(expected, X_sparse_centered @ rhs)
def test_implicit_center_matvec(global_random_seed, centered_matrices):
    """matvec and @ on the implicit-offset operator must match dense matvec."""
    X_sparse_centered, X_dense_centered = centered_matrices
    rng = np.random.default_rng(global_random_seed)
    vec = rng.standard_normal(X_dense_centered.shape[1])
    expected = X_dense_centered @ vec
    assert_allclose(expected, X_sparse_centered.matvec(vec))
    assert_allclose(expected, X_sparse_centered @ vec)
def test_implicit_center_rmatmat(global_random_seed, centered_matrices):
    """rmatmat and transposed @ must match the dense transposed matmul."""
    X_sparse_centered, X_dense_centered = centered_matrices
    rng = np.random.default_rng(global_random_seed)
    rhs = rng.standard_normal((X_dense_centered.shape[0], 50))
    expected = X_dense_centered.T @ rhs
    assert_allclose(expected, X_sparse_centered.rmatmat(rhs))
    assert_allclose(expected, X_sparse_centered.T @ rhs)
# NOTE(review): "implit" is a typo for "implicit"; the name is kept so test
# selection by name (e.g. -k patterns) keeps working.
def test_implit_center_rmatvec(global_random_seed, centered_matrices):
    """rmatvec and transposed @ must match the dense transposed matvec."""
    X_sparse_centered, X_dense_centered = centered_matrices
    rng = np.random.default_rng(global_random_seed)
    vec = rng.standard_normal(X_dense_centered.shape[0])
    expected = X_dense_centered.T @ vec
    assert_allclose(expected, X_sparse_centered.rmatvec(vec))
    assert_allclose(expected, X_sparse_centered.T @ vec)
@pytest.mark.parametrize(
    ["A", "B", "out", "msg"],
    [
        (sp.eye(3, format="csr"), sp.eye(2, format="csr"), None, "Shapes must fulfil"),
        (sp.eye(2, format="csr"), sp.eye(2, format="csr"), np.eye(3), "Shape of out"),
        (sp.eye(2, format="coo"), sp.eye(2, format="csr"), None, "Input 'A' must"),
        (sp.eye(2, format="csr"), sp.eye(2, format="coo"), None, "Input 'B' must"),
        (
            sp.eye(2, format="csr", dtype=np.int32),
            sp.eye(2, format="csr"),
            None,
            "Dtype of A and B",
        ),
        (
            sp.eye(2, format="csr", dtype=np.float32),
            sp.eye(2, format="csr", dtype=np.float64),
            None,
            "Dtype of A and B",
        ),
    ],
)
def test_sparse_matmul_to_dense_raises(A, B, out, msg):
    """Test that sparse_matmul_to_dense raises when it should."""
    # Each parametrized case triggers a distinct validation error: shape
    # mismatch, wrong `out` shape, unsupported format, or dtype mismatch.
    with pytest.raises(ValueError, match=msg):
        sparse_matmul_to_dense(A, B, out=out)
@pytest.mark.parametrize("out_is_None", [False, True])
@pytest.mark.parametrize("a_container", CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize("b_container", CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_sparse_matmul_to_dense(
    global_random_seed, out_is_None, a_container, b_container, dtype
):
    """Test that sparse_matmul_to_dense computes correctly."""
    rng = np.random.default_rng(global_random_seed)
    n1, n2, n3 = 10, 19, 13
    a_dense = rng.standard_normal((n1, n2)).astype(dtype)
    b_dense = rng.standard_normal((n2, n3)).astype(dtype)
    # Zero out ~half of the entries so the matrices are genuinely sparse.
    a_dense.flat[rng.choice([False, True], size=n1 * n2, p=[0.5, 0.5])] = 0
    b_dense.flat[rng.choice([False, True], size=n2 * n3, p=[0.5, 0.5])] = 0
    a = a_container(a_dense)
    b = b_container(b_dense)
    if out_is_None:
        out = None
    else:
        out = np.empty((n1, n3), dtype=dtype)
    result = sparse_matmul_to_dense(a, b, out=out)
    # Use atol to account for the wide range of values in the computed matrix.
    assert_allclose(result, a_dense @ b_dense, atol=1e-7)
    # When `out` is provided, the result must be written into it.
    if not out_is_None:
        assert_allclose(out, result, atol=1e-7)
import { flushSync } from 'svelte';
import { test } from '../../test';
// Module-level fixtures shared with the component props; reassigned in
// `before_test` so every run starts from fresh object identities.
/** @type {Array<{ name: string }>} */
let values = [];
/** @type {Array<Array<{ name: string }>>} */
let selected_array = [];
export default test({
before_test() {
values = [{ name: 'Alpha' }, { name: 'Beta' }, { name: 'Gamma' }];
selected_array = [[values[1]], [], [values[2]]];
},
	// Hand the current module-level fixtures to the component on each mount.
	get props() {
		return { values, selected_array };
	},
	// Client-rendered markup: checked state is applied via DOM properties, so
	// no `checked` attribute appears here even for pre-selected inputs.
	html: `
		<div>
			<label>
				<input type="checkbox" value="[object Object]"> Alpha
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Beta
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Gamma
			</label>
			<p>Beta</p>
		</div>
		<div>
			<label>
				<input type="checkbox" value="[object Object]"> Alpha
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Beta
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Gamma
			</label>
			<p></p>
		</div>
		<div>
			<label>
				<input type="checkbox" value="[object Object]"> Alpha
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Beta
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Gamma
			</label>
			<p>Gamma</p>
		</div>
	`,
	// Server-rendered markup: pre-selected inputs carry a serialized
	// `checked` attribute instead.
	ssrHtml: `
		<div>
			<label>
				<input type="checkbox" value="[object Object]"> Alpha
			</label>
			<label>
				<input type="checkbox" value="[object Object]" checked> Beta
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Gamma
			</label>
			<p>Beta</p>
		</div>
		<div>
			<label>
				<input type="checkbox" value="[object Object]"> Alpha
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Beta
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Gamma
			</label>
			<p></p>
		</div>
		<div>
			<label>
				<input type="checkbox" value="[object Object]"> Alpha
			</label>
			<label>
				<input type="checkbox" value="[object Object]"> Beta
			</label>
			<label>
				<input type="checkbox" value="[object Object]" checked> Gamma
			</label>
			<p>Gamma</p>
		</div>
	`,
	test({ assert, component, target, window }) {
		// Nine inputs: three checkbox groups of three options each.
		const inputs = target.querySelectorAll('input');
		// Initial state mirrors selected_array: group 0 has Beta, group 1 is
		// empty, group 2 has Gamma.
		assert.equal(inputs[0].checked, false);
		assert.equal(inputs[1].checked, true);
		assert.equal(inputs[2].checked, false);
		assert.equal(inputs[3].checked, false);
		assert.equal(inputs[4].checked, false);
		assert.equal(inputs[5].checked, false);
		assert.equal(inputs[6].checked, false);
		assert.equal(inputs[7].checked, false);
		assert.equal(inputs[8].checked, true);
		const event = new window.Event('change');
		// Check "Alpha" in the first group: binding should add it alongside Beta.
		inputs[0].checked = true;
		inputs[0].dispatchEvent(event);
		flushSync();
		assert.htmlEqual(
			target.innerHTML,
			`
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p>Alpha, Beta</p>
			</div>
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p></p>
			</div>
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p>Gamma</p>
			</div>
			`
		);
		// Check "Alpha" in the second group: only that group's selection changes.
		inputs[3].checked = true;
		inputs[3].dispatchEvent(event);
		flushSync();
		assert.htmlEqual(
			target.innerHTML,
			`
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p>Alpha, Beta</p>
			</div>
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p>Alpha</p>
			</div>
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p>Gamma</p>
			</div>
			`
		);
		// Uncheck "Gamma" in the third group: its list empties.
		inputs[8].checked = false;
		inputs[8].dispatchEvent(event);
		flushSync();
		assert.htmlEqual(
			target.innerHTML,
			`
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p>Alpha, Beta</p>
			</div>
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p>Alpha</p>
			</div>
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p></p>
			</div>
			`
		);
		// Programmatic update with only two groups: the third block is removed
		// and checked state follows the new arrays.
		component.selected_array = [[component.values[1], component.values[2]], [component.values[2]]];
		assert.equal(inputs[0].checked, false);
		assert.equal(inputs[1].checked, true);
		assert.equal(inputs[2].checked, true);
		assert.equal(inputs[3].checked, false);
		assert.equal(inputs[4].checked, false);
		assert.equal(inputs[5].checked, true);
		assert.htmlEqual(
			target.innerHTML,
			`
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p>Beta, Gamma</p>
			</div>
			<div>
				<label>
					<input type="checkbox" value="[object Object]"> Alpha
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Beta
				</label>
				<label>
					<input type="checkbox" value="[object Object]"> Gamma
				</label>
				<p>Gamma</p>
			</div>
			`
		);
	}
}); | javascript | github | https://github.com/sveltejs/svelte | packages/svelte/tests/runtime-legacy/samples/binding-input-group-each-3/_config.js |
"""
Check that all changes to Wagtail models have had migrations created. If there
are outstanding model changes that need migrations, fail the tests.
"""
from django.utils.six import iteritems
from django.test import TransactionTestCase
from django.apps import apps
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.state import ProjectState
from django.db.migrations.questioner import MigrationQuestioner
class TestForMigrations(TransactionTestCase):
    """Fail the test run if any Wagtail app has model changes that are not
    covered by a migration, or has conflicting migrations."""

    def test__migrations(self):
        # Only inspect apps that belong to Wagtail itself.
        app_labels = set(app.label for app in apps.get_app_configs()
                         if app.name.startswith('wagtail.'))
        # Raises LookupError early if any label cannot be resolved.
        for app_label in app_labels:
            apps.get_app_config(app_label.split('.')[-1])
        loader = MigrationLoader(None, ignore_no_migrations=True)
        # `dict.items()` works on both Python 2 and 3; the previous
        # `django.utils.six.iteritems` helper is deprecated/removed in
        # modern Django.
        conflicts = dict(
            (app_label, conflict)
            for app_label, conflict in loader.detect_conflicts().items()
            if app_label in app_labels
        )
        if conflicts:
            name_str = "; ".join("%s in %s" % (", ".join(names), app)
                                 for app, names in conflicts.items())
            self.fail("Conflicting migrations detected (%s)." % name_str)
        autodetector = MigrationAutodetector(
            loader.project_state(),
            ProjectState.from_apps(apps),
            MigrationQuestioner(specified_apps=app_labels, dry_run=True),
        )
        changes = autodetector.changes(
            graph=loader.graph,
            trim_to_apps=app_labels or None,
            convert_apps=app_labels or None,
        )
        if changes:
            # Render each pending migration and its operations for the
            # failure message.
            migrations = '\n'.join((
                '  {migration}\n{changes}'.format(
                    migration=migration,
                    changes='\n'.join('    {0}'.format(operation.describe())
                                      for operation in migration.operations))
                for (_, migrations) in changes.items()
                for migration in migrations))
            self.fail('Model changes with no migrations detected:\n%s' % migrations)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
network ping sensor.
'''
from ping import do_one as ping
from whmonit.client.sensors import TaskSensorBase
from whmonit.common.units import unit_reg
class Sensor(TaskSensorBase):
    '''Generic 'ping' sensor.'''
    # W0232: Class has no __init__ method
    # R0201: Method could be a function
    # R0903: Too few public methods
    # pylint: disable=W0232,R0201,R0903
    name = 'ping'
    streams = {
        'default': {
            'type': float,
            'description':
                'Time from sending message to destination host '
                'to receiving acknowledgment.',
            'unit': str(unit_reg.second)
        }
    }
    config_schema = {
        '$schema': 'http://json-schema.org/schema#',
        'type': 'object',
        'properties': {'host': {'type': 'string'}},
        'required': ['host'],
        'additionalProperties': False
    }

    def do_run(self):
        '''Return the round-trip time to ``host``, or no datapoint on failure.

        Pings ``self.config['host']`` with a 5 second timeout and emits the
        delay on the ``default`` stream.  Returns an empty tuple when the host
        is unreachable, the ping times out, or a network error occurs.
        '''
        try:
            delay = ping(self.config['host'], 5)
        except Exception:  # network/socket errors -> no datapoint
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return ()
        if delay is None:
            # `ping` signals a timeout by returning None; previously this was
            # handled implicitly via float(None) raising TypeError.
            return ()
        return (("default", float(delay)), )
# CI workflow: builds Ruby inside an Ubuntu WSL distribution on a Windows runner.
name: Ubuntu on WSL
on:
  push:
    paths-ignore:
      - 'doc/**'
      - '**/man/*'
      - '**.md'
      - '**.rdoc'
      - '**/.document'
      - '.*.yml'
  pull_request:
    # Do not use paths-ignore for required status checks
    # https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/troubleshooting-required-status-checks#handling-skipped-but-required-checks
  merge_group:
jobs:
  wsl:
    runs-on: windows-2025
    # Skip documentation-only changes.
    # NOTE(review): the last clause combines a 'push' event with
    # github.event.pull_request, which is empty on push events, so it appears
    # to never match — confirm whether 'pull_request' was intended.
    if: >-
      ${{!(false
      || contains(github.event.head_commit.message, '[DOC]')
      || contains(github.event.pull_request.title, '[DOC]')
      || contains(github.event.pull_request.labels.*.name, 'Documentation')
      || (github.event_name == 'push' && github.event.pull_request.user.login == 'dependabot[bot]')
      )}}
    steps:
      - name: Install or update WSL
        uses: Ubuntu/WSL/.github/actions/wsl-install@main
        with:
          distro: Ubuntu-24.04
      - name: Install dependencies
        uses: Ubuntu/WSL/.github/actions/wsl-bash@main
        with:
          distro: Ubuntu-24.04
          working-dir: /tmp/github/
          exec: |
            DEBIAN_FRONTEND=noninteractive sudo apt update
            DEBIAN_FRONTEND=noninteractive sudo apt install -y ruby build-essential autoconf libssl-dev libyaml-dev zlib1g-dev libgmp-dev libffi-dev
      - name: Check out the repository
        uses: Ubuntu/WSL/.github/actions/wsl-checkout@main
        with:
          distro: Ubuntu-24.04
          working-dir: /tmp/github/
          submodules: true
      - name: Build
        uses: Ubuntu/WSL/.github/actions/wsl-bash@main
        with:
          distro: Ubuntu-24.04
          working-dir: /tmp/github/
          exec: |
            ./autogen.sh
            ./configure --disable-install-doc
            make ruby -j4
            make extract-gems
            make -j4
      # Only a smoke check runs; the full test suite is disabled inside the
      # script below (the 'make check' line is commented out).
      - name: Test
        uses: Ubuntu/WSL/.github/actions/wsl-bash@main
        with:
          distro: Ubuntu-24.04
          working-dir: /tmp/github/
          exec: |
            ./ruby -v
            # make check TESTS="-j4" MSPECOPT="-j"
import copy
import sys
import subprocess
from subprocess import PIPE
import requests
# Requests is built ontop of urllib3,
# here we prevent general request logging
import logging
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
from pprint import pprint
class FrameworkTestType:
'''
Interface between a test type (json, query, plaintext, etc) and
the rest of TFB. A test type defines a number of keys it expects
to find in the benchmark_config.json, and this base class handles extracting
those keys and injecting them into the test. For example, if
benchmark_config.json contains a line `"spam" : "foobar"` and a subclasses X
passes an argument list of ['spam'], then after parsing there will
exist a member `X.spam = 'foobar'`.
'''
def __init__(self, name, requires_db=False, accept_header=None, args=[]):
self.name = name
self.requires_db = requires_db
self.args = args
self.out = sys.stdout
self.err = sys.stderr
if accept_header is None:
self.accept_header = self.accept('json')
else:
self.accept_header = accept_header
self.passed = None
self.failed = None
self.warned = None
def accept(self, content_type):
return {
'json': 'application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7',
'html': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'plaintext': 'text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7'
}[content_type]
def setup_out(self, out):
'''
Sets up file-like objects for logging. Used in
cases where it is hard just return the output. Any
output sent to these file objects is also printed to
the console
NOTE: I detest this. It would be much better to use
logging like it's intended
'''
self.out = out
def parse(self, test_keys):
'''
Takes the dict of key/value pairs describing a FrameworkTest
and collects all variables needed by this FrameworkTestType
Raises AttributeError if required keys are missing
'''
if all(arg in test_keys for arg in self.args):
self.__dict__.update({arg: test_keys[arg] for arg in self.args})
return self
else: # This is quite common - most tests don't support all types
raise AttributeError(
"A %s requires the benchmark_config.json to contain %s" % (self.name, self.args))
def request_headers_and_body(self, url):
'''
Downloads a URL and returns the HTTP response headers
and body content as a tuple
'''
print "Accessing URL %s:" % url
self.out.write("Accessing URL %s \n" % url)
headers = {'Accept': self.accept_header}
r = requests.get(url, timeout=15, headers=headers)
headers = r.headers
body = r.content
self.out.write(str(headers))
self.out.write(body)
b = 40
print " Response (trimmed to %d bytes): \"%s\"" % (b, body.strip()[:b])
return headers, body
def verify(self, base_url):
'''
Accesses URL used by this test type and checks the return
values for correctness. Most test types run multiple checks,
so this returns a list of results. Each result is a 3-tuple
of (String result, String reason, String urlTested).
- result : 'pass','warn','fail'
- reason : Short human-readable reason if result was
warn or fail. Please do not print the response as part of this,
other parts of TFB will do that based upon the current logging
settings if this method indicates a failure happened
- urlTested: The exact URL that was queried
Subclasses should make a best-effort attempt to report as many
failures and warnings as they can to help users avoid needing
to run TFB repeatedly while debugging
'''
# TODO make String result into an enum to enforce
raise NotImplementedError("Subclasses must provide verify")
def get_url(self):
'''Returns the URL for this test, like '/json'''
# This is a method because each test type uses a different key
# for their URL so the base class can't know which arg is the URL
raise NotImplementedError("Subclasses must provide get_url")
def copy(self):
'''
Returns a copy that can be safely modified.
Use before calling parse
'''
return copy.copy(self) | unknown | codeparrot/codeparrot-clean | ||
////////////////////////////////////////////////////////////////////////////
//
// Copyright 2014 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
#import <Realm/RLMProperty.h>
#import <objc/runtime.h>
@class RLMObjectBase;
RLM_HEADER_AUDIT_BEGIN(nullability)
BOOL RLMPropertyTypeIsComputed(RLMPropertyType propertyType);
FOUNDATION_EXTERN void RLMValidateSwiftPropertyName(NSString *name);
// Translate an rlmtype to a string representation
// (the human-readable, lowercase name for each RLMPropertyType case).
static inline NSString *RLMTypeToString(RLMPropertyType type) {
    switch (type) {
        case RLMPropertyTypeString:
            return @"string";
        case RLMPropertyTypeInt:
            return @"int";
        case RLMPropertyTypeBool:
            return @"bool";
        case RLMPropertyTypeDate:
            return @"date";
        case RLMPropertyTypeData:
            return @"data";
        case RLMPropertyTypeDouble:
            return @"double";
        case RLMPropertyTypeFloat:
            return @"float";
        case RLMPropertyTypeAny:
            return @"mixed";
        case RLMPropertyTypeObject:
            return @"object";
        case RLMPropertyTypeLinkingObjects:
            return @"linking objects";
        case RLMPropertyTypeDecimal128:
            return @"decimal128";
        case RLMPropertyTypeObjectId:
            return @"object id";
        case RLMPropertyTypeUUID:
            return @"uuid";
    }
    // Defensive fallback: unreachable while the switch above covers every
    // enum case, but guards against out-of-range values.
    return @"Unknown";
}
// private property interface
@interface RLMProperty () {
@public
    // Backing ivar for `type`; declared @public so it can be read directly
    // as an ivar without going through the property accessor.
    RLMPropertyType _type;
}
- (instancetype)initWithName:(NSString *)name
                     indexed:(BOOL)indexed
      linkPropertyDescriptor:(nullable RLMPropertyDescriptor *)linkPropertyDescriptor
                    property:(objc_property_t)property;
- (instancetype)initSwiftPropertyWithName:(NSString *)name
                                  indexed:(BOOL)indexed
                   linkPropertyDescriptor:(nullable RLMPropertyDescriptor *)linkPropertyDescriptor
                                 property:(objc_property_t)property
                                 instance:(RLMObjectBase *)objectInstance;
// NOTE(review): presumably refreshes the cached getter/setter selectors
// declared below from the current getter/setter names — confirm against
// the implementation.
- (void)updateAccessors;
// private setters
@property (nonatomic, readwrite) NSString *name;
@property (nonatomic, readwrite, assign) RLMPropertyType type;
@property (nonatomic, readwrite) BOOL indexed;
@property (nonatomic, readwrite) BOOL optional;
@property (nonatomic, readwrite) BOOL array;
@property (nonatomic, readwrite) BOOL set;
@property (nonatomic, readwrite) BOOL dictionary;
@property (nonatomic, copy, nullable) NSString *objectClassName;
@property (nonatomic, copy, nullable) NSString *linkOriginPropertyName;
// private properties
@property (nonatomic, readwrite, nullable) NSString *columnName;
@property (nonatomic, assign) NSUInteger index;
@property (nonatomic, assign) BOOL isPrimary;
@property (nonatomic, assign) BOOL isLegacy;
@property (nonatomic, assign) ptrdiff_t swiftIvar;
@property (nonatomic, assign, nullable) Class swiftAccessor;
@property (nonatomic, readwrite, assign) RLMPropertyType dictionaryKeyType;
@property (nonatomic, readwrite) BOOL customMappingIsOptional;
// getter and setter names
@property (nonatomic, copy) NSString *getterName;
@property (nonatomic, copy) NSString *setterName;
@property (nonatomic, nullable) SEL getterSel;
@property (nonatomic, nullable) SEL setterSel;
- (RLMProperty *)copyWithNewName:(NSString *)name;
- (NSString *)typeName;
@end
@interface RLMProperty (Dynamic)
/**
 This method is useful only in specialized circumstances, for example, in conjunction with
 +[RLMObjectSchema initWithClassName:objectClass:properties:]. If you are simply building an
 app on Realm, it is not recommended to use this method.
 Initialize an RLMProperty
 @warning This method is useful only in specialized circumstances.
 @param name The property name.
 @param type The property type.
 @param objectClassName The object type used for Object and Array types.
 @param linkOriginPropertyName The property name of the origin of a link. Used for linking objects properties.
 @param indexed Whether an index should be created for this property.
 @param optional Whether the property value may be absent (nil).
 @return An initialized instance of RLMProperty.
 */
- (instancetype)initWithName:(NSString *)name
                        type:(RLMPropertyType)type
             objectClassName:(nullable NSString *)objectClassName
      linkOriginPropertyName:(nullable NSString *)linkOriginPropertyName
                     indexed:(BOOL)indexed
                    optional:(BOOL)optional;
@end
RLM_HEADER_AUDIT_END(nullability)
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
string_types = basestring,
text_type = unicode
from types import FileType as file_type
import __builtin__ as builtins
import ConfigParser as configparser
from ._backport import shutil
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
pathname2url, ContentTooShortError, splittype)
    def quote(s):
        # Python 2's urllib.quote cannot handle unicode directly; encode to
        # UTF-8 bytes before delegating to the real quote (_quote).
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        return _quote(s)
import urllib2
from urllib2 import (Request, urlopen, URLError, HTTPError,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPSHandler, HTTPHandler, HTTPRedirectHandler,
build_opener)
import httplib
import xmlrpclib
import Queue as queue
from HTMLParser import HTMLParser
import htmlentitydefs
raw_input = raw_input
from itertools import ifilter as filter
from itertools import ifilterfalse as filterfalse
    # Lazily-compiled pattern cache for splituser().
    _userprog = None
    def splituser(host):
        """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
        # Compile the pattern on first use only.
        global _userprog
        if _userprog is None:
            import re
            _userprog = re.compile('^(.*)@(.*)$')
        match = _userprog.match(host)
        if match: return match.group(1, 2)
        # No '@' present: there is no user-info component.
        return None, host
else:
from io import StringIO
string_types = str,
text_type = str
from io import TextIOWrapper as file_type
import builtins
import configparser
import shutil
from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
unquote, urlsplit, urlunsplit, splittype)
from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
pathname2url,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPSHandler, HTTPHandler, HTTPRedirectHandler,
build_opener)
from urllib.error import HTTPError, URLError, ContentTooShortError
import http.client as httplib
import urllib.request as urllib2
import xmlrpc.client as xmlrpclib
import queue
from html.parser import HTMLParser
import html.entities as htmlentitydefs
raw_input = input
from itertools import filterfalse
filter = filter
try:
from ssl import match_hostname, CertificateError
except ImportError:
    # Backport of ssl.CertificateError and RFC 6125 hostname matching for
    # Pythons whose ssl module lacks match_hostname.
    class CertificateError(ValueError):
        pass
    def _dnsname_match(dn, hostname, max_wildcards=1):
        """Matching according to RFC 6125, section 6.4.3
        http://tools.ietf.org/html/rfc6125#section-6.4.3
        Returns a truthy regex match when *dn* matches *hostname*,
        False/None otherwise.
        """
        pats = []
        if not dn:
            return False
        parts = dn.split('.')
        leftmost, remainder = parts[0], parts[1:]
        wildcards = leftmost.count('*')
        if wildcards > max_wildcards:
            # Issue #17980: avoid denials of service by refusing more
            # than one wildcard per fragment. A survey of established
            # policy among SSL implementations showed it to be a
            # reasonable choice.
            raise CertificateError(
                "too many wildcards in certificate DNS name: " + repr(dn))
        # speed up common case w/o wildcards
        if not wildcards:
            return dn.lower() == hostname.lower()
        # RFC 6125, section 6.4.3, subitem 1.
        # The client SHOULD NOT attempt to match a presented identifier in which
        # the wildcard character comprises a label other than the left-most label.
        if leftmost == '*':
            # When '*' is a fragment by itself, it matches a non-empty dotless
            # fragment.
            pats.append('[^.]+')
        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
            # RFC 6125, section 6.4.3, subitem 3.
            # The client SHOULD NOT attempt to match a presented identifier
            # where the wildcard character is embedded within an A-label or
            # U-label of an internationalized domain name.
            pats.append(re.escape(leftmost))
        else:
            # Otherwise, '*' matches any dotless string, e.g. www*
            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
        # add the remaining fragments, ignore any wildcards
        for frag in remainder:
            pats.append(re.escape(frag))
        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
        return pat.match(hostname)
    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
        rules are followed, but IP addresses are not accepted for *hostname*.
        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate, match_hostname needs a "
                             "SSL socket or SSL context with either "
                             "CERT_OPTIONAL or CERT_REQUIRED")
        dnsnames = []
        # dNSName entries in subjectAltName take precedence; commonName is
        # only consulted below when no dNSName entry exists at all.
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_match(value, hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_match(value, hostname):
                            return
                        dnsnames.append(value)
        # Nothing matched: report every name that was tried.
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r "
                                   "doesn't match either of %s"
                                   % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r "
                                   "doesn't match %r"
                                   % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or "
                                   "subjectAltName fields were found")
try:
    from types import SimpleNamespace as Container
except ImportError:
    class Container(object):
        """
        A simple attribute container: every keyword argument passed to the
        constructor becomes an attribute on the instance (a stand-in for
        types.SimpleNamespace on older Pythons).
        """
        def __init__(self, **kwargs):
            for attr, value in kwargs.items():
                setattr(self, attr, value)
try:
from shutil import which
except ImportError:
    # Implementation from Python 3.3
    def which(cmd, mode=os.F_OK | os.X_OK, path=None):
        """Given a command, mode, and a PATH string, return the path which
        conforms to the given mode on the PATH, or None if there is no such
        file.
        `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
        of os.environ.get("PATH"), or can be overridden with a custom search
        path.
        """
        # Check that a given file can be accessed with the correct mode.
        # Additionally check that `file` is not a directory, as on Windows
        # directories pass the os.access check.
        def _access_check(fn, mode):
            return (os.path.exists(fn) and os.access(fn, mode)
                    and not os.path.isdir(fn))
        # If we're given a path with a directory part, look it up directly rather
        # than referring to PATH directories. This includes checking relative to the
        # current directory, e.g. ./script
        if os.path.dirname(cmd):
            if _access_check(cmd, mode):
                return cmd
            return None
        if path is None:
            path = os.environ.get("PATH", os.defpath)
        if not path:
            return None
        path = path.split(os.pathsep)
        if sys.platform == "win32":
            # The current directory takes precedence on Windows.
            if not os.curdir in path:
                path.insert(0, os.curdir)
            # PATHEXT is necessary to check on Windows.
            pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
            # See if the given file matches any of the expected path extensions.
            # This will allow us to short circuit when given "python.exe".
            # If it does match, only test that one, otherwise we have to try
            # others.
            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
                files = [cmd]
            else:
                files = [cmd + ext for ext in pathext]
        else:
            # On other platforms you don't have things like PATHEXT to tell you
            # what file suffixes are executable, so just pass on cmd as-is.
            files = [cmd]
        # `seen` skips duplicate PATH entries (compared case-normalised).
        seen = set()
        for dir in path:
            normdir = os.path.normcase(dir)
            if not normdir in seen:
                seen.add(normdir)
                for thefile in files:
                    name = os.path.join(dir, thefile)
                    if _access_check(name, mode):
                        return name
        return None
# ZipFile is a context manager in 2.7, but not in 2.6
from zipfile import ZipFile as BaseZipFile
if hasattr(BaseZipFile, '__enter__'):
    ZipFile = BaseZipFile
else:
    from zipfile import ZipExtFile as BaseZipExtFile
    class ZipExtFile(BaseZipExtFile):
        # Wraps a member-file object so it can be used as a context manager.
        def __init__(self, base):
            # Steal the wrapped object's state wholesale.
            self.__dict__.update(base.__dict__)
        def __enter__(self):
            return self
        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate
    class ZipFile(BaseZipFile):
        # Adds __enter__/__exit__ and makes open() return wrapped members
        # that are themselves context managers.
        def __enter__(self):
            return self
        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate
        def open(self, *args, **kwargs):
            base = BaseZipFile.open(self, *args, **kwargs)
            return ZipExtFile(base)
try:
    from platform import python_implementation
except ImportError: # pragma: no cover
    def python_implementation():
        """Return a string identifying the Python implementation."""
        version = sys.version
        if 'PyPy' in version:
            return 'PyPy'
        elif os.name == 'java':
            return 'Jython'
        elif version.startswith('IronPython'):
            return 'IronPython'
        else:
            return 'CPython'
try:
import sysconfig
except ImportError: # pragma: no cover
from ._backport import sysconfig
# callable() was removed in Python 3.0/3.1 (restored in 3.2); emulate it
# via the Callable ABC when the builtin is missing.
try:
    callable = callable
except NameError: # pragma: no cover
    from collections import Callable
    def callable(obj):
        return isinstance(obj, Callable)
# Backport of os.fsencode/os.fsdecode for Pythons that lack them.
try:
    fsencode = os.fsencode
    fsdecode = os.fsdecode
except AttributeError: # pragma: no cover
    _fsencoding = sys.getfilesystemencoding()
    if _fsencoding == 'mbcs':
        # 'mbcs' does not support the 'surrogateescape' error handler;
        # fall back to strict errors there.
        _fserrors = 'strict'
    else:
        _fserrors = 'surrogateescape'
    def fsencode(filename):
        # text -> bytes using the filesystem encoding; bytes pass through.
        if isinstance(filename, bytes):
            return filename
        elif isinstance(filename, text_type):
            return filename.encode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)
    def fsdecode(filename):
        # bytes -> text using the filesystem encoding; text passes through.
        if isinstance(filename, text_type):
            return filename
        elif isinstance(filename, bytes):
            return filename.decode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)
try:
    from tokenize import detect_encoding
except ImportError: # pragma: no cover
    from codecs import BOM_UTF8, lookup
    import re
    # Raw string: the original non-raw literal contained the invalid escape
    # sequences '\s' and '\w' (a SyntaxWarning on modern Pythons). The
    # compiled pattern is unchanged.
    cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
    def _get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only care about the first 12 characters.
        enc = orig_enc[:12].lower().replace("_", "-")
        if enc == "utf-8" or enc.startswith("utf-8-"):
            return "utf-8"
        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
            return "iso-8859-1"
        return orig_enc
    def detect_encoding(readline):
        """
        The detect_encoding() function is used to detect the encoding that should
        be used to decode a Python source file. It requires one argument, readline,
        in the same way as the tokenize() generator.
        It will call readline a maximum of twice, and return the encoding used
        (as a string) and a list of any lines (left as bytes) it has read in.
        It detects the encoding from the presence of a utf-8 bom or an encoding
        cookie as specified in pep-0263. If both a bom and a cookie are present,
        but disagree, a SyntaxError will be raised. If the encoding cookie is an
        invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
        'utf-8-sig' is returned.
        If no encoding is specified, then the default of 'utf-8' will be returned.
        """
        try:
            filename = readline.__self__.name
        except AttributeError:
            filename = None
        bom_found = False
        encoding = None
        default = 'utf-8'
        def read_or_stop():
            # One physical line, or b'' at EOF.
            try:
                return readline()
            except StopIteration:
                return b''
        def find_cookie(line):
            # Return the normalised cookie encoding in this line, or None.
            try:
                # Decode as UTF-8. Either the line is an encoding declaration,
                # in which case it should be pure ASCII, or it must be UTF-8
                # per default encoding.
                line_string = line.decode('utf-8')
            except UnicodeDecodeError:
                msg = "invalid or missing encoding declaration"
                if filename is not None:
                    msg = '{} for {!r}'.format(msg, filename)
                raise SyntaxError(msg)
            matches = cookie_re.findall(line_string)
            if not matches:
                return None
            encoding = _get_normal_name(matches[0])
            try:
                codec = lookup(encoding)
            except LookupError:
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = "unknown encoding: " + encoding
                else:
                    msg = "unknown encoding for {!r}: {}".format(filename,
                                                                 encoding)
                raise SyntaxError(msg)
            if bom_found:
                if codec.name != 'utf-8':
                    # This behaviour mimics the Python interpreter
                    if filename is None:
                        msg = 'encoding problem: utf-8'
                    else:
                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
                    raise SyntaxError(msg)
                encoding += '-sig'
            return encoding
        first = read_or_stop()
        if first.startswith(BOM_UTF8):
            bom_found = True
            first = first[3:]
            default = 'utf-8-sig'
        if not first:
            return default, []
        encoding = find_cookie(first)
        if encoding:
            return encoding, [first]
        # PEP 263 allows the cookie on the second line as well.
        second = read_or_stop()
        if not second:
            return default, [first]
        encoding = find_cookie(second)
        if encoding:
            return encoding, [first, second]
        return default, [first, second]
# For converting &amp; <-> & etc. (HTML entity escaping/unescaping).
try:
    from html import escape
except ImportError:
    # html.escape is new in 3.2; cgi.escape is its predecessor
    # (note it does not quote '"' unless asked).
    from cgi import escape
if sys.version_info[:2] < (3, 4):
    # html.unescape was added in 3.4; HTMLParser.unescape is the precursor.
    unescape = HTMLParser().unescape
else:
    from html import unescape
try:
    from collections import ChainMap
except ImportError: # pragma: no cover
    from collections import MutableMapping

    try:
        from reprlib import recursive_repr as _recursive_repr
    except ImportError:
        def _recursive_repr(fillvalue='...'):
            '''
            Decorator to make a repr function return fillvalue for a recursive
            call
            '''
            # Fix: the original referenced an undefined ``get_ident`` name
            # here, raising NameError on the first recursive repr. Import the
            # thread-id helper for whichever Python is running.
            try:
                from thread import get_ident          # Python 2
            except ImportError:
                from threading import get_ident       # Python 3

            def decorating_function(user_function):
                repr_running = set()

                def wrapper(self):
                    # Key on (object, thread) so concurrent reprs don't
                    # interfere with each other.
                    key = id(self), get_ident()
                    if key in repr_running:
                        return fillvalue
                    repr_running.add(key)
                    try:
                        result = user_function(self)
                    finally:
                        repr_running.discard(key)
                    return result

                # Can't use functools.wraps() here because of bootstrap issues
                wrapper.__module__ = getattr(user_function, '__module__')
                wrapper.__doc__ = getattr(user_function, '__doc__')
                wrapper.__name__ = getattr(user_function, '__name__')
                wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
                return wrapper

            return decorating_function

    class ChainMap(MutableMapping):
        ''' A ChainMap groups multiple dicts (or other mappings) together
        to create a single, updateable view.

        The underlying mappings are stored in a list. That list is public and can
        accessed or updated using the *maps* attribute. There is no other state.

        Lookups search the underlying mappings successively until a key is found.
        In contrast, writes, updates, and deletions only operate on the first
        mapping.

        NOTE: unlike the stdlib ChainMap, iteration order of this backport is
        undefined because __iter__/__len__ are backed by a set union.
        '''

        def __init__(self, *maps):
            '''Initialize a ChainMap by setting *maps* to the given mappings.
            If no mappings are provided, a single empty dictionary is used.
            '''
            self.maps = list(maps) or [{}] # always at least one map

        def __missing__(self, key):
            raise KeyError(key)

        def __getitem__(self, key):
            for mapping in self.maps:
                try:
                    return mapping[key] # can't use 'key in mapping' with defaultdict
                except KeyError:
                    pass
            return self.__missing__(key) # support subclasses that define __missing__

        def get(self, key, default=None):
            return self[key] if key in self else default

        def __len__(self):
            return len(set().union(*self.maps)) # reuses stored hash values if possible

        def __iter__(self):
            return iter(set().union(*self.maps))

        def __contains__(self, key):
            return any(key in m for m in self.maps)

        def __bool__(self):
            return any(self.maps)

        @_recursive_repr()
        def __repr__(self):
            return '{0.__class__.__name__}({1})'.format(
                self, ', '.join(map(repr, self.maps)))

        @classmethod
        def fromkeys(cls, iterable, *args):
            'Create a ChainMap with a single dict created from the iterable.'
            return cls(dict.fromkeys(iterable, *args))

        def copy(self):
            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
            return self.__class__(self.maps[0].copy(), *self.maps[1:])

        __copy__ = copy

        def new_child(self): # like Django's Context.push()
            'New ChainMap with a new dict followed by all previous maps.'
            return self.__class__({}, *self.maps)

        @property
        def parents(self): # like Django's Context.pop()
            'New ChainMap from maps[1:].'
            return self.__class__(*self.maps[1:])

        def __setitem__(self, key, value):
            self.maps[0][key] = value

        def __delitem__(self, key):
            try:
                del self.maps[0][key]
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def popitem(self):
            'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
            try:
                return self.maps[0].popitem()
            except KeyError:
                raise KeyError('No keys found in the first mapping.')

        def pop(self, key, *args):
            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
            try:
                return self.maps[0].pop(key, *args)
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def clear(self):
            'Clear maps[0], leaving maps[1:] intact.'
            self.maps[0].clear()
try:
    from imp import cache_from_source
except ImportError: # pragma: no cover
    def cache_from_source(path, debug_override=None):
        # Pre-PEP-3147 bytecode naming: 'mod.py' -> 'mod.pyc', or 'mod.pyo'
        # when debug is off. `debug_override` defaults to __debug__.
        assert path.endswith('.py')
        debug = __debug__ if debug_override is None else debug_override
        return path + ('c' if debug else 'o')
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
## {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
    # Sentinel used by pop() to detect whether a default was supplied.
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        # No such key: only raise when the caller did not pass a default.
        if default is self.__marker:
            raise KeyError(key)
        return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
    def __repr__(self, _repr_running=None):
        'od.__repr__() <==> repr(od)'
        # _repr_running records (object id, thread id) pairs for repr calls
        # already in progress, so self-referential dicts print '...' instead
        # of recursing forever.
        if not _repr_running: _repr_running = {}
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            # Always clear the in-progress marker, even if items() raises.
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        # Copy the instance dict, then drop the attributes every OrderedDict
        # has (its internal bookkeeping), so only extra attributes added by
        # subclasses survive pickling.
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        # Rebuild via the constructor so subclasses return their own type.
        return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        # Comparing the items() lists (not just the contents) is what makes
        # the comparison order-sensitive between two OrderedDicts.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        # NOTE(review): KeysView is assumed to be imported elsewhere in this
        # module (collections ABC views) -- confirm against the file header.
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        # NOTE(review): ValuesView is assumed to be imported elsewhere in
        # this module -- confirm against the file header.
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        # NOTE(review): ItemsView is assumed to be imported elsewhere in
        # this module -- confirm against the file header.
        return ItemsView(self)
try:
    from logging.config import BaseConfigurator, valid_ident
except ImportError: # pragma: no cover
    # Fallback for Pythons whose logging.config does not expose these names.
    IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
    def valid_ident(s):
        # Accept strings shaped like an ASCII Python identifier; raise on
        # anything else (mirrors logging.config.valid_ident).
        m = IDENTIFIER.match(s)
        if not m:
            raise ValueError('Not a valid Python identifier: %r' % s)
        return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper."""
    # ``configurator`` is assigned externally (see BaseConfigurator.__init__)
    # and supplies the convert() method used by every accessor below.
    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, key, default=None):
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        # Unlike __getitem__/get there is no write-back here: the key has
        # already been removed from the dict.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper."""
    # ``configurator`` is assigned externally and supplies convert().
    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        # No write-back and no ``key`` attribute: the element has been
        # removed, so it no longer has a stable index in this list.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper.

    Elements are converted lazily on access via the externally assigned
    ``configurator``; tuples are immutable, so converted results are not
    written back.
    """
    def __getitem__(self, key):
        raw = tuple.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if converted is not raw:
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                # Track provenance so nested containers can locate their root.
                converted.parent = self
                converted.key = key
        return converted
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """
    # Matches e.g. 'ext://sys.stderr' -> prefix 'ext', suffix 'sys.stderr'.
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    # Tokens for the mini-language used by cfg_convert: a leading word,
    # '.attr' accessors, and '[index]' accessors.
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')
    # Maps conversion prefixes to the method names that handle them.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }
    # We might want to use a different one, e.g. importlib
    importer = staticmethod(__import__)
    def __init__(self, config):
        # Wrap the raw config dict so nested lookups are converted lazily.
        self.config = ConvertingDict(config)
        self.config.configurator = self
    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # Not an attribute yet -- it may be a submodule that has
                    # not been imported; import it and retry the getattr.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # Re-raise as ValueError, preserving the original exception and
            # traceback for debugging.
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v
    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        # 'ext://a.b.c' simply resolves the dotted path to a Python object.
        return self.resolve(value)
    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        # Parses paths such as 'handlers.console[0].level': a leading word
        # naming a top-level config entry, followed by '.attr' and '[index]'
        # accessors.
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            # Walk the remaining accessors, narrowing ``d`` at each step.
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                # Container rejected an int key; retry as str.
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        # rest should be empty by the time the loop exits
        return d
    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        # Plain containers are wrapped so that their own lookups convert
        # lazily; already-wrapped containers are passed through untouched.
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
             isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, string_types):
            # Strings of the form 'prefix://suffix' are dispatched to the
            # converter registered for that prefix (see value_converters).
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value | unknown | codeparrot/codeparrot-clean | ||
package kotlinx.coroutines.flow
import kotlinx.atomicfu.*
import kotlinx.coroutines.*
import kotlinx.coroutines.channels.*
import kotlinx.coroutines.flow.internal.*
import kotlinx.coroutines.internal.*
import kotlin.coroutines.*
/**
* A [SharedFlow] that represents a read-only state with a single updatable data [value] that emits updates
* to the value to its collectors. A state flow is a _hot_ flow because its active instance exists independently
* of the presence of collectors. Its current value can be retrieved via the [value] property.
*
* **State flow never completes**. A call to [Flow.collect] on a state flow never completes normally, and
* neither does a coroutine started by the [Flow.launchIn] function. An active collector of a state flow is called a _subscriber_.
*
* A [mutable state flow][MutableStateFlow] is created using `MutableStateFlow(value)` constructor function with
* the initial value. The value of mutable state flow can be updated by setting its [value] property.
* Updates to the [value] are always [conflated][Flow.conflate]. So a slow collector skips fast updates,
* but always collects the most recently emitted value.
*
* [StateFlow] is useful as a data-model class to represent any kind of state.
* Derived values can be defined using various operators on the flows, with [combine] operator being especially
* useful to combine values from multiple state flows using arbitrary functions.
*
* For example, the following class encapsulates an integer state and increments its value on each call to `inc`:
*
* ```
* class CounterModel {
* private val _counter = MutableStateFlow(0) // private mutable state flow
* val counter = _counter.asStateFlow() // publicly exposed as read-only state flow
*
* fun inc() {
* _counter.update { count -> count + 1 } // atomic, safe for concurrent use
* }
* }
* ```
*
* Having two instances of the above `CounterModel` class one can define the sum of their counters like this:
*
* ```
* val aModel = CounterModel()
* val bModel = CounterModel()
* val sumFlow: Flow<Int> = aModel.counter.combine(bModel.counter) { a, b -> a + b }
* ```
*
* As an alternative to the above usage with the `MutableStateFlow(...)` constructor function,
* any _cold_ [Flow] can be converted to a state flow using the [stateIn] operator.
*
* ### Strong equality-based conflation
*
* Values in state flow are conflated using [Any.equals] comparison in a similar way to
* [distinctUntilChanged] operator. It is used to conflate incoming updates
* to [value][MutableStateFlow.value] in [MutableStateFlow] and to suppress emission of the values to collectors
* when new value is equal to the previously emitted one. State flow behavior with classes that violate
* the contract for [Any.equals] is unspecified.
*
* ### State flow is a shared flow
*
* State flow is a special-purpose, high-performance, and efficient implementation of [SharedFlow] for the narrow,
* but widely used case of sharing a state. See the [SharedFlow] documentation for the basic rules,
* constraints, and operators that are applicable to all shared flows.
*
* State flow always has an initial value, replays one most recent value to new subscribers, does not buffer any
* more values, but keeps the last emitted one, and does not support [resetReplayCache][MutableSharedFlow.resetReplayCache].
* A state flow behaves identically to a shared flow when it is created
* with the following parameters and the [distinctUntilChanged] operator is applied to it:
*
* ```
* // MutableStateFlow(initialValue) is a shared flow with the following parameters:
* val shared = MutableSharedFlow(
* replay = 1,
* onBufferOverflow = BufferOverflow.DROP_OLDEST
* )
* shared.tryEmit(initialValue) // emit the initial value
* val state = shared.distinctUntilChanged() // get StateFlow-like behavior
* ```
*
* Use [SharedFlow] when you need a [StateFlow] with tweaks in its behavior such as extra buffering, replaying more
* values, or omitting the initial value.
*
* ### StateFlow vs ConflatedBroadcastChannel
*
* Conceptually, state flow is similar to [ConflatedBroadcastChannel]
* and is designed to completely replace it.
* It has the following important differences:
*
* - `StateFlow` is simpler, because it does not have to implement all the [Channel] APIs, which allows
* for faster, garbage-free implementation, unlike `ConflatedBroadcastChannel` implementation that
* allocates objects on each emitted value.
* - `StateFlow` always has a value which can be safely read at any time via [value] property.
* Unlike `ConflatedBroadcastChannel`, there is no way to create a state flow without a value.
* - `StateFlow` has a clear separation into a read-only `StateFlow` interface and a [MutableStateFlow].
* - `StateFlow` conflation is based on equality like [distinctUntilChanged] operator,
* unlike conflation in `ConflatedBroadcastChannel` that is based on reference identity.
* - `StateFlow` cannot be closed like `ConflatedBroadcastChannel` and can never represent a failure.
* All errors and completion signals should be explicitly _materialized_ if needed.
*
* `StateFlow` is designed to better cover typical use-cases of keeping track of state changes in time, taking
* more pragmatic design choices for the sake of convenience.
*
* To migrate [ConflatedBroadcastChannel] usage to [StateFlow], start by replacing usages of the `ConflatedBroadcastChannel()`
* constructor with `MutableStateFlow(initialValue)`, using `null` as an initial value if you don't have one.
* Replace [send][ConflatedBroadcastChannel.send] and [trySend][ConflatedBroadcastChannel.trySend] calls
* with updates to the state flow's [MutableStateFlow.value], and convert subscribers' code to flow operators.
* You can use the [filterNotNull] operator to mimic behavior of a `ConflatedBroadcastChannel` without initial value.
*
* ### Concurrency
*
* All methods of state flow are **thread-safe** and can be safely invoked from concurrent coroutines without
* external synchronization.
*
* ### Operator fusion
*
* Application of [flowOn][Flow.flowOn], [conflate][Flow.conflate],
* [buffer] with [CONFLATED][Channel.CONFLATED] or [RENDEZVOUS][Channel.RENDEZVOUS] capacity,
* [distinctUntilChanged][Flow.distinctUntilChanged], or [cancellable] operators to a state flow has no effect.
*
* ### Implementation notes
*
* State flow implementation is optimized for memory consumption and allocation-freedom. It uses a lock to ensure
* thread-safety, but suspending collector coroutines are resumed outside of this lock to avoid dead-locks when
* using unconfined coroutines. Adding new subscribers has `O(1)` amortized cost, but updating a [value] has `O(N)`
* cost, where `N` is the number of active subscribers.
*
* ### Not stable for inheritance
*
 * **The `StateFlow` interface is not stable for inheritance in 3rd party libraries**, as new methods
* might be added to this interface in the future, but is stable for use.
* Use the `MutableStateFlow(value)` constructor function to create an implementation.
*/
@OptIn(ExperimentalSubclassOptIn::class)
@SubclassOptInRequired(ExperimentalForInheritanceCoroutinesApi::class)
public interface StateFlow<out T> : SharedFlow<T> {
    /**
     * The current value of this state flow.
     *
     * Reading this property never suspends; see the interface documentation
     * for the thread-safety guarantees.
     */
    public val value: T
}
/**
 * A mutable [StateFlow] that provides a setter for [value].
 * An instance of `MutableStateFlow` with the given initial `value` can be created using
 * `MutableStateFlow(value)` constructor function.
 * See the [StateFlow] documentation for details on state flows.
 * Note that all emission-related operators, such as [value]'s setter, [emit], and [tryEmit], are conflated using [Any.equals].
 *
 * ### Not stable for inheritance
 *
 * **The `MutableStateFlow` interface is not stable for inheritance in 3rd party libraries**, as new methods
 * might be added to this interface in the future, but is stable for use.
 * Use the `MutableStateFlow()` constructor function to create an implementation.
 */
@OptIn(ExperimentalSubclassOptIn::class)
@SubclassOptInRequired(ExperimentalForInheritanceCoroutinesApi::class)
public interface MutableStateFlow<T> : StateFlow<T>, MutableSharedFlow<T> {
    /**
     * The current value of this state flow.
     *
     * Setting a value that is [equal][Any.equals] to the previous one does nothing.
     *
     * This property is **thread-safe** and can be safely updated from concurrent coroutines without
     * external synchronization.
     */
    public override var value: T

    /**
     * Atomically compares the current [value] with [expect] and sets it to [update] if it is equal to [expect].
     * The result is `true` if the [value] was set to [update] and `false` otherwise.
     *
     * This function uses a regular comparison using [Any.equals]. If both [expect] and [update] are equal to the
     * current [value], this function returns `true`, but it does not actually change the reference that is
     * stored in the [value].
     *
     * This method is **thread-safe** and can be safely invoked from concurrent coroutines without
     * external synchronization.
     */
    public fun compareAndSet(expect: T, update: T): Boolean
}
/**
 * Creates a [MutableStateFlow] with the given initial [value].
 */
@Suppress("FunctionName")
public fun <T> MutableStateFlow(value: T): MutableStateFlow<T> = StateFlowImpl(value ?: NULL) // null is boxed as the NULL symbol for internal storage
// ------------------------------------ Update methods ------------------------------------
/**
 * Atomically transforms [MutableStateFlow.value] with [function] and returns the newly stored value.
 *
 * [function] may be evaluated more than once when [value] is being updated concurrently.
 */
public inline fun <T> MutableStateFlow<T>.updateAndGet(function: (T) -> T): T {
    // Classic CAS retry loop: recompute from the freshest value until the
    // compare-and-set succeeds.
    while (true) {
        val current = value
        val next = function(current)
        if (compareAndSet(current, next)) return next
    }
}
/**
 * Atomically transforms [MutableStateFlow.value] with [function] and returns the value that was
 * stored before the update.
 *
 * [function] may be evaluated more than once when [value] is being updated concurrently.
 */
public inline fun <T> MutableStateFlow<T>.getAndUpdate(function: (T) -> T): T {
    // CAS retry loop; on success the pre-update value is handed back.
    while (true) {
        val current = value
        val next = function(current)
        if (compareAndSet(current, next)) return current
    }
}
/**
 * Atomically transforms [MutableStateFlow.value] with [function].
 *
 * [function] may be evaluated more than once when [value] is being updated concurrently.
 */
public inline fun <T> MutableStateFlow<T>.update(function: (T) -> T) {
    // CAS retry loop; nothing is returned to the caller.
    while (true) {
        val current = value
        if (compareAndSet(current, function(current))) return
    }
}
// ------------------------------------ Implementation ------------------------------------
// Sentinel states for a collector slot (see StateFlowSlot._state below).
private val NONE = Symbol("NONE")
private val PENDING = Symbol("PENDING")

// StateFlow slots are allocated for its collectors
private class StateFlowSlot : AbstractSharedFlowSlot<StateFlowImpl<*>>() {
    /**
     * Each slot can have one of the following states:
     *
     * - `null` -- it is not used right now. Can [allocateLocked] to new collector.
     * - `NONE` -- used by a collector, but neither suspended nor has pending value.
     * - `PENDING` -- pending to process new value.
     * - `CancellableContinuationImpl<Unit>` -- suspended waiting for new value.
     *
     * It is important that default `null` value is used, because there can be a race between allocation
     * of a new slot and trying to do [makePending] on this slot.
     *
     * ===
     * This should be `atomic<Any?>(null)` instead of the atomic reference, but because of #3820
     * it is used as a **temporary** solution starting from 1.8.1 version.
     * Depending on the fix rollout on Android, it will be removed in 1.9.0 or 2.0.0.
     * See https://issuetracker.google.com/issues/325123736
     */
    private val _state = WorkaroundAtomicReference<Any?>(null)

    override fun allocateLocked(flow: StateFlowImpl<*>): Boolean {
        // No need for atomic check & update here, since allocated happens under StateFlow lock
        if (_state.value != null) return false // not free
        _state.value = NONE // allocated
        return true
    }

    override fun freeLocked(flow: StateFlowImpl<*>): Array<Continuation<Unit>?> {
        _state.value = null // free now
        return EMPTY_RESUMES // nothing more to do
    }

    // Signals this slot that a new value is available, resuming a suspended
    // collector if one is installed.
    @Suppress("UNCHECKED_CAST")
    fun makePending() {
        _state.loop { state ->
            when {
                state == null -> return // this slot is free - skip it
                state === PENDING -> return // already pending, nothing to do
                state === NONE -> { // mark as pending
                    if (_state.compareAndSet(state, PENDING)) return
                }
                else -> { // must be a suspend continuation state
                    // we must still use CAS here since continuation may get cancelled and free the slot at any time
                    if (_state.compareAndSet(state, NONE)) {
                        (state as CancellableContinuationImpl<Unit>).resume(Unit)
                        return
                    }
                }
            }
        }
    }

    // Returns true (and clears the flag) when a new value arrived since the
    // last check; used as the fast path before suspending in awaitPending.
    fun takePending(): Boolean = _state.getAndSet(NONE)!!.let { state ->
        assert { state !is CancellableContinuationImpl<*> }
        return state === PENDING
    }

    // Suspends until makePending() is invoked for this slot.
    suspend fun awaitPending(): Unit = suspendCancellableCoroutine sc@ { cont ->
        assert { _state.value !is CancellableContinuationImpl<*> } // can be NONE or PENDING
        if (_state.compareAndSet(NONE, cont)) return@sc // installed continuation, waiting for pending
        // CAS failed -- the only possible reason is that it is already in pending state now
        assert { _state.value === PENDING }
        cont.resume(Unit)
    }
}
@OptIn(ExperimentalForInheritanceCoroutinesApi::class)
private class StateFlowImpl<T>(
    initialState: Any // T | NULL
) : AbstractSharedFlow<StateFlowSlot>(), MutableStateFlow<T>, CancellableFlow<T>, FusibleFlow<T> {
    private val _state = atomic(initialState) // T | NULL
    private var sequence = 0 // serializes updates, value update is in process when sequence is odd

    public override var value: T
        get() = NULL.unbox(_state.value)
        set(value) { updateState(null, value ?: NULL) }

    override fun compareAndSet(expect: T, update: T): Boolean =
        updateState(expect ?: NULL, update ?: NULL)

    // expectedState == null means "unconditional set"; returns false only
    // when a compareAndSet expectation is not met.
    private fun updateState(expectedState: Any?, newState: Any): Boolean {
        var curSequence: Int
        var curSlots: Array<StateFlowSlot?>? // benign race, we will not use it
        synchronized(this) {
            val oldState = _state.value
            if (expectedState != null && oldState != expectedState) return false // CAS support
            if (oldState == newState) return true // Don't do anything if value is not changing, but CAS -> true
            _state.value = newState
            curSequence = sequence
            if (curSequence and 1 == 0) { // even sequence means quiescent state flow (no ongoing update)
                curSequence++ // make it odd
                sequence = curSequence
            } else {
                // update is already in process, notify it, and return
                sequence = curSequence + 2 // change sequence to notify, keep it odd
                return true // updated
            }
            curSlots = slots // read current reference to collectors under lock
        }
        /*
           Fire value updates outside of the lock to avoid deadlocks with unconfined coroutines.
           Loop until we're done firing all the changes. This is a sort of simple flat combining that
           ensures sequential firing of concurrent updates and avoids the storm of collector resumes
           when updates happen concurrently from many threads.
         */
        while (true) {
            // Benign race on element read from array
            curSlots?.forEach {
                it?.makePending()
            }
            // check if the value was updated again while we were updating the old one
            synchronized(this) {
                if (sequence == curSequence) { // nothing changed, we are done
                    sequence = curSequence + 1 // make sequence even again
                    return true // done, updated
                }
                // reread everything for the next loop under the lock
                curSequence = sequence
                curSlots = slots
            }
        }
    }

    override val replayCache: List<T>
        get() = listOf(value)

    override fun tryEmit(value: T): Boolean {
        // Emission to a state flow never fails: the value is conflated.
        this.value = value
        return true
    }

    override suspend fun emit(value: T) {
        this.value = value
    }

    @Suppress("UNCHECKED_CAST")
    override fun resetReplayCache() {
        throw UnsupportedOperationException("MutableStateFlow.resetReplayCache is not supported")
    }

    override suspend fun collect(collector: FlowCollector<T>): Nothing {
        val slot = allocateSlot()
        try {
            if (collector is SubscribedFlowCollector) collector.onSubscription()
            val collectorJob = currentCoroutineContext()[Job]
            var oldState: Any? = null // previously emitted T!! | NULL (null -- nothing emitted yet)
            // The loop is arranged so that it starts delivering current value without waiting first
            while (true) {
                // Here the coroutine could have waited for a while to be dispatched,
                // so we use the most recent state here to ensure the best possible conflation of stale values
                val newState = _state.value
                // always check for cancellation
                collectorJob?.ensureActive()
                // Conflate value emissions using equality
                if (oldState == null || oldState != newState) {
                    collector.emit(NULL.unbox(newState))
                    oldState = newState
                }
                // Note: if awaitPending is cancelled, then it bails out of this loop and calls freeSlot
                if (!slot.takePending()) { // try fast-path without suspending first
                    slot.awaitPending() // only suspend for new values when needed
                }
            }
        } finally {
            freeSlot(slot)
        }
    }

    override fun createSlot() = StateFlowSlot()
    override fun createSlotArray(size: Int): Array<StateFlowSlot?> = arrayOfNulls(size)

    override fun fuse(context: CoroutineContext, capacity: Int, onBufferOverflow: BufferOverflow) =
        fuseStateFlow(context, capacity, onBufferOverflow)
}
internal fun <T> StateFlow<T>.fuseStateFlow(
    context: CoroutineContext,
    capacity: Int,
    onBufferOverflow: BufferOverflow
): Flow<T> {
    // state flow is always conflated so additional conflation does not have any effect
    assert { capacity != Channel.CONFLATED } // should be desugared by callers
    // Fusing with a compatible small-buffer/DROP_OLDEST configuration is a
    // no-op: the state flow itself is returned unchanged.
    if ((capacity in 0..1 || capacity == Channel.BUFFERED) && onBufferOverflow == BufferOverflow.DROP_OLDEST) {
        return this
    }
    return fuseSharedFlow(context, capacity, onBufferOverflow)
}
//---------------------------------------------------------------------------//
// Copyright (c) 2013-2015 Kyle Lutz <kyle.r.lutz@gmail.com>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#ifndef BOOST_COMPUTE_IMAGE_HPP
#define BOOST_COMPUTE_IMAGE_HPP
/// \file
///
/// Meta-header to include all Boost.Compute image headers.
#include <boost/compute/image/image1d.hpp>
#include <boost/compute/image/image2d.hpp>
#include <boost/compute/image/image3d.hpp>
#include <boost/compute/image/image_format.hpp>
#include <boost/compute/image/image_object.hpp>
#include <boost/compute/image/image_sampler.hpp>
#endif // BOOST_COMPUTE_IMAGE_HPP | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/compute/image.hpp |
"""
Unit tests for stub XQueue implementation.
"""
import mock
import unittest
import json
import requests
from ..xqueue import StubXQueueService
class FakeTimer(object):
    """Test double for ``threading.Timer`` that fires synchronously.

    The ``delay`` argument is accepted for signature compatibility but is
    deliberately discarded; ``start()`` invokes the callback immediately.
    """
    def __init__(self, delay, func):
        # Only the callback is retained -- tests run synchronously.
        self.func = func

    def start(self):
        # Fire immediately instead of scheduling on a background thread.
        self.func()
class StubXQueueServiceTest(unittest.TestCase):
    def setUp(self):
        """Start a stub XQueue server and patch its async plumbing."""
        super(StubXQueueServiceTest, self).setUp()
        self.server = StubXQueueService()
        self.url = "http://127.0.0.1:{0}/xqueue/submit".format(self.server.port)
        self.addCleanup(self.server.shutdown)
        # Patch POST requests so tests can assert on the grade responses the
        # stub sends back.  (The two comments here were previously swapped.)
        patcher = mock.patch('terrain.stubs.xqueue.post')
        self.post = patcher.start()
        self.addCleanup(patcher.stop)
        # Patch the timer used for async callbacks so they fire immediately.
        patcher = mock.patch('terrain.stubs.xqueue.Timer')
        timer = patcher.start()
        timer.side_effect = FakeTimer
        self.addCleanup(patcher.stop)
    def test_grade_request(self):
        """An unconfigured stub grades submissions with its built-in default."""
        # Post a submission to the stub XQueue
        callback_url = 'http://127.0.0.1:8000/test_callback'
        expected_header = self._post_submission(
            callback_url, 'test_queuekey', 'test_queue',
            json.dumps({
                'student_info': 'test',
                'grader_payload': 'test',
                'student_response': 'test'
            })
        )
        # Check the response we receive
        # (Should be the default grading response)
        expected_body = json.dumps({'correct': True, 'score': 1, 'msg': '<div></div>'})
        self._check_grade_response(callback_url, expected_header, expected_body)
    def test_configure_default_response(self):
        """A configured 'default' response is used for any queue."""
        # Configure the default response for submissions to any queue
        response_content = {'test_response': 'test_content'}
        self.server.config['default'] = response_content
        # Post a submission to the stub XQueue
        callback_url = 'http://127.0.0.1:8000/test_callback'
        expected_header = self._post_submission(
            callback_url, 'test_queuekey', 'test_queue',
            json.dumps({
                'student_info': 'test',
                'grader_payload': 'test',
                'student_response': 'test'
            })
        )
        # Check the response we receive
        # (Should be the configured default grading response)
        self._check_grade_response(callback_url, expected_header, json.dumps(response_content))
    def test_configure_specific_response(self):
        """A response keyed on submission content overrides the default."""
        # Configure the XQueue stub response to any submission to the test queue
        response_content = {'test_response': 'test_content'}
        self.server.config['This is only a test.'] = response_content
        # Post a submission to the XQueue stub
        callback_url = 'http://127.0.0.1:8000/test_callback'
        expected_header = self._post_submission(
            callback_url, 'test_queuekey', 'test_queue',
            json.dumps({'submission': 'This is only a test.'})
        )
        # Check that we receive the response we configured
        self._check_grade_response(callback_url, expected_header, json.dumps(response_content))
    def test_multiple_response_matches(self):
        """Ambiguous configuration (two matching patterns) yields no response."""
        # Configure the XQueue stub with two responses that
        # match the same submission
        self.server.config['test_1'] = {'response': True}
        self.server.config['test_2'] = {'response': False}
        with mock.patch('terrain.stubs.http.LOGGER') as logger:
            # Post a submission to the XQueue stub
            callback_url = 'http://127.0.0.1:8000/test_callback'
            self._post_submission(
                callback_url, 'test_queuekey', 'test_queue',
                json.dumps({'submission': 'test_1 and test_2'})
            )
            # Expect that we do NOT receive a response
            # and that an error message is logged
            self.assertFalse(self.post.called)
            self.assertTrue(logger.error.called)
    def test_register_submission_url(self):
        """The stub notifies a registered URL about incoming submissions."""
        # Configure the XQueue stub to notify another service
        # when it receives a submission.
        register_url = 'http://127.0.0.1:8000/register_submission'
        self.server.config['register_submission_url'] = register_url
        callback_url = 'http://127.0.0.1:8000/test_callback'
        submission = json.dumps({'grader_payload': 'test payload'})
        self._post_submission(callback_url, 'test_queuekey', 'test_queue', submission)
        # Check that a notification was sent
        self.post.assert_any_call(register_url, data={'grader_payload': u'test payload'})
def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body):
"""
Post a submission to the stub XQueue implementation.
`callback_url` is the URL at which we expect to receive a grade response
`lms_key` is the authentication key sent in the header
`queue_name` is the name of the queue in which to send put the submission
`xqueue_body` is the content of the submission
Returns the header (a string) we send with the submission, which can
be used to validate the response we receive from the stub.
"""
# Post a submission to the XQueue stub
grade_request = {
'xqueue_header': json.dumps({
'lms_callback_url': callback_url,
'lms_key': 'test_queuekey',
'queue_name': 'test_queue'
}),
'xqueue_body': xqueue_body
}
resp = requests.post(self.url, data=grade_request)
# Expect that the response is success
self.assertEqual(resp.status_code, 200)
# Return back the header, so we can authenticate the response we receive
return grade_request['xqueue_header']
    def _check_grade_response(self, callback_url, expected_header, expected_body):
        """
        Verify that the stub sent a POST request back to us
        with the expected data.
        `callback_url` is the URL we expect the stub to POST to
        `expected_header` is the header (a string) we expect to receive with the grade.
        `expected_body` is the content (a string) we expect to receive with the grade.
        Raises an `AssertionError` if the check fails.
        """
        # Check the response posted back to us
        # This is the default response
        expected_callback_dict = {
            'xqueue_header': expected_header,
            'xqueue_body': expected_body,
        }
        # Check that the POST request was made with the correct params.
        # `self.post` is the mock patched over requests.post, so this
        # asserts on the most recent call only.
        self.post.assert_called_with(callback_url, data=expected_callback_dict)
//// [tests/cases/conformance/async/es2017/asyncArrowFunction/asyncArrowFunction3_es2017.ts] ////
//// [asyncArrowFunction3_es2017.ts]
// NOTE(review): this is a generated TypeScript compiler-baseline fixture.
// Using `await` as a parameter name/initializer is intentionally invalid
// (strict mode) -- do not "fix" this code; edits would invalidate the baseline.
function f(await = await) {
}
//// [asyncArrowFunction3_es2017.js]
"use strict";
function f(await = await) {
}
#!/usr/bin/env python
#
# b43 firmware file squasher
# Removes unnecessary firmware files
#
# Copyright (c) 2009 Michael Buesch <mb@bu3sch.de>
#
# Licensed under the GNU/GPL version 2 or (at your option) any later version.
#
import sys
import os
def usage():
    """Print command-line usage information to stdout."""
    lines = (
        "Usage: %s PHYTYPES COREREVS /path/to/extracted/firmware" % sys.argv[0],
        "",
        "PHYTYPES is a comma separated list of:",
        "A => A-PHY",
        "AG => Dual A-PHY G-PHY",
        "G => G-PHY",
        "LP => LP-PHY",
        "N => N-PHY",
        "",
        "COREREVS is a comma separated list of core revision numbers.",
    )
    for line in lines:
        print(line)
if len(sys.argv) != 4:
    usage()
    sys.exit(1)
phytypes = sys.argv[1]
corerevs = sys.argv[2]
fwpath = sys.argv[3]
phytypes = phytypes.split(',')
try:
    # Fix: under Python 3, map() returns a one-shot iterator which would be
    # exhausted after the first membership scan in revs_match(); build a
    # real list instead.
    corerevs = [int(r) for r in corerevs.split(',')]
except ValueError:
    print("ERROR: \"%s\" is not a valid COREREVS string\n" % corerevs)
    usage()
    sys.exit(1)
fwfiles = os.listdir(fwpath)
# Fix: under Python 3, filter() returns a lazy object that is always truthy
# (so the empty check below never fired) and can only be iterated once
# (the file list is scanned twice later). Materialize it as a list.
fwfiles = [f for f in fwfiles if f.endswith(".fw")]
if not fwfiles:
    print("ERROR: No firmware files found in %s" % fwpath)
    sys.exit(1)
required_fwfiles = []
def revs_match(revs_a, revs_b):
    """Return True if the two revision collections share any element."""
    return any(rev in revs_b for rev in revs_a)
def phytypes_match(types_a, types_b):
    """Return True if any entry of types_a, stripped and upper-cased,
    occurs in types_b (PHY-type names are matched case-insensitively)."""
    return any(entry.strip().upper() in types_b for entry in types_a)
revmapping = {
"ucode2.fw" : (2,3,),
"ucode4.fw" : (4,),
"ucode5.fw" : (5,6,7,8,9,10,),
"ucode11.fw" : (11,12,),
"ucode13.fw" : (13,),
"ucode14.fw" : (14,),
"ucode15.fw" : (15,),
"ucode16_mimo.fw" : (16,),
"pcm4.fw" : (1,2,3,4,),
"pcm5.fw" : (5,6,7,8,9,10,),
}
initvalmapping = {
"a0g1initvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
"a0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG",), ),
"b0g0initvals2.fw" : ( (2,4,), ("G",), ),
"b0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
"b0g0initvals13.fw" : ( (13,), ("G",), ),
"n0initvals11.fw" : ( (11,12,), ("N",), ),
"n0initvals16.fw" : ( (16,), ("N",), ),
"lp0initvals13.fw" : ( (13,), ("LP",), ),
"lp0initvals14.fw" : ( (14,), ("LP",), ),
"lp0initvals15.fw" : ( (15,), ("LP",), ),
"a0g1bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
"a0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG"), ),
"b0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
"n0bsinitvals11.fw" : ( (11,12,), ("N",), ),
"n0bsinitvals16.fw" : ( (16,), ("N",), ),
"lp0bsinitvals13.fw" : ( (13,), ("LP",), ),
"lp0bsinitvals14.fw" : ( (14,), ("LP",), ),
"lp0bsinitvals15.fw" : ( (15,), ("LP",), ),
}
# Materialize in case `fwfiles` is still a lazy iterator (Python 3 filter
# object): the collection is scanned twice below -- once to select the
# required files and once to delete everything else -- and a one-shot
# iterator would make the second loop a silent no-op.
fwfiles = list(fwfiles)
# Pass 1: decide which firmware files are needed for the requested
# core revisions and PHY types.
for f in fwfiles:
    if f in revmapping:
        if revs_match(corerevs, revmapping[f]):
            required_fwfiles += [f]
        continue
    if f in initvalmapping:
        if revs_match(corerevs, initvalmapping[f][0]) and\
           phytypes_match(phytypes, initvalmapping[f][1]):
            required_fwfiles += [f]
        continue
    print("WARNING: Firmware file %s not found in the mapping lists" % f)
# Pass 2: delete everything that was not selected above.
for f in fwfiles:
    if f not in required_fwfiles:
        print("Deleting %s" % f)
        os.unlink(fwpath + '/' + f)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weight broadcasting operations.
In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. This
file includes operations for those broadcasting rules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sets
def _has_valid_dims(weights_shape, values_shape):
  """Returns a bool Tensor: every weights dim is 1 or equals the values dim.

  Both arguments are 1-D int shape Tensors of equal length (the caller,
  `_has_valid_nonscalar_shape`, only invokes this when the ranks match).
  """
  with ops.name_scope(
      None, "has_invalid_dims", (weights_shape, values_shape)) as scope:
    # Build the set of allowed weight dims: for each axis, either the
    # values dim itself or 1.
    values_shape_2d = array_ops.expand_dims(values_shape, -1)
    valid_dims = array_ops.concat(
        (values_shape_2d, array_ops.ones_like(values_shape_2d)), axis=1)
    weights_shape_2d = array_ops.expand_dims(weights_shape, -1)
    # Any weight dim not in the allowed set is invalid.
    invalid_dims = sets.set_difference(weights_shape_2d, valid_dims)
    num_invalid_dims = array_ops.size(
        invalid_dims.values, name="num_invalid_dims")
    # Shape is valid iff no invalid dims were found. NOTE(review): the
    # name_scope string "has_invalid_dims" does not match the function
    # name "_has_valid_dims" -- confirm this is intentional.
    return math_ops.equal(0, num_invalid_dims, name=scope)
def _has_valid_nonscalar_shape(
    weights_rank, weights_shape, values_rank, values_shape):
  """Returns a bool Tensor: non-scalar weights shape broadcasts to values.

  True iff the ranks match AND every weights dim is 1 or equals the
  corresponding values dim (checked lazily via cond so the per-dim check
  only runs when the ranks are equal).
  """
  with ops.name_scope(
      None, "has_valid_nonscalar_shape",
      (weights_rank, weights_shape, values_rank, values_shape)) as scope:
    is_same_rank = math_ops.equal(
        values_rank, weights_rank, name="is_same_rank")
    # When ranks differ, short-circuit to False (is_same_rank itself).
    return control_flow_ops.cond(
        is_same_rank,
        lambda: _has_valid_dims(weights_shape, values_shape),
        lambda: is_same_rank,
        name=scope)
# Common prefix for both the static ValueError and the dynamic Assert data.
_ASSERT_BROADCASTABLE_ERROR_PREFIX = "weights can not be broadcast to values."
def assert_broadcastable(weights, values):
  """Asserts `weights` can be broadcast to `values`.
  In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. We
  let weights be either scalar, or the same rank as the target values, with each
  dimension either 1, or the same as the corresponding values dimension.
  Args:
    weights: `Tensor` of weights.
    values: `Tensor` of values to which weights are applied.
  Returns:
    `Operation` raising `InvalidArgumentError` if `weights` has incorrect shape.
    `no_op` if static checks determine `weights` has correct shape.
  Raises:
    ValueError: If static checks determine `weights` has incorrect shape.
  """
  with ops.name_scope(None, "assert_broadcastable", (weights, values)) as scope:
    with ops.name_scope(None, "weights", (weights,)) as weights_scope:
      weights = ops.convert_to_tensor(weights, name=weights_scope)
      weights_shape = array_ops.shape(weights, name="shape")
      weights_rank = array_ops.rank(weights, name="rank")
    # constant_value returns None when the rank is not statically known.
    weights_rank_static = tensor_util.constant_value(weights_rank)
    with ops.name_scope(None, "values", (values,)) as values_scope:
      values = ops.convert_to_tensor(values, name=values_scope)
      values_shape = array_ops.shape(values, name="shape")
      values_rank = array_ops.rank(values, name="rank")
    values_rank_static = tensor_util.constant_value(values_rank)
    # Try static checks.
    if weights_rank_static is not None and values_rank_static is not None:
      if weights_rank_static == 0:
        # Scalar weights always broadcast.
        return control_flow_ops.no_op(name="static_scalar_check_success")
      if weights_rank_static != values_rank_static:
        raise ValueError(
            "%s values.rank=%s. weights.rank=%s."
            " values.shape=%s. weights.shape=%s." % (
                _ASSERT_BROADCASTABLE_ERROR_PREFIX, values_rank_static,
                weights_rank_static, values.shape, weights.shape))
      weights_shape_static = tensor_util.constant_value(weights_shape)
      values_shape_static = tensor_util.constant_value(values_shape)
      if weights_shape_static is not None and values_shape_static is not None:
        # Sanity check, this should always be true since we checked rank above.
        ndims = len(values_shape_static)
        assert ndims == len(weights_shape_static)
        for i in range(ndims):
          if weights_shape_static[i] not in (1, values_shape_static[i]):
            raise ValueError(
                "%s Mismatch at dim %s. values.shape=%s weights.shape=%s." % (
                    _ASSERT_BROADCASTABLE_ERROR_PREFIX, i, values_shape_static,
                    weights_shape_static))
        return control_flow_ops.no_op(name="static_dims_check_success")
    # Dynamic checks: fall through here when a shape/rank was not statically
    # known; the check then happens at graph-execution time via Assert.
    is_scalar = math_ops.equal(0, weights_rank, name="is_scalar")
    data = (
        _ASSERT_BROADCASTABLE_ERROR_PREFIX,
        "weights.shape=", weights.name, weights_shape,
        "values.shape=", values.name, values_shape,
        "is_scalar=", is_scalar,
    )
    is_valid_shape = control_flow_ops.cond(
        is_scalar,
        lambda: is_scalar,
        lambda: _has_valid_nonscalar_shape(  # pylint: disable=g-long-lambda
            weights_rank, weights_shape, values_rank, values_shape),
        name="is_valid_shape")
    return control_flow_ops.Assert(is_valid_shape, data, name=scope)
def broadcast_weights(weights, values):
  """Broadcast `weights` to the same shape as `values`.
  This returns a version of `weights` following the same broadcast rules as
  `mul(weights, values)`, but limited to the weights shapes allowed by
  `assert_broadcastable`. When computing a weighted average, use this function
  to broadcast `weights` before summing them; e.g.,
  `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
  Args:
    weights: `Tensor` whose shape is broadcastable to `values` according to the
      rules of `assert_broadcastable`.
    values: `Tensor` of any shape.
  Returns:
    `weights` broadcast to `values` shape according to the rules of
    `assert_broadcastable`.
  """
  with ops.name_scope(None, "broadcast_weights", (weights, values)) as scope:
    values = ops.convert_to_tensor(values, name="values")
    # Weights are cast to the values dtype so the multiply below is valid.
    weights = ops.convert_to_tensor(
        weights, dtype=values.dtype.base_dtype, name="weights")
    # Try static check for exact match; if the shapes are fully known and
    # compatible, no broadcasting op is needed at all.
    weights_shape = weights.get_shape()
    values_shape = values.get_shape()
    if (weights_shape.is_fully_defined() and
        values_shape.is_fully_defined() and
        weights_shape.is_compatible_with(values_shape)):
      return weights
    # Otherwise validate at runtime, then broadcast by multiplying with a
    # ones tensor shaped like `values`.
    with ops.control_dependencies((assert_broadcastable(weights, values),)):
      return math_ops.multiply(
          weights, array_ops.ones_like(values), name=scope)
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious with changes by pycryptools developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import hmac
from .main import *
from .py2specials import *
from .py3specials import *
from . import constants as version
# Version numbers for BIP32 extended keys
# standard: xprv, xpub
# segwit in p2sh: yprv, ypub
# native segwit: zprv, zpub
# Each value is a 4-byte version prefix; xprv_header/xpub_header serialize
# them with "%08x", so entries written without leading zeros (e.g.
# 0x295b005 == 0x0295b005) are still emitted as a full 4 bytes.
XPRV_HEADERS = {
    'standard': 0x0488ade4,
    'p2wpkh-p2sh': 0x049d7878,
    'p2wsh-p2sh': 0x295b005,
    'p2wpkh': 0x4b2430c,
    'p2wsh': 0x2aa7a99
}
XPUB_HEADERS = {
    'standard': 0x0488b21e,
    'p2wpkh-p2sh': 0x049d7cb2,
    'p2wsh-p2sh': 0x295b43f,
    'p2wpkh': 0x4b24746,
    'p2wsh': 0x2aa7ed3
}
bh2u = safe_hexlify  # bytes -> hex str (alias from py2/py3 compat helpers)
hfu = binascii.hexlify  # bytes -> hex bytes
bfh = safe_from_hex  # hex str -> bytes
hmac_sha_512 = lambda x, y: hmac.new(x, y, hashlib.sha512).digest()  # HMAC-SHA512 digest of y keyed by x
def rev_hex(s):
    """Reverse the byte order of a hex string (endianness swap)."""
    raw = bfh(s)
    return bh2u(raw[::-1])
def int_to_hex(i, length=1):
    """Serialize non-negative int `i` as a little-endian hex string of
    exactly `length` bytes.

    Raises ValueError for negative input and OverflowError when `i` does
    not fit in `length` bytes. (Previously both cases silently produced a
    wrong-length / garbage string, corrupting downstream serialization.)
    """
    assert isinstance(i, int)
    if i < 0:
        raise ValueError('cannot serialize negative integer: %d' % i)
    s = hex(i)[2:].rstrip('L')  # .rstrip('L') is a Python 2 long-literal relic
    if len(s) > 2 * length:
        raise OverflowError('%d does not fit in %d byte(s)' % (i, length))
    s = "0"*(2*length - len(s)) + s
    return rev_hex(s)
class InvalidPassword(Exception):
    """Raised when decryption fails, i.e. the supplied password is wrong."""

    def __str__(self):
        return "Incorrect password"
try:
    # Prefer the fast PyCryptodome AES implementation when installed;
    # AES is set to None otherwise and the aes_*_with_iv helpers fall
    # back to a pure-Python implementation.
    from Cryptodome.Cipher import AES
except:
    AES = None
class InvalidPasswordException(Exception):
    # NOTE(review): not raised anywhere in this module -- InvalidPassword
    # (defined above) is the exception actually used for bad passwords.
    pass
class InvalidPadding(Exception):
    # Raised by strip_PKCS7_padding when ciphertext padding is malformed.
    pass
def assert_bytes(*args):
    """
    porting helper, assert args type

    Raises AssertionError (after logging the offending argument types) if
    any argument is not bytes/bytearray.
    """
    try:
        for x in args:
            assert isinstance(x, (bytes, bytearray))
    except AssertionError:
        # Narrowed from a bare `except:`, which also swallowed (and
        # re-raised) unrelated exceptions such as KeyboardInterrupt.
        # Only the isinstance assertion above can fail in this block.
        print('assert bytes failed', list(map(type, args)))
        raise
def append_PKCS7_padding(data):
    """Pad `data` up to a multiple of 16 bytes, PKCS#7 style (a whole
    16-byte block of padding is added when the input is already aligned)."""
    assert_bytes(data)
    padlen = 16 - (len(data) % 16)
    padding = bytes([padlen]) * padlen
    return data + padding
def strip_PKCS7_padding(data):
    """Validate and remove PKCS#7 padding from `data`.

    Raises InvalidPadding when the length is not a positive multiple of 16,
    the pad byte exceeds 16, or the padding bytes are inconsistent.
    """
    assert_bytes(data)
    if len(data) == 0 or len(data) % 16 != 0:
        raise InvalidPadding("invalid length")
    padlen = data[-1]
    if padlen > 16:
        raise InvalidPadding("invalid padding byte (large)")
    for pad_byte in data[-padlen:]:
        if pad_byte != padlen:
            raise InvalidPadding("invalid padding byte (inconsistent)")
    return data[0:-padlen]
def aes_encrypt_with_iv(key, iv, data):
    """AES-CBC encrypt `data` with the given key and 16-byte IV.

    The plaintext is PKCS#7-padded first, so the result length is a
    multiple of 16 bytes.
    """
    assert_bytes(key, iv, data)
    data = append_PKCS7_padding(data)
    if AES:
        # Fast path: PyCryptodome.
        e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
    else:
        # Pure-Python fallback. NOTE(review): `pyaes` is not imported in
        # this module's visible imports -- presumably provided via the
        # star imports above; confirm.
        aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
        aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
        e = aes.feed(data) + aes.feed()  # empty aes.feed() flushes buffer
    return e
def aes_decrypt_with_iv(key, iv, data):
    """AES-CBC decrypt `data` with the given key and 16-byte IV.

    Strips PKCS#7 padding; malformed padding (the usual symptom of a wrong
    key) is reported as InvalidPassword rather than InvalidPadding.
    """
    assert_bytes(key, iv, data)
    if AES:
        # Fast path: PyCryptodome.
        cipher = AES.new(key, AES.MODE_CBC, iv)
        data = cipher.decrypt(data)
    else:
        # Pure-Python fallback (see note on pyaes in aes_encrypt_with_iv).
        aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
        aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
        data = aes.feed(data) + aes.feed()  # empty aes.feed() flushes buffer
    try:
        return strip_PKCS7_padding(data)
    except InvalidPadding:
        raise InvalidPassword()
def EncodeAES(secret, s):
    """Encrypt bytes `s` under `secret` with AES-CBC and a random IV;
    return base64(iv || ciphertext)."""
    assert_bytes(s)
    iv = bytes(os.urandom(16))
    ciphertext = aes_encrypt_with_iv(secret, iv, s)
    return base64.b64encode(iv + ciphertext)
def DecodeAES(secret, e):
    """Inverse of EncodeAES: base64-decode `e`, split off the 16-byte IV,
    and AES-CBC decrypt the remainder under `secret`."""
    raw = bytes(base64.b64decode(e))
    iv, ciphertext = raw[:16], raw[16:]
    return aes_decrypt_with_iv(secret, iv, ciphertext)
def pw_encode(s, password):
    """Encrypt string `s` under `password`; a falsy password (None or '')
    returns `s` unchanged."""
    if not password:
        return s
    secret = Hash(password)
    return EncodeAES(secret, to_bytes(s, "utf8")).decode('utf8')
def pw_decode(s, password):
    """Decrypt `s` under `password`; password None returns `s` unchanged.

    Raises InvalidPassword when decryption or decoding fails. (Note the
    asymmetry with pw_encode: here only None -- not '' -- skips crypto,
    preserved from the original behavior.)
    """
    if password is None:
        return s
    secret = Hash(password)
    try:
        return to_string(DecodeAES(secret, s), "utf8")
    except Exception:
        raise InvalidPassword()
def is_new_seed(x, prefix=version.SEED_PREFIX):
    """Electrum seed-version check: True if HMAC-SHA512("Seed version",
    normalized seed) starts with the hex `prefix`."""
    from . import mnemonic
    x = mnemonic.normalize_text(x)
    s = bh2u(hmac_sha_512(b"Seed version", x.encode('utf8')))
    return s.startswith(prefix)
def seed_type(x):
    """Classify seed phrase `x`: 'standard', 'segwit', '2fa', or '' when
    it matches no known seed-version prefix."""
    candidates = (
        ('standard', version.SEED_PREFIX),
        ('segwit', version.SEED_PREFIX_SW),
        ('2fa', version.SEED_PREFIX_2FA),
    )
    for kind, prefix in candidates:
        if is_new_seed(x, prefix):
            return kind
    return ''
def is_seed(x):
    """True if `x` is a recognized seed phrase of any type."""
    return bool(seed_type(x))
# Script-type tags used when serializing keys (numeric codes are internal).
SCRIPT_TYPES = {
    'p2pkh':0,
    'p2wpkh':1,
    'p2wpkh-p2sh':2,
    'p2sh':5,
    'p2wsh':6,
    'p2wsh-p2sh':7
}
# Base58 alphabet (Bitcoin variant: no 0, O, I, l).
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
# Base43 alphabet (used by Electrum for compact transaction encoding).
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def inv_dict(d):
    """Return a new dict with the keys and values of `d` swapped."""
    return dict((value, key) for key, value in d.items())
def is_minikey(text):
    # Minikeys are typically 22 or 30 characters, but this routine
    # permits any length of 20 or more provided the minikey is valid.
    # A valid minikey must begin with an 'S', be in base58, and when
    # suffixed with '?' have its SHA256 hash begin with a zero byte.
    # They are widely used in Casascius physical bitcoins.
    # Note: `ord(c) in __b58chars` tests membership by byte value, since
    # iterating a bytes object in Python 3 yields ints.
    return (len(text) >= 20 and text[0] == 'S'
            and all(ord(c) in __b58chars for c in text)
            and sha256(text + '?')[0] == 0x00)
def minikey_to_private_key(text):
    # A minikey's private key is simply the single SHA256 of its text.
    return sha256(text)
###################################### BIP32 ##############################
BIP32_PRIME = 0x80000000  # high bit marks a hardened BIP32 derivation index
def get_pubkeys_from_secret(secret):
    # public key
    # Derive the compressed public key for `secret`; the second element
    # flags the encoding as compressed. NOTE(review): compress/privtopub
    # come from the star-imported main module -- confirm.
    pubkey = compress(privtopub(secret))
    return pubkey, True
def xprv_header(xtype):
    """Serialized 4-byte version prefix for an extended private key."""
    return bfh("{:08x}".format(XPRV_HEADERS[xtype]))
def xpub_header(xtype):
    """Serialized 4-byte version prefix for an extended public key."""
    return bfh("{:08x}".format(XPUB_HEADERS[xtype]))
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor Swap API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
# Policy check for this soft extension; returns False when disallowed.
authorize = extensions.soft_extension_authorizer('compute', 'flavor_swap')
class FlavorSwapController(wsgi.Controller):
    """Decorates flavor API responses with each flavor's swap size."""
    def _extend_flavors(self, req, flavors):
        # Copy the DB flavor's swap size into each response dict
        # (empty string when the flavor has no swap).
        for flavor in flavors:
            db_flavor = req.get_db_flavor(flavor['id'])
            key = 'swap'
            flavor[key] = db_flavor['swap'] or ""
    def _show(self, req, resp_obj):
        # Silently skip decoration when policy denies the extension.
        if not authorize(req.environ['nova.context']):
            return
        if 'flavor' in resp_obj.obj:
            self._extend_flavors(req, [resp_obj.obj['flavor']])
    @wsgi.extends
    def show(self, req, resp_obj, id):
        # Decorate GET /flavors/{id} responses.
        return self._show(req, resp_obj)
    @wsgi.extends(action='create')
    def create(self, req, resp_obj, body):
        # Decorate flavor-create responses identically to show.
        return self._show(req, resp_obj)
    @wsgi.extends
    def detail(self, req, resp_obj):
        # Decorate GET /flavors/detail (list) responses.
        if not authorize(req.environ['nova.context']):
            return
        self._extend_flavors(req, list(resp_obj.obj['flavors']))
class Flavor_swap(extensions.ExtensionDescriptor):
    """Support to show the swap status of a flavor."""
    name = "FlavorSwap"
    alias = "os-flavor-swap"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_swap/api/v1.1")
    updated = "2012-08-29T00:00:00Z"
    def get_controller_extensions(self):
        # Register FlavorSwapController against the 'flavors' resource.
        controller = FlavorSwapController()
        extension = extensions.ControllerExtension(self, 'flavors', controller)
        return [extension]
import numpy as np
from ..antechamber.atomtype import atomtype
import math
def pdb(molecule, ff='amber', fn='cnt.pdb', save_dir='.'):
    """Creates a .pdb (protein data bank) type list for use with molecular dynamics packages.
    pdb_lines returns list holding every line of .pdb file.

    `ff` selects the atom-type naming scheme: 'amber' uses molecule.atomtypes
    directly, 'oplsaa' remaps via amber_2_opls; anything else aborts.
    """
    amber_2_opls = {"CA": "CA", "HC": "HA"}
    atom_lines = []
    conect_lines = []
    inc_index_bondList = molecule.bondList + 1 # sets index to same index as "serial", works
    conect_header_bare = "CONECT"
    # One ATOM record per atom, 1-based serial numbers.
    for i in range(len(molecule.posList)):
        serial = i + 1
        if ff == 'amber':
            name = molecule.atomtypes[i]
        elif ff == 'oplsaa':
            name = amber_2_opls[molecule.atomtypes[i]]
        else:
            print('Check ff input')
            raise SystemExit
        altloc = " "
        #resname = "CNT"
        resname = name
        chainid = "A"
        resseq = serial
        icode = " "
        x = round(molecule.posList[i][0], 3)
        y = round(molecule.posList[i][1], 3)
        z = round(molecule.posList[i][2], 3)
        occupancy = 1.00
        tempfactor = 0.00
        element = atomtype.inv_atomicSymDict[molecule.zList[i]] # atomic number
        charge = " "
        # Fixed-width PDB ATOM record layout.
        atom_header = "ATOM {0:>5} {1:<3}{2}{3:>3} {4}{5:>4}{6} {7:>8.3f}{8:>8.3f}{9:>8.3f}{10:>6.2f}{11:>6.2f} " \
                      " {12:>2}{13:>2}".format(serial, name, altloc, resname,
                                               chainid, resseq, icode, x, y, z,
                                               occupancy, tempfactor, element, charge)
        atom_lines.append(atom_header)
    # One CONECT record per atom listing its bonded neighbors.
    for j in range(len(inc_index_bondList)):
        conect_header_temp = conect_header_bare
        for k in range(len(inc_index_bondList[j])): # builds variable size conect header
            conect_adder = "{0:>5}".format(inc_index_bondList[j][k])
            conect_header_temp += conect_adder
        conect_lines.append(conect_header_temp)
    pdb_lines = atom_lines + conect_lines
    pdb_lines.append("TER")
    pdb_lines.append("END")
    save_file(pdb_lines, save_dir, fn)
    print('Successfully exported %s to %s' % (fn, save_dir))
def gro(molecule, scale=2.0, fn='cnt.gro', save_dir='.', periodic=False):
    """Creates a .gro file for gromacs, holds atom coordinates and unit cell size
    Coordinates exported in nm, originally in angstroms.

    Fix: positions are copied before being translated into the export box.
    Previously `posList_cent` aliased molecule.posList and the in-place
    `+=`/`*=` shifts below silently mutated the caller's molecule (so a
    subsequent pdb() export would use corrupted coordinates).
    """
    gro_lines = []
    res_num = 1
    res_name = "CNT"
    elemtypes = []
    a_num = []
    for i in range(len(molecule.atomtypes)):
        a_num_temp = atomtype.inv_atomicSymDict[molecule.zList[i]] # get atomic number
        elemtypes_temp = molecule.atomtypes[i][0] # element only (first char.)
        a_num.append(a_num_temp)
        elemtypes.append(elemtypes_temp)
    dist_to_orig = []
    for i in range(len(molecule.posList)):
        temp_dist = np.sqrt(molecule.posList[i][0]**2 + molecule.posList[i][1]**2 + molecule.posList[i][2]**2)
        dist_to_orig.append(temp_dist)
    index_min = np.argmin(dist_to_orig) # closest pt in object to origin
    # move tube to true origin at 0,0,0
    move_dist = np.abs(molecule.posList[index_min])
    # Work on a private copy so the molecule itself is never modified.
    posList_cent = np.array(molecule.posList, dtype=float)
    #posList_cent += move_dist # now centered at origin
    x_list, y_list, z_list = zip(*molecule.posList)
    # center tube in quadrant 1 box
    max_x = np.max(x_list)
    max_y = np.max(y_list)
    max_z = np.max(z_list)
    min_x = np.min(x_list)
    min_y = np.min(y_list)
    min_z = np.min(z_list)
    length_x = np.abs(max_x-min_x)
    length_y = np.abs(max_y-min_y)
    length_z = np.abs(max_z-min_z)
    dist_to_move = np.abs([min_x, min_y, min_z])
    max_length = np.max([length_x, length_y, length_z])
    idx_max_dim = np.argmax([length_x, length_y, length_z])
    dims = ['X','Y','Z']
    dims_dict = {0:x_list, 1:y_list, 2:z_list}
    length_str = dims[idx_max_dim]
    print('Length of tube is in the %s direction.' % length_str)
    box_dim = scale * max_length
    new_move_dist = box_dim/2.0
    # measure dist to new box origin in move in every direction
    posDist_new = posList_cent  # alias of the private copy; += below is safe
    posDist_new += (new_move_dist + dist_to_move)
    # now tube is centered in quadrant 0 box
    if periodic:
        dir_to_cut = dims_dict[idx_max_dim]
        left = np.min(dir_to_cut) # picks first smallest value in list
        right = np.max(dir_to_cut)
        split = (right-left) / 2.0
        print('Splitting %s direction around %.2f' % (length_str, split))
        # find an atom closest to split line
        dist_to_split = []
        for i in range(len(molecule.posList)):
            temp_dist = np.abs(posDist_new[i][idx_max_dim] - split)
            dist_to_split.append(temp_dist)
        # NOTE(review): index_split is computed but never used afterwards --
        # the periodic split appears unfinished; confirm intent.
        index_split = np.argmin(dist_to_split) # closest pt in object to split
    # scale everything by bond (angstrom -> nm)
    posDist_new *= 0.1
    box_dim *= 0.1
    print("Box with be %dX the maximum dimension of the object.\nUsing a %.2fX%.2fX%.2f box." %
          (scale, box_dim, box_dim, box_dim))
    # lets write to list
    tit = "SWCNT armchair"
    tot_atoms = len(posDist_new)
    num_atoms_line = "{0:>5}".format(tot_atoms)
    gro_lines.append(tit)
    gro_lines.append(num_atoms_line)
    for i in range(len(posDist_new)):
        _index = i + 1
        temp_dist = np.sqrt(posDist_new[i][0] ** 2 + posDist_new[i][1] ** 2 + posDist_new[i][2] ** 2)
        temp_line = "{0:>5}{1:<5}{2:>5}{3:>5}{4:>8.3f}{5:>8.3f}{6:>8.3f}"\
            .format(res_num, res_name, a_num[i], _index,
                    posDist_new[i][0], posDist_new[i][1], posDist_new[i][2])
        gro_lines.append(temp_line)
    box_line = "{0:>8.3f}{1:>8.3f}{2:>8.3f}".format(box_dim, box_dim, box_dim)
    gro_lines.append(box_line)
    save_file(gro_lines, save_dir, fn)
    print('Successfully exported %s to %s' % (fn, save_dir))
def restrains(mol, fn='posre.itp', save_dir='.', fc=1000):
    """Generates posre.itp file used by GROMACS to restrain atoms to a location, can be read by x2top"""
    # `fc` is the force constant of the position restraint (kJ mol^-1 nm^-2).
    # IF NEEDED. REMOVING CM TRANSLATION AND ROTATION IS PROBABLY BEST.
    # **********MAKE SURE MOLECULE IS HYDROGENATED FIRST********** #
    funct = 1
    itp_lines = ["; file for defining restraints in CNT, read in through X.top", ""]
    itp_lines += ["[ position_restraints ]", "; ai funct fcx fcy fcz"]
    for atom_idx in mol.hcap:
        serial = atom_idx + 1
        itp_lines.append("{0:>4}{1:>6}{2:>9}{3:>8}{4:>8}".format(serial, funct, fc, fc, fc))
    itp_lines.append("")  # EOL
    save_file(itp_lines, save_dir, fn)
    print('Successfully exported %s to %s' % (fn, save_dir))
def lammps(molecule, fn='cnt.lammps', save_dir='.', type_list=None):
    """Generates data file for use in LAMMPS
    Assuming 'real' units (the unit type)
    mass = grams/mole
    distance = Angstroms
    time = femtoseconds
    energy = Kcal/mole
    velocity = Angstroms/femtosecond
    force = Kcal/mole-Angstrom
    torque = Kcal/mole
    temperature = Kelvin
    pressure = atmospheres
    dynamic viscosity = Poise
    charge = multiple of electron charge (1.0 is a proton)
    dipole = charge*Angstroms
    electric field = volts/Angstrom
    density = gram/cm^dim
    bond_const_K = Kcal/(mole*Angstrom^2)
    bond_const_r0 = Angstrom
    """
    # NOTE(review): vdwDict and amuDict are not referenced in this function
    # (masses come from molecule.mass) -- confirm whether they are stale.
    # Also, amuDict[15] = 30.79 differs from phosphorus' standard 30.97 amu.
    vdwDict = {1: 1.2, 6: 1.7, 7: 1.55, 8: 1.52, 9: 1.47, 15: 1.8, 16: 1.8, 17: 2.75}
    amuDict = {1: 1.008, 6: 12.01, 7: 14.01, 8: 16.00, 9: 19.00,
               15: 30.79, 16: 32.065, 17: 35.45}
    if type_list is None:
        type_list = np.ones(len(molecule.posList))
    l_lines = []
    l_lines.append('LAMMPS Description')
    l_lines.append('')
    l_lines.append('%d atoms' % len(molecule.posList))
    l_lines.append('%d bonds' % len(molecule.bondList))
    l_lines.append('%d angles' % len(molecule.angleList))
    l_lines.append('%d dihedrals' % 0)
    l_lines.append('%d impropers' % 0)
    l_lines.append('')
    # NOTE(review): these counts declare one type per atom/bond/angle
    # (matching the per-entry Coeffs sections below), not distinct
    # chemical types -- confirm this is intended.
    l_lines.append('%d atom types' % len(molecule.zList))
    l_lines.append('%d bond types' % len(molecule.bondList))
    l_lines.append('%d angle types' % len(molecule.angleList))
    l_lines.append('')
    # Simulation box: 5 A of padding beyond the molecule's extents.
    box_min = np.ceil(np.min(molecule.posList)) - 5.0
    box_max = np.ceil(np.max(molecule.posList)) + 5.0
    l_lines.append('%d %d xlo xhi' % (box_min, box_max))
    l_lines.append('%d %d ylo yhi' % (box_min, box_max))
    l_lines.append('%d %d zlo zhi' % (box_min, box_max))
    l_lines.append('')
    l_lines.append('Masses')
    l_lines.append('')
    for i in range(len(molecule.mass)):
        l_lines.append('%d %.5f' % ((i+1), molecule.mass[i]))
    l_lines.append('')
    l_lines.append('Bond Coeffs')
    l_lines.append('')
    for i in range(len(molecule.bondList)):
        l_lines.append('%d %.5f %.5f' % ((i+1), molecule.kb[i], molecule.b0[i]))
    l_lines.append('')
    l_lines.append('Angle Coeffs')
    l_lines.append('')
    for i in range(len(molecule.angleList)):
        l_lines.append('%d %.5f %.5f' % ((i + 1), molecule.kt[i], molecule.t0[i]))
    l_lines.append('')
    l_lines.append('Atoms')
    l_lines.append('')
    for i in range(len(molecule.posList)):
        l_lines.append('%d %d %d %.5f %.5f %.5f' % ((i+1), type_list[i], (i+1), molecule.posList[i,0], molecule.posList[i,1], molecule.posList[i,2]))
    l_lines.append('')
    l_lines.append('Bonds')
    l_lines.append('')
    # Bond/angle member indices are converted from 0-based to 1-based.
    for i in range(len(molecule.bondList)):
        l_lines.append('%d 1 %d %d' % ((i+1), (molecule.bondList[i,0]+1), (molecule.bondList[i,1]+1)))
    l_lines.append('')
    l_lines.append('Angles')
    l_lines.append('')
    for i in range(len(molecule.angleList)):
        l_lines.append('%d 1 %d %d %d' % ((i+1), (molecule.angleList[i,0]+1), (molecule.angleList[i,1]+1), (molecule.angleList[i,2]+1)))
    l_lines.append('')
    save_file(l_lines, save_dir, fn)
    print('Successfully exported %s to %s' % (fn, save_dir))
def top(mol, ff='amber', fn='cnt.top', save_dir='.'):
    """Creates a .top (topology) type list for use in MD packages
    AMBER99SB or OPLS-AA forcefields can being used.

    `ff` must be 'amber' or 'oplsaa'; anything else aborts via SystemExit.
    """
    print("Recommended that MD export is done using .gro file only.")
    if ff == 'amber':
        tit = 'AMBER99SB'
        ffloc = './amber99sb.ff/forcefield.itp'
    elif ff == 'oplsaa':
        tit = 'OPLS-AA'
        ffloc = './oplsaa.ff/forcefield.itp'
    else:
        print('Check ff input')
        raise SystemExit
    # NOTE(review): `tit` is assigned but never used below -- confirm.
    top_lines = [";", "; Topology file for %s" % mol.name, ";%s force field" % ff,";"]
    top_lines.extend(["; Include forcefield parameters", '#include "%s"' % ffloc, ""])
    # we call our molecule or residue CNT, encompassing all atoms of the tube/functionalized ends
    top_lines.extend(["[ moleculetype ]", "; Name nrexcl", "CNT 3", ""])
    # ATOMS
    top_lines.extend(["[ atoms ]", "; nr type resnr residue atom cgnr charge"])
    for i in range(len(mol.atomtypes)):
        index = i + 1
        # OPLS-AA names aromatic hydrogens HA instead of AMBER's HC.
        if (ff == 'oplsaa') and (mol.atomtypes[i] == 'HC'):
            temp_atomtype = 'HA'
        else:
            temp_atomtype = mol.atomtypes[i]
        a_num = atomtype.inv_atomicSymDict[mol.zList[i]] # atomic number
        temp_line = "{0:>6}{1:>8}{2:>8}{3:>8}{4:>8}{5:>8}{6:>7.3f}"\
            .format(index, temp_atomtype, 1, 'CNT', a_num, index, 0.000)
        top_lines.append(temp_line)
    # BONDS (indices converted to 1-based)
    top_lines.extend(["", "[ bonds ]", "; ai aj funct c0 c1"])
    for i in range(len(mol.bondList)):
        funct = 1
        temp_line = "{0:>5}{1:>6}{2:>6}{3:>13}{4:>13}"\
            .format(mol.bondList[i][0]+1, mol.bondList[i][1]+1, funct, "", "")
        top_lines.append(temp_line)
    # PAIRS
    # Let L-J and Coulomb pairs auto generate from the cutoffs
    # ANGLES
    top_lines.extend(["", "[ angles ]", "; ai aj ak funct c0 c1"])
    for i in range(len(mol.angleList)):
        funct = 1
        temp_line = "{0:>5}{1:>6}{2:>6}{3:>6}{4:>13}{5:>13}"\
            .format(mol.angleList[i][0]+1, mol.angleList[i][1]+1, mol.angleList[i][2]+1, funct, "", "")
        top_lines.append(temp_line)
    # DIHEDRALS (funct 9 = AMBER proper dihedral style, 3 = OPLS RB style;
    # ff was validated above so one branch always assigns funct)
    top_lines.extend(["", "[ dihedrals ]", "; ai aj ak al funct c0 c1"])
    for i in range(len(mol.dihList)):
        if ff == 'amber':
            funct = 9
        elif ff == 'oplsaa':
            funct = 3
        temp_line = "{0:>5}{1:>6}{2:>6}{3:>6}{4:>6}{5:>13}{6:>13}"\
            .format(mol.dihList[i][0]+1, mol.dihList[i][1]+1, mol.dihList[i][2]+1, mol.dihList[i][3]+1, funct, "", "")
        top_lines.append(temp_line)
    top_lines.extend(["", "[ system ]", "CNT"])
    top_lines.extend(["", "[ molecules ]", "CNT 1", ""])
    save_file(top_lines, save_dir, fn)
    print('Successfully exported %s to %s' % (fn, save_dir))
def _build_lines(columns, spaces, size, innerColumns):
listSection = []
line = ";"
for column in columns:
line += ' '*spaces
line += column
listSection.append(line)
for i in range(size):
line = ' '
for count, col in enumerate([x[i] for x in innerColumns]):
entryLength = spaces+len(columns[count])
line += col.rjust(entryLength, ' ')
listSection.append(line)
listSection.append(' ')
return listSection
def save_file(txt_object, save_dir, name):
    """Write each string in `txt_object` to `save_dir`/`name`, one per line.

    Uses a context manager so the file handle is released even if a write
    raises (the original leaked the handle on error).
    """
    with open(save_dir + "/%s" % name, 'w') as f:
        for line in txt_object:
            f.write(line + "\n")
#ifndef CURLINC_WEBSOCKETS_H
#define CURLINC_WEBSOCKETS_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
/* Metadata for the websocket frame a curl_ws_recv()/curl_ws_send() call
   operates on; also retrievable via curl_ws_meta(). */
struct curl_ws_frame {
  int age;              /* zero */
  int flags;            /* See the CURLWS_* defines */
  curl_off_t offset;    /* the offset of this data into the frame */
  curl_off_t bytesleft; /* number of pending bytes left of the payload */
  size_t len;           /* size of the current data chunk */
};

/* flag bits */
#define CURLWS_TEXT (1 << 0)    /* text frame */
#define CURLWS_BINARY (1 << 1)  /* binary frame */
#define CURLWS_CONT (1 << 2)    /* continuation: more fragments follow */
#define CURLWS_CLOSE (1 << 3)   /* CLOSE frame */
#define CURLWS_PING (1 << 4)    /* PING frame */
#define CURLWS_OFFSET (1 << 5)  /* partial payload; see offset/bytesleft */
/*
* NAME curl_ws_recv()
*
* DESCRIPTION
*
* Receives data from the websocket connection. Use after successful
* curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
*/
CURL_EXTERN CURLcode curl_ws_recv(CURL *curl, void *buffer, size_t buflen,
size_t *recv,
const struct curl_ws_frame **metap);
/* flags for curl_ws_send() */
#define CURLWS_PONG (1 << 6)
/*
* NAME curl_ws_send()
*
* DESCRIPTION
*
* Sends data over the websocket connection. Use after successful
* curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
*/
CURL_EXTERN CURLcode curl_ws_send(CURL *curl, const void *buffer,
size_t buflen, size_t *sent,
curl_off_t fragsize,
unsigned int flags);
/*
* NAME curl_ws_start_frame()
*
* DESCRIPTION
*
* Buffers a websocket frame header with the given flags and length.
* Errors when a previous frame is not complete, e.g. not all its
* payload has been added.
*/
CURL_EXTERN CURLcode curl_ws_start_frame(CURL *curl,
unsigned int flags,
curl_off_t frame_len);
/* bits for the CURLOPT_WS_OPTIONS bitmask: */
#define CURLWS_RAW_MODE (1L << 0)
#define CURLWS_NOAUTOPONG (1L << 1)
CURL_EXTERN const struct curl_ws_frame *curl_ws_meta(CURL *curl);
#ifdef __cplusplus
}
#endif
#endif /* CURLINC_WEBSOCKETS_H */ | c | github | https://github.com/ktorio/ktor | ktor-client/ktor-client-curl/desktop/interop/include/curl/websockets.h |
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
use rustc_mir_dataflow::debuginfo::debuginfo_locals;
use rustc_session::config::MirStripDebugInfo;
/// Conditionally remove some of the VarDebugInfo in MIR.
///
/// In particular, stripping non-parameter debug info for tiny, primitive-like
/// methods in core saves work later, and nobody ever wanted to use it anyway.
///
/// Behavior is selected by the unstable `mir_strip_debuginfo` session option.
pub(super) struct StripDebugInfo;
impl<'tcx> crate::MirPass<'tcx> for StripDebugInfo {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
sess.opts.unstable_opts.mir_strip_debuginfo != MirStripDebugInfo::None
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
match tcx.sess.opts.unstable_opts.mir_strip_debuginfo {
MirStripDebugInfo::None => return,
MirStripDebugInfo::AllLocals => {}
MirStripDebugInfo::LocalsInTinyFunctions
if let TerminatorKind::Return { .. } =
body.basic_blocks[START_BLOCK].terminator().kind => {}
MirStripDebugInfo::LocalsInTinyFunctions => return,
}
body.var_debug_info.retain(|vdi| {
matches!(
vdi.value,
VarDebugInfoContents::Place(place)
if place.local.as_usize() <= body.arg_count && place.local != RETURN_PLACE,
)
});
let debuginfo_locals = debuginfo_locals(body);
for data in body.basic_blocks.as_mut_preserves_cfg() {
for stmt in data.statements.iter_mut() {
stmt.debuginfos.retain(|debuginfo| match debuginfo {
StmtDebugInfo::AssignRef(local, _) | StmtDebugInfo::InvalidAssign(local) => {
debuginfo_locals.contains(*local)
}
});
}
data.after_last_stmt_debuginfos.retain(|debuginfo| match debuginfo {
StmtDebugInfo::AssignRef(local, _) | StmtDebugInfo::InvalidAssign(local) => {
debuginfo_locals.contains(*local)
}
});
}
}
fn is_required(&self) -> bool {
true
}
} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_mir_transform/src/strip_debuginfo.rs |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests application-provided metadata, status code, and details."""
import threading
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit import test_common
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
# Wire-level payloads and no-op (de)serializers used by the test RPCs.
_SERIALIZED_REQUEST = b'\x46\x47\x48'
_SERIALIZED_RESPONSE = b'\x49\x50\x51'
_REQUEST_SERIALIZER = lambda unused_request: _SERIALIZED_REQUEST
_REQUEST_DESERIALIZER = lambda unused_serialized_request: object()
_RESPONSE_SERIALIZER = lambda unused_response: _SERIALIZED_RESPONSE
_RESPONSE_DESERIALIZER = lambda unused_serialized_response: object()
# Service and method names under which the four RPC kinds are registered.
_SERVICE = 'test.TestService'
_UNARY_UNARY = 'UnaryUnary'
_UNARY_STREAM = 'UnaryStream'
_STREAM_UNARY = 'StreamUnary'
_STREAM_STREAM = 'StreamStream'
# Metadata fixtures; each includes a text entry and a binary ('-bin') entry.
_CLIENT_METADATA = (('client-md-key', 'client-md-key'),
                    ('client-md-key-bin', b'\x00\x01'))
_SERVER_INITIAL_METADATA = (
    ('server-initial-md-key', 'server-initial-md-value'),
    ('server-initial-md-key-bin', b'\x00\x02'))
_SERVER_TRAILING_METADATA = (
    ('server-trailing-md-key', 'server-trailing-md-value'),
    ('server-trailing-md-key-bin', b'\x00\x03'))
# Non-OK status and details the servicer applies when configured to fail.
_NON_OK_CODE = grpc.StatusCode.NOT_FOUND
_DETAILS = 'Test details!'
class _Servicer(object):
def __init__(self):
self._lock = threading.Lock()
self._abort_call = False
self._code = None
self._details = None
self._exception = False
self._return_none = False
self._received_client_metadata = None
def unary_unary(self, request, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
if self._abort_call:
context.abort(self._code, self._details)
else:
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
if self._exception:
raise test_control.Defect()
else:
return None if self._return_none else object()
def unary_stream(self, request, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
if self._abort_call:
context.abort(self._code, self._details)
else:
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
for _ in range(test_constants.STREAM_LENGTH // 2):
yield _SERIALIZED_RESPONSE
if self._exception:
raise test_control.Defect()
def stream_unary(self, request_iterator, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
# TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
# request iterator.
list(request_iterator)
if self._abort_call:
context.abort(self._code, self._details)
else:
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
if self._exception:
raise test_control.Defect()
else:
return None if self._return_none else _SERIALIZED_RESPONSE
def stream_stream(self, request_iterator, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
# TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
# request iterator.
list(request_iterator)
if self._abort_call:
context.abort(self._code, self._details)
else:
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
for _ in range(test_constants.STREAM_LENGTH // 3):
yield object()
if self._exception:
raise test_control.Defect()
def set_abort_call(self):
with self._lock:
self._abort_call = True
def set_code(self, code):
with self._lock:
self._code = code
def set_details(self, details):
with self._lock:
self._details = details
def set_exception(self):
with self._lock:
self._exception = True
def set_return_none(self):
with self._lock:
self._return_none = True
def received_client_metadata(self):
with self._lock:
return self._received_client_metadata
def _generic_handler(servicer):
    """Builds a generic handler exposing the servicer's four test RPC methods
    under _SERVICE; the unary-unary and stream-stream methods use the canned
    (de)serializers.
    """
    serialization = {
        'request_deserializer': _REQUEST_DESERIALIZER,
        'response_serializer': _RESPONSE_SERIALIZER,
    }
    method_handlers = {
        _UNARY_UNARY: grpc.unary_unary_rpc_method_handler(
            servicer.unary_unary, **serialization),
        _UNARY_STREAM: grpc.unary_stream_rpc_method_handler(
            servicer.unary_stream),
        _STREAM_UNARY: grpc.stream_unary_rpc_method_handler(
            servicer.stream_unary),
        _STREAM_STREAM: grpc.stream_stream_rpc_method_handler(
            servicer.stream_stream, **serialization),
    }
    return grpc.method_handlers_generic_handler(_SERVICE, method_handlers)
class MetadataCodeDetailsTest(unittest.TestCase):
    """Exercises metadata, status-code, and details transmission across all
    four RPC cardinalities against a real in-process gRPC server, for the
    successful, aborted, custom-code, exception, and None-return cases.
    """

    def setUp(self):
        """Starts a server backed by _Servicer and creates the four callables."""
        self._servicer = _Servicer()
        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        self._server = grpc.server(
            self._server_pool, handlers=(_generic_handler(self._servicer),))
        port = self._server.add_insecure_port('[::]:0')
        self._server.start()
        channel = grpc.insecure_channel('localhost:{}'.format(port))
        self._unary_unary = channel.unary_unary(
            '/'.join(('', _SERVICE, _UNARY_UNARY,)),
            request_serializer=_REQUEST_SERIALIZER,
            response_deserializer=_RESPONSE_DESERIALIZER,)
        self._unary_stream = channel.unary_stream(
            '/'.join(('', _SERVICE, _UNARY_STREAM,)),)
        self._stream_unary = channel.stream_unary(
            '/'.join(('', _SERVICE, _STREAM_UNARY,)),)
        self._stream_stream = channel.stream_stream(
            '/'.join(('', _SERVICE, _STREAM_STREAM,)),
            request_serializer=_REQUEST_SERIALIZER,
            response_deserializer=_RESPONSE_DESERIALIZER,)

    def testSuccessfulUnaryUnary(self):
        """Successful unary-unary call: metadata round-trips, OK code, details."""
        self._servicer.set_details(_DETAILS)
        unused_response, call = self._unary_unary.with_call(
            object(), metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             call.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
                                             call.trailing_metadata()))
        self.assertIs(grpc.StatusCode.OK, call.code())
        self.assertEqual(_DETAILS, call.details())

    def testSuccessfulUnaryStream(self):
        """Successful unary-stream call: metadata round-trips, OK code, details."""
        self._servicer.set_details(_DETAILS)
        response_iterator_call = self._unary_stream(
            _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
        received_initial_metadata = response_iterator_call.initial_metadata()
        list(response_iterator_call)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             received_initial_metadata))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                response_iterator_call.trailing_metadata()))
        self.assertIs(grpc.StatusCode.OK, response_iterator_call.code())
        self.assertEqual(_DETAILS, response_iterator_call.details())

    def testSuccessfulStreamUnary(self):
        """Successful stream-unary call: metadata round-trips, OK code, details."""
        self._servicer.set_details(_DETAILS)
        unused_response, call = self._stream_unary.with_call(
            iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
            metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             call.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_TRAILING_METADATA,
                                             call.trailing_metadata()))
        self.assertIs(grpc.StatusCode.OK, call.code())
        self.assertEqual(_DETAILS, call.details())

    def testSuccessfulStreamStream(self):
        """Successful stream-stream call: metadata round-trips, OK code, details."""
        self._servicer.set_details(_DETAILS)
        response_iterator_call = self._stream_stream(
            iter([object()] * test_constants.STREAM_LENGTH),
            metadata=_CLIENT_METADATA)
        received_initial_metadata = response_iterator_call.initial_metadata()
        list(response_iterator_call)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             received_initial_metadata))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                response_iterator_call.trailing_metadata()))
        self.assertIs(grpc.StatusCode.OK, response_iterator_call.code())
        self.assertEqual(_DETAILS, response_iterator_call.details())

    def testAbortedUnaryUnary(self):
        """Aborted unary-unary call surfaces the non-OK code and details."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_abort_call()
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_INITIAL_METADATA,
                exception_context.exception.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                exception_context.exception.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
        self.assertEqual(_DETAILS, exception_context.exception.details())

    def testAbortedUnaryStream(self):
        """Aborted unary-stream call yields no responses and a non-OK status."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_abort_call()
        response_iterator_call = self._unary_stream(
            _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
        received_initial_metadata = response_iterator_call.initial_metadata()
        with self.assertRaises(grpc.RpcError):
            self.assertEqual(len(list(response_iterator_call)), 0)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             received_initial_metadata))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                response_iterator_call.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
        self.assertEqual(_DETAILS, response_iterator_call.details())

    def testAbortedStreamUnary(self):
        """Aborted stream-unary call surfaces the non-OK code and details."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_abort_call()
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._stream_unary.with_call(
                iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
                metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_INITIAL_METADATA,
                exception_context.exception.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                exception_context.exception.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
        self.assertEqual(_DETAILS, exception_context.exception.details())

    def testAbortedStreamStream(self):
        """Aborted stream-stream call yields no responses and a non-OK status."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_abort_call()
        response_iterator_call = self._stream_stream(
            iter([object()] * test_constants.STREAM_LENGTH),
            metadata=_CLIENT_METADATA)
        received_initial_metadata = response_iterator_call.initial_metadata()
        with self.assertRaises(grpc.RpcError):
            self.assertEqual(len(list(response_iterator_call)), 0)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             received_initial_metadata))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                response_iterator_call.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
        self.assertEqual(_DETAILS, response_iterator_call.details())

    def testCustomCodeUnaryUnary(self):
        """Servicer-set non-OK code on unary-unary surfaces as an RpcError."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_INITIAL_METADATA,
                exception_context.exception.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                exception_context.exception.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
        self.assertEqual(_DETAILS, exception_context.exception.details())

    def testCustomCodeUnaryStream(self):
        """Servicer-set non-OK code on unary-stream fails iteration."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        response_iterator_call = self._unary_stream(
            _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
        received_initial_metadata = response_iterator_call.initial_metadata()
        with self.assertRaises(grpc.RpcError):
            list(response_iterator_call)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             received_initial_metadata))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                response_iterator_call.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
        self.assertEqual(_DETAILS, response_iterator_call.details())

    def testCustomCodeStreamUnary(self):
        """Servicer-set non-OK code on stream-unary surfaces as an RpcError."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._stream_unary.with_call(
                iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
                metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_INITIAL_METADATA,
                exception_context.exception.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                exception_context.exception.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
        self.assertEqual(_DETAILS, exception_context.exception.details())

    def testCustomCodeStreamStream(self):
        """Servicer-set non-OK code on stream-stream fails iteration."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        response_iterator_call = self._stream_stream(
            iter([object()] * test_constants.STREAM_LENGTH),
            metadata=_CLIENT_METADATA)
        received_initial_metadata = response_iterator_call.initial_metadata()
        with self.assertRaises(grpc.RpcError) as exception_context:
            list(response_iterator_call)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             received_initial_metadata))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                exception_context.exception.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
        self.assertEqual(_DETAILS, exception_context.exception.details())

    def testCustomCodeExceptionUnaryUnary(self):
        """Servicer exception after setting code/details still surfaces them."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_exception()
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_INITIAL_METADATA,
                exception_context.exception.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                exception_context.exception.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
        self.assertEqual(_DETAILS, exception_context.exception.details())

    def testCustomCodeExceptionUnaryStream(self):
        """Servicer exception mid-stream still surfaces code/details."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_exception()
        response_iterator_call = self._unary_stream(
            _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
        received_initial_metadata = response_iterator_call.initial_metadata()
        with self.assertRaises(grpc.RpcError):
            list(response_iterator_call)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             received_initial_metadata))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                response_iterator_call.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
        self.assertEqual(_DETAILS, response_iterator_call.details())

    def testCustomCodeExceptionStreamUnary(self):
        """Servicer exception after setting code/details still surfaces them."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_exception()
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._stream_unary.with_call(
                iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
                metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_INITIAL_METADATA,
                exception_context.exception.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                exception_context.exception.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
        self.assertEqual(_DETAILS, exception_context.exception.details())

    def testCustomCodeExceptionStreamStream(self):
        """Servicer exception mid-stream still surfaces code/details."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_exception()
        response_iterator_call = self._stream_stream(
            iter([object()] * test_constants.STREAM_LENGTH),
            metadata=_CLIENT_METADATA)
        received_initial_metadata = response_iterator_call.initial_metadata()
        with self.assertRaises(grpc.RpcError):
            list(response_iterator_call)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(_SERVER_INITIAL_METADATA,
                                             received_initial_metadata))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                response_iterator_call.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, response_iterator_call.code())
        self.assertEqual(_DETAILS, response_iterator_call.details())

    def testCustomCodeReturnNoneUnaryUnary(self):
        """Servicer returning None on unary-unary surfaces as an RpcError."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_return_none()
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_INITIAL_METADATA,
                exception_context.exception.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                exception_context.exception.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
        self.assertEqual(_DETAILS, exception_context.exception.details())

    def testCustomCodeReturnNoneStreamUnary(self):
        """Servicer returning None on stream-unary surfaces as an RpcError."""
        self._servicer.set_code(_NON_OK_CODE)
        self._servicer.set_details(_DETAILS)
        self._servicer.set_return_none()
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._stream_unary.with_call(
                iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
                metadata=_CLIENT_METADATA)
        self.assertTrue(
            test_common.metadata_transmitted(
                _CLIENT_METADATA, self._servicer.received_client_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_INITIAL_METADATA,
                exception_context.exception.initial_metadata()))
        self.assertTrue(
            test_common.metadata_transmitted(
                _SERVER_TRAILING_METADATA,
                exception_context.exception.trailing_metadata()))
        self.assertIs(_NON_OK_CODE, exception_context.exception.code())
        self.assertEqual(_DETAILS, exception_context.exception.details())
# Allow running this test module directly; verbosity=2 prints each test name.
if __name__ == '__main__':
    unittest.main(verbosity=2)
#! /usr/bin/env python3
# linktree
#
# Make a copy of a directory tree with symbolic links to all files in the
# original tree.
# All symbolic links go to a special symbolic link at the top, so you
# can easily fix things if the original source tree moves.
# See also "mkreal".
#
# usage: mklinks oldtree newtree
import sys, os
LINK = '.LINK' # Name of special symlink at the top.
debug = 0  # trace level: nonzero traces directory recursion, >1 traces each file
def main():
    """Parse arguments, create the new tree, plant the anchor symlink, and
    recursively link the old tree's contents.

    Returns a shell-style status: 0 on success, 1 on failure, 2 on bad usage.
    """
    argv = sys.argv
    if not 3 <= len(argv) <= 4:
        print('usage:', argv[0], 'oldtree newtree [linkto]')
        return 2
    oldtree, newtree = argv[1], argv[2]
    if len(argv) > 3:
        # An explicit link target was supplied; it is allowed to already exist.
        link, link_may_fail = argv[3], 1
    else:
        link, link_may_fail = LINK, 0
    if not os.path.isdir(oldtree):
        print(oldtree + ': not a directory')
        return 1
    try:
        os.mkdir(newtree, 0o777)
    except OSError as msg:
        print(newtree + ': cannot mkdir:', msg)
        return 1
    # Plant the anchor symlink that every per-file link will point through.
    linkname = os.path.join(newtree, link)
    try:
        os.symlink(os.path.join(os.pardir, oldtree), linkname)
    except OSError as msg:
        if not link_may_fail:
            print(linkname + ': cannot symlink:', msg)
            return 1
        print(linkname + ': warning: cannot symlink:', msg)
    linknames(oldtree, newtree, link)
    return 0
def linknames(old, new, link):
    """Recursively mirror directory `old` into `new`, creating a symlink
    (relative, through `link`) for every non-directory entry and recursing
    into subdirectories.

    Errors listing or creating directories are reported as warnings and the
    affected subtree is skipped.
    """
    if debug: print('linknames', (old, new, link))
    try:
        names = os.listdir(old)
    except OSError as msg:
        print(old + ': warning: cannot listdir:', msg)
        return
    for name in names:
        if name not in (os.curdir, os.pardir):
            oldname = os.path.join(old, name)
            linkname = os.path.join(link, name)
            newname = os.path.join(new, name)
            if debug > 1: print(oldname, newname, linkname)
            if os.path.isdir(oldname) and \
               not os.path.islink(oldname):
                try:
                    os.mkdir(newname, 0o777)
                    ok = 1
                except OSError as msg:
                    # Bug fix: the original used a bare `except:` and then
                    # printed `msg`, which is unbound here in Python 3
                    # (an `except ... as msg` name is deleted when its block
                    # exits), so any mkdir failure raised NameError instead
                    # of printing the intended warning.
                    print(newname + ': warning: cannot mkdir:', msg)
                    ok = 0
                if ok:
                    # Recurse one level deeper; the link path gains a '..'
                    # so relative links keep resolving through the anchor.
                    linkname = os.path.join(os.pardir, linkname)
                    linknames(oldname, newname, linkname)
            else:
                os.symlink(linkname, newname)
# Propagate main()'s status code (0/1/2) to the shell.
if __name__ == '__main__':
    sys.exit(main())
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import unittest
from htmresearch.encoders import EncoderTypes
from htmresearch.frameworks.nlp.model_factory import createModel
from htmresearch.support.text_preprocess import TextPreprocess
class TestTextPreprocess(unittest.TestCase):
tokenIndexingFactor = 1000
documentLevel = {"CioDocumentFingerprint", "CioWordFingerprint"}
def setUp(self):
self.testDocuments = (
("Much of the world's data is streaming, time-series data, where "
"anomalies give significant information in critical situations; "
"examples abound in domains such as finance, IT, security, medical, "
"and energy. Yet detecting anomalies in streaming data is a difficult "
"task, requiring detectors to process data in real-time, not batches, "
"and learn while simultaneously making predictions... The goal for "
"[identifier deleted] is to provide a standard, open source framework "
"with which the research community can compare and evaluate different "
"algorithms for detecting anomalies in streaming data."),
("We propose a formal mathematical model for sparse representation in "
"neocortex based on a neuron model and associated operations... As such "
"the theory provides a unified and practical mathematical framework for "
"understanding the benefits and limits of sparse representation in "
"cortical networks."),
("Therefor the HTM sequence memory doesn't only advance our "
"understanding of how the brain may solve the sequence learning "
"problem, but it's applicable to a wide range of real-world problems "
"such as dicsrete and continuous sequence prediction, anomaly "
"detection, and sequence classification."),
("In this paper we extnd this idea by showing that a neuron with several "
"thousand synapses aranged along active dendrites can learn to "
"accurately and robustly recognize hundreds of unique patterns of "
"cellular activity, even in the presence of large amounts of noise and "
"pattern variation... Thus neurons need thousands of synapses to learn "
"the many temporal patterns in sensory stimuli and motor sequence."),
)
self.filteredProtoIds = ( [0, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17,
18, 20, 23, 25, 26, 28, 29, 30, 31, 33, 34, 37, 38, 39, 40, 42, 43, 45,
47, 49, 50, 51, 52, 53, 55, 57, 58, 61, 63, 64, 65, 66, 70, 71, 72, 73,
75, 76, 77, 79, 80, 82, 83, 1001, 1003, 1004, 1005, 1007, 1008, 1010,
1011, 1014, 1015, 1017, 1018, 1022, 1023, 1025, 1027, 1028, 1029, 1031,
1033, 1035, 1037, 1038, 1040, 1041, 2000, 2002, 2003, 2004, 2005, 2007,
2008, 2009, 2013, 2015, 2017, 2018, 2019, 2022, 2025, 2026, 2028, 2029,
2032, 2034, 2035, 2036, 2037, 2038, 2040, 2041, 3002, 3004, 3006, 3008,
3011, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3023, 3025,
3026, 3027, 3029, 3030, 3032, 3033, 3034, 3037, 3039, 3040, 3042, 3044,
3045, 3046, 3047, 3048, 3049, 3051, 3053, 3055, 3056, 3057, 3059, 3060,
3062, 3063] )
def _formatResults(self, modelName, distanceArray, idList):
""" Mimics the implementation in imbu.py: Format distances to reflect the
pctOverlapOfInput metric, return a list of results.
"""
formattedDistances = (1.0 - distanceArray) * 100
results = []
for protoId, dist in zip(idList, formattedDistances):
if modelName in self.documentLevel:
results.append({"sampleId": protoId,
"wordId": 0,
"text": self.testDocuments[protoId],
"score": dist.item()})
else:
# get the sampleId from the protoId
wordId = protoId % self.tokenIndexingFactor
sampleId = (protoId - wordId) / self.tokenIndexingFactor
results.append({"sampleId": sampleId,
"wordId": wordId,
"text": self.testDocuments[sampleId],
"score": dist.item()})
return results
def testMappingsWithImbuWordModel(self):
# Create a Keywords model
modelName = "Keywords"
kwargs = {"numLabels": 1,
"k": 42,
"classifierMetric": "pctOverlapOfInput",
"filterText": True,
"verbosity": 0}
model = createModel(modelName, **kwargs)
# Train the model for use in Imbu
for seqId, text in enumerate(self.testDocuments):
tokenList, mapping = model.tokenize(text)
lastTokenIndex = len(tokenList) - 1
for i, (token, tokenIndex) in enumerate(zip(tokenList, mapping)):
wordId = seqId * self.tokenIndexingFactor + tokenIndex
model.trainToken(token,
[0],
wordId,
reset=int(i == lastTokenIndex))
# Query the model, expecting two matches from one sample
query = ("The key to artificial intelligence has always been the "
"representation.")
_, sortedIds, sortedDistances = model.inferDocument(
query, returnDetailedResults=True, sortResults=True)
# Test for expected word-token mapping (in prototype IDs)
self.assertItemsEqual(self.filteredProtoIds, sortedIds,
"List of IDs returned from inference does not match the expected list of "
"prototype IDs.")
# Test for exact matching results
self.assertSequenceEqual([0.0, 0.0, 1.0], sortedDistances[:3].tolist(),
"Expected two exact-matching prototypes.")
# Test for multiple matches per sample
results = self._formatResults(modelName, sortedDistances, sortedIds)
self.assertEqual(results[0]["sampleId"], results[1]["sampleId"])
self.assertEqual(results[0]["text"], results[1]["text"])
self.assertNotEqual(results[0]["wordId"], results[1]["wordId"])
# Test the match maps back to the query
matchingWord = results[0]["text"].split(" ")[results[0]["wordId"]]
self.assertIn(matchingWord, query, "Matching word is indexed incorrectly.")
# Query the model again, expecting five matches from two samples
query = ("sequence")
_, sortedIds, sortedDistances = model.inferDocument(
query, returnDetailedResults=True, sortResults=True)
# Test for exact matching results
self.assertSequenceEqual(
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0], sortedDistances[:6].tolist(),
"Expected five exact-matching prototypes.")
# Test the exact matches map back to the query term
results = self._formatResults(modelName, sortedDistances, sortedIds)
for r in results[:5]:
self.assertIn(r["sampleId"], (2,3))
matchingWord = r["text"].split(" ")[r["wordId"]]
self.assertIn(query, matchingWord,
"Matching word is indexed incorrectly.")
def testMappingsWithImbuDocumentModel(self):
  """Document-level Cio model: inference should yield exactly one prototype
  ID per training document, and wordId should always be 0 because word
  positions carry no meaning at document granularity.
  """
  # Create the CioDocumentFingerprint model
  modelName = "CioDocumentFingerprint"
  kwargs = {"numLabels": 1,
            "classifierMetric": "pctOverlapOfInput",
            "filterText": True,
            "verbosity": 0,
            # Whole-document fingerprints (vs. per-word fingerprints)
            "fingerprintType": EncoderTypes.document,
            "cacheRoot": None}
  model = createModel("CioDocumentFingerprint", **kwargs)
  # Train the model for use in Imbu; one training sample per document
  for seqId, text in enumerate(self.testDocuments):
    model.trainDocument(text, [0], seqId)
  # Query the model, expecting two matches from one sample
  query = ("The key to artificial intelligence has always been the "
           "representation.")
  _, sortedIds, sortedDistances = model.inferDocument(
      query, returnDetailedResults=True, sortResults=True)
  self.assertEqual(len(self.testDocuments), len(sortedIds),
      "Document-level models should have one prototype ID per document.")
  results = self._formatResults(modelName, sortedDistances, sortedIds)
  for r in results:
    self.assertEqual(0, r["wordId"],
        "wordId is insignificant in document-level models, and should be 0.")
def testIndexMapping(self):
  """TextPreprocess.tokenizeAndFilter must return one mapping entry per
  token, where each entry is the index of the originating word in the raw
  (unfiltered) text.
  """
  originalWords = self.testDocuments[2].split(" ")
  tokenList, mapping = TextPreprocess().tokenizeAndFilter(
      self.testDocuments[2],
      ignoreCommon=50,                         # drop the 50 most common words
      removeStrings=["[identifier deleted]"],  # strip redaction markers
      correctSpell=True,
      expandAbbr=True,                         # e.g. "HTM" -> three tokens
      expandContr=True)                        # e.g. "doesn't" -> "does not"
  self.assertEqual(len(tokenList), len(mapping),
      "There should be one mapping entry for each token.")
  # Test filtering results
  self.assertEqual("therefore", tokenList[0], "Spelling not corrected.")
  self.assertEqual("discrete", tokenList[24], "Spelling not corrected.")
  self.assertSequenceEqual(["hierarchical", "temporal", "memory"],
      tokenList[1:4], "Abbreviation 'HTM' not expanded.")
  self.assertNotIn("but", tokenList, "Common word 'but' not removed.")
  self.assertNotIn("not", tokenList, "Common word 'not' not removed.")
  self.assertIn("does", tokenList, "Contraction not expanded to 'does not'.")
  # Test some token-to-word-mappings
  mappedWords = [originalWords[i] for i in mapping]
  # Filtering drops/expands words, so token and word lists differ in length
  self.assertNotEqual(len(originalWords), len(mappedWords))
  # The three expanded-abbreviation tokens should all map back to "HTM"
  for word in mappedWords[1:4]:
    self.assertEqual("HTM", word,
        "Tokens don't map to 'HTM' as expected.")
if __name__ == "__main__":
  # Run this module's tests when executed directly.
  unittest.main()
# -*- coding: utf-8 -*-
"""
***************************************************************************
gdalinfo.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterString,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBoolean,
QgsProcessingParameterFileDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
# Root directory of the processing plugin, used to locate bundled icons.
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]


class gdalinfo(GdalAlgorithm):
    """Processing algorithm wrapping the 'gdalinfo' command-line utility.

    Reports information about a raster layer and writes the tool's console
    output into an HTML file.
    """

    # Parameter/output identifiers used by the processing framework.
    INPUT = 'INPUT'
    MIN_MAX = 'MIN_MAX'
    STATS = 'STATS'
    NO_GCP = 'NOGCP'
    NO_METADATA = 'NO_METADATA'
    EXTRA = 'EXTRA'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declares the algorithm's inputs and its HTML file destination."""
        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
                                                            self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterBoolean(self.MIN_MAX,
                                                        self.tr('Force computation of the actual min/max values for each band'),
                                                        defaultValue=False))
        self.addParameter(QgsProcessingParameterBoolean(self.STATS,
                                                        self.tr('Read and display image statistics (force computation if necessary)'),
                                                        defaultValue=False))
        self.addParameter(QgsProcessingParameterBoolean(self.NO_GCP,
                                                        self.tr('Suppress GCP info'),
                                                        defaultValue=False))
        self.addParameter(QgsProcessingParameterBoolean(self.NO_METADATA,
                                                        self.tr('Suppress metadata info'),
                                                        defaultValue=False))
        # Free-form extra arguments are advanced-only to keep the default UI simple.
        extra_param = QgsProcessingParameterString(self.EXTRA,
                                                   self.tr('Additional command-line parameters'),
                                                   defaultValue=None,
                                                   optional=True)
        extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(extra_param)
        self.addParameter(QgsProcessingParameterFileDestination(self.OUTPUT,
                                                                self.tr('Layer information'),
                                                                self.tr('HTML files (*.html)')))

    def name(self):
        return 'gdalinfo'

    def displayName(self):
        return self.tr('Raster information')

    def group(self):
        return self.tr('Raster miscellaneous')

    def groupId(self):
        return 'rastermiscellaneous'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'raster-info.png'))

    def commandName(self):
        return 'gdalinfo'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Builds the gdalinfo invocation from the parameter values.

        :returns: two-element list of command name and escaped argument string
        :raises QgsProcessingException: when the input raster is invalid
        """
        arguments = []
        if self.parameterAsBoolean(parameters, self.MIN_MAX, context):
            arguments.append('-mm')
        if self.parameterAsBoolean(parameters, self.STATS, context):
            arguments.append('-stats')
        if self.parameterAsBoolean(parameters, self.NO_GCP, context):
            arguments.append('-nogcp')
        if self.parameterAsBoolean(parameters, self.NO_METADATA, context):
            arguments.append('-nomd')
        if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
            extra = self.parameterAsString(parameters, self.EXTRA, context)
            arguments.append(extra)
        raster = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        if raster is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
        arguments.append(raster.source())
        return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]

    def processAlgorithm(self, parameters, context, feedback):
        """Runs gdalinfo and wraps its console output in <pre> tags."""
        console_output = GdalUtils.runGdal(self.getConsoleCommands(parameters, context, feedback), feedback)
        output = self.parameterAsFileOutput(parameters, self.OUTPUT, context)
        with open(output, 'w') as f:
            f.write('<pre>')
            # The first element is skipped — presumably the echoed command
            # line rather than tool output; TODO confirm against runGdal.
            for s in console_output[1:]:
                f.write(str(s))
            f.write('</pre>')
        return {self.OUTPUT: output}
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the CSV Writer."""
import unittest

import mock

from google.cloud.security.common.data_access import csv_writer
from tests.unittest_utils import ForsetiTestCase
class CsvWriterTest(ForsetiTestCase):
    """Tests for the CSV Writer."""

    @mock.patch.object(csv_writer, 'os')
    @mock.patch.object(csv_writer.csv, 'DictWriter')
    @mock.patch.object(csv_writer.tempfile, 'NamedTemporaryFile')
    def test_csv_file_is_removed(self, mock_tempfile,
                                 mock_dict_writer, mock_os):
        """Test that the csv file is removed."""
        # Mock out the fieldname map so 'foo' is accepted as a resource type.
        csv_writer.CSV_FIELDNAME_MAP = mock.MagicMock()
        with csv_writer.write_csv('foo', mock.MagicMock()) as csv_file:
            csv_filename = csv_file.name
        # The temp file must be removed when the context manager exits.
        mock_os.remove.assert_called_once_with(csv_filename)

        # Test that the csv file is still removed on error.
        mock_dict_writer.return_value = IOError
        with csv_writer.write_csv('foo', mock.MagicMock()) as csv_file:
            csv_filename = csv_file.name
        self.assertEquals(2, mock_os.remove.call_count)
        # Second removal call must target the second temp file.
        called_args, called_kwargs = mock_os.remove.call_args_list[1]
        self.assertEquals(csv_filename, called_args[0])
if __name__ == '__main__':
    # NOTE(review): `unittest` is not imported in this module's visible
    # import block — confirm it is imported, else this raises NameError.
    unittest.main()
#!/usr/bin/env bash
# Copyright IBM Corp. 2016, 2025
# SPDX-License-Identifier: BUSL-1.1
# Emit an error message on stderr and abort the script with status 1.
#   $1 - message to print
fail() {
  printf '%s\n' "$1" >&2
  exit 1
}
verify_radar_scan_output_file() {
  # Given a file with a radar scan output, filter out tagged false positives and verify that no
  # other secrets remain.
  #   $1 - human-readable name of the scanned source (used in failure messages)
  #   $2 - path to the radar scan output file (JSON lines)
  # Findings of type "aws_access_key_id" and findings carrying the
  # "ignore_rule" tag are treated as known false positives and excluded.
  # Any remaining finding fails the check and is printed in full.
  if ! jq -eMcn '[inputs] | [.[] | select(.type != "aws_access_key_id") | select((.tags == null) or (.tags | contains(["ignore_rule"]) | not ))] | length == 0' < "$2"; then
    found=$(jq -eMn '[inputs] | [.[] | select(.type != "aws_access_key_id") | select((.tags == null) or (.tags | contains(["ignore_rule"]) | not ))]' < "$2")
    fail "failed to radar secrets output: vault radar detected secrets in $1!: $found"
  fi
}
set -e

# Validate required environment variables before doing any work.
[[ -z "$AUDIT_LOG_FILE_PATH" ]] && fail "AUDIT_LOG_FILE_PATH env variable has not been set"
[[ -z "$VAULT_RADAR_INSTALL_DIR" ]] && fail "VAULT_RADAR_INSTALL_DIR env variable has not been set"
# Radar implicitly requires the following for creating the index and running radar itself
[[ -z "$VAULT_RADAR_LICENSE" ]] && fail "VAULT_RADAR_LICENSE env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
[[ -z "$VAULT_UNIT_NAME" ]] && fail "VAULT_UNIT_NAME env variable has not been set"

radar_bin_path=${VAULT_RADAR_INSTALL_DIR}/vault-radar
test -x "$radar_bin_path" || fail "failed to scan vault audit log: unable to locate radar binary at $radar_bin_path"

# Make sure our audit log file exists.
if [ ! -f "$AUDIT_LOG_FILE_PATH" ]; then
  # (typo fix: message previously read "logifile")
  fail "failed to scan vault audit log: no audit log file found at $AUDIT_LOG_FILE_PATH"
fi

# Create a readable copy of the audit log.
if ! sudo cp "$AUDIT_LOG_FILE_PATH" audit.log; then
  fail "failed to scan vault audit log: could not copy audit log for scanning"
fi

if ! sudo chmod +r audit.log; then
  fail "failed to scan vault audit log: could not make audit log copy readable"
fi

# Create a radar index file of our KVv2 secret values.
if ! out=$($radar_bin_path index vault --offline --disable-ui --outfile index.jsonl 2>&1); then
  fail "failed to generate vault-radar index of vault cluster: $out"
fi

# Write our ignore rules to avoid known false positives.
mkdir -p "$HOME/.hashicorp/vault-radar"
cat >> "$HOME/.hashicorp/vault-radar/ignore.yaml" << EOF
- secret_values:
  - "hmac-sha256:*"
EOF

# Scan the audit log for known secrets via the audit log and other secrets using radars built-in
# secret types.
if ! out=$("$radar_bin_path" scan file --offline --disable-ui -p audit.log --index-file index.jsonl -f json -o audit-secrets.json 2>&1); then
  fail "failed to scan vault audit log: vault-radar scan file failed: $out"
fi
verify_radar_scan_output_file vault-audit-log audit-secrets.json

# Scan the vault journal for known secrets via the audit log and other secrets using radars built-in
# secret types.
if ! out=$(sudo journalctl --no-pager -u "$VAULT_UNIT_NAME" -a | "$radar_bin_path" scan file --offline --disable-ui --index-file index.jsonl -f json -o journal-secrets.json 2>&1); then
  fail "failed to scan vault journal: vault-radar scan file failed: $out"
fi
verify_radar_scan_output_file vault-journal journal-secrets.json
"""Classify responses from layer1 and strict type values."""
from datetime import datetime
from boto.compat import six
class BaseObject(object):
    """Common base for all layer2 response objects.

    Provides a readable repr of the form ``ClassName{ key: value, ... }``.
    Attribute values are always one of: a Response-derived object, a list,
    or a plain scalar (None/str/int/bool); each case renders recursively.
    """

    def __repr__(self):
        rendered = [attr + ': ' + self._repr_by_type(val)
                    for attr, val in six.iteritems(self.__dict__)]
        return self.__class__.__name__ + '{ ' + ', '.join(rendered) + ' }'

    def _repr_by_type(self, value):
        # Everything is either a 'Response', 'list', or 'None/str/int/bool'.
        if isinstance(value, Response):
            return value.__repr__()
        if isinstance(value, list):
            return self._repr_list(value)
        return str(value)

    def _repr_list(self, array):
        # An empty list renders as '[]'; otherwise '[ a, b ]'.
        if not array:
            return '[]'
        return '[ ' + ', '.join(self._repr_by_type(item) for item in array) + ' ]'
class Response(BaseObject):
    """Base for all layer2 responses; extracts the shared ResponseMetadata."""

    def __init__(self, response):
        super(Response, self).__init__()
        metadata = response['ResponseMetadata']
        self.response_metadata = ResponseMetadata(metadata) if metadata else None
class ResponseMetadata(BaseObject):
    """Per-request metadata: the service-assigned request ID."""

    def __init__(self, response):
        super(ResponseMetadata, self).__init__()
        self.request_id = str(response['RequestId'])


class ApplicationDescription(BaseObject):
    """An Elastic Beanstalk application with its templates and versions."""

    def __init__(self, response):
        super(ApplicationDescription, self).__init__()
        self.application_name = str(response['ApplicationName'])
        self.configuration_templates = []
        if response['ConfigurationTemplates']:
            for member in response['ConfigurationTemplates']:
                configuration_template = str(member)
                self.configuration_templates.append(configuration_template)
        # Timestamps arrive as epoch seconds; converted to local datetimes.
        self.date_created = datetime.fromtimestamp(response['DateCreated'])
        self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
        self.description = str(response['Description'])
        self.versions = []
        if response['Versions']:
            for member in response['Versions']:
                version = str(member)
                self.versions.append(version)


class ApplicationVersionDescription(BaseObject):
    """A single application version, including its S3 source bundle."""

    def __init__(self, response):
        super(ApplicationVersionDescription, self).__init__()
        self.application_name = str(response['ApplicationName'])
        self.date_created = datetime.fromtimestamp(response['DateCreated'])
        self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
        self.description = str(response['Description'])
        if response['SourceBundle']:
            self.source_bundle = S3Location(response['SourceBundle'])
        else:
            self.source_bundle = None
        self.version_label = str(response['VersionLabel'])


class AutoScalingGroup(BaseObject):
    """Name of an Auto Scaling group used by an environment."""

    def __init__(self, response):
        super(AutoScalingGroup, self).__init__()
        self.name = str(response['Name'])


class ConfigurationOptionDescription(BaseObject):
    """One configuration option: bounds, default and value restrictions."""

    def __init__(self, response):
        super(ConfigurationOptionDescription, self).__init__()
        self.change_severity = str(response['ChangeSeverity'])
        self.default_value = str(response['DefaultValue'])
        # Numeric bounds are None when the service omits them.
        self.max_length = int(response['MaxLength']) if response['MaxLength'] else None
        self.max_value = int(response['MaxValue']) if response['MaxValue'] else None
        self.min_value = int(response['MinValue']) if response['MinValue'] else None
        self.name = str(response['Name'])
        self.namespace = str(response['Namespace'])
        if response['Regex']:
            self.regex = OptionRestrictionRegex(response['Regex'])
        else:
            self.regex = None
        # Kept as the string form of the flag, not coerced to bool.
        self.user_defined = str(response['UserDefined'])
        self.value_options = []
        if response['ValueOptions']:
            for member in response['ValueOptions']:
                value_option = str(member)
                self.value_options.append(value_option)
        self.value_type = str(response['ValueType'])


class ConfigurationOptionSetting(BaseObject):
    """A (namespace, option name, value) configuration triple."""

    def __init__(self, response):
        super(ConfigurationOptionSetting, self).__init__()
        self.namespace = str(response['Namespace'])
        self.option_name = str(response['OptionName'])
        self.value = str(response['Value'])


class ConfigurationSettingsDescription(BaseObject):
    """A full set of configuration settings for a template or environment."""

    def __init__(self, response):
        super(ConfigurationSettingsDescription, self).__init__()
        self.application_name = str(response['ApplicationName'])
        self.date_created = datetime.fromtimestamp(response['DateCreated'])
        self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
        self.deployment_status = str(response['DeploymentStatus'])
        self.description = str(response['Description'])
        self.environment_name = str(response['EnvironmentName'])
        self.option_settings = []
        if response['OptionSettings']:
            for member in response['OptionSettings']:
                option_setting = ConfigurationOptionSetting(member)
                self.option_settings.append(option_setting)
        self.solution_stack_name = str(response['SolutionStackName'])
        self.template_name = str(response['TemplateName'])
class EnvironmentDescription(BaseObject):
    """Full description of an environment: endpoints, health and resources."""

    def __init__(self, response):
        super(EnvironmentDescription, self).__init__()
        self.application_name = str(response['ApplicationName'])
        self.cname = str(response['CNAME'])
        self.date_created = datetime.fromtimestamp(response['DateCreated'])
        self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
        self.description = str(response['Description'])
        self.endpoint_url = str(response['EndpointURL'])
        self.environment_id = str(response['EnvironmentId'])
        self.environment_name = str(response['EnvironmentName'])
        self.health = str(response['Health'])
        if response['Resources']:
            self.resources = EnvironmentResourcesDescription(response['Resources'])
        else:
            self.resources = None
        self.solution_stack_name = str(response['SolutionStackName'])
        self.status = str(response['Status'])
        self.template_name = str(response['TemplateName'])
        self.version_label = str(response['VersionLabel'])


class EnvironmentInfoDescription(BaseObject):
    """One retrieved environment-info record (e.g. a tail of a log)."""

    def __init__(self, response):
        super(EnvironmentInfoDescription, self).__init__()
        self.ec2_instance_id = str(response['Ec2InstanceId'])
        self.info_type = str(response['InfoType'])
        self.message = str(response['Message'])
        self.sample_timestamp = datetime.fromtimestamp(response['SampleTimestamp'])


class EnvironmentResourceDescription(BaseObject):
    """AWS resources (ASGs, instances, ELBs, triggers) behind an environment."""

    def __init__(self, response):
        super(EnvironmentResourceDescription, self).__init__()
        self.auto_scaling_groups = []
        if response['AutoScalingGroups']:
            for member in response['AutoScalingGroups']:
                auto_scaling_group = AutoScalingGroup(member)
                self.auto_scaling_groups.append(auto_scaling_group)
        self.environment_name = str(response['EnvironmentName'])
        self.instances = []
        if response['Instances']:
            for member in response['Instances']:
                instance = Instance(member)
                self.instances.append(instance)
        self.launch_configurations = []
        if response['LaunchConfigurations']:
            for member in response['LaunchConfigurations']:
                launch_configuration = LaunchConfiguration(member)
                self.launch_configurations.append(launch_configuration)
        self.load_balancers = []
        if response['LoadBalancers']:
            for member in response['LoadBalancers']:
                load_balancer = LoadBalancer(member)
                self.load_balancers.append(load_balancer)
        self.triggers = []
        if response['Triggers']:
            for member in response['Triggers']:
                trigger = Trigger(member)
                self.triggers.append(trigger)


class EnvironmentResourcesDescription(BaseObject):
    """Subset of environment resources returned inline: the load balancer."""

    def __init__(self, response):
        super(EnvironmentResourcesDescription, self).__init__()
        if response['LoadBalancer']:
            self.load_balancer = LoadBalancerDescription(response['LoadBalancer'])
        else:
            self.load_balancer = None


class EventDescription(BaseObject):
    """One event from the environment/application event stream."""

    def __init__(self, response):
        super(EventDescription, self).__init__()
        self.application_name = str(response['ApplicationName'])
        self.environment_name = str(response['EnvironmentName'])
        self.event_date = datetime.fromtimestamp(response['EventDate'])
        self.message = str(response['Message'])
        self.request_id = str(response['RequestId'])
        self.severity = str(response['Severity'])
        self.template_name = str(response['TemplateName'])
        self.version_label = str(response['VersionLabel'])


class Instance(BaseObject):
    """An EC2 instance ID belonging to an environment."""

    def __init__(self, response):
        super(Instance, self).__init__()
        self.id = str(response['Id'])


class LaunchConfiguration(BaseObject):
    """Name of an Auto Scaling launch configuration."""

    def __init__(self, response):
        super(LaunchConfiguration, self).__init__()
        self.name = str(response['Name'])


class Listener(BaseObject):
    """A load-balancer listener: port plus protocol."""

    def __init__(self, response):
        super(Listener, self).__init__()
        # Port is None when the service omits it.
        self.port = int(response['Port']) if response['Port'] else None
        self.protocol = str(response['Protocol'])


class LoadBalancer(BaseObject):
    """Name of a load balancer attached to an environment."""

    def __init__(self, response):
        super(LoadBalancer, self).__init__()
        self.name = str(response['Name'])


class LoadBalancerDescription(BaseObject):
    """A load balancer's domain, name and listener list."""

    def __init__(self, response):
        super(LoadBalancerDescription, self).__init__()
        self.domain = str(response['Domain'])
        self.listeners = []
        if response['Listeners']:
            for member in response['Listeners']:
                listener = Listener(member)
                self.listeners.append(listener)
        self.load_balancer_name = str(response['LoadBalancerName'])


class OptionRestrictionRegex(BaseObject):
    """Regex restriction on a configuration option's value."""

    def __init__(self, response):
        super(OptionRestrictionRegex, self).__init__()
        self.label = response['Label']
        self.pattern = response['Pattern']


class SolutionStackDescription(BaseObject):
    """A solution stack and the source-bundle file types it permits."""

    def __init__(self, response):
        super(SolutionStackDescription, self).__init__()
        self.permitted_file_types = []
        if response['PermittedFileTypes']:
            for member in response['PermittedFileTypes']:
                permitted_file_type = str(member)
                self.permitted_file_types.append(permitted_file_type)
        self.solution_stack_name = str(response['SolutionStackName'])


class S3Location(BaseObject):
    """An S3 bucket/key pair locating a source bundle."""

    def __init__(self, response):
        super(S3Location, self).__init__()
        self.s3_bucket = str(response['S3Bucket'])
        self.s3_key = str(response['S3Key'])


class Trigger(BaseObject):
    """Name of an Auto Scaling trigger used by an environment."""

    def __init__(self, response):
        super(Trigger, self).__init__()
        self.name = str(response['Name'])


class ValidationMessage(BaseObject):
    """One message produced when validating configuration settings."""

    def __init__(self, response):
        super(ValidationMessage, self).__init__()
        self.message = str(response['Message'])
        self.namespace = str(response['Namespace'])
        self.option_name = str(response['OptionName'])
        self.severity = str(response['Severity'])
# These are the response objects layer2 uses, one for each layer1 api call.
class CheckDNSAvailabilityResponse(Response):
    """Reports whether a CNAME prefix is available for a new environment."""

    def __init__(self, response):
        response = response['CheckDNSAvailabilityResponse']
        super(CheckDNSAvailabilityResponse, self).__init__(response)
        response = response['CheckDNSAvailabilityResult']
        self.fully_qualified_cname = str(response['FullyQualifiedCNAME'])
        self.available = bool(response['Available'])

# Our naming convention produces this class name but api names it with more
# capitals.
class CheckDnsAvailabilityResponse(CheckDNSAvailabilityResponse): pass


class CreateApplicationResponse(Response):
    """Wraps CreateApplication: the newly created application, if returned."""

    def __init__(self, response):
        response = response['CreateApplicationResponse']
        super(CreateApplicationResponse, self).__init__(response)
        response = response['CreateApplicationResult']
        if response['Application']:
            self.application = ApplicationDescription(response['Application'])
        else:
            self.application = None


class CreateApplicationVersionResponse(Response):
    """Wraps CreateApplicationVersion: the new version, if returned."""

    def __init__(self, response):
        response = response['CreateApplicationVersionResponse']
        super(CreateApplicationVersionResponse, self).__init__(response)
        response = response['CreateApplicationVersionResult']
        if response['ApplicationVersion']:
            self.application_version = ApplicationVersionDescription(response['ApplicationVersion'])
        else:
            self.application_version = None


class CreateConfigurationTemplateResponse(Response):
    """Wraps CreateConfigurationTemplate: the created template's settings."""

    def __init__(self, response):
        response = response['CreateConfigurationTemplateResponse']
        super(CreateConfigurationTemplateResponse, self).__init__(response)
        response = response['CreateConfigurationTemplateResult']
        self.application_name = str(response['ApplicationName'])
        self.date_created = datetime.fromtimestamp(response['DateCreated'])
        self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
        self.deployment_status = str(response['DeploymentStatus'])
        self.description = str(response['Description'])
        self.environment_name = str(response['EnvironmentName'])
        self.option_settings = []
        if response['OptionSettings']:
            for member in response['OptionSettings']:
                option_setting = ConfigurationOptionSetting(member)
                self.option_settings.append(option_setting)
        self.solution_stack_name = str(response['SolutionStackName'])
        self.template_name = str(response['TemplateName'])
class CreateEnvironmentResponse(Response):
    """Wraps CreateEnvironment: full description of the new environment."""

    def __init__(self, response):
        response = response['CreateEnvironmentResponse']
        super(CreateEnvironmentResponse, self).__init__(response)
        response = response['CreateEnvironmentResult']
        self.application_name = str(response['ApplicationName'])
        self.cname = str(response['CNAME'])
        self.date_created = datetime.fromtimestamp(response['DateCreated'])
        self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
        self.description = str(response['Description'])
        self.endpoint_url = str(response['EndpointURL'])
        self.environment_id = str(response['EnvironmentId'])
        self.environment_name = str(response['EnvironmentName'])
        self.health = str(response['Health'])
        if response['Resources']:
            self.resources = EnvironmentResourcesDescription(response['Resources'])
        else:
            self.resources = None
        self.solution_stack_name = str(response['SolutionStackName'])
        self.status = str(response['Status'])
        self.template_name = str(response['TemplateName'])
        self.version_label = str(response['VersionLabel'])


class CreateStorageLocationResponse(Response):
    """Wraps CreateStorageLocation: the S3 bucket used for storage."""

    def __init__(self, response):
        response = response['CreateStorageLocationResponse']
        super(CreateStorageLocationResponse, self).__init__(response)
        response = response['CreateStorageLocationResult']
        self.s3_bucket = str(response['S3Bucket'])


class DeleteApplicationResponse(Response):
    """Wraps DeleteApplication (metadata only)."""

    def __init__(self, response):
        response = response['DeleteApplicationResponse']
        super(DeleteApplicationResponse, self).__init__(response)


class DeleteApplicationVersionResponse(Response):
    """Wraps DeleteApplicationVersion (metadata only)."""

    def __init__(self, response):
        response = response['DeleteApplicationVersionResponse']
        super(DeleteApplicationVersionResponse, self).__init__(response)


class DeleteConfigurationTemplateResponse(Response):
    """Wraps DeleteConfigurationTemplate (metadata only)."""

    def __init__(self, response):
        response = response['DeleteConfigurationTemplateResponse']
        super(DeleteConfigurationTemplateResponse, self).__init__(response)


class DeleteEnvironmentConfigurationResponse(Response):
    """Wraps DeleteEnvironmentConfiguration (metadata only)."""

    def __init__(self, response):
        response = response['DeleteEnvironmentConfigurationResponse']
        super(DeleteEnvironmentConfigurationResponse, self).__init__(response)


class DescribeApplicationVersionsResponse(Response):
    """Wraps DescribeApplicationVersions: list of version descriptions."""

    def __init__(self, response):
        response = response['DescribeApplicationVersionsResponse']
        super(DescribeApplicationVersionsResponse, self).__init__(response)
        response = response['DescribeApplicationVersionsResult']
        self.application_versions = []
        if response['ApplicationVersions']:
            for member in response['ApplicationVersions']:
                application_version = ApplicationVersionDescription(member)
                self.application_versions.append(application_version)


class DescribeApplicationsResponse(Response):
    """Wraps DescribeApplications: list of application descriptions."""

    def __init__(self, response):
        response = response['DescribeApplicationsResponse']
        super(DescribeApplicationsResponse, self).__init__(response)
        response = response['DescribeApplicationsResult']
        self.applications = []
        if response['Applications']:
            for member in response['Applications']:
                application = ApplicationDescription(member)
                self.applications.append(application)


class DescribeConfigurationOptionsResponse(Response):
    """Wraps DescribeConfigurationOptions: option descriptions plus stack."""

    def __init__(self, response):
        response = response['DescribeConfigurationOptionsResponse']
        super(DescribeConfigurationOptionsResponse, self).__init__(response)
        response = response['DescribeConfigurationOptionsResult']
        self.options = []
        if response['Options']:
            for member in response['Options']:
                option = ConfigurationOptionDescription(member)
                self.options.append(option)
        self.solution_stack_name = str(response['SolutionStackName'])


class DescribeConfigurationSettingsResponse(Response):
    """Wraps DescribeConfigurationSettings: list of settings descriptions."""

    def __init__(self, response):
        response = response['DescribeConfigurationSettingsResponse']
        super(DescribeConfigurationSettingsResponse, self).__init__(response)
        response = response['DescribeConfigurationSettingsResult']
        self.configuration_settings = []
        if response['ConfigurationSettings']:
            for member in response['ConfigurationSettings']:
                configuration_setting = ConfigurationSettingsDescription(member)
                self.configuration_settings.append(configuration_setting)


class DescribeEnvironmentResourcesResponse(Response):
    """Wraps DescribeEnvironmentResources: the environment's AWS resources."""

    def __init__(self, response):
        response = response['DescribeEnvironmentResourcesResponse']
        super(DescribeEnvironmentResourcesResponse, self).__init__(response)
        response = response['DescribeEnvironmentResourcesResult']
        if response['EnvironmentResources']:
            self.environment_resources = EnvironmentResourceDescription(response['EnvironmentResources'])
        else:
            self.environment_resources = None


class DescribeEnvironmentsResponse(Response):
    """Wraps DescribeEnvironments: list of environment descriptions."""

    def __init__(self, response):
        response = response['DescribeEnvironmentsResponse']
        super(DescribeEnvironmentsResponse, self).__init__(response)
        response = response['DescribeEnvironmentsResult']
        self.environments = []
        if response['Environments']:
            for member in response['Environments']:
                environment = EnvironmentDescription(member)
                self.environments.append(environment)
class DescribeEventsResponse(Response):
    """Wraps DescribeEvents: a list of events plus a pagination token."""

    def __init__(self, response):
        response = response['DescribeEventsResponse']
        super(DescribeEventsResponse, self).__init__(response)
        response = response['DescribeEventsResult']
        self.events = []
        if response['Events']:
            for member in response['Events']:
                event = EventDescription(member)
                self.events.append(event)
        # Token for fetching the next page of events.
        self.next_token = str(response['NextToken'])
        # Backward-compatible alias: this attribute was previously exposed
        # only under the misspelled name "next_tokent".
        self.next_tokent = self.next_token
class ListAvailableSolutionStacksResponse(Response):
def __init__(self, response):
response = response['ListAvailableSolutionStacksResponse']
super(ListAvailableSolutionStacksResponse, self).__init__(response)
response = response['ListAvailableSolutionStacksResult']
self.solution_stack_details = []
if response['SolutionStackDetails']:
for member in response['SolutionStackDetails']:
solution_stack_detail = SolutionStackDescription(member)
self.solution_stack_details.append(solution_stack_detail)
self.solution_stacks = []
if response['SolutionStacks']:
for member in response['SolutionStacks']:
solution_stack = str(member)
self.solution_stacks.append(solution_stack)
class RebuildEnvironmentResponse(Response):
def __init__(self, response):
response = response['RebuildEnvironmentResponse']
super(RebuildEnvironmentResponse, self).__init__(response)
class RequestEnvironmentInfoResponse(Response):
def __init__(self, response):
response = response['RequestEnvironmentInfoResponse']
super(RequestEnvironmentInfoResponse, self).__init__(response)
class RestartAppServerResponse(Response):
def __init__(self, response):
response = response['RestartAppServerResponse']
super(RestartAppServerResponse, self).__init__(response)
class RetrieveEnvironmentInfoResponse(Response):
def __init__(self, response):
response = response['RetrieveEnvironmentInfoResponse']
super(RetrieveEnvironmentInfoResponse, self).__init__(response)
response = response['RetrieveEnvironmentInfoResult']
self.environment_info = []
if response['EnvironmentInfo']:
for member in response['EnvironmentInfo']:
environment_info = EnvironmentInfoDescription(member)
self.environment_info.append(environment_info)
class SwapEnvironmentCNAMEsResponse(Response):
def __init__(self, response):
response = response['SwapEnvironmentCNAMEsResponse']
super(SwapEnvironmentCNAMEsResponse, self).__init__(response)
class SwapEnvironmentCnamesResponse(SwapEnvironmentCNAMEsResponse): pass
class TerminateEnvironmentResponse(Response):
def __init__(self, response):
response = response['TerminateEnvironmentResponse']
super(TerminateEnvironmentResponse, self).__init__(response)
response = response['TerminateEnvironmentResult']
self.application_name = str(response['ApplicationName'])
self.cname = str(response['CNAME'])
self.date_created = datetime.fromtimestamp(response['DateCreated'])
self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
self.description = str(response['Description'])
self.endpoint_url = str(response['EndpointURL'])
self.environment_id = str(response['EnvironmentId'])
self.environment_name = str(response['EnvironmentName'])
self.health = str(response['Health'])
if response['Resources']:
self.resources = EnvironmentResourcesDescription(response['Resources'])
else:
self.resources = None
self.solution_stack_name = str(response['SolutionStackName'])
self.status = str(response['Status'])
self.template_name = str(response['TemplateName'])
self.version_label = str(response['VersionLabel'])
class UpdateApplicationResponse(Response):
    """Wraps the service reply to an UpdateApplication call.

    ``application`` is an ApplicationDescription, or None when the
    service returned no application element.
    """

    def __init__(self, response):
        body = response['UpdateApplicationResponse']
        super(UpdateApplicationResponse, self).__init__(body)
        result = body['UpdateApplicationResult']
        app = result['Application']
        self.application = ApplicationDescription(app) if app else None
class UpdateApplicationVersionResponse(Response):
    """Wraps the service reply to an UpdateApplicationVersion call.

    ``application_version`` is an ApplicationVersionDescription, or
    None when the service returned no version element.
    """

    def __init__(self, response):
        body = response['UpdateApplicationVersionResponse']
        super(UpdateApplicationVersionResponse, self).__init__(body)
        result = body['UpdateApplicationVersionResult']
        version = result['ApplicationVersion']
        if version:
            self.application_version = ApplicationVersionDescription(version)
        else:
            self.application_version = None
class UpdateConfigurationTemplateResponse(Response):
    """Wraps the service reply to an UpdateConfigurationTemplate call.

    Copies the updated template's description fields onto attributes;
    ``option_settings`` is a (possibly empty) list of
    ConfigurationOptionSetting objects.
    """

    def __init__(self, response):
        body = response['UpdateConfigurationTemplateResponse']
        super(UpdateConfigurationTemplateResponse, self).__init__(body)
        result = body['UpdateConfigurationTemplateResult']
        self.application_name = str(result['ApplicationName'])
        # Timestamps arrive as POSIX epoch numbers.
        self.date_created = datetime.fromtimestamp(result['DateCreated'])
        self.date_updated = datetime.fromtimestamp(result['DateUpdated'])
        self.deployment_status = str(result['DeploymentStatus'])
        self.description = str(result['Description'])
        self.environment_name = str(result['EnvironmentName'])
        raw_settings = result['OptionSettings']
        if raw_settings:
            self.option_settings = [
                ConfigurationOptionSetting(member) for member in raw_settings
            ]
        else:
            self.option_settings = []
        self.solution_stack_name = str(result['SolutionStackName'])
        self.template_name = str(result['TemplateName'])
class UpdateEnvironmentResponse(Response):
    """Wraps the service reply to an UpdateEnvironment call.

    Copies the updated environment's description fields onto
    attributes; ``resources`` is None when the service returned no
    resource description.
    """

    def __init__(self, response):
        body = response['UpdateEnvironmentResponse']
        super(UpdateEnvironmentResponse, self).__init__(body)
        result = body['UpdateEnvironmentResult']
        self.application_name = str(result['ApplicationName'])
        self.cname = str(result['CNAME'])
        # Timestamps arrive as POSIX epoch numbers.
        self.date_created = datetime.fromtimestamp(result['DateCreated'])
        self.date_updated = datetime.fromtimestamp(result['DateUpdated'])
        self.description = str(result['Description'])
        self.endpoint_url = str(result['EndpointURL'])
        self.environment_id = str(result['EnvironmentId'])
        self.environment_name = str(result['EnvironmentName'])
        self.health = str(result['Health'])
        resources = result['Resources']
        self.resources = (
            EnvironmentResourcesDescription(resources) if resources else None
        )
        self.solution_stack_name = str(result['SolutionStackName'])
        self.status = str(result['Status'])
        self.template_name = str(result['TemplateName'])
        self.version_label = str(result['VersionLabel'])
class ValidateConfigurationSettingsResponse(Response):
    """Wraps the service reply to a ValidateConfigurationSettings call.

    ``messages`` is a (possibly empty) list of ValidationMessage
    objects describing validation problems.
    """

    def __init__(self, response):
        body = response['ValidateConfigurationSettingsResponse']
        super(ValidateConfigurationSettingsResponse, self).__init__(body)
        result = body['ValidateConfigurationSettingsResult']
        raw_messages = result['Messages']
        if raw_messages:
            self.messages = [ValidationMessage(member) for member in raw_messages]
        else:
            self.messages = []
import datetime
import time
from collections import namedtuple
from operator import itemgetter
from typing import Any, Dict, List, Set, Tuple, Union
from unittest import mock
import ujson
from django.conf import settings
from django.db import IntegrityError
from django.db.models import Q
from django.http import HttpResponse
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from analytics.lib.counts import COUNT_STATS
from analytics.models import RealmCount
from zerver.decorator import JsonableError
from zerver.lib.actions import (
check_message,
check_send_stream_message,
create_mirror_user_if_needed,
do_add_alert_words,
do_change_is_api_super_user,
do_change_stream_invite_only,
do_change_stream_post_policy,
do_claim_attachments,
do_create_user,
do_deactivate_user,
do_send_messages,
do_set_realm_property,
do_update_message,
extract_private_recipients,
extract_stream_indicator,
gather_subscriptions_helper,
get_active_presence_idle_user_ids,
get_client,
get_last_message_id,
get_topic_messages,
get_user_info_for_message_updates,
internal_prep_private_message,
internal_prep_stream_message_by_name,
internal_send_huddle_message,
internal_send_private_message,
internal_send_stream_message,
internal_send_stream_message_by_name,
send_rate_limited_pm_notification_to_bot_owner,
)
from zerver.lib.addressee import Addressee
from zerver.lib.cache import cache_delete, get_stream_cache_key, to_dict_cache_key_id
from zerver.lib.create_user import create_user_profile
from zerver.lib.markdown import MentionData
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import (
MessageDict,
bulk_access_messages,
get_first_visible_message_id,
get_raw_unread_data,
get_recent_private_conversations,
has_message_access,
maybe_update_first_visible_message_id,
messages_for_ids,
render_markdown,
sew_messages_and_reactions,
update_first_visible_message_id,
)
from zerver.lib.soft_deactivation import (
add_missing_messages,
do_soft_activate_users,
do_soft_deactivate_users,
reactivate_user_if_soft_deactivated,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
get_subscription,
get_user_messages,
make_client,
message_stream_count,
most_recent_message,
most_recent_usermessage,
queries_captured,
reset_emails_in_zulip_realm,
)
from zerver.lib.timestamp import convert_to_UTC, datetime_to_timestamp
from zerver.lib.timezone import get_timezone
from zerver.lib.topic import DB_TOPIC_NAME, LEGACY_PREV_TOPIC, TOPIC_LINKS, TOPIC_NAME
from zerver.lib.types import DisplayRecipientT, UserDisplayRecipient
from zerver.lib.upload import create_attachment
from zerver.lib.url_encoding import near_message_url
from zerver.models import (
MAX_MESSAGE_LENGTH,
MAX_TOPIC_NAME_LENGTH,
Attachment,
Message,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmFilter,
Recipient,
ScheduledMessage,
Stream,
Subscription,
UserMessage,
UserPresence,
UserProfile,
bulk_get_huddle_user_ids,
flush_per_request_caches,
get_display_recipient,
get_huddle_recipient,
get_huddle_user_ids,
get_realm,
get_stream,
get_system_bot,
get_user,
)
from zerver.views.message_send import InvalidMirrorInput, create_mirrored_message_users
class MiscMessageTest(ZulipTestCase):
    """Miscellaneous small message helpers."""

    def test_get_last_message_id(self) -> None:
        # With messages present, the helper mirrors the newest row's id.
        newest_id = Message.objects.latest('id').id
        self.assertEqual(get_last_message_id(), newest_id)
        # With no messages at all, it falls back to -1.
        Message.objects.all().delete()
        self.assertEqual(get_last_message_id(), -1)
class TopicHistoryTest(ZulipTestCase):
    """Tests for the GET /users/me/<stream_id>/topics endpoint."""
    def test_topics_history_zephyr_mirror(self) -> None:
        user_profile = self.mit_user('sipbtest')
        stream_name = 'new_stream'
        # Send a message to this new stream from another user
        self.subscribe(self.mit_user("starnine"), stream_name)
        stream = get_stream(stream_name, user_profile.realm)
        self.send_stream_message(self.mit_user("starnine"), stream_name,
                                 topic_name="secret topic")
        # Now subscribe this MIT user to the new stream and verify
        # that the new topic is not accessible
        self.login_user(user_profile)
        self.subscribe(user_profile, stream_name)
        endpoint = f'/json/users/me/{stream.id}/topics'
        result = self.client_get(endpoint, dict(), subdomain="zephyr")
        self.assert_json_success(result)
        history = result.json()['topics']
        self.assertEqual(history, [])
    def test_topics_history(self) -> None:
        # verified: int(UserMessage.flags.read) == 1
        user_profile = self.example_user('iago')
        self.login_user(user_profile)
        stream_name = 'Verona'
        stream = get_stream(stream_name, user_profile.realm)
        recipient = stream.recipient
        # Helper that writes Message + UserMessage rows directly,
        # bypassing the normal send path, and returns the message id.
        def create_test_message(topic: str) -> int:
            # TODO: Clean this up to send messages the normal way.
            hamlet = self.example_user('hamlet')
            message = Message(
                sender=hamlet,
                recipient=recipient,
                content='whatever',
                date_sent=timezone_now(),
                sending_client=get_client('whatever'),
            )
            message.set_topic_name(topic)
            message.save()
            UserMessage.objects.create(
                user_profile=user_profile,
                message=message,
                flags=0,
            )
            return message.id
        # our most recent topics are topic0, topic1, topic2
        # Create old messages with strange spellings.
        create_test_message('topic2')
        create_test_message('toPIc1')
        create_test_message('toPIc0')
        create_test_message('topic2')
        create_test_message('topic2')
        create_test_message('Topic2')
        # Create new messages
        topic2_msg_id = create_test_message('topic2')
        create_test_message('topic1')
        create_test_message('topic1')
        topic1_msg_id = create_test_message('topic1')
        topic0_msg_id = create_test_message('topic0')
        endpoint = f'/json/users/me/{stream.id}/topics'
        result = self.client_get(endpoint, dict())
        self.assert_json_success(result)
        history = result.json()['topics']
        # We only look at the most recent three topics, because
        # the prior fixture data may be unreliable.
        history = history[:3]
        self.assertEqual([topic['name'] for topic in history], [
            'topic0',
            'topic1',
            'topic2',
        ])
        # max_id for each topic is the id of its newest message.
        self.assertEqual([topic['max_id'] for topic in history], [
            topic0_msg_id,
            topic1_msg_id,
            topic2_msg_id,
        ])
        # Now try as cordelia, who we imagine as a totally new user in
        # that she doesn't have UserMessage rows. We should see the
        # same results for a public stream.
        self.login('cordelia')
        result = self.client_get(endpoint, dict())
        self.assert_json_success(result)
        history = result.json()['topics']
        # We only look at the most recent three topics, because
        # the prior fixture data may be unreliable.
        history = history[:3]
        self.assertEqual([topic['name'] for topic in history], [
            'topic0',
            'topic1',
            'topic2',
        ])
        self.assertIn('topic0', [topic['name'] for topic in history])
        self.assertEqual([topic['max_id'] for topic in history], [
            topic0_msg_id,
            topic1_msg_id,
            topic2_msg_id,
        ])
        # Now make stream private, but subscribe cordelia
        do_change_stream_invite_only(stream, True)
        self.subscribe(self.example_user("cordelia"), stream.name)
        result = self.client_get(endpoint, dict())
        self.assert_json_success(result)
        history = result.json()['topics']
        history = history[:3]
        # Cordelia doesn't see these recent history items, because she
        # wasn't subscribed to the (now private) stream when they were sent.
        self.assertNotIn('topic0', [topic['name'] for topic in history])
        self.assertNotIn('topic1', [topic['name'] for topic in history])
        self.assertNotIn('topic2', [topic['name'] for topic in history])
    def test_bad_stream_id(self) -> None:
        self.login('iago')
        # non-sensible stream id
        endpoint = '/json/users/me/9999999999/topics'
        result = self.client_get(endpoint, dict())
        self.assert_json_error(result, 'Invalid stream id')
        # out of realm
        bad_stream = self.make_stream(
            'mit_stream',
            realm=get_realm('zephyr'),
        )
        endpoint = f'/json/users/me/{bad_stream.id}/topics'
        result = self.client_get(endpoint, dict())
        self.assert_json_error(result, 'Invalid stream id')
        # private stream to which I am not subscribed
        private_stream = self.make_stream(
            'private_stream',
            invite_only=True,
        )
        endpoint = f'/json/users/me/{private_stream.id}/topics'
        result = self.client_get(endpoint, dict())
        self.assert_json_error(result, 'Invalid stream id')
class TopicDeleteTest(ZulipTestCase):
    """Tests for the POST /streams/<stream_id>/delete_topic endpoint."""
    def test_topic_delete(self) -> None:
        initial_last_msg_id = self.get_last_message().id
        stream_name = 'new_stream'
        topic_name = 'new topic 2'
        # NON-ADMIN USER
        user_profile = self.example_user('hamlet')
        self.subscribe(user_profile, stream_name)
        # Send message
        stream = get_stream(stream_name, user_profile.realm)
        self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
        last_msg_id = self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
        # Deleting the topic
        self.login_user(user_profile)
        endpoint = '/json/streams/' + str(stream.id) + '/delete_topic'
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        # Non-admins may not delete topics; nothing was removed.
        self.assert_json_error(result, "Must be an organization administrator")
        self.assertEqual(self.get_last_message().id, last_msg_id)
        # Make stream private with limited history
        do_change_stream_invite_only(stream, invite_only=True,
                                     history_public_to_subscribers=False)
        # ADMIN USER subscribed now
        user_profile = self.example_user('iago')
        self.subscribe(user_profile, stream_name)
        self.login_user(user_profile)
        new_last_msg_id = self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
        # Now admin deletes all messages in topic -- which should only
        # delete new_last_msg_id, i.e. the one sent since they joined.
        self.assertEqual(self.get_last_message().id, new_last_msg_id)
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_last_message().id, last_msg_id)
        # Try to delete all messages in the topic again. There are no messages accessible
        # to the administrator, so this should do nothing.
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_last_message().id, last_msg_id)
        # Make the stream's history public to subscribers
        do_change_stream_invite_only(stream, invite_only=True,
                                     history_public_to_subscribers=True)
        # Delete the topic should now remove all messages
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_last_message().id, initial_last_msg_id)
        # Delete again, to test the edge case of deleting an empty topic.
        result = self.client_post(endpoint, {
            "topic_name": topic_name,
        })
        self.assert_json_success(result)
        self.assertEqual(self.get_last_message().id, initial_last_msg_id)
class TestCrossRealmPMs(ZulipTestCase):
    """Tests of which users and bots may exchange PMs across realms."""
    def make_realm(self, domain: str) -> Realm:
        # Helper: create a realm whose string_id doubles as its domain.
        realm = Realm.objects.create(string_id=domain, invite_required=False)
        RealmDomain.objects.create(realm=realm, domain=domain)
        return realm
    def create_user(self, email: str) -> UserProfile:
        # Helper: register a user in the realm implied by the email's domain.
        subdomain = email.split("@")[1]
        self.register(email, 'test', subdomain=subdomain)
        return get_user(email, get_realm(subdomain))
    @override_settings(CROSS_REALM_BOT_EMAILS=['notification-bot@zulip.com',
                                               'welcome-bot@zulip.com',
                                               'support@3.example.com'])
    def test_realm_scenarios(self) -> None:
        self.make_realm('1.example.com')
        r2 = self.make_realm('2.example.com')
        self.make_realm('3.example.com')
        def assert_message_received(to_user: UserProfile, from_user: UserProfile) -> None:
            # The newest message in to_user's history came from from_user.
            messages = get_user_messages(to_user)
            self.assertEqual(messages[-1].sender.id, from_user.id)
        def assert_invalid_user() -> Any:
            return self.assertRaisesRegex(
                JsonableError,
                'Invalid user ID ')
        user1_email = 'user1@1.example.com'
        user1a_email = 'user1a@1.example.com'
        user2_email = 'user2@2.example.com'
        user3_email = 'user3@3.example.com'
        notification_bot_email = 'notification-bot@zulip.com'
        support_email = 'support@3.example.com' # note: not zulip.com
        user1 = self.create_user(user1_email)
        user1a = self.create_user(user1a_email)
        user2 = self.create_user(user2_email)
        user3 = self.create_user(user3_email)
        notification_bot = get_system_bot(notification_bot_email)
        with self.settings(CROSS_REALM_BOT_EMAILS=['notification-bot@zulip.com', 'welcome-bot@zulip.com']):
            # HACK: We should probably be creating this "bot" user another
            # way, but since you can't register a user with a
            # cross-realm email, we need to hide this for now.
            support_bot = self.create_user(support_email)
        # Users can PM themselves
        self.send_personal_message(user1, user1)
        assert_message_received(user1, user1)
        # Users on the same realm can PM each other
        self.send_personal_message(user1, user1a)
        assert_message_received(user1a, user1)
        # Cross-realm bots in the zulip.com realm can PM any realm
        # (They need lower level APIs to do this.)
        internal_send_private_message(
            realm=r2,
            sender=get_system_bot(notification_bot_email),
            recipient_user=get_user(user2_email, r2),
            content='bla',
        )
        assert_message_received(user2, notification_bot)
        # All users can PM cross-realm bots in the zulip.com realm
        self.send_personal_message(user1, notification_bot)
        assert_message_received(notification_bot, user1)
        # Users can PM cross-realm bots on non-zulip realms.
        # (The support bot represents some theoretical bot that we may
        # create in the future that does not have zulip.com as its realm.)
        self.send_personal_message(user1, support_bot)
        assert_message_received(support_bot, user1)
        # Allow sending PMs to two different cross-realm bots simultaneously.
        # (We don't particularly need this feature, but since users can
        # already individually send PMs to cross-realm bots, we shouldn't
        # prevent them from sending multiple bots at once. We may revisit
        # this if it's a nuisance for huddles.)
        self.send_huddle_message(user1, [notification_bot, support_bot])
        assert_message_received(notification_bot, user1)
        assert_message_received(support_bot, user1)
        # Prevent old loophole where I could send PMs to other users as long
        # as I copied a cross-realm bot from the same realm.
        with assert_invalid_user():
            self.send_huddle_message(user1, [user3, support_bot])
        # Users on three different realms can't PM each other,
        # even if one of the users is a cross-realm bot.
        with assert_invalid_user():
            self.send_huddle_message(user1, [user2, notification_bot])
        with assert_invalid_user():
            self.send_huddle_message(notification_bot, [user1, user2])
        # Users on the different realms cannot PM each other
        with assert_invalid_user():
            self.send_personal_message(user1, user2)
        # Users on non-zulip realms can't PM "ordinary" Zulip users
        with assert_invalid_user():
            self.send_personal_message(user1, self.example_user('hamlet'))
        # Users on three different realms cannot PM each other
        with assert_invalid_user():
            self.send_huddle_message(user1, [user2, user3])
class TestAddressee(ZulipTestCase):
    """Tests for building Addressee objects from ids and legacy inputs."""

    def test_addressee_for_user_ids(self) -> None:
        # Addressee.for_user_ids resolves every id handed to it.
        realm = get_realm('zulip')
        user_ids = [self.example_user('cordelia').id,
                    self.example_user('hamlet').id,
                    self.example_user('othello').id]
        result = Addressee.for_user_ids(user_ids=user_ids, realm=realm)
        resolved_ids = [profile.id for profile in result.user_profiles()]
        self.assertEqual(set(resolved_ids), set(user_ids))

    def test_addressee_for_user_ids_nonexistent_id(self) -> None:
        # An id with no matching user raises JsonableError.
        with self.assertRaisesRegex(JsonableError, 'Invalid user ID '):
            Addressee.for_user_ids(user_ids=[779], realm=get_realm('zulip'))

    def test_addressee_legacy_build_for_user_ids(self) -> None:
        # legacy_build with message_type_name='private' resolves user ids.
        realm = get_realm('zulip')
        self.login('hamlet')
        user_ids = [self.example_user('cordelia').id,
                    self.example_user('othello').id]
        result = Addressee.legacy_build(
            sender=self.example_user('hamlet'), message_type_name='private',
            message_to=user_ids, topic_name='random_topic',
            realm=realm,
        )
        resolved_ids = [profile.id for profile in result.user_profiles()]
        self.assertEqual(set(resolved_ids), set(user_ids))

    def test_addressee_legacy_build_for_stream_id(self) -> None:
        # legacy_build with message_type_name='stream' resolves a stream id.
        realm = get_realm('zulip')
        self.login('iago')
        sender = self.example_user('iago')
        self.subscribe(sender, "Denmark")
        stream = get_stream('Denmark', realm)
        result = Addressee.legacy_build(
            sender=sender, message_type_name='stream',
            message_to=[stream.id], topic_name='random_topic',
            realm=realm,
        )
        self.assertEqual(stream.id, result.stream_id())
class InternalPrepTest(ZulipTestCase):
    """Tests of the internal_prep_* / internal_send_* message helpers."""
    def test_returns_for_internal_sends(self) -> None:
        # For our internal_send_* functions we return
        # if the prep stages fail. This is mostly defensive
        # code, since we are generally creating the messages
        # ourselves, but we want to make sure that the functions
        # won't actually explode if we give them bad content.
        bad_content = ''
        realm = get_realm('zulip')
        cordelia = self.example_user('cordelia')
        hamlet = self.example_user('hamlet')
        othello = self.example_user('othello')
        stream = get_stream('Verona', realm)
        # Each helper below should swallow the failure and merely log it.
        with mock.patch('logging.exception') as m:
            internal_send_private_message(
                realm=realm,
                sender=cordelia,
                recipient_user=hamlet,
                content=bad_content,
            )
        m.assert_called_once_with(
            "Error queueing internal message by %s: %s",
            "cordelia@zulip.com",
            "Message must not be empty",
        )
        with mock.patch('logging.exception') as m:
            internal_send_huddle_message(
                realm=realm,
                sender=cordelia,
                emails=[hamlet.email, othello.email],
                content=bad_content,
            )
        m.assert_called_once_with(
            "Error queueing internal message by %s: %s",
            "cordelia@zulip.com",
            "Message must not be empty",
        )
        with mock.patch('logging.exception') as m:
            internal_send_stream_message(
                realm=realm,
                sender=cordelia,
                topic='whatever',
                content=bad_content,
                stream=stream,
            )
        m.assert_called_once_with(
            "Error queueing internal message by %s: %s",
            "cordelia@zulip.com",
            "Message must not be empty",
        )
        with mock.patch('logging.exception') as m:
            internal_send_stream_message_by_name(
                realm=realm,
                sender=cordelia,
                stream_name=stream.name,
                topic='whatever',
                content=bad_content,
            )
        m.assert_called_once_with(
            "Error queueing internal message by %s: %s",
            "cordelia@zulip.com",
            "Message must not be empty",
        )
    def test_error_handling(self) -> None:
        realm = get_realm('zulip')
        sender = self.example_user('cordelia')
        recipient_user = self.example_user('hamlet')
        # Over-long content gets truncated with a notice, not rejected.
        content = 'x' * 15000
        result = internal_prep_private_message(
            realm=realm,
            sender=sender,
            recipient_user=recipient_user,
            content=content)
        message = result['message']
        self.assertIn('message was too long', message.content)
        with self.assertRaises(RuntimeError):
            internal_prep_private_message(
                realm=None, # should cause error
                sender=sender,
                recipient_user=recipient_user,
                content=content)
        # Simulate sending a message to somebody not in the
        # realm of the sender.
        recipient_user = self.mit_user('starnine')
        with mock.patch('logging.exception') as logging_mock:
            result = internal_prep_private_message(
                realm=realm,
                sender=sender,
                recipient_user=recipient_user,
                content=content)
        logging_mock.assert_called_once_with(
            "Error queueing internal message by %s: %s",
            "cordelia@zulip.com",
            "You can't send private messages outside of your organization.",
        )
    def test_ensure_stream_gets_called(self) -> None:
        realm = get_realm('zulip')
        sender = self.example_user('cordelia')
        stream_name = 'test_stream'
        topic = 'whatever'
        content = 'hello'
        internal_prep_stream_message_by_name(
            realm=realm,
            sender=sender,
            stream_name=stream_name,
            topic=topic,
            content=content)
        # This would throw an error if the stream
        # wasn't automatically created.
        Stream.objects.get(name=stream_name, realm_id=realm.id)
class ExtractTest(ZulipTestCase):
    """Tests for parsing the stream/recipient parameters of send requests."""
    def test_extract_stream_indicator(self) -> None:
        # Plain names pass through unchanged.
        self.assertEqual(
            extract_stream_indicator('development'),
            "development",
        )
        self.assertEqual(
            extract_stream_indicator('commas,are,fine'),
            "commas,are,fine",
        )
        # A JSON-encoded string is decoded to its contents.
        self.assertEqual(
            extract_stream_indicator('"Who hasn\'t done this?"'),
            "Who hasn't done this?",
        )
        # A bare numeral is treated as a stream id.
        self.assertEqual(
            extract_stream_indicator("999"),
            999,
        )
        # For legacy reasons it's plausible that users will
        # put a single stream into an array and then encode it
        # as JSON. We can probably eliminate this support
        # by mid 2020 at the latest.
        self.assertEqual(
            extract_stream_indicator('["social"]'),
            'social',
        )
        self.assertEqual(
            extract_stream_indicator("[123]"),
            123,
        )
        with self.assertRaisesRegex(JsonableError, 'Invalid data type for stream'):
            extract_stream_indicator('{}')
        with self.assertRaisesRegex(JsonableError, 'Invalid data type for stream'):
            extract_stream_indicator('[{}]')
        with self.assertRaisesRegex(JsonableError, 'Expected exactly one stream'):
            extract_stream_indicator('[1,2,"general"]')
    def test_extract_private_recipients_emails(self) -> None:
        # JSON list w/dups, empties, and trailing whitespace
        s = ujson.dumps([' alice@zulip.com ', ' bob@zulip.com ', ' ', 'bob@zulip.com'])
        # sorted() gets confused by extract_private_recipients' return type
        # For testing, ignorance here is better than manual casting
        result = sorted(extract_private_recipients(s))
        self.assertEqual(result, ['alice@zulip.com', 'bob@zulip.com'])
        # simple string with one name
        s = 'alice@zulip.com '
        self.assertEqual(extract_private_recipients(s), ['alice@zulip.com'])
        # JSON-encoded string
        s = '"alice@zulip.com"'
        self.assertEqual(extract_private_recipients(s), ['alice@zulip.com'])
        # bare comma-delimited string
        s = 'bob@zulip.com, alice@zulip.com'
        result = sorted(extract_private_recipients(s))
        self.assertEqual(result, ['alice@zulip.com', 'bob@zulip.com'])
        # JSON-encoded, comma-delimited string
        s = '"bob@zulip.com,alice@zulip.com"'
        result = sorted(extract_private_recipients(s))
        self.assertEqual(result, ['alice@zulip.com', 'bob@zulip.com'])
        # Invalid data
        s = ujson.dumps(dict(color='red'))
        with self.assertRaisesRegex(JsonableError, 'Invalid data type for recipients'):
            extract_private_recipients(s)
        s = ujson.dumps([{}])
        with self.assertRaisesRegex(JsonableError, 'Invalid data type for recipients'):
            extract_private_recipients(s)
        # Empty list
        self.assertEqual(extract_private_recipients('[]'), [])
        # Heterogeneous lists are not supported
        mixed = ujson.dumps(['eeshan@example.com', 3, 4])
        with self.assertRaisesRegex(JsonableError, 'Recipient lists may contain emails or user IDs, but not both.'):
            extract_private_recipients(mixed)
    def test_extract_recipient_ids(self) -> None:
        # JSON list w/dups
        s = ujson.dumps([3, 3, 12])
        result = sorted(extract_private_recipients(s))
        self.assertEqual(result, [3, 12])
        # Invalid data
        ids = ujson.dumps(dict(recipient=12))
        with self.assertRaisesRegex(JsonableError, 'Invalid data type for recipients'):
            extract_private_recipients(ids)
        # Heterogeneous lists are not supported
        mixed = ujson.dumps([3, 4, 'eeshan@example.com'])
        with self.assertRaisesRegex(JsonableError, 'Recipient lists may contain emails or user IDs, but not both.'):
            extract_private_recipients(mixed)
class PersonalMessagesTest(ZulipTestCase):
    """Tests of sending and receiving one-on-one private messages."""
    def test_near_pm_message_url(self) -> None:
        realm = get_realm('zulip')
        message = dict(
            type='personal',
            id=555,
            display_recipient=[
                dict(id=77),
                dict(id=80),
            ],
        )
        url = near_message_url(
            realm=realm,
            message=message,
        )
        self.assertEqual(url, 'http://zulip.testserver/#narrow/pm-with/77,80-pm/near/555')
    def test_is_private_flag_not_leaked(self) -> None:
        """
        Make sure `is_private` flag is not leaked to the API.
        """
        self.login('hamlet')
        self.send_personal_message(self.example_user("hamlet"),
                                   self.example_user("cordelia"),
                                   "test")
        for msg in self.get_messages():
            self.assertNotIn('is_private', msg['flags'])
    def test_auto_subbed_to_personals(self) -> None:
        """
        Newly created users are auto-subbed to the ability to receive
        personals.
        """
        test_email = self.nonreg_email('test')
        self.register(test_email, "test")
        user_profile = self.nonreg_user('test')
        old_messages_count = message_stream_count(user_profile)
        self.send_personal_message(user_profile, user_profile)
        new_messages_count = message_stream_count(user_profile)
        self.assertEqual(new_messages_count, old_messages_count + 1)
        recipient = Recipient.objects.get(type_id=user_profile.id,
                                          type=Recipient.PERSONAL)
        message = most_recent_message(user_profile)
        self.assertEqual(message.recipient, recipient)
        # Also exercise the str() representations of the new rows.
        with mock.patch('zerver.models.get_display_recipient', return_value='recip'):
            self.assertEqual(
                str(message),
                '<Message: recip / / '
                '<UserProfile: {} {}>>'.format(user_profile.email, user_profile.realm))
            user_message = most_recent_usermessage(user_profile)
            self.assertEqual(
                str(user_message),
                f'<UserMessage: recip / {user_profile.email} ([])>',
            )
    def test_personal_to_self(self) -> None:
        """
        If you send a personal to yourself, only you see it.
        """
        old_user_profiles = list(UserProfile.objects.all())
        test_email = self.nonreg_email('test1')
        self.register(test_email, "test1")
        old_messages = []
        for user_profile in old_user_profiles:
            old_messages.append(message_stream_count(user_profile))
        user_profile = self.nonreg_user('test1')
        self.send_personal_message(user_profile, user_profile)
        # No pre-existing user's message count changed.
        new_messages = []
        for user_profile in old_user_profiles:
            new_messages.append(message_stream_count(user_profile))
        self.assertEqual(old_messages, new_messages)
        user_profile = self.nonreg_user('test1')
        recipient = Recipient.objects.get(type_id=user_profile.id, type=Recipient.PERSONAL)
        self.assertEqual(most_recent_message(user_profile).recipient, recipient)
    def assert_personal(self, sender: UserProfile, receiver: UserProfile, content: str="testcontent") -> None:
        """
        Send a private message from `sender` to `receiver` and check
        that only those two parties actually received the message.
        """
        sender_messages = message_stream_count(sender)
        receiver_messages = message_stream_count(receiver)
        other_user_profiles = UserProfile.objects.filter(~Q(id=sender.id) &
                                                         ~Q(id=receiver.id))
        old_other_messages = []
        for user_profile in other_user_profiles:
            old_other_messages.append(message_stream_count(user_profile))
        self.send_personal_message(sender, receiver, content)
        # Users outside the conversation don't get the message.
        new_other_messages = []
        for user_profile in other_user_profiles:
            new_other_messages.append(message_stream_count(user_profile))
        self.assertEqual(old_other_messages, new_other_messages)
        # The personal message is in the streams of both the sender and receiver.
        self.assertEqual(message_stream_count(sender),
                         sender_messages + 1)
        self.assertEqual(message_stream_count(receiver),
                         receiver_messages + 1)
        recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
        self.assertEqual(most_recent_message(sender).recipient, recipient)
        self.assertEqual(most_recent_message(receiver).recipient, recipient)
    def test_personal(self) -> None:
        """
        If you send a personal, only you and the recipient see it.
        """
        self.login('hamlet')
        self.assert_personal(
            sender=self.example_user("hamlet"),
            receiver=self.example_user("othello"),
        )
    def test_private_message_policy(self) -> None:
        """
        Tests that PRIVATE_MESSAGE_POLICY_DISABLED works correctly.
        """
        user_profile = self.example_user("hamlet")
        self.login_user(user_profile)
        do_set_realm_property(user_profile.realm, "private_message_policy",
                              Realm.PRIVATE_MESSAGE_POLICY_DISABLED)
        with self.assertRaises(JsonableError):
            self.send_personal_message(user_profile, self.example_user("cordelia"))
        # PMs with bots remain allowed even when the policy disables PMs.
        bot_profile = self.create_test_bot("testbot", user_profile)
        self.send_personal_message(user_profile, get_system_bot(settings.NOTIFICATION_BOT))
        self.send_personal_message(user_profile, bot_profile)
        self.send_personal_message(bot_profile, user_profile)
    def test_non_ascii_personal(self) -> None:
        """
        Sending a PM containing non-ASCII characters succeeds.
        """
        self.login('hamlet')
        self.assert_personal(
            sender=self.example_user("hamlet"),
            receiver=self.example_user("othello"),
            content="hümbüǵ",
        )
class StreamMessagesTest(ZulipTestCase):
    def assert_stream_message(self, stream_name: str, topic_name: str="test topic",
                              content: str="test content") -> None:
        """
        Check that messages sent to a stream reach all subscribers to that stream.
        """
        realm = get_realm('zulip')
        subscribers = self.users_subscribed_to_stream(stream_name, realm)
        # Outgoing webhook bots don't store UserMessage rows; they will be processed later.
        subscribers = [subscriber for subscriber in subscribers
                       if subscriber.bot_type != UserProfile.OUTGOING_WEBHOOK_BOT]
        # Record per-user message counts before sending.
        old_subscriber_messages = []
        for subscriber in subscribers:
            old_subscriber_messages.append(message_stream_count(subscriber))
        non_subscribers = [user_profile for user_profile in UserProfile.objects.all()
                           if user_profile not in subscribers]
        old_non_subscriber_messages = []
        for non_subscriber in non_subscribers:
            old_non_subscriber_messages.append(message_stream_count(non_subscriber))
        # Send the message as the first human (non-bot) subscriber.
        non_bot_subscribers = [user_profile for user_profile in subscribers
                               if not user_profile.is_bot]
        a_subscriber = non_bot_subscribers[0]
        self.login_user(a_subscriber)
        self.send_stream_message(a_subscriber, stream_name,
                                 content=content, topic_name=topic_name)
        # Did all of the subscribers get the message?
        new_subscriber_messages = []
        for subscriber in subscribers:
            new_subscriber_messages.append(message_stream_count(subscriber))
        # Did non-subscribers not get the message?
        new_non_subscriber_messages = []
        for non_subscriber in non_subscribers:
            new_non_subscriber_messages.append(message_stream_count(non_subscriber))
        self.assertEqual(old_non_subscriber_messages, new_non_subscriber_messages)
        self.assertEqual(new_subscriber_messages, [elt + 1 for elt in old_subscriber_messages])
def test_performance(self) -> None:
'''
This test is part of the automated test suite, but
it is more intended as an aid to measuring the
performance of do_send_messages() with consistent
data setup across different commits. You can modify
the values below and run just this test, and then
comment out the print statement toward the bottom.
'''
num_messages = 2
num_extra_users = 10
sender = self.example_user('cordelia')
realm = sender.realm
message_content = 'whatever'
stream = get_stream('Denmark', realm)
topic_name = 'lunch'
recipient = stream.recipient
sending_client = make_client(name="test suite")
for i in range(num_extra_users):
# Make every other user be idle.
long_term_idle = i % 2 > 0
email = f'foo{i}@example.com'
user = UserProfile.objects.create(
realm=realm,
email=email,
pointer=0,
long_term_idle=long_term_idle,
)
Subscription.objects.create(
user_profile=user,
recipient=recipient,
)
def send_test_message() -> None:
message = Message(
sender=sender,
recipient=recipient,
content=message_content,
date_sent=timezone_now(),
sending_client=sending_client,
)
message.set_topic_name(topic_name)
do_send_messages([dict(message=message)])
before_um_count = UserMessage.objects.count()
t = time.time()
for i in range(num_messages):
send_test_message()
delay = time.time() - t
assert(delay) # quiet down lint
# print(delay)
after_um_count = UserMessage.objects.count()
ums_created = after_um_count - before_um_count
num_active_users = num_extra_users / 2
self.assertTrue(ums_created > (num_active_users * num_messages))
def test_not_too_many_queries(self) -> None:
recipient_list = [self.example_user("hamlet"), self.example_user("iago"),
self.example_user("cordelia"), self.example_user("othello")]
for user_profile in recipient_list:
self.subscribe(user_profile, "Denmark")
sender = self.example_user('hamlet')
sending_client = make_client(name="test suite")
stream_name = 'Denmark'
topic_name = 'foo'
content = 'whatever'
realm = sender.realm
# To get accurate count of the queries, we should make sure that
# caches don't come into play. If we count queries while caches are
# filled, we will get a lower count. Caches are not supposed to be
# persistent, so our test can also fail if cache is invalidated
# during the course of the unit test.
flush_per_request_caches()
cache_delete(get_stream_cache_key(stream_name, realm.id))
with queries_captured() as queries:
check_send_stream_message(
sender=sender,
client=sending_client,
stream_name=stream_name,
topic=topic_name,
body=content,
)
self.assert_length(queries, 14)
def test_stream_message_dict(self) -> None:
user_profile = self.example_user('iago')
self.subscribe(user_profile, "Denmark")
self.send_stream_message(self.example_user("hamlet"), "Denmark",
content="whatever", topic_name="my topic")
message = most_recent_message(user_profile)
row = MessageDict.get_raw_db_rows([message.id])[0]
dct = MessageDict.build_dict_from_raw_db_row(row)
MessageDict.post_process_dicts([dct], apply_markdown=True, client_gravatar=False)
self.assertEqual(dct['display_recipient'], 'Denmark')
stream = get_stream('Denmark', user_profile.realm)
self.assertEqual(dct['stream_id'], stream.id)
def test_stream_message_unicode(self) -> None:
receiving_user_profile = self.example_user('iago')
sender = self.example_user('hamlet')
self.subscribe(receiving_user_profile, "Denmark")
self.send_stream_message(sender, "Denmark",
content="whatever", topic_name="my topic")
message = most_recent_message(receiving_user_profile)
self.assertEqual(str(message),
'<Message: Denmark / my topic / '
'<UserProfile: {} {}>>'.format(sender.email, sender.realm))
def test_message_mentions(self) -> None:
user_profile = self.example_user('iago')
self.subscribe(user_profile, "Denmark")
self.send_stream_message(self.example_user("hamlet"), "Denmark",
content="test @**Iago** rules")
message = most_recent_message(user_profile)
assert(UserMessage.objects.get(user_profile=user_profile, message=message).flags.mentioned.is_set)
def test_is_private_flag(self) -> None:
user_profile = self.example_user('iago')
self.subscribe(user_profile, "Denmark")
self.send_stream_message(self.example_user("hamlet"), "Denmark",
content="test")
message = most_recent_message(user_profile)
self.assertFalse(UserMessage.objects.get(user_profile=user_profile, message=message).flags.is_private.is_set)
self.send_personal_message(self.example_user("hamlet"), user_profile,
content="test")
message = most_recent_message(user_profile)
self.assertTrue(UserMessage.objects.get(user_profile=user_profile, message=message).flags.is_private.is_set)
def _send_stream_message(self, user: UserProfile, stream_name: str, content: str) -> Set[int]:
with mock.patch('zerver.lib.actions.send_event') as m:
self.send_stream_message(
user,
stream_name,
content=content,
)
self.assertEqual(m.call_count, 1)
users = m.call_args[0][2]
user_ids = {u['id'] for u in users}
return user_ids
def test_unsub_mention(self) -> None:
cordelia = self.example_user('cordelia')
hamlet = self.example_user('hamlet')
stream_name = 'Test Stream'
self.subscribe(hamlet, stream_name)
UserMessage.objects.filter(
user_profile=cordelia,
).delete()
def mention_cordelia() -> Set[int]:
content = 'test @**Cordelia Lear** rules'
user_ids = self._send_stream_message(
user=hamlet,
stream_name=stream_name,
content=content,
)
return user_ids
def num_cordelia_messages() -> int:
return UserMessage.objects.filter(
user_profile=cordelia,
).count()
user_ids = mention_cordelia()
self.assertEqual(0, num_cordelia_messages())
self.assertNotIn(cordelia.id, user_ids)
# Make sure test isn't too brittle-subscribing
# Cordelia and mentioning her should give her a
# message.
self.subscribe(cordelia, stream_name)
user_ids = mention_cordelia()
self.assertIn(cordelia.id, user_ids)
self.assertEqual(1, num_cordelia_messages())
def test_message_bot_mentions(self) -> None:
cordelia = self.example_user('cordelia')
hamlet = self.example_user('hamlet')
realm = hamlet.realm
stream_name = 'Test Stream'
self.subscribe(hamlet, stream_name)
normal_bot = do_create_user(
email='normal-bot@zulip.com',
password='',
realm=realm,
full_name='Normal Bot',
short_name='',
bot_type=UserProfile.DEFAULT_BOT,
bot_owner=cordelia,
)
content = 'test @**Normal Bot** rules'
user_ids = self._send_stream_message(
user=hamlet,
stream_name=stream_name,
content=content,
)
self.assertIn(normal_bot.id, user_ids)
user_message = most_recent_usermessage(normal_bot)
self.assertEqual(user_message.message.content, content)
self.assertTrue(user_message.flags.mentioned)
def test_stream_message_mirroring(self) -> None:
user = self.mit_user('starnine')
self.subscribe(user, 'Verona')
do_change_is_api_super_user(user, True)
result = self.api_post(user, "/api/v1/messages", {"type": "stream",
"to": "Verona",
"sender": self.mit_email("sipbtest"),
"client": "zephyr_mirror",
"topic": "announcement",
"content": "Everyone knows Iago rules",
"forged": "true"},
subdomain="zephyr")
self.assert_json_success(result)
do_change_is_api_super_user(user, False)
result = self.api_post(user, "/api/v1/messages", {"type": "stream",
"to": "Verona",
"sender": self.mit_email("sipbtest"),
"client": "zephyr_mirror",
"topic": "announcement",
"content": "Everyone knows Iago rules",
"forged": "true"},
subdomain="zephyr")
self.assert_json_error(result, "User not authorized for this query")
def test_message_to_stream(self) -> None:
"""
If you send a message to a stream, everyone subscribed to the stream
receives the messages.
"""
self.assert_stream_message("Scotland")
def test_non_ascii_stream_message(self) -> None:
"""
Sending a stream message containing non-ASCII characters in the stream
name, topic, or message body succeeds.
"""
self.login('hamlet')
# Subscribe everyone to a stream with non-ASCII characters.
non_ascii_stream_name = "hümbüǵ"
realm = get_realm("zulip")
stream = self.make_stream(non_ascii_stream_name)
for user_profile in UserProfile.objects.filter(is_active=True, is_bot=False,
realm=realm)[0:3]:
self.subscribe(user_profile, stream.name)
self.assert_stream_message(non_ascii_stream_name, topic_name="hümbüǵ",
content="hümbüǵ")
def test_get_raw_unread_data_for_huddle_messages(self) -> None:
users = [
self.example_user('hamlet'),
self.example_user('cordelia'),
self.example_user('iago'),
self.example_user('prospero'),
self.example_user('othello'),
]
message1_id = self.send_huddle_message(users[0], users, "test content 1")
message2_id = self.send_huddle_message(users[0], users, "test content 2")
msg_data = get_raw_unread_data(users[1])
# both the messages are present in msg_data
self.assertIn(message1_id, msg_data["huddle_dict"].keys())
self.assertIn(message2_id, msg_data["huddle_dict"].keys())
# only these two messages are present in msg_data
self.assertEqual(len(msg_data["huddle_dict"].keys()), 2)
recent_conversations = get_recent_private_conversations(users[1])
self.assertEqual(len(recent_conversations), 1)
recent_conversation = list(recent_conversations.values())[0]
self.assertEqual(set(recent_conversation['user_ids']), {user.id for user in users if
user != users[1]})
self.assertEqual(recent_conversation['max_message_id'], message2_id)
class MessageDictTest(ZulipTestCase):
    # Tests for MessageDict, the machinery that turns Message rows into the
    # dictionaries sent to clients (both via events and via message fetch).
    def test_both_codepaths(self) -> None:
        '''
        We have two different codepaths that
        extract a particular shape of dictionary
        for messages to send to clients:
            events:
                These are the events we send to MANY
                clients when a message is originally
                sent.
            fetch:
                These are the messages we send to ONE
                client when they fetch messages via
                some narrow/search in the UI.
        Different clients have different needs
        when it comes to things like generating avatar
        hashes or including both rendered and unrendered
        markdown, so that explains the different shapes.
        And then the two codepaths have different
        performance needs. In the events codepath, we
        have the Django view generate a single "wide"
        dictionary that gets put on the event queue,
        and then we send events to multiple clients,
        finalizing the payload for each of them depending
        on the "shape" they want. (We also avoid
        doing extra work for any two clients who want
        the same shape dictionary, but that's out of the
        scope of this particular test).
        In the fetch scenario, the single client only needs
        a dictionary of one shape, but we need to re-hydrate
        the sender information, since the sender details
        may have changed since the message was originally
        sent.
        This test simply verifies that the two codepaths
        ultimately provide the same result.
        '''
        def reload_message(msg_id: int) -> Message:
            # Get a clean copy of the message, and
            # clear the cache.
            cache_delete(to_dict_cache_key_id(msg_id))
            msg = Message.objects.get(id=msg_id)
            return msg
        def get_send_message_payload(
                msg_id: int,
                apply_markdown: bool,
                client_gravatar: bool) -> Dict[str, Any]:
            # The "events" codepath: wide dict, then finalize per client.
            msg = reload_message(msg_id)
            wide_dict = MessageDict.wide_dict(msg)
            narrow_dict = MessageDict.finalize_payload(
                wide_dict,
                apply_markdown=apply_markdown,
                client_gravatar=client_gravatar,
            )
            return narrow_dict
        def get_fetch_payload(
                msg_id: int,
                apply_markdown: bool,
                client_gravatar: bool) -> Dict[str, Any]:
            # The "fetch" codepath: uncached helper, then post-processing.
            msg = reload_message(msg_id)
            unhydrated_dict = MessageDict.to_dict_uncached_helper([msg])[0]
            # The next step mutates the dict in place
            # for performance reasons.
            MessageDict.post_process_dicts(
                [unhydrated_dict],
                apply_markdown=apply_markdown,
                client_gravatar=client_gravatar,
            )
            final_dict = unhydrated_dict
            return final_dict
        def test_message_id() -> int:
            hamlet = self.example_user('hamlet')
            self.login_user(hamlet)
            msg_id = self.send_stream_message(
                hamlet,
                "Scotland",
                topic_name="editing",
                content="before edit",
            )
            return msg_id
        # Exercise all four combinations of (apply_markdown, client_gravatar).
        flag_setups = [
            [False, False],
            [False, True],
            [True, False],
            [True, True],
        ]
        msg_id = test_message_id()
        for (apply_markdown, client_gravatar) in flag_setups:
            send_message_payload = get_send_message_payload(
                msg_id,
                apply_markdown=apply_markdown,
                client_gravatar=client_gravatar,
            )
            fetch_payload = get_fetch_payload(
                msg_id,
                apply_markdown=apply_markdown,
                client_gravatar=client_gravatar,
            )
            self.assertEqual(send_message_payload, fetch_payload)
    def test_bulk_message_fetching(self) -> None:
        # Bulk-fetching many messages (with reactions) should stay within a
        # fixed query count and a per-message time budget.
        sender = self.example_user('othello')
        receiver = self.example_user('hamlet')
        pm_recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
        stream_name = 'Çiğdem'
        stream = self.make_stream(stream_name)
        stream_recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
        sending_client = make_client(name="test suite")
        ids = []
        for i in range(300):
            for recipient in [pm_recipient, stream_recipient]:
                message = Message(
                    sender=sender,
                    recipient=recipient,
                    content=f'whatever {i}',
                    rendered_content='DOES NOT MATTER',
                    rendered_content_version=markdown_version,
                    date_sent=timezone_now(),
                    sending_client=sending_client,
                    last_edit_time=timezone_now(),
                    edit_history='[]',
                )
                message.set_topic_name('whatever')
                message.save()
                ids.append(message.id)
                Reaction.objects.create(user_profile=sender, message=message,
                                        emoji_name='simple_smile')
        num_ids = len(ids)
        self.assertTrue(num_ids >= 600)
        flush_per_request_caches()
        t = time.time()
        with queries_captured() as queries:
            rows = list(MessageDict.get_raw_db_rows(ids))
        objs = [
            MessageDict.build_dict_from_raw_db_row(row)
            for row in rows
        ]
        MessageDict.post_process_dicts(objs, apply_markdown=False, client_gravatar=False)
        delay = time.time() - t
        # Make sure we don't take longer than 1.5ms per message to
        # extract messages. Note that we increased this from 1ms to
        # 1.5ms to handle tests running in parallel being a bit
        # slower.
        error_msg = f"Number of ids: {num_ids}. Time delay: {delay}"
        self.assertTrue(delay < 0.0015 * num_ids, error_msg)
        self.assert_length(queries, 7)
        self.assertEqual(len(rows), num_ids)
    def test_applying_markdown(self) -> None:
        # Fetching an un-rendered message renders it lazily and persists the
        # rendered content back onto the Message row.
        sender = self.example_user('othello')
        receiver = self.example_user('hamlet')
        recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
        sending_client = make_client(name="test suite")
        message = Message(
            sender=sender,
            recipient=recipient,
            content='hello **world**',
            date_sent=timezone_now(),
            sending_client=sending_client,
            last_edit_time=timezone_now(),
            edit_history='[]',
        )
        message.set_topic_name('whatever')
        message.save()
        # An important part of this test is to get the message through this exact code path,
        # because there is an ugly hack we need to cover. So don't just say "row = message".
        row = MessageDict.get_raw_db_rows([message.id])[0]
        dct = MessageDict.build_dict_from_raw_db_row(row)
        expected_content = '<p>hello <strong>world</strong></p>'
        self.assertEqual(dct['rendered_content'], expected_content)
        message = Message.objects.get(id=message.id)
        self.assertEqual(message.rendered_content, expected_content)
        self.assertEqual(message.rendered_content_version, markdown_version)
    @mock.patch("zerver.lib.message.markdown_convert")
    def test_applying_markdown_invalid_format(self, convert_mock: Any) -> None:
        # pretend the converter returned an invalid message without raising an exception
        convert_mock.return_value = None
        sender = self.example_user('othello')
        receiver = self.example_user('hamlet')
        recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
        sending_client = make_client(name="test suite")
        message = Message(
            sender=sender,
            recipient=recipient,
            content='hello **world**',
            date_sent=timezone_now(),
            sending_client=sending_client,
            last_edit_time=timezone_now(),
            edit_history='[]',
        )
        message.set_topic_name('whatever')
        message.save()
        # An important part of this test is to get the message through this exact code path,
        # because there is an ugly hack we need to cover. So don't just say "row = message".
        row = MessageDict.get_raw_db_rows([message.id])[0]
        dct = MessageDict.build_dict_from_raw_db_row(row)
        error_content = '<p>[Zulip note: Sorry, we could not understand the formatting of your message]</p>'
        self.assertEqual(dct['rendered_content'], error_content)
    def test_topic_links_use_stream_realm(self) -> None:
        # Set up a realm filter on 'zulip' and assert that messages
        # sent to a stream on 'zulip' have the topic linkified from
        # senders in both the 'zulip' and 'lear' realms as well as
        # the notification bot.
        zulip_realm = get_realm('zulip')
        url_format_string = r"https://trac.example.com/ticket/%(id)s"
        url = 'https://trac.example.com/ticket/123'
        topic_name = 'test #123'
        realm_filter = RealmFilter(realm=zulip_realm,
                                   pattern=r"#(?P<id>[0-9]{2,8})",
                                   url_format_string=url_format_string)
        self.assertEqual(
            realm_filter.__str__(),
            '<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
            ' https://trac.example.com/ticket/%(id)s>')
        def get_message(sender: UserProfile) -> Message:
            msg_id = self.send_stream_message(sender, 'Denmark', 'hello world', topic_name,
                                              zulip_realm)
            return Message.objects.get(id=msg_id)
        def assert_topic_links(links: List[str], msg: Message) -> None:
            dct = MessageDict.to_dict_uncached_helper([msg])[0]
            self.assertEqual(dct[TOPIC_LINKS], links)
        # Send messages before and after saving the realm filter from each user.
        assert_topic_links([], get_message(self.example_user('othello')))
        assert_topic_links([], get_message(self.lear_user('cordelia')))
        assert_topic_links([], get_message(self.notification_bot()))
        realm_filter.save()
        assert_topic_links([url], get_message(self.example_user('othello')))
        assert_topic_links([url], get_message(self.lear_user('cordelia')))
        assert_topic_links([url], get_message(self.notification_bot()))
    def test_reaction(self) -> None:
        # A message dict should include reaction entries with the reacting
        # user's id, email, and full name.
        sender = self.example_user('othello')
        receiver = self.example_user('hamlet')
        recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
        sending_client = make_client(name="test suite")
        message = Message(
            sender=sender,
            recipient=recipient,
            content='hello **world**',
            date_sent=timezone_now(),
            sending_client=sending_client,
            last_edit_time=timezone_now(),
            edit_history='[]',
        )
        message.set_topic_name('whatever')
        message.save()
        reaction = Reaction.objects.create(
            message=message, user_profile=sender,
            emoji_name='simple_smile')
        row = MessageDict.get_raw_db_rows([message.id])[0]
        msg_dict = MessageDict.build_dict_from_raw_db_row(row)
        self.assertEqual(msg_dict['reactions'][0]['emoji_name'],
                         reaction.emoji_name)
        self.assertEqual(msg_dict['reactions'][0]['user_id'], sender.id)
        self.assertEqual(msg_dict['reactions'][0]['user']['id'],
                         sender.id)
        self.assertEqual(msg_dict['reactions'][0]['user']['email'],
                         sender.email)
        self.assertEqual(msg_dict['reactions'][0]['user']['full_name'],
                         sender.full_name)
    def test_missing_anchor(self) -> None:
        # GET /json/messages without an anchor argument should error.
        self.login('hamlet')
        result = self.client_get(
            '/json/messages?use_first_unread_anchor=false&num_before=1&num_after=1')
        self.assert_json_error(
            result, "Missing 'anchor' argument.")
    def test_invalid_anchor(self) -> None:
        # GET /json/messages with a non-numeric anchor should error.
        self.login('hamlet')
        result = self.client_get(
            '/json/messages?use_first_unread_anchor=false&num_before=1&num_after=1&anchor=chocolate')
        self.assert_json_error(
            result, "Invalid anchor")
class SewMessageAndReactionTest(ZulipTestCase):
    # Verifies sew_messages_and_reactions(), which attaches raw reaction
    # rows to their corresponding raw message rows.
    def test_sew_messages_and_reaction(self) -> None:
        sender = self.example_user('othello')
        receiver = self.example_user('hamlet')
        pm_recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
        stream_name = 'Çiğdem'
        stream = self.make_stream(stream_name)
        stream_recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
        sending_client = make_client(name="test suite")
        # Create 10 messages (5 PM + 5 stream), each with one reaction.
        needed_ids = []
        for i in range(5):
            for recipient in [pm_recipient, stream_recipient]:
                message = Message(
                    sender=sender,
                    recipient=recipient,
                    content=f'whatever {i}',
                    date_sent=timezone_now(),
                    sending_client=sending_client,
                    last_edit_time=timezone_now(),
                    edit_history='[]',
                )
                message.set_topic_name('whatever')
                message.save()
                needed_ids.append(message.id)
                reaction = Reaction(user_profile=sender, message=message,
                                    emoji_name='simple_smile')
                reaction.save()
        messages = Message.objects.filter(id__in=needed_ids).values(
            *['id', 'content'])
        reactions = Reaction.get_raw_db_rows(needed_ids)
        tied_data = sew_messages_and_reactions(messages, reactions)
        # Every sewn row should carry exactly its one reaction.
        for data in tied_data:
            self.assertEqual(len(data['reactions']), 1)
            self.assertEqual(data['reactions'][0]['emoji_name'],
                             'simple_smile')
            self.assertTrue(data['id'])
            self.assertTrue(data['content'])
class MessagePOSTTest(ZulipTestCase):
def _send_and_verify_message(self, user: UserProfile, stream_name: str, error_msg: str=None) -> None:
if error_msg is None:
msg_id = self.send_stream_message(user, stream_name)
result = self.api_get(user, '/json/messages/' + str(msg_id))
self.assert_json_success(result)
else:
with self.assertRaisesRegex(JsonableError, error_msg):
self.send_stream_message(user, stream_name)
def test_message_to_self(self) -> None:
"""
Sending a message to a stream to which you are subscribed is
successful.
"""
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"topic": "Test topic"})
self.assert_json_success(result)
def test_api_message_to_self(self) -> None:
"""
Same as above, but for the API view
"""
user = self.example_user('hamlet')
result = self.api_post(user, "/api/v1/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"topic": "Test topic"})
self.assert_json_success(result)
    def test_message_to_stream_with_nonexistent_id(self) -> None:
        # A bot sending to a stream ID that does not exist should get an
        # API error, and its owner should receive a notification PM.
        cordelia = self.example_user('cordelia')
        bot = self.create_test_bot(
            short_name='whatever',
            user_profile=cordelia,
        )
        result = self.api_post(
            bot, "/api/v1/messages",
            {
                "type": "stream",
                "to": ujson.dumps([99999]),
                "client": "test suite",
                "content": "Stream message by ID.",
                "topic": "Test topic for stream ID message",
            },
        )
        self.assert_json_error(result, "Stream with ID '99999' does not exist")
        msg = self.get_last_message()
        expected = ("Your bot `whatever-bot@zulip.testserver` tried to send a message to "
                    "stream ID 99999, but there is no stream with that ID.")
        self.assertEqual(msg.content, expected)
def test_message_to_stream_by_id(self) -> None:
"""
Sending a message to a stream (by stream ID) to which you are
subscribed is successful.
"""
self.login('hamlet')
realm = get_realm('zulip')
stream = get_stream('Verona', realm)
result = self.client_post("/json/messages", {"type": "stream",
"to": ujson.dumps([stream.id]),
"client": "test suite",
"content": "Stream message by ID.",
"topic": "Test topic for stream ID message"})
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEqual(sent_message.content, "Stream message by ID.")
    def test_sending_message_as_stream_post_policy_admins(self) -> None:
        """
        Sending messages to streams which only the admins can create and post to.
        """
        admin_profile = self.example_user("iago")
        self.login_user(admin_profile)
        stream_name = "Verona"
        stream = get_stream(stream_name, admin_profile.realm)
        do_change_stream_post_policy(stream, Stream.STREAM_POST_POLICY_ADMINS)
        # Admins and their owned bots can send to STREAM_POST_POLICY_ADMINS streams
        self._send_and_verify_message(admin_profile, stream_name)
        admin_owned_bot = self.create_test_bot(
            short_name='whatever1',
            full_name='whatever1',
            user_profile=admin_profile,
        )
        self._send_and_verify_message(admin_owned_bot, stream_name)
        non_admin_profile = self.example_user("hamlet")
        self.login_user(non_admin_profile)
        # Non admins and their owned bots cannot send to STREAM_POST_POLICY_ADMINS streams
        self._send_and_verify_message(non_admin_profile, stream_name,
                                      "Only organization administrators can send to this stream.")
        non_admin_owned_bot = self.create_test_bot(
            short_name='whatever2',
            full_name='whatever2',
            user_profile=non_admin_profile,
        )
        self._send_and_verify_message(non_admin_owned_bot, stream_name,
                                      "Only organization administrators can send to this stream.")
        # Bots without owner (except cross realm bot) cannot send to announcement only streams
        bot_without_owner = do_create_user(
            email='free-bot@zulip.testserver',
            password='',
            realm=non_admin_profile.realm,
            full_name='freebot',
            short_name='freebot',
            bot_type=UserProfile.DEFAULT_BOT,
        )
        self._send_and_verify_message(bot_without_owner, stream_name,
                                      "Only organization administrators can send to this stream.")
        # Cross realm bots should be allowed
        notification_bot = get_system_bot("notification-bot@zulip.com")
        internal_send_stream_message(stream.realm, notification_bot, stream,
                                     'Test topic', 'Test message by notification bot')
        self.assertEqual(self.get_last_message().content, 'Test message by notification bot')
    def test_sending_message_as_stream_post_policy_restrict_new_members(self) -> None:
        """
        Sending messages to streams which new members cannot create and post to.
        """
        admin_profile = self.example_user("iago")
        self.login_user(admin_profile)
        do_set_realm_property(admin_profile.realm, 'waiting_period_threshold', 10)
        # Joined 9 days ago with a 10-day threshold, so still a "new member".
        admin_profile.date_joined = timezone_now() - datetime.timedelta(days=9)
        admin_profile.save()
        self.assertTrue(admin_profile.is_new_member)
        self.assertTrue(admin_profile.is_realm_admin)
        stream_name = "Verona"
        stream = get_stream(stream_name, admin_profile.realm)
        do_change_stream_post_policy(stream, Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS)
        # Admins and their owned bots can send to STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS streams,
        # even if the admin is a new user
        self._send_and_verify_message(admin_profile, stream_name)
        admin_owned_bot = self.create_test_bot(
            short_name='whatever1',
            full_name='whatever1',
            user_profile=admin_profile,
        )
        self._send_and_verify_message(admin_owned_bot, stream_name)
        non_admin_profile = self.example_user("hamlet")
        self.login_user(non_admin_profile)
        non_admin_profile.date_joined = timezone_now() - datetime.timedelta(days=9)
        non_admin_profile.save()
        self.assertTrue(non_admin_profile.is_new_member)
        self.assertFalse(non_admin_profile.is_realm_admin)
        # Non admins and their owned bots cannot send to
        # STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS streams while the user is
        # still a new member
        self._send_and_verify_message(non_admin_profile, stream_name,
                                      "New members cannot send to this stream.")
        non_admin_owned_bot = self.create_test_bot(
            short_name='whatever2',
            full_name='whatever2',
            user_profile=non_admin_profile,
        )
        self._send_and_verify_message(non_admin_owned_bot, stream_name,
                                      "New members cannot send to this stream.")
        # Bots without owner (except cross realm bot) cannot send to announcement only stream
        bot_without_owner = do_create_user(
            email='free-bot@zulip.testserver',
            password='',
            realm=non_admin_profile.realm,
            full_name='freebot',
            short_name='freebot',
            bot_type=UserProfile.DEFAULT_BOT,
        )
        self._send_and_verify_message(bot_without_owner, stream_name,
                                      "New members cannot send to this stream.")
        # Cross realm bots should be allowed
        notification_bot = get_system_bot("notification-bot@zulip.com")
        internal_send_stream_message(stream.realm, notification_bot, stream,
                                     'Test topic', 'Test message by notification bot')
        self.assertEqual(self.get_last_message().content, 'Test message by notification bot')
def test_api_message_with_default_to(self) -> None:
"""
Sending messages without a to field should be sent to the default
stream for the user_profile.
"""
user = self.example_user('hamlet')
user.default_sending_stream_id = get_stream('Verona', user.realm).id
user.save()
result = self.api_post(user, "/api/v1/messages", {"type": "stream",
"client": "test suite",
"content": "Test message no to",
"topic": "Test topic"})
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEqual(sent_message.content, "Test message no to")
def test_message_to_nonexistent_stream(self) -> None:
"""
Sending a message to a nonexistent stream fails.
"""
self.login('hamlet')
self.assertFalse(Stream.objects.filter(name="nonexistent_stream"))
result = self.client_post("/json/messages", {"type": "stream",
"to": "nonexistent_stream",
"client": "test suite",
"content": "Test message",
"topic": "Test topic"})
self.assert_json_error(result, "Stream 'nonexistent_stream' does not exist")
def test_message_to_nonexistent_stream_with_bad_characters(self) -> None:
"""
Nonexistent stream name with bad characters should be escaped properly.
"""
self.login('hamlet')
self.assertFalse(Stream.objects.filter(name="""&<"'><non-existent>"""))
result = self.client_post("/json/messages", {"type": "stream",
"to": """&<"'><non-existent>""",
"client": "test suite",
"content": "Test message",
"topic": "Test topic"})
self.assert_json_error(result, "Stream '&<"'><non-existent>' does not exist")
    def test_personal_message(self) -> None:
        """
        Sending a personal message to a valid username is successful.
        """
        user_profile = self.example_user("hamlet")
        self.login_user(user_profile)
        othello = self.example_user('othello')
        result = self.client_post("/json/messages", {"type": "private",
                                                     "content": "Test message",
                                                     "client": "test suite",
                                                     "to": othello.email})
        self.assert_json_success(result)
        message_id = ujson.loads(result.content.decode())['id']
        # The PM should show up as the sender's (only) recent conversation.
        recent_conversations = get_recent_private_conversations(user_profile)
        self.assertEqual(len(recent_conversations), 1)
        recent_conversation = list(recent_conversations.values())[0]
        recipient_id = list(recent_conversations.keys())[0]
        self.assertEqual(set(recent_conversation['user_ids']), {othello.id})
        self.assertEqual(recent_conversation['max_message_id'], message_id)
        # Now send a message to yourself and see how that interacts with the data structure
        result = self.client_post("/json/messages", {"type": "private",
                                                     "content": "Test message",
                                                     "client": "test suite",
                                                     "to": user_profile.email})
        self.assert_json_success(result)
        self_message_id = ujson.loads(result.content.decode())['id']
        recent_conversations = get_recent_private_conversations(user_profile)
        self.assertEqual(len(recent_conversations), 2)
        # The original othello conversation should be unchanged.
        recent_conversation = recent_conversations[recipient_id]
        self.assertEqual(set(recent_conversation['user_ids']), {othello.id})
        self.assertEqual(recent_conversation['max_message_id'], message_id)
        # Now verify we have the appropriate self-pm data structure
        del recent_conversations[recipient_id]
        recent_conversation = list(recent_conversations.values())[0]
        recipient_id = list(recent_conversations.keys())[0]
        # A self-PM conversation has an empty user_ids set.
        self.assertEqual(set(recent_conversation['user_ids']), set())
        self.assertEqual(recent_conversation['max_message_id'], self_message_id)
def test_personal_message_by_id(self) -> None:
"""
Sending a personal message to a valid user ID is successful.
"""
self.login('hamlet')
result = self.client_post(
"/json/messages",
{
"type": "private",
"content": "Test message",
"client": "test suite",
"to": ujson.dumps([self.example_user("othello").id]),
},
)
self.assert_json_success(result)
msg = self.get_last_message()
self.assertEqual("Test message", msg.content)
self.assertEqual(msg.recipient_id, self.example_user("othello").id)
    def test_group_personal_message_by_id(self) -> None:
        """
        Sending a group personal message to multiple valid user IDs is
        successful, and the message is addressed to the matching huddle.
        """
        self.login('hamlet')
        result = self.client_post(
            "/json/messages",
            {
                "type": "private",
                "content": "Test message",
                "client": "test suite",
                "to": ujson.dumps([self.example_user("othello").id,
                                   self.example_user("cordelia").id]),
            },
        )
        self.assert_json_success(result)
        msg = self.get_last_message()
        self.assertEqual("Test message", msg.content)
        # The recipient should be the huddle of sender plus both recipients.
        self.assertEqual(msg.recipient_id, get_huddle_recipient(
            {self.example_user("hamlet").id,
             self.example_user("othello").id,
             self.example_user("cordelia").id}).id,
        )
def test_personal_message_copying_self(self) -> None:
"""
Sending a personal message to yourself plus another user is successful,
and counts as a message just to that user.
"""
hamlet = self.example_user('hamlet')
othello = self.example_user('othello')
self.login_user(hamlet)
result = self.client_post("/json/messages", {
"type": "private",
"content": "Test message",
"client": "test suite",
"to": ujson.dumps([hamlet.id, othello.id])})
self.assert_json_success(result)
msg = self.get_last_message()
# Verify that we're not actually on the "recipient list"
self.assertNotIn("Hamlet", str(msg.recipient))
def test_personal_message_to_nonexistent_user(self) -> None:
"""
Sending a personal message to an invalid email returns error JSON.
"""
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": "nonexistent"})
self.assert_json_error(result, "Invalid email 'nonexistent'")
def test_personal_message_to_deactivated_user(self) -> None:
"""
Sending a personal message to a deactivated user returns error JSON.
"""
othello = self.example_user('othello')
cordelia = self.example_user('cordelia')
do_deactivate_user(othello)
self.login('hamlet')
result = self.client_post("/json/messages", {
"type": "private",
"content": "Test message",
"client": "test suite",
"to": ujson.dumps([othello.id])})
self.assert_json_error(result, f"'{othello.email}' is no longer using Zulip.")
result = self.client_post("/json/messages", {
"type": "private",
"content": "Test message",
"client": "test suite",
"to": ujson.dumps([othello.id, cordelia.id])})
self.assert_json_error(result, f"'{othello.email}' is no longer using Zulip.")
def test_invalid_type(self) -> None:
"""
Sending a message of unknown type returns error JSON.
"""
self.login('hamlet')
othello = self.example_user('othello')
result = self.client_post("/json/messages", {"type": "invalid type",
"content": "Test message",
"client": "test suite",
"to": othello.email})
self.assert_json_error(result, "Invalid message type")
def test_empty_message(self) -> None:
"""
Sending a message that is empty or only whitespace should fail
"""
self.login('hamlet')
othello = self.example_user('othello')
result = self.client_post("/json/messages", {"type": "private",
"content": " ",
"client": "test suite",
"to": othello.email})
self.assert_json_error(result, "Message must not be empty")
def test_empty_string_topic(self) -> None:
"""
Sending a message that has empty string topic should fail
"""
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"topic": ""})
self.assert_json_error(result, "Topic can't be empty")
def test_missing_topic(self) -> None:
"""
Sending a message without topic should fail
"""
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message"})
self.assert_json_error(result, "Missing topic")
def test_invalid_message_type(self) -> None:
"""
Messages other than the type of "private" or "stream" are considered as invalid
"""
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "invalid",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"topic": "Test topic"})
self.assert_json_error(result, "Invalid message type")
def test_private_message_without_recipients(self) -> None:
"""
Sending private message without recipients should fail
"""
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "private",
"content": "Test content",
"client": "test suite",
"to": ""})
self.assert_json_error(result, "Message must have recipients")
def test_mirrored_huddle(self) -> None:
"""
Sending a mirrored huddle message works
"""
result = self.api_post(self.mit_user("starnine"),
"/json/messages", {"type": "private",
"sender": self.mit_email("sipbtest"),
"content": "Test message",
"client": "zephyr_mirror",
"to": ujson.dumps([self.mit_email("starnine"),
self.mit_email("espuser")])},
subdomain="zephyr")
self.assert_json_success(result)
def test_mirrored_personal(self) -> None:
"""
Sending a mirrored personal message works
"""
result = self.api_post(self.mit_user("starnine"),
"/json/messages", {"type": "private",
"sender": self.mit_email("sipbtest"),
"content": "Test message",
"client": "zephyr_mirror",
"to": self.mit_email("starnine")},
subdomain="zephyr")
self.assert_json_success(result)
def test_mirrored_personal_browser(self) -> None:
"""
Sending a mirrored personal message via the browser should not work.
"""
user = self.mit_user('starnine')
self.login_user(user)
result = self.client_post("/json/messages",
{"type": "private",
"sender": self.mit_email("sipbtest"),
"content": "Test message",
"client": "zephyr_mirror",
"to": self.mit_email("starnine")},
subdomain="zephyr")
self.assert_json_error(result, "Invalid mirrored message")
def test_mirrored_personal_to_someone_else(self) -> None:
"""
Sending a mirrored personal message to someone else is not allowed.
"""
result = self.api_post(self.mit_user("starnine"), "/api/v1/messages",
{"type": "private",
"sender": self.mit_email("sipbtest"),
"content": "Test message",
"client": "zephyr_mirror",
"to": self.mit_email("espuser")},
subdomain="zephyr")
self.assert_json_error(result, "User not authorized for this query")
    def test_duplicated_mirrored_huddle(self) -> None:
        """
        Sending two mirrored huddles in the row return the same ID
        """
        # One identical mirrored huddle payload, sent twice by two
        # different mirroring users.
        msg = {"type": "private",
               "sender": self.mit_email("sipbtest"),
               "content": "Test message",
               "client": "zephyr_mirror",
               "to": ujson.dumps([self.mit_email("espuser"),
                                  self.mit_email("starnine")])}

        # The mocked DNS.dnslookup return values look like Hesiod passwd
        # entries used to build mirror users — presumably consumed when
        # creating the mirror-dummy accounts; TODO confirm against
        # create_mirrored_message_users.
        with mock.patch('DNS.dnslookup', return_value=[['starnine:*:84233:101:Athena Consulting Exchange User,,,:/mit/starnine:/bin/bash']]):
            result1 = self.api_post(self.mit_user("starnine"), "/api/v1/messages", msg,
                                    subdomain="zephyr")
            self.assert_json_success(result1)

        with mock.patch('DNS.dnslookup', return_value=[['espuser:*:95494:101:Esp Classroom,,,:/mit/espuser:/bin/athena/bash']]):
            result2 = self.api_post(self.mit_user("espuser"), "/api/v1/messages", msg,
                                    subdomain="zephyr")
            self.assert_json_success(result2)

        # Both posts must be deduplicated to the very same message ID.
        self.assertEqual(ujson.loads(result1.content)['id'],
                         ujson.loads(result2.content)['id'])
def test_message_with_null_bytes(self) -> None:
"""
A message with null bytes in it is handled.
"""
self.login('hamlet')
post_data = {"type": "stream", "to": "Verona", "client": "test suite",
"content": " I like null bytes \x00 in my content", "topic": "Test topic"}
result = self.client_post("/json/messages", post_data)
self.assert_json_error(result, "Message must not contain null bytes")
def test_strip_message(self) -> None:
"""
A message with mixed whitespace at the end is cleaned up.
"""
self.login('hamlet')
post_data = {"type": "stream", "to": "Verona", "client": "test suite",
"content": " I like whitespace at the end! \n\n \n", "topic": "Test topic"}
result = self.client_post("/json/messages", post_data)
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEqual(sent_message.content, " I like whitespace at the end!")
def test_long_message(self) -> None:
"""
Sending a message longer than the maximum message length succeeds but is
truncated.
"""
self.login('hamlet')
long_message = "A" * (MAX_MESSAGE_LENGTH + 1)
post_data = {"type": "stream", "to": "Verona", "client": "test suite",
"content": long_message, "topic": "Test topic"}
result = self.client_post("/json/messages", post_data)
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEqual(sent_message.content,
"A" * (MAX_MESSAGE_LENGTH - 20) + "\n[message truncated]")
def test_long_topic(self) -> None:
"""
Sending a message with a topic longer than the maximum topic length
succeeds, but the topic is truncated.
"""
self.login('hamlet')
long_topic = "A" * (MAX_TOPIC_NAME_LENGTH + 1)
post_data = {"type": "stream", "to": "Verona", "client": "test suite",
"content": "test content", "topic": long_topic}
result = self.client_post("/json/messages", post_data)
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEqual(sent_message.topic_name(),
"A" * (MAX_TOPIC_NAME_LENGTH - 3) + "...")
def test_send_forged_message_as_not_superuser(self) -> None:
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"topic": "Test topic",
"forged": "true"})
self.assert_json_error(result, "User not authorized for this query")
def test_send_message_as_not_superuser_to_different_domain(self) -> None:
self.login('hamlet')
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"topic": "Test topic",
"realm_str": "mit"})
self.assert_json_error(result, "User not authorized for this query")
def test_send_message_as_superuser_to_domain_that_dont_exist(self) -> None:
user = self.example_user("default_bot")
password = "test_password"
user.set_password(password)
user.is_api_super_user = True
user.save()
result = self.api_post(user,
"/api/v1/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"topic": "Test topic",
"realm_str": "non-existing"})
user.is_api_super_user = False
user.save()
self.assert_json_error(result, "Unknown organization 'non-existing'")
def test_send_message_when_sender_is_not_set(self) -> None:
result = self.api_post(self.mit_user("starnine"), "/api/v1/messages",
{"type": "private",
"content": "Test message",
"client": "zephyr_mirror",
"to": self.mit_email("starnine")},
subdomain="zephyr")
self.assert_json_error(result, "Missing sender")
def test_send_message_as_not_superuser_when_type_is_not_private(self) -> None:
result = self.api_post(self.mit_user("starnine"), "/api/v1/messages",
{"type": "not-private",
"sender": self.mit_email("sipbtest"),
"content": "Test message",
"client": "zephyr_mirror",
"to": self.mit_email("starnine")},
subdomain="zephyr")
self.assert_json_error(result, "User not authorized for this query")
    @mock.patch("zerver.views.message_send.create_mirrored_message_users")
    def test_send_message_create_mirrored_message_user_returns_invalid_input(
            self, create_mirrored_message_users_mock: Any) -> None:
        # When the helper that resolves mirror users raises
        # InvalidMirrorInput, the endpoint responds with a generic
        # "Invalid mirrored message" error.
        create_mirrored_message_users_mock.side_effect = InvalidMirrorInput()
        result = self.api_post(self.mit_user("starnine"), "/api/v1/messages",
                               {"type": "private",
                                "sender": self.mit_email("sipbtest"),
                                "content": "Test message",
                                "client": "zephyr_mirror",
                                "to": self.mit_email("starnine")},
                               subdomain="zephyr")
        self.assert_json_error(result, "Invalid mirrored message")
    @mock.patch("zerver.views.message_send.create_mirrored_message_users")
    def test_send_message_when_client_is_zephyr_mirror_but_string_id_is_not_zephyr(
            self, create_mirrored_message_users_mock: Any) -> None:
        # Mirror-user resolution is mocked to succeed, so the failure
        # below must come from the realm check, not from user lookup.
        create_mirrored_message_users_mock.return_value = mock.Mock()
        user = self.mit_user("starnine")
        # Move the user's realm off the zephyr subdomain.
        user.realm.string_id = 'notzephyr'
        user.realm.save()
        result = self.api_post(user, "/api/v1/messages",
                               {"type": "private",
                                "sender": self.mit_email("sipbtest"),
                                "content": "Test message",
                                "client": "zephyr_mirror",
                                "to": user.email},
                               subdomain="notzephyr")
        self.assert_json_error(result, "Zephyr mirroring is not allowed in this organization")
    @mock.patch("zerver.views.message_send.create_mirrored_message_users")
    def test_send_message_when_client_is_zephyr_mirror_but_recipient_is_user_id(
            self, create_mirrored_message_users_mock: Any) -> None:
        # Mirrored messages must address recipients by email; a
        # JSON-encoded list of user IDs is rejected.
        create_mirrored_message_users_mock.return_value = mock.Mock()
        user = self.mit_user("starnine")
        self.login_user(user)
        result = self.api_post(user, "/api/v1/messages",
                               {"type": "private",
                                "sender": self.mit_email("sipbtest"),
                                "content": "Test message",
                                "client": "zephyr_mirror",
                                "to": ujson.dumps([user.id])},
                               subdomain="zephyr")
        self.assert_json_error(result, "Mirroring not allowed with recipient user IDs")
    def test_send_message_irc_mirror(self) -> None:
        # An IRC mirror bot with the api_super_user bit may send forged
        # messages whose "time" field back-dates date_sent.
        reset_emails_in_zulip_realm()
        self.login('hamlet')
        bot_info = {
            'full_name': 'IRC bot',
            'short_name': 'irc',
        }
        result = self.client_post("/json/bots", bot_info)
        self.assert_json_success(result)

        email = "irc-bot@zulip.testserver"
        user = get_user(email, get_realm('zulip'))
        user.is_api_super_user = True
        user.save()
        # Re-fetch so we work with the saved state of the bot user.
        user = get_user(email, get_realm('zulip'))
        self.subscribe(user, "IRCland")

        # Simulate a mirrored message with a slightly old timestamp.
        fake_date_sent = timezone_now() - datetime.timedelta(minutes=37)
        fake_timestamp = datetime_to_timestamp(fake_date_sent)
        # Note: "to" uses a different capitalization ("IRCLand") than the
        # subscribed stream ("IRCland"); the request still succeeds, so
        # stream lookup is presumably case-insensitive — TODO confirm.
        result = self.api_post(user, "/api/v1/messages", {"type": "stream",
                                                          "forged": "true",
                                                          "time": fake_timestamp,
                                                          "sender": "irc-user@irc.zulip.com",
                                                          "content": "Test message",
                                                          "client": "irc_mirror",
                                                          "topic": "from irc",
                                                          "to": "IRCLand"})
        self.assert_json_success(result)

        # The stored message carries the forged timestamp.
        msg = self.get_last_message()
        self.assertEqual(int(datetime_to_timestamp(msg.date_sent)), int(fake_timestamp))

        # Now test again using forged=yes
        fake_date_sent = timezone_now() - datetime.timedelta(minutes=22)
        fake_timestamp = datetime_to_timestamp(fake_date_sent)
        result = self.api_post(user, "/api/v1/messages", {"type": "stream",
                                                          "forged": "yes",
                                                          "time": fake_timestamp,
                                                          "sender": "irc-user@irc.zulip.com",
                                                          "content": "Test message",
                                                          "client": "irc_mirror",
                                                          "topic": "from irc",
                                                          "to": "IRCLand"})
        self.assert_json_success(result)

        msg = self.get_last_message()
        self.assertEqual(int(datetime_to_timestamp(msg.date_sent)), int(fake_timestamp))
    def test_unsubscribed_api_super_user(self) -> None:
        reset_emails_in_zulip_realm()

        cordelia = self.example_user('cordelia')
        stream_name = 'private_stream'
        self.make_stream(stream_name, invite_only=True)
        self.unsubscribe(cordelia, stream_name)

        # As long as Cordelia is a super_user, she can send messages
        # to ANY stream, even one she is not subscribed to, and
        # she can do it for herself or on behalf of a mirrored user.

        def test_with(sender_email: str, client: str, forged: bool) -> None:
            # Sends the same payload twice: once without the
            # super-user bit (expecting an authorization error) and
            # once with it (expecting success).
            payload = dict(
                type="stream",
                to=stream_name,
                client=client,
                topic='whatever',
                content='whatever',
                forged=ujson.dumps(forged),
            )

            # Only pass the 'sender' property when doing mirroring behavior.
            if forged:
                payload['sender'] = sender_email

            cordelia.is_api_super_user = False
            cordelia.save()

            result = self.api_post(cordelia, "/api/v1/messages", payload)
            self.assert_json_error_contains(result, 'authorized')

            cordelia.is_api_super_user = True
            cordelia.save()

            result = self.api_post(cordelia, "/api/v1/messages", payload)
            self.assert_json_success(result)

        # Sending as herself (no mirroring).
        test_with(
            sender_email=cordelia.email,
            client='test suite',
            forged=False,
        )

        # Sending on behalf of a mirrored IRC user.
        test_with(
            sender_email='irc_person@zulip.com',
            client='irc_mirror',
            forged=True,
        )
def test_bot_can_send_to_owner_stream(self) -> None:
cordelia = self.example_user('cordelia')
bot = self.create_test_bot(
short_name='whatever',
user_profile=cordelia,
)
stream_name = 'private_stream'
self.make_stream(stream_name, invite_only=True)
payload = dict(
type="stream",
to=stream_name,
client='test suite',
topic='whatever',
content='whatever',
)
result = self.api_post(bot, "/api/v1/messages", payload)
self.assert_json_error_contains(result, 'Not authorized to send')
# We subscribe the bot owner! (aka cordelia)
self.subscribe(bot.bot_owner, stream_name)
result = self.api_post(bot, "/api/v1/messages", payload)
self.assert_json_success(result)
def test_cross_realm_bots_can_use_api_on_own_subdomain(self) -> None:
# Cross realm bots should use internal_send_*_message, not the API:
notification_bot = self.notification_bot()
stream = self.make_stream("notify_channel", get_realm("zulipinternal"))
result = self.api_post(notification_bot,
"/api/v1/messages",
{"type": "stream",
"to": "notify_channel",
"client": "test suite",
"content": "Test message",
"topic": "Test topic"},
subdomain='zulipinternal')
self.assert_json_success(result)
message = self.get_last_message()
self.assertEqual(message.content, "Test message")
self.assertEqual(message.sender, notification_bot)
self.assertEqual(message.recipient.type_id, stream.id)
    def test_create_mirror_user_despite_race(self) -> None:
        # Exercise the recovery path of create_mirror_user_if_needed:
        # the mocked create_user persists a real user (simulating a
        # concurrent writer winning the race) and then raises
        # IntegrityError, as the database would for a duplicate insert.
        realm = get_realm('zulip')

        email = 'fred@example.com'

        email_to_full_name = lambda email: 'fred'

        def create_user(**kwargs: Any) -> UserProfile:
            # Verify the kwargs that create_mirror_user_if_needed
            # passes for a mirror dummy.
            self.assertEqual(kwargs['full_name'], 'fred')
            self.assertEqual(kwargs['email'], email)
            self.assertEqual(kwargs['active'], False)
            self.assertEqual(kwargs['is_mirror_dummy'], True)
            # We create an actual user here to simulate a race.
            # We use the minimal, un-mocked function.
            kwargs['bot_type'] = None
            kwargs['bot_owner'] = None
            kwargs['tos_version'] = None
            kwargs['timezone'] = timezone_now()
            create_user_profile(**kwargs).save()
            raise IntegrityError()

        with mock.patch('zerver.lib.actions.create_user',
                        side_effect=create_user) as m:
            mirror_fred_user = create_mirror_user_if_needed(
                realm,
                email,
                email_to_full_name,
            )

        # Despite the raced insert, the caller gets back the user that
        # was created, and the mocked creation hook was exercised.
        self.assertEqual(mirror_fred_user.delivery_email, email)
        m.assert_called()
def test_guest_user(self) -> None:
sender = self.example_user('polonius')
stream_name = 'public stream'
self.make_stream(stream_name, invite_only=False)
payload = dict(
type="stream",
to=stream_name,
client='test suite',
topic='whatever',
content='whatever',
)
# Guest user can't send message to unsubscribed public streams
result = self.api_post(sender, "/api/v1/messages", payload)
self.assert_json_error(result, "Not authorized to send to stream 'public stream'")
self.subscribe(sender, stream_name)
# Guest user can send message to subscribed public streams
result = self.api_post(sender, "/api/v1/messages", payload)
self.assert_json_success(result)
class ScheduledMessageTest(ZulipTestCase):
    # Tests for scheduling messages for later delivery or as reminders
    # via the "deliver_at" / "delivery_type" parameters of /json/messages.

    def last_scheduled_message(self) -> ScheduledMessage:
        # The most recently created ScheduledMessage row.
        return ScheduledMessage.objects.all().order_by('-id')[0]

    def do_schedule_message(self, msg_type: str, to: str, msg: str,
                            defer_until: str='', tz_guess: str='',
                            delivery_type: str='send_later',
                            realm_str: str='zulip') -> HttpResponse:
        # Helper: POST a message as Hamlet with the given scheduling
        # parameters.  An empty defer_until omits "deliver_at" entirely.
        self.login('hamlet')

        topic_name = ''
        if msg_type == 'stream':
            topic_name = 'Test topic'

        payload = {"type": msg_type,
                   "to": to,
                   "client": "test suite",
                   "content": msg,
                   "topic": topic_name,
                   "realm_str": realm_str,
                   "delivery_type": delivery_type,
                   "tz_guess": tz_guess}
        if defer_until:
            payload["deliver_at"] = defer_until

        result = self.client_post("/json/messages", payload)
        return result

    def test_schedule_message(self) -> None:
        content = "Test message"
        # Naive timestamp one day in the future; how it is interpreted
        # depends on tz_guess / the user's timezone setting (see below).
        defer_until = timezone_now().replace(tzinfo=None) + datetime.timedelta(days=1)
        defer_until_str = str(defer_until)

        # Scheduling a message to a stream you are subscribed is successful.
        result = self.do_schedule_message('stream', 'Verona',
                                          content + ' 1', defer_until_str)
        message = self.last_scheduled_message()
        self.assert_json_success(result)
        self.assertEqual(message.content, 'Test message 1')
        self.assertEqual(message.topic_name(), 'Test topic')
        self.assertEqual(message.scheduled_timestamp, convert_to_UTC(defer_until))
        self.assertEqual(message.delivery_type, ScheduledMessage.SEND_LATER)

        # Scheduling a message for reminders.
        result = self.do_schedule_message('stream', 'Verona',
                                          content + ' 2', defer_until_str,
                                          delivery_type='remind')
        message = self.last_scheduled_message()
        self.assert_json_success(result)
        self.assertEqual(message.delivery_type, ScheduledMessage.REMIND)

        # Scheduling a private message is successful.
        othello = self.example_user('othello')
        hamlet = self.example_user('hamlet')
        result = self.do_schedule_message('private', othello.email,
                                          content + ' 3', defer_until_str)
        message = self.last_scheduled_message()
        self.assert_json_success(result)
        self.assertEqual(message.content, 'Test message 3')
        self.assertEqual(message.scheduled_timestamp, convert_to_UTC(defer_until))
        self.assertEqual(message.delivery_type, ScheduledMessage.SEND_LATER)

        # Setting a reminder in PM's to other users causes an error.
        result = self.do_schedule_message('private', othello.email,
                                          content + ' 4', defer_until_str,
                                          delivery_type='remind')
        self.assert_json_error(result, 'Reminders can only be set for streams.')

        # Setting a reminder in PM's to ourself is successful.
        # Required by reminders from message actions popover caret feature.
        result = self.do_schedule_message('private', hamlet.email,
                                          content + ' 5', defer_until_str,
                                          delivery_type='remind')
        message = self.last_scheduled_message()
        self.assert_json_success(result)
        self.assertEqual(message.content, 'Test message 5')
        self.assertEqual(message.delivery_type, ScheduledMessage.REMIND)

        # Scheduling a message while guessing timezone.
        tz_guess = 'Asia/Kolkata'
        result = self.do_schedule_message('stream', 'Verona', content + ' 6',
                                          defer_until_str, tz_guess=tz_guess)
        message = self.last_scheduled_message()
        self.assert_json_success(result)
        self.assertEqual(message.content, 'Test message 6')
        # The naive defer_until is localized to the guessed zone and
        # then converted to UTC for storage.
        local_tz = get_timezone(tz_guess)
        utz_defer_until = local_tz.normalize(local_tz.localize(defer_until))
        self.assertEqual(message.scheduled_timestamp,
                         convert_to_UTC(utz_defer_until))
        self.assertEqual(message.delivery_type, ScheduledMessage.SEND_LATER)

        # Test with users timezone setting as set to some timezone rather than
        # empty. This will help interpret timestamp in users local timezone.
        user = self.example_user("hamlet")
        user.timezone = 'US/Pacific'
        user.save(update_fields=['timezone'])
        result = self.do_schedule_message('stream', 'Verona',
                                          content + ' 7', defer_until_str)
        message = self.last_scheduled_message()
        self.assert_json_success(result)
        self.assertEqual(message.content, 'Test message 7')
        local_tz = get_timezone(user.timezone)
        utz_defer_until = local_tz.normalize(local_tz.localize(defer_until))
        self.assertEqual(message.scheduled_timestamp,
                         convert_to_UTC(utz_defer_until))
        self.assertEqual(message.delivery_type, ScheduledMessage.SEND_LATER)

    def test_scheduling_in_past(self) -> None:
        # Scheduling a message in past should fail.
        content = "Test message"
        defer_until = timezone_now()
        defer_until_str = str(defer_until)

        result = self.do_schedule_message('stream', 'Verona',
                                          content + ' 1', defer_until_str)
        self.assert_json_error(result, 'Time must be in the future.')

    def test_invalid_timestamp(self) -> None:
        # Scheduling a message from which timestamp couldn't be parsed
        # successfully should fail.
        content = "Test message"
        defer_until = 'Missed the timestamp'

        result = self.do_schedule_message('stream', 'Verona',
                                          content + ' 1', defer_until)
        self.assert_json_error(result, 'Invalid time format')

    def test_missing_deliver_at(self) -> None:
        # delivery_type defaults to 'send_later', so omitting deliver_at
        # is an error.
        content = "Test message"

        result = self.do_schedule_message('stream', 'Verona',
                                          content + ' 1')
        self.assert_json_error(result, 'Missing deliver_at in a request for delayed message delivery')
class EditMessageTest(ZulipTestCase):
def check_topic(self,
msg_id: int,
topic_name: str) -> None:
msg = Message.objects.get(id=msg_id)
self.assertEqual(msg.topic_name(), topic_name)
def check_message(self,
msg_id: int,
topic_name: str,
content: str) -> None:
# Make sure we saved the message correctly to the DB.
msg = Message.objects.get(id=msg_id)
self.assertEqual(msg.topic_name(), topic_name)
self.assertEqual(msg.content, content)
'''
Next, we will make sure we properly cached the
messages. We still have to do 2 queries to
hydrate sender/recipient info, but we won't need
to hit the zerver_message table.
'''
with queries_captured() as queries:
(fetch_message_dict,) = messages_for_ids(
message_ids = [msg.id],
user_message_flags={msg_id: []},
search_fields=dict(),
apply_markdown=False,
client_gravatar=False,
allow_edit_history=True,
)
self.assertEqual(len(queries), 2)
for query in queries:
self.assertNotIn('message', query['sql'])
self.assertEqual(
fetch_message_dict[TOPIC_NAME],
msg.topic_name(),
)
self.assertEqual(
fetch_message_dict['content'],
msg.content,
)
self.assertEqual(
fetch_message_dict['sender_id'],
msg.sender_id,
)
if msg.edit_history:
self.assertEqual(
fetch_message_dict['edit_history'],
ujson.loads(msg.edit_history),
)
    def test_query_count_on_to_dict_uncached(self) -> None:
        # `to_dict_uncached` method is used by the mechanisms
        # tested in this class. Hence, its performance is tested here.
        # Generate 2 messages
        user = self.example_user("hamlet")
        self.login_user(user)
        stream_name = "public_stream"
        self.subscribe(user, stream_name)
        message_ids = []
        # Three messages from three distinct senders (two humans plus
        # the notification bot), each subscribed before sending.
        message_ids.append(self.send_stream_message(user,
                                                    stream_name, "Message one"))
        user_2 = self.example_user("cordelia")
        self.subscribe(user_2, stream_name)
        message_ids.append(self.send_stream_message(user_2,
                                                    stream_name, "Message two"))
        self.subscribe(self.notification_bot(), stream_name)
        message_ids.append(self.send_stream_message(self.notification_bot(),
                                                    stream_name, "Message three"))
        messages = [Message.objects.select_related().get(id=message_id)
                    for message_id in message_ids]

        # Check number of queries performed
        with queries_captured() as queries:
            MessageDict.to_dict_uncached(messages)
        # 1 query for realm_id per message = 3
        # 1 query each for reactions & submessage for all messages = 2
        self.assertEqual(len(queries), 5)

        realm_id = 2  # Fetched from stream object
        # Check number of queries performed with realm_id
        with queries_captured() as queries:
            MessageDict.to_dict_uncached(messages, realm_id)
        # 1 query each for reactions & submessage for all messages = 2
        self.assertEqual(len(queries), 2)
def test_save_message(self) -> None:
"""This is also tested by a client test, but here we can verify
the cache against the database"""
self.login('hamlet')
msg_id = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="editing", content="before edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'content': 'after edit',
})
self.assert_json_success(result)
self.check_message(msg_id, topic_name="editing", content="after edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'topic': 'edited',
})
self.assert_json_success(result)
self.check_topic(msg_id, topic_name="edited")
def test_fetch_raw_message(self) -> None:
self.login('hamlet')
msg_id = self.send_personal_message(
from_user=self.example_user("hamlet"),
to_user=self.example_user("cordelia"),
content="**before** edit",
)
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_success(result)
self.assertEqual(result.json()['raw_content'], '**before** edit')
# Test error cases
result = self.client_get('/json/messages/999999')
self.assert_json_error(result, 'Invalid message(s)')
self.login('cordelia')
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_success(result)
self.login('othello')
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_error(result, 'Invalid message(s)')
def test_fetch_raw_message_stream_wrong_realm(self) -> None:
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
stream = self.make_stream('public_stream')
self.subscribe(user_profile, stream.name)
msg_id = self.send_stream_message(user_profile, stream.name,
topic_name="test", content="test")
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_success(result)
mit_user = self.mit_user('sipbtest')
self.login_user(mit_user)
result = self.client_get('/json/messages/' + str(msg_id), subdomain="zephyr")
self.assert_json_error(result, 'Invalid message(s)')
def test_fetch_raw_message_private_stream(self) -> None:
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
stream = self.make_stream('private_stream', invite_only=True)
self.subscribe(user_profile, stream.name)
msg_id = self.send_stream_message(user_profile, stream.name,
topic_name="test", content="test")
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_success(result)
self.login('othello')
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_error(result, 'Invalid message(s)')
def test_edit_message_no_permission(self) -> None:
self.login('hamlet')
msg_id = self.send_stream_message(self.example_user("iago"), "Scotland",
topic_name="editing", content="before edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'content': 'content after edit',
})
self.assert_json_error(result, "You don't have permission to edit this message")
def test_edit_message_no_changes(self) -> None:
self.login('hamlet')
msg_id = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="editing", content="before edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
})
self.assert_json_error(result, "Nothing to change")
def test_edit_message_no_topic(self) -> None:
self.login('hamlet')
msg_id = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="editing", content="before edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'topic': ' ',
})
self.assert_json_error(result, "Topic can't be empty")
def test_edit_message_no_content(self) -> None:
self.login('hamlet')
msg_id = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="editing", content="before edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'content': ' ',
})
self.assert_json_success(result)
content = Message.objects.filter(id=msg_id).values_list('content', flat = True)[0]
self.assertEqual(content, "(deleted)")
    def test_edit_message_history_disabled(self) -> None:
        # With the realm's allow_edit_history setting off, neither the
        # /history endpoint nor the message fetch path exposes edits.
        user_profile = self.example_user("hamlet")
        do_set_realm_property(user_profile.realm, "allow_edit_history", False)
        self.login('hamlet')

        # Single-line edit
        msg_id_1 = self.send_stream_message(self.example_user("hamlet"),
                                            "Denmark",
                                            topic_name="editing",
                                            content="content before edit")
        new_content_1 = 'content after edit'
        result_1 = self.client_patch("/json/messages/" + str(msg_id_1), {
            'message_id': msg_id_1, 'content': new_content_1,
        })
        self.assert_json_success(result_1)

        # The dedicated history endpoint is blocked outright.
        result = self.client_get(
            "/json/messages/" + str(msg_id_1) + "/history")
        self.assert_json_error(result, "Message edit history is disabled in this organization")

        # Now verify that if we fetch the message directly, there's no
        # edit history data attached.
        messages_result = self.client_get("/json/messages",
                                          {"anchor": msg_id_1, "num_before": 0, "num_after": 10})
        self.assert_json_success(messages_result)
        json_messages = ujson.loads(
            messages_result.content.decode('utf-8'))
        for msg in json_messages['messages']:
            self.assertNotIn("edit_history", msg)
    def test_edit_message_history(self) -> None:
        # Verifies the /history endpoint's rendered content, the
        # highlighted HTML diff, and the previous-revision snapshot.
        self.login('hamlet')

        # Single-line edit
        msg_id_1 = self.send_stream_message(
            self.example_user("hamlet"),
            "Scotland",
            topic_name="editing",
            content="content before edit")
        new_content_1 = 'content after edit'
        result_1 = self.client_patch("/json/messages/" + str(msg_id_1), {
            'message_id': msg_id_1, 'content': new_content_1,
        })
        self.assert_json_success(result_1)

        message_edit_history_1 = self.client_get(
            "/json/messages/" + str(msg_id_1) + "/history")
        json_response_1 = ujson.loads(
            message_edit_history_1.content.decode('utf-8'))
        # message_history[0] is the original revision, [1] the edit.
        message_history_1 = json_response_1['message_history']

        # Check content of message after edit.
        self.assertEqual(message_history_1[0]['rendered_content'],
                         '<p>content before edit</p>')
        self.assertEqual(message_history_1[1]['rendered_content'],
                         '<p>content after edit</p>')
        # The diff highlights both the inserted and the deleted word.
        self.assertEqual(message_history_1[1]['content_html_diff'],
                         ('<p>content '
                          '<span class="highlight_text_inserted">after</span> '
                          '<span class="highlight_text_deleted">before</span>'
                          ' edit</p>'))
        # Check content of message before edit.
        self.assertEqual(message_history_1[1]['prev_rendered_content'],
                         '<p>content before edit</p>')

        # Edits on new lines
        msg_id_2 = self.send_stream_message(
            self.example_user("hamlet"),
            "Scotland",
            topic_name="editing",
            content=('content before edit, line 1\n'
                     '\n'
                     'content before edit, line 3'))
        new_content_2 = ('content before edit, line 1\n'
                         'content after edit, line 2\n'
                         'content before edit, line 3')
        result_2 = self.client_patch("/json/messages/" + str(msg_id_2), {
            'message_id': msg_id_2, 'content': new_content_2,
        })
        self.assert_json_success(result_2)

        message_edit_history_2 = self.client_get(
            "/json/messages/" + str(msg_id_2) + "/history")
        json_response_2 = ujson.loads(
            message_edit_history_2.content.decode('utf-8'))
        message_history_2 = json_response_2['message_history']

        # The multi-line edit collapses the paragraphs into one with
        # <br> separators; the inserted span covers the new middle line.
        self.assertEqual(message_history_2[0]['rendered_content'],
                         ('<p>content before edit, line 1</p>\n'
                          '<p>content before edit, line 3</p>'))
        self.assertEqual(message_history_2[1]['rendered_content'],
                         ('<p>content before edit, line 1<br>\n'
                          'content after edit, line 2<br>\n'
                          'content before edit, line 3</p>'))
        self.assertEqual(message_history_2[1]['content_html_diff'],
                         ('<p>content before edit, line 1<br> '
                          'content <span class="highlight_text_inserted">after edit, line 2<br> '
                          'content</span> before edit, line 3</p>'))
        self.assertEqual(message_history_2[1]['prev_rendered_content'],
                         ('<p>content before edit, line 1</p>\n'
                          '<p>content before edit, line 3</p>'))
def test_edit_link(self) -> None:
# Link editing
self.login('hamlet')
msg_id_1 = self.send_stream_message(
self.example_user("hamlet"),
"Scotland",
topic_name="editing",
content="Here is a link to [zulip](www.zulip.org).")
new_content_1 = 'Here is a link to [zulip](www.zulipchat.com).'
result_1 = self.client_patch("/json/messages/" + str(msg_id_1), {
'message_id': msg_id_1, 'content': new_content_1,
})
self.assert_json_success(result_1)
message_edit_history_1 = self.client_get(
"/json/messages/" + str(msg_id_1) + "/history")
json_response_1 = ujson.loads(
message_edit_history_1.content.decode('utf-8'))
message_history_1 = json_response_1['message_history']
# Check content of message after edit.
self.assertEqual(message_history_1[0]['rendered_content'],
'<p>Here is a link to '
'<a href="http://www.zulip.org">zulip</a>.</p>')
self.assertEqual(message_history_1[1]['rendered_content'],
'<p>Here is a link to '
'<a href="http://www.zulipchat.com">zulip</a>.</p>')
self.assertEqual(message_history_1[1]['content_html_diff'],
('<p>Here is a link to <a href="http://www.zulipchat.com"'
'>zulip '
'<span class="highlight_text_inserted"> Link: http://www.zulipchat.com .'
'</span> <span class="highlight_text_deleted"> Link: http://www.zulip.org .'
'</span> </a></p>'))
def test_edit_history_unedited(self) -> None:
self.login('hamlet')
msg_id = self.send_stream_message(
self.example_user('hamlet'),
'Scotland',
topic_name='editing',
content='This message has not been edited.')
result = self.client_get(f'/json/messages/{msg_id}/history')
self.assert_json_success(result)
message_history = result.json()['message_history']
self.assert_length(message_history, 1)
def test_user_info_for_updates(self) -> None:
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
self.login_user(hamlet)
self.subscribe(hamlet, 'Scotland')
self.subscribe(cordelia, 'Scotland')
msg_id = self.send_stream_message(hamlet, 'Scotland',
content='@**Cordelia Lear**')
user_info = get_user_info_for_message_updates(msg_id)
message_user_ids = user_info['message_user_ids']
self.assertIn(hamlet.id, message_user_ids)
self.assertIn(cordelia.id, message_user_ids)
mention_user_ids = user_info['mention_user_ids']
self.assertEqual(mention_user_ids, {cordelia.id})
    def test_edit_cases(self) -> None:
        """This test verifies the accuracy of construction of Zulip's edit
        history data structures."""
        self.login('hamlet')
        hamlet = self.example_user('hamlet')
        msg_id = self.send_stream_message(self.example_user("hamlet"), "Scotland",
                                          topic_name="topic 1", content="content 1")

        # Edit 1: content only -> history entry carries prev_content plus the
        # rendered variants, and no topic-related keys.
        result = self.client_patch("/json/messages/" + str(msg_id), {
            'message_id': msg_id,
            'content': 'content 2',
        })
        self.assert_json_success(result)
        history = ujson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]['prev_content'], 'content 1')
        self.assertEqual(history[0]['user_id'], hamlet.id)
        self.assertEqual(set(history[0].keys()),
                         {'timestamp', 'prev_content', 'user_id',
                          'prev_rendered_content', 'prev_rendered_content_version'})

        # Edit 2: topic only -> entry stores the old topic under the legacy
        # key and has no content-related keys.
        result = self.client_patch("/json/messages/" + str(msg_id), {
            'message_id': msg_id,
            'topic': 'topic 2',
        })
        self.assert_json_success(result)
        history = ujson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0][LEGACY_PREV_TOPIC], 'topic 1')
        self.assertEqual(history[0]['user_id'], hamlet.id)
        self.assertEqual(set(history[0].keys()), {'timestamp', LEGACY_PREV_TOPIC, 'user_id'})

        # Edit 3: content and topic together -> entry carries both sets of keys.
        result = self.client_patch("/json/messages/" + str(msg_id), {
            'message_id': msg_id,
            'content': 'content 3',
            'topic': 'topic 3',
        })
        self.assert_json_success(result)
        history = ujson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]['prev_content'], 'content 2')
        self.assertEqual(history[0][LEGACY_PREV_TOPIC], 'topic 2')
        self.assertEqual(history[0]['user_id'], hamlet.id)
        self.assertEqual(set(history[0].keys()),
                         {'timestamp', LEGACY_PREV_TOPIC, 'prev_content', 'user_id',
                          'prev_rendered_content', 'prev_rendered_content_version'})

        # Edit 4: content only again, still by hamlet.
        result = self.client_patch("/json/messages/" + str(msg_id), {
            'message_id': msg_id,
            'content': 'content 4',
        })
        self.assert_json_success(result)
        history = ujson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0]['prev_content'], 'content 3')
        self.assertEqual(history[0]['user_id'], hamlet.id)

        # Edit 5: topic only, by a different user (iago); the recorded
        # user_id must be the actual editor's, not the sender's.
        self.login('iago')
        result = self.client_patch("/json/messages/" + str(msg_id), {
            'message_id': msg_id,
            'topic': 'topic 4',
        })
        self.assert_json_success(result)
        history = ujson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0][LEGACY_PREV_TOPIC], 'topic 3')
        self.assertEqual(history[0]['user_id'], self.example_user('iago').id)

        # Spot-check the accumulated history; entries are newest-first
        # (index 0 is the most recent edit).
        history = ujson.loads(Message.objects.get(id=msg_id).edit_history)
        self.assertEqual(history[0][LEGACY_PREV_TOPIC], 'topic 3')
        self.assertEqual(history[2][LEGACY_PREV_TOPIC], 'topic 2')
        self.assertEqual(history[3][LEGACY_PREV_TOPIC], 'topic 1')
        self.assertEqual(history[1]['prev_content'], 'content 3')
        self.assertEqual(history[2]['prev_content'], 'content 2')
        self.assertEqual(history[4]['prev_content'], 'content 1')

        # Now, we verify that the edit history data sent back has the
        # correct filled-out fields
        message_edit_history = self.client_get("/json/messages/" + str(msg_id) + "/history")

        json_response = ujson.loads(message_edit_history.content.decode('utf-8'))

        # We reverse the message history view output so that the IDs line up with the above.
        message_history = list(reversed(json_response['message_history']))
        i = 0
        for entry in message_history:
            # Every entry has the base fields; topic edits (indices 0, 2, 3)
            # add prev_topic, and content edits (indices 1, 2, 4) add the
            # prev-content/diff fields.
            expected_entries = {'content', 'rendered_content', 'topic', 'timestamp', 'user_id'}
            if i in {0, 2, 3}:
                expected_entries.add('prev_topic')
            if i in {1, 2, 4}:
                expected_entries.add('prev_content')
                expected_entries.add('prev_rendered_content')
                expected_entries.add('content_html_diff')
            i += 1
            self.assertEqual(expected_entries, set(entry.keys()))
        self.assertEqual(len(message_history), 6)
        # Verify the topic/content progression across the 5 edits plus the
        # original message (index 5 is the original).
        self.assertEqual(message_history[0]['prev_topic'], 'topic 3')
        self.assertEqual(message_history[0]['topic'], 'topic 4')
        self.assertEqual(message_history[1]['topic'], 'topic 3')
        self.assertEqual(message_history[2]['topic'], 'topic 3')
        self.assertEqual(message_history[2]['prev_topic'], 'topic 2')
        self.assertEqual(message_history[3]['topic'], 'topic 2')
        self.assertEqual(message_history[3]['prev_topic'], 'topic 1')
        self.assertEqual(message_history[4]['topic'], 'topic 1')
        self.assertEqual(message_history[0]['content'], 'content 4')
        self.assertEqual(message_history[1]['content'], 'content 4')
        self.assertEqual(message_history[1]['prev_content'], 'content 3')
        self.assertEqual(message_history[2]['content'], 'content 3')
        self.assertEqual(message_history[2]['prev_content'], 'content 2')
        self.assertEqual(message_history[3]['content'], 'content 2')
        self.assertEqual(message_history[4]['content'], 'content 2')
        self.assertEqual(message_history[4]['prev_content'], 'content 1')
        self.assertEqual(message_history[5]['content'], 'content 1')
        self.assertEqual(message_history[5]['topic'], 'topic 1')
def test_edit_message_content_limit(self) -> None:
def set_message_editing_params(allow_message_editing: bool,
message_content_edit_limit_seconds: int,
allow_community_topic_editing: bool) -> None:
result = self.client_patch("/json/realm", {
'allow_message_editing': ujson.dumps(allow_message_editing),
'message_content_edit_limit_seconds': message_content_edit_limit_seconds,
'allow_community_topic_editing': ujson.dumps(allow_community_topic_editing),
})
self.assert_json_success(result)
def do_edit_message_assert_success(id_: int, unique_str: str, topic_only: bool=False) -> None:
new_topic = 'topic' + unique_str
new_content = 'content' + unique_str
params_dict = {'message_id': id_, 'topic': new_topic}
if not topic_only:
params_dict['content'] = new_content
result = self.client_patch("/json/messages/" + str(id_), params_dict)
self.assert_json_success(result)
if topic_only:
self.check_topic(id_, topic_name=new_topic)
else:
self.check_message(id_, topic_name=new_topic, content=new_content)
def do_edit_message_assert_error(id_: int, unique_str: str, error: str,
topic_only: bool=False) -> None:
message = Message.objects.get(id=id_)
old_topic = message.topic_name()
old_content = message.content
new_topic = 'topic' + unique_str
new_content = 'content' + unique_str
params_dict = {'message_id': id_, 'topic': new_topic}
if not topic_only:
params_dict['content'] = new_content
result = self.client_patch("/json/messages/" + str(id_), params_dict)
message = Message.objects.get(id=id_)
self.assert_json_error(result, error)
msg = Message.objects.get(id=id_)
self.assertEqual(msg.topic_name(), old_topic)
self.assertEqual(msg.content, old_content)
self.login('iago')
# send a message in the past
id_ = self.send_stream_message(self.example_user("iago"), "Scotland",
content="content", topic_name="topic")
message = Message.objects.get(id=id_)
message.date_sent = message.date_sent - datetime.timedelta(seconds=180)
message.save()
# test the various possible message editing settings
# high enough time limit, all edits allowed
set_message_editing_params(True, 240, False)
do_edit_message_assert_success(id_, 'A')
# out of time, only topic editing allowed
set_message_editing_params(True, 120, False)
do_edit_message_assert_success(id_, 'B', True)
do_edit_message_assert_error(id_, 'C', "The time limit for editing this message has passed")
# infinite time, all edits allowed
set_message_editing_params(True, 0, False)
do_edit_message_assert_success(id_, 'D')
# without allow_message_editing, nothing is allowed
set_message_editing_params(False, 240, False)
do_edit_message_assert_error(id_, 'E', "Your organization has turned off message editing", True)
set_message_editing_params(False, 120, False)
do_edit_message_assert_error(id_, 'F', "Your organization has turned off message editing", True)
set_message_editing_params(False, 0, False)
do_edit_message_assert_error(id_, 'G', "Your organization has turned off message editing", True)
def test_allow_community_topic_editing(self) -> None:
def set_message_editing_params(allow_message_editing: bool,
message_content_edit_limit_seconds: int,
allow_community_topic_editing: bool) -> None:
result = self.client_patch("/json/realm", {
'allow_message_editing': ujson.dumps(allow_message_editing),
'message_content_edit_limit_seconds': message_content_edit_limit_seconds,
'allow_community_topic_editing': ujson.dumps(allow_community_topic_editing),
})
self.assert_json_success(result)
def do_edit_message_assert_success(id_: int, unique_str: str) -> None:
new_topic = 'topic' + unique_str
params_dict = {'message_id': id_, 'topic': new_topic}
result = self.client_patch("/json/messages/" + str(id_), params_dict)
self.assert_json_success(result)
self.check_topic(id_, topic_name=new_topic)
def do_edit_message_assert_error(id_: int, unique_str: str, error: str) -> None:
message = Message.objects.get(id=id_)
old_topic = message.topic_name()
old_content = message.content
new_topic = 'topic' + unique_str
params_dict = {'message_id': id_, 'topic': new_topic}
result = self.client_patch("/json/messages/" + str(id_), params_dict)
message = Message.objects.get(id=id_)
self.assert_json_error(result, error)
msg = Message.objects.get(id=id_)
self.assertEqual(msg.topic_name(), old_topic)
self.assertEqual(msg.content, old_content)
self.login('iago')
# send a message in the past
id_ = self.send_stream_message(self.example_user("hamlet"), "Scotland",
content="content", topic_name="topic")
message = Message.objects.get(id=id_)
message.date_sent = message.date_sent - datetime.timedelta(seconds=180)
message.save()
# any user can edit the topic of a message
set_message_editing_params(True, 0, True)
# log in as a new user
self.login('cordelia')
do_edit_message_assert_success(id_, 'A')
# only admins can edit the topics of messages
self.login('iago')
set_message_editing_params(True, 0, False)
do_edit_message_assert_success(id_, 'B')
self.login('cordelia')
do_edit_message_assert_error(id_, 'C', "You don't have permission to edit this message")
# users cannot edit topics if allow_message_editing is False
self.login('iago')
set_message_editing_params(False, 0, True)
self.login('cordelia')
do_edit_message_assert_error(id_, 'D', "Your organization has turned off message editing")
# non-admin users cannot edit topics sent > 24 hrs ago
message.date_sent = message.date_sent - datetime.timedelta(seconds=90000)
message.save()
self.login('iago')
set_message_editing_params(True, 0, True)
do_edit_message_assert_success(id_, 'E')
self.login('cordelia')
do_edit_message_assert_error(id_, 'F', "The time limit for editing this message has passed")
# anyone should be able to edit "no topic" indefinitely
message.set_topic_name("(no topic)")
message.save()
self.login('cordelia')
do_edit_message_assert_success(id_, 'D')
    @mock.patch("zerver.lib.actions.send_event")
    def test_edit_topic_public_history_stream(self, mock_send_event: mock.MagicMock) -> None:
        # Verifies exactly which users send_event notifies when a topic is
        # edited on a public-history stream, across subscription changes
        # and long-term-idle status.
        stream_name = "Macbeth"
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        self.make_stream(stream_name, history_public_to_subscribers=True)
        self.subscribe(hamlet, stream_name)
        self.login_user(hamlet)
        message_id = self.send_stream_message(hamlet, stream_name, "Where am I?")

        # Cordelia subscribes only after the message was sent.
        self.login_user(cordelia)
        self.subscribe(cordelia, stream_name)
        message = Message.objects.get(id=message_id)

        def do_update_message_topic_success(user_profile: UserProfile, message: Message,
                                            topic_name: str, users_to_be_notified: List[Dict[str, Any]]) -> None:
            # Perform a topic-only edit and assert send_event was last
            # called with exactly `users_to_be_notified`.
            do_update_message(
                user_profile=user_profile,
                message=message,
                new_stream=None,
                topic_name=topic_name,
                propagate_mode="change_later",
                send_notification_to_old_thread=False,
                send_notification_to_new_thread=False,
                content=None,
                rendered_content=None,
                prior_mention_user_ids=set(),
                mention_user_ids=set(),
                mention_data=None,
            )

            mock_send_event.assert_called_with(mock.ANY, mock.ANY, users_to_be_notified)

        # Returns the users that need to be notified when a message topic is changed
        def notify(user_id: int) -> Dict[str, Any]:
            # The user owning the UserMessage row gets their real flags;
            # everyone else is reported with just ["read"].
            um = UserMessage.objects.get(message=message_id)
            if um.user_profile_id == user_id:
                return {
                    "id": user_id,
                    "flags": um.flags_list(),
                }
            else:
                return {
                    "id": user_id,
                    "flags": ["read"],
                }

        users_to_be_notified = list(map(notify, [hamlet.id, cordelia.id]))
        # Edit topic of a message sent before Cordelia subscribed the stream
        do_update_message_topic_success(cordelia, message, "Othello eats apple", users_to_be_notified)

        # If Cordelia is long-term idle, she doesn't get a notification.
        cordelia.long_term_idle = True
        cordelia.save()
        users_to_be_notified = list(map(notify, [hamlet.id]))
        do_update_message_topic_success(cordelia, message, "Another topic idle", users_to_be_notified)
        cordelia.long_term_idle = False
        cordelia.save()

        # Even if Hamlet unsubscribes the stream, he should be notified when the topic is changed
        # because he has a UserMessage row.
        self.unsubscribe(hamlet, stream_name)
        users_to_be_notified = list(map(notify, [hamlet.id, cordelia.id]))
        do_update_message_topic_success(cordelia, message, "Another topic", users_to_be_notified)

        # Hamlet subscribes to the stream again and Cordelia unsubscribes, then Hamlet changes
        # the message topic. Cordelia won't receive any updates when a message on that stream is
        # changed because she is not a subscriber and doesn't have a UserMessage row.
        self.subscribe(hamlet, stream_name)
        self.unsubscribe(cordelia, stream_name)
        self.login_user(hamlet)
        users_to_be_notified = list(map(notify, [hamlet.id]))
        do_update_message_topic_success(hamlet, message, "Change again", users_to_be_notified)
@mock.patch("zerver.lib.actions.send_event")
def test_wildcard_mention(self, mock_send_event: mock.MagicMock) -> None:
stream_name = "Macbeth"
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
self.make_stream(stream_name, history_public_to_subscribers=True)
self.subscribe(hamlet, stream_name)
self.subscribe(cordelia, stream_name)
self.login_user(hamlet)
message_id = self.send_stream_message(hamlet, stream_name, "Hello everyone")
def notify(user_id: int) -> Dict[str, Any]:
return {
"id": user_id,
"flags": ["wildcard_mentioned"],
}
users_to_be_notified = sorted(map(notify, [cordelia.id, hamlet.id]), key=itemgetter("id"))
result = self.client_patch("/json/messages/" + str(message_id), {
'message_id': message_id,
'content': 'Hello @**everyone**',
})
self.assert_json_success(result)
# Extract the send_event call where event type is 'update_message'.
# Here we assert wildcard_mention_user_ids has been set properly.
called = False
for call_args in mock_send_event.call_args_list:
(arg_realm, arg_event, arg_notified_users) = call_args[0]
if arg_event['type'] == 'update_message':
self.assertEqual(arg_event['type'], 'update_message')
self.assertEqual(arg_event['wildcard_mention_user_ids'], [cordelia.id, hamlet.id])
self.assertEqual(sorted(arg_notified_users, key=itemgetter("id")), users_to_be_notified)
called = True
self.assertTrue(called)
def test_propagate_topic_forward(self) -> None:
self.login('hamlet')
id1 = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="topic1")
id2 = self.send_stream_message(self.example_user("iago"), "Scotland",
topic_name="topic1")
id3 = self.send_stream_message(self.example_user("iago"), "Rome",
topic_name="topic1")
id4 = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="topic2")
id5 = self.send_stream_message(self.example_user("iago"), "Scotland",
topic_name="topic1")
result = self.client_patch("/json/messages/" + str(id1), {
'message_id': id1,
'topic': 'edited',
'propagate_mode': 'change_later',
})
self.assert_json_success(result)
self.check_topic(id1, topic_name="edited")
self.check_topic(id2, topic_name="edited")
self.check_topic(id3, topic_name="topic1")
self.check_topic(id4, topic_name="topic2")
self.check_topic(id5, topic_name="edited")
def test_propagate_all_topics(self) -> None:
self.login('hamlet')
id1 = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="topic1")
id2 = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="topic1")
id3 = self.send_stream_message(self.example_user("iago"), "Rome",
topic_name="topic1")
id4 = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="topic2")
id5 = self.send_stream_message(self.example_user("iago"), "Scotland",
topic_name="topic1")
id6 = self.send_stream_message(self.example_user("iago"), "Scotland",
topic_name="topic3")
result = self.client_patch("/json/messages/" + str(id2), {
'message_id': id2,
'topic': 'edited',
'propagate_mode': 'change_all',
})
self.assert_json_success(result)
self.check_topic(id1, topic_name="edited")
self.check_topic(id2, topic_name="edited")
self.check_topic(id3, topic_name="topic1")
self.check_topic(id4, topic_name="topic2")
self.check_topic(id5, topic_name="edited")
self.check_topic(id6, topic_name="topic3")
def test_propagate_all_topics_with_different_uppercase_letters(self) -> None:
self.login('hamlet')
id1 = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="topic1")
id2 = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="Topic1")
id3 = self.send_stream_message(self.example_user("iago"), "Rome",
topic_name="topiC1")
id4 = self.send_stream_message(self.example_user("iago"), "Scotland",
topic_name="toPic1")
result = self.client_patch("/json/messages/" + str(id2), {
'message_id': id2,
'topic': 'edited',
'propagate_mode': 'change_all',
})
self.assert_json_success(result)
self.check_topic(id1, topic_name="edited")
self.check_topic(id2, topic_name="edited")
self.check_topic(id3, topic_name="topiC1")
self.check_topic(id4, topic_name="edited")
def test_propagate_invalid(self) -> None:
self.login('hamlet')
id1 = self.send_stream_message(self.example_user("hamlet"), "Scotland",
topic_name="topic1")
result = self.client_patch("/json/messages/" + str(id1), {
'topic': 'edited',
'propagate_mode': 'invalid',
})
self.assert_json_error(result, 'Invalid propagate_mode')
self.check_topic(id1, topic_name="topic1")
result = self.client_patch("/json/messages/" + str(id1), {
'content': 'edited',
'propagate_mode': 'change_all',
})
self.assert_json_error(result, 'Invalid propagate_mode without topic edit')
self.check_topic(id1, topic_name="topic1")
def prepare_move_topics(self, user_email: str, old_stream: str, new_stream: str, topic: str) -> Tuple[UserProfile, Stream, Stream, int, int]:
user_profile = self.example_user(user_email)
self.login(user_email)
stream = self.make_stream(old_stream)
new_stream = self.make_stream(new_stream)
self.subscribe(user_profile, stream.name)
self.subscribe(user_profile, new_stream.name)
msg_id = self.send_stream_message(user_profile, stream.name,
topic_name=topic, content="First")
msg_id_lt = self.send_stream_message(user_profile, stream.name,
topic_name=topic, content="Second")
self.send_stream_message(user_profile, stream.name,
topic_name=topic, content="third")
return (user_profile, stream, new_stream, msg_id, msg_id_lt)
def test_move_message_to_stream(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'stream_id': new_stream.id,
'propagate_mode': 'change_all',
})
self.assert_json_success(result)
messages = get_topic_messages(user_profile, old_stream, "test")
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].content, f"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>test**")
messages = get_topic_messages(user_profile, new_stream, "test")
self.assertEqual(len(messages), 4)
self.assertEqual(messages[3].content, f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**")
def test_move_message_to_stream_change_later(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test")
result = self.client_patch("/json/messages/" + str(msg_id_later), {
'message_id': msg_id_later,
'stream_id': new_stream.id,
'propagate_mode': 'change_later',
})
self.assert_json_success(result)
messages = get_topic_messages(user_profile, old_stream, "test")
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].id, msg_id)
self.assertEqual(messages[1].content, f"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>test**")
messages = get_topic_messages(user_profile, new_stream, "test")
self.assertEqual(len(messages), 3)
self.assertEqual(messages[0].id, msg_id_later)
self.assertEqual(messages[2].content, f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**")
def test_move_message_to_stream_no_allowed(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
"aaron", "test move stream", "new stream", "test")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'stream_id': new_stream.id,
'propagate_mode': 'change_all',
})
self.assert_json_error(result, "You don't have permission to move this message")
messages = get_topic_messages(user_profile, old_stream, "test")
self.assertEqual(len(messages), 3)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assertEqual(len(messages), 0)
def test_move_message_to_stream_with_content(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'stream_id': new_stream.id,
'propagate_mode': 'change_all',
'content': 'Not allowed',
})
self.assert_json_error(result, "Cannot change message content while changing stream")
messages = get_topic_messages(user_profile, old_stream, "test")
self.assertEqual(len(messages), 3)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assertEqual(len(messages), 0)
def test_move_message_to_stream_and_topic(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test")
with queries_captured() as queries:
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'stream_id': new_stream.id,
'propagate_mode': 'change_all',
'topic': 'new topic',
})
self.assertEqual(len(queries), 52)
messages = get_topic_messages(user_profile, old_stream, "test")
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].content, f"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>new topic**")
messages = get_topic_messages(user_profile, new_stream, "new topic")
self.assertEqual(len(messages), 4)
self.assertEqual(messages[3].content, f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**")
self.assert_json_success(result)
def test_inaccessible_msg_after_stream_change(self) -> None:
"""Simulates the case where message is moved to a stream where user is not a subscribed"""
(user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test")
guest_user = self.example_user('polonius')
non_guest_user = self.example_user('hamlet')
self.subscribe(guest_user, old_stream.name)
self.subscribe(non_guest_user, old_stream.name)
msg_id_to_test_acesss = self.send_stream_message(user_profile, old_stream.name,
topic_name='test', content="fourth")
self.assertEqual(has_message_access(guest_user, Message.objects.get(id=msg_id_to_test_acesss), None), True)
self.assertEqual(has_message_access(non_guest_user, Message.objects.get(id=msg_id_to_test_acesss), None), True)
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'stream_id': new_stream.id,
'propagate_mode': 'change_all',
'topic': 'new topic'
})
self.assert_json_success(result)
self.assertEqual(has_message_access(guest_user, Message.objects.get(id=msg_id_to_test_acesss), None), False)
self.assertEqual(has_message_access(non_guest_user, Message.objects.get(id=msg_id_to_test_acesss), None), True)
self.assertEqual(UserMessage.objects.filter(
user_profile_id=non_guest_user.id,
message_id=msg_id_to_test_acesss,
).count(), 0)
self.assertEqual(has_message_access(self.example_user('iago'), Message.objects.get(id=msg_id_to_test_acesss), None), True)
def test_no_notify_move_message_to_stream(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'stream_id': new_stream.id,
'propagate_mode': 'change_all',
'send_notification_to_old_thread': 'false',
'send_notification_to_new_thread': 'false',
})
self.assert_json_success(result)
messages = get_topic_messages(user_profile, old_stream, "test")
self.assertEqual(len(messages), 0)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assertEqual(len(messages), 3)
def test_notify_new_thread_move_message_to_stream(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'stream_id': new_stream.id,
'propagate_mode': 'change_all',
'send_notification_to_old_thread': 'false',
'send_notification_to_new_thread': 'true',
})
self.assert_json_success(result)
messages = get_topic_messages(user_profile, old_stream, "test")
self.assertEqual(len(messages), 0)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assertEqual(len(messages), 4)
self.assertEqual(messages[3].content, f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**")
def test_notify_old_thread_move_message_to_stream(self) -> None:
(user_profile, old_stream, new_stream, msg_id, msg_id_lt) = self.prepare_move_topics(
"iago", "test move stream", "new stream", "test")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'stream_id': new_stream.id,
'propagate_mode': 'change_all',
'send_notification_to_old_thread': 'true',
'send_notification_to_new_thread': 'false',
})
self.assert_json_success(result)
messages = get_topic_messages(user_profile, old_stream, "test")
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].content, f"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>test**")
messages = get_topic_messages(user_profile, new_stream, "test")
self.assertEqual(len(messages), 3)
def test_move_message_to_stream_to_private_stream(self) -> None:
user_profile = self.example_user("iago")
self.login("iago")
stream = self.make_stream("test move stream")
new_stream = self.make_stream("new stream", None, True)
self.subscribe(user_profile, stream.name)
self.subscribe(user_profile, new_stream.name)
msg_id = self.send_stream_message(user_profile, stream.name,
topic_name="test", content="First")
self.send_stream_message(user_profile, stream.name,
topic_name="test", content="Second")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'stream_id': new_stream.id,
'propagate_mode': 'change_all',
})
self.assert_json_error(result, "Streams must be public")
# We expect the messages to remain in the original stream/topic
messages = get_topic_messages(user_profile, stream, "test")
self.assertEqual(len(messages), 2)
messages = get_topic_messages(user_profile, new_stream, "test")
self.assertEqual(len(messages), 0)
class MirroredMessageUsersTest(ZulipTestCase):
def test_invalid_sender(self) -> None:
user = self.example_user('hamlet')
recipients: List[str] = []
Request = namedtuple('Request', ['POST'])
request = Request(POST=dict()) # no sender
with self.assertRaises(InvalidMirrorInput):
create_mirrored_message_users(request, user, recipients)
def test_invalid_client(self) -> None:
client = get_client(name='banned_mirror') # Invalid!!!
user = self.example_user('hamlet')
sender = user
recipients: List[str] = []
Request = namedtuple('Request', ['POST', 'client'])
request = Request(POST = dict(sender=sender.email, type='private'),
client = client)
with self.assertRaises(InvalidMirrorInput):
create_mirrored_message_users(request, user, recipients)
def test_invalid_email(self) -> None:
invalid_email = 'alice AT example.com'
recipients = [invalid_email]
# We use an MIT user here to maximize code coverage
user = self.mit_user('starnine')
sender = user
Request = namedtuple('Request', ['POST', 'client'])
for client_name in ['zephyr_mirror', 'irc_mirror', 'jabber_mirror']:
client = get_client(name=client_name)
request = Request(POST = dict(sender=sender.email, type='private'),
client = client)
with self.assertRaises(InvalidMirrorInput):
create_mirrored_message_users(request, user, recipients)
@mock.patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_zephyr_mirror_new_recipient(self, ignored: object) -> None:
"""Test mirror dummy user creation for PM recipients"""
client = get_client(name='zephyr_mirror')
user = self.mit_user('starnine')
sender = self.mit_user('sipbtest')
new_user_email = 'bob_the_new_user@mit.edu'
new_user_realm = get_realm("zephyr")
recipients = [user.email, new_user_email]
# Now make the request.
Request = namedtuple('Request', ['POST', 'client'])
request = Request(POST = dict(sender=sender.email, type='private'),
client = client)
mirror_sender = create_mirrored_message_users(request, user, recipients)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn(user.email, realm_emails)
self.assertIn(new_user_email, realm_emails)
bob = get_user(new_user_email, new_user_realm)
self.assertTrue(bob.is_mirror_dummy)
    @mock.patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
    def test_zephyr_mirror_new_sender(self, ignored: object) -> None:
        """Test mirror dummy user creation for sender when sending to stream"""
        client = get_client(name='zephyr_mirror')
        user = self.mit_user('starnine')
        sender_email = 'new_sender@mit.edu'
        recipients = ['stream_name']
        # Now make the request.
        Request = namedtuple('Request', ['POST', 'client'])
        request = Request(POST = dict(sender=sender_email, type='stream'),
                          client = client)
        mirror_sender = create_mirrored_message_users(request, user, recipients)
        # The unknown sender is auto-created as a mirror dummy account.
        assert(mirror_sender is not None)
        self.assertEqual(mirror_sender.email, sender_email)
        self.assertTrue(mirror_sender.is_mirror_dummy)
    def test_irc_mirror(self) -> None:
        """IRC mirroring auto-creates mirror dummy accounts for unknown
        PM recipients in the sender's realm."""
        reset_emails_in_zulip_realm()
        client = get_client(name='irc_mirror')
        sender = self.example_user('hamlet')
        recipients = [self.nonreg_email('alice'), 'bob@irc.zulip.com', self.nonreg_email('cordelia')]
        # Now make the request.
        Request = namedtuple('Request', ['POST', 'client'])
        request = Request(POST = dict(sender=sender.email, type='private'),
                          client = client)
        mirror_sender = create_mirrored_message_users(request, sender, recipients)
        self.assertEqual(mirror_sender, sender)
        realm_users = UserProfile.objects.filter(realm=sender.realm)
        realm_emails = {user.email for user in realm_users}
        self.assertIn(self.nonreg_email('alice'), realm_emails)
        self.assertIn('bob@irc.zulip.com', realm_emails)
        bob = get_user('bob@irc.zulip.com', sender.realm)
        # Auto-created recipients are mirror dummy accounts.
        self.assertTrue(bob.is_mirror_dummy)
    def test_jabber_mirror(self) -> None:
        """Jabber mirroring auto-creates mirror dummy accounts for unknown
        PM recipients in the sender's realm."""
        reset_emails_in_zulip_realm()
        client = get_client(name='jabber_mirror')
        sender = self.example_user('hamlet')
        user = sender
        recipients = [self.nonreg_email('alice'), self.nonreg_email('bob'), self.nonreg_email('cordelia')]
        # Now make the request.
        Request = namedtuple('Request', ['POST', 'client'])
        request = Request(POST = dict(sender=sender.email, type='private'),
                          client = client)
        mirror_sender = create_mirrored_message_users(request, user, recipients)
        self.assertEqual(mirror_sender, sender)
        realm_users = UserProfile.objects.filter(realm=sender.realm)
        realm_emails = {user.email for user in realm_users}
        self.assertIn(self.nonreg_email('alice'), realm_emails)
        self.assertIn(self.nonreg_email('bob'), realm_emails)
        bob = get_user(self.nonreg_email('bob'), sender.realm)
        # Auto-created recipients are mirror dummy accounts.
        self.assertTrue(bob.is_mirror_dummy)
class MessageAccessTests(ZulipTestCase):
    """Tests for changing message flags via POST /json/messages/flags,
    covering starring, read state, and the access rules for private,
    guest-visible, and historical messages."""
    def test_update_invalid_flags(self) -> None:
        """Unknown, internal-only, and read-only flags are all rejected."""
        message = self.send_personal_message(
            self.example_user("cordelia"),
            self.example_user("hamlet"),
            "hello",
        )
        self.login('hamlet')
        # A flag name the server doesn't know at all.
        result = self.client_post("/json/messages/flags",
                                  {"messages": ujson.dumps([message]),
                                   "op": "add",
                                   "flag": "invalid"})
        self.assert_json_error(result, "Invalid flag: 'invalid'")
        # Server-internal flags can't be set through the API.
        result = self.client_post("/json/messages/flags",
                                  {"messages": ujson.dumps([message]),
                                   "op": "add",
                                   "flag": "is_private"})
        self.assert_json_error(result, "Invalid flag: 'is_private'")
        result = self.client_post("/json/messages/flags",
                                  {"messages": ujson.dumps([message]),
                                   "op": "add",
                                   "flag": "active_mobile_push_notification"})
        self.assert_json_error(result, "Invalid flag: 'active_mobile_push_notification'")
        # "mentioned" is a real flag, but clients may not edit it.
        result = self.client_post("/json/messages/flags",
                                  {"messages": ujson.dumps([message]),
                                   "op": "add",
                                   "flag": "mentioned"})
        self.assert_json_error(result, "Flag not editable: 'mentioned'")
    def change_star(self, messages: List[int], add: bool=True, **kwargs: Any) -> HttpResponse:
        # Helper: add (or remove, if add=False) the "starred" flag on the
        # given message ids; extra kwargs are forwarded to client_post.
        return self.client_post("/json/messages/flags",
                                {"messages": ujson.dumps(messages),
                                 "op": "add" if add else "remove",
                                 "flag": "starred"},
                                **kwargs)
    def test_change_star(self) -> None:
        """
        You can set a message as starred/un-starred through
        POST /json/messages/flags.
        """
        self.login('hamlet')
        message_ids = [self.send_personal_message(self.example_user("hamlet"),
                                                  self.example_user("hamlet"),
                                                  "test")]
        # Star a message.
        result = self.change_star(message_ids)
        self.assert_json_success(result)
        for msg in self.get_messages():
            if msg['id'] in message_ids:
                self.assertEqual(msg['flags'], ['starred'])
            else:
                self.assertEqual(msg['flags'], ['read'])
        result = self.change_star(message_ids, False)
        self.assert_json_success(result)
        # Remove the stars.
        for msg in self.get_messages():
            if msg['id'] in message_ids:
                self.assertEqual(msg['flags'], [])
    def test_change_star_public_stream_historical(self) -> None:
        """
        You can star a public-stream message you never received (a
        "historical" message), but no other flag can be changed on it.
        """
        stream_name = "new_stream"
        self.subscribe(self.example_user("hamlet"), stream_name)
        self.login('hamlet')
        message_ids = [
            self.send_stream_message(self.example_user("hamlet"), stream_name, "test"),
        ]
        # Send a second message so we can verify it isn't modified
        other_message_ids = [
            self.send_stream_message(self.example_user("hamlet"), stream_name, "test_unused"),
        ]
        received_message_ids = [
            self.send_personal_message(
                self.example_user("hamlet"),
                self.example_user("cordelia"),
                "test_received",
            ),
        ]
        # Now login as another user who wasn't on that stream
        self.login('cordelia')
        # Send a message to yourself to make sure we have at least one with the read flag
        sent_message_ids = [
            self.send_personal_message(
                self.example_user("cordelia"),
                self.example_user("cordelia"),
                "test_read_message",
            ),
        ]
        result = self.client_post("/json/messages/flags",
                                  {"messages": ujson.dumps(sent_message_ids),
                                   "op": "add",
                                   "flag": "read"})
        # We can't change flags other than "starred" on historical messages:
        result = self.client_post("/json/messages/flags",
                                  {"messages": ujson.dumps(message_ids),
                                   "op": "add",
                                   "flag": "read"})
        self.assert_json_error(result, 'Invalid message(s)')
        # Trying to change a list of more than one historical message fails
        result = self.change_star(message_ids * 2)
        self.assert_json_error(result, 'Invalid message(s)')
        # Confirm that one can change the historical flag now
        result = self.change_star(message_ids)
        self.assert_json_success(result)
        for msg in self.get_messages():
            if msg['id'] in message_ids:
                # Starring a historical message also marks it read.
                self.assertEqual(set(msg['flags']), {'starred', 'historical', 'read'})
            elif msg['id'] in received_message_ids:
                self.assertEqual(msg['flags'], [])
            else:
                self.assertEqual(msg['flags'], ['read'])
                self.assertNotIn(msg['id'], other_message_ids)
        result = self.change_star(message_ids, False)
        self.assert_json_success(result)
        # But it still doesn't work if you're in another realm
        user = self.mit_user('sipbtest')
        self.login_user(user)
        result = self.change_star(message_ids, subdomain="zephyr")
        self.assert_json_error(result, 'Invalid message(s)')
    def test_change_star_private_message_security(self) -> None:
        """
        You cannot star a private message you are not a party to.
        """
        self.login('hamlet')
        message_ids = [
            self.send_personal_message(
                self.example_user("hamlet"),
                self.example_user("hamlet"),
                "test",
            ),
        ]
        # Starring private messages you didn't receive fails.
        self.login('cordelia')
        result = self.change_star(message_ids)
        self.assert_json_error(result, 'Invalid message(s)')
    def test_change_star_private_stream_security(self) -> None:
        """Starring in invite-only streams follows the stream's
        history-visibility rules."""
        stream_name = "private_stream"
        self.make_stream(stream_name, invite_only=True)
        self.subscribe(self.example_user("hamlet"), stream_name)
        self.login('hamlet')
        message_ids = [
            self.send_stream_message(self.example_user("hamlet"), stream_name, "test"),
        ]
        # Starring private stream messages you received works
        result = self.change_star(message_ids)
        self.assert_json_success(result)
        # Starring private stream messages you didn't receive fails.
        self.login('cordelia')
        result = self.change_star(message_ids)
        self.assert_json_error(result, 'Invalid message(s)')
        stream_name = "private_stream_2"
        self.make_stream(stream_name, invite_only=True,
                         history_public_to_subscribers=True)
        self.subscribe(self.example_user("hamlet"), stream_name)
        self.login('hamlet')
        message_ids = [
            self.send_stream_message(self.example_user("hamlet"), stream_name, "test"),
        ]
        # With stream.history_public_to_subscribers = True, you still
        # can't see it if you didn't receive the message and are
        # not subscribed.
        self.login('cordelia')
        result = self.change_star(message_ids)
        self.assert_json_error(result, 'Invalid message(s)')
        # But if you subscribe, then you can star the message
        self.subscribe(self.example_user("cordelia"), stream_name)
        result = self.change_star(message_ids)
        self.assert_json_success(result)
    def test_new_message(self) -> None:
        """
        New messages aren't starred.
        """
        sender = self.example_user('hamlet')
        self.login_user(sender)
        content = "Test message for star"
        self.send_stream_message(sender, "Verona",
                                 content=content)
        # Grab the most recent UserMessage row for hamlet.
        sent_message = UserMessage.objects.filter(
            user_profile=self.example_user('hamlet'),
        ).order_by("id").reverse()[0]
        self.assertEqual(sent_message.message.content, content)
        self.assertFalse(sent_message.flags.starred)
    def test_change_star_public_stream_security_for_guest_user(self) -> None:
        # Guest user can't access(star) unsubscribed public stream messages
        normal_user = self.example_user("hamlet")
        stream_name = "public_stream"
        self.make_stream(stream_name)
        self.subscribe(normal_user, stream_name)
        self.login_user(normal_user)
        message_id = [
            self.send_stream_message(normal_user, stream_name, "test 1"),
        ]
        guest_user = self.example_user('polonius')
        self.login_user(guest_user)
        result = self.change_star(message_id)
        self.assert_json_error(result, 'Invalid message(s)')
        # Subscribed guest users can access public stream messages sent before they join
        self.subscribe(guest_user, stream_name)
        result = self.change_star(message_id)
        self.assert_json_success(result)
        # And messages sent after they join
        self.login_user(normal_user)
        message_id = [
            self.send_stream_message(normal_user, stream_name, "test 2"),
        ]
        self.login_user(guest_user)
        result = self.change_star(message_id)
        self.assert_json_success(result)
    def test_change_star_private_stream_security_for_guest_user(self) -> None:
        # Guest users can't access(star) unsubscribed private stream messages
        normal_user = self.example_user("hamlet")
        stream_name = "private_stream"
        stream = self.make_stream(stream_name, invite_only=True)
        self.subscribe(normal_user, stream_name)
        self.login_user(normal_user)
        message_id = [
            self.send_stream_message(normal_user, stream_name, "test 1"),
        ]
        guest_user = self.example_user('polonius')
        self.login_user(guest_user)
        result = self.change_star(message_id)
        self.assert_json_error(result, 'Invalid message(s)')
        # Guest user can't access messages of subscribed private streams if
        # history is not public to subscribers
        self.subscribe(guest_user, stream_name)
        result = self.change_star(message_id)
        self.assert_json_error(result, 'Invalid message(s)')
        # Guest user can access messages of subscribed private streams if
        # history is public to subscribers
        do_change_stream_invite_only(stream, True, history_public_to_subscribers=True)
        result = self.change_star(message_id)
        self.assert_json_success(result)
        # With history not public to subscribers, they can still see new messages
        do_change_stream_invite_only(stream, True, history_public_to_subscribers=False)
        self.login_user(normal_user)
        message_id = [
            self.send_stream_message(normal_user, stream_name, "test 2"),
        ]
        self.login_user(guest_user)
        result = self.change_star(message_id)
        self.assert_json_success(result)
    def test_bulk_access_messages_private_stream(self) -> None:
        """bulk_access_messages respects protected history in
        invite-only streams."""
        user = self.example_user("hamlet")
        self.login_user(user)
        stream_name = "private_stream"
        stream = self.make_stream(stream_name, invite_only=True,
                                  history_public_to_subscribers=False)
        self.subscribe(user, stream_name)
        # Send a message before subscribing a new user to stream
        message_one_id = self.send_stream_message(user,
                                                  stream_name, "Message one")
        later_subscribed_user = self.example_user("cordelia")
        # Subscribe a user to private-protected history stream
        self.subscribe(later_subscribed_user, stream_name)
        # Send a message after subscribing a new user to stream
        message_two_id = self.send_stream_message(user,
                                                  stream_name, "Message two")
        message_ids = [message_one_id, message_two_id]
        messages = [Message.objects.select_related().get(id=message_id)
                    for message_id in message_ids]
        filtered_messages = bulk_access_messages(later_subscribed_user, messages)
        # Message sent before subscribing wouldn't be accessible by later
        # subscribed user as stream has protected history
        self.assertEqual(len(filtered_messages), 1)
        self.assertEqual(filtered_messages[0].id, message_two_id)
        do_change_stream_invite_only(stream, True, history_public_to_subscribers=True)
        filtered_messages = bulk_access_messages(later_subscribed_user, messages)
        # Messages sent before subscribing are accessible by the user, as the
        # stream no longer has protected history
        self.assertEqual(len(filtered_messages), 2)
        # Testing message accessibility for an unsubscribed user
        unsubscribed_user = self.example_user("ZOE")
        filtered_messages = bulk_access_messages(unsubscribed_user, messages)
        self.assertEqual(len(filtered_messages), 0)
    def test_bulk_access_messages_public_stream(self) -> None:
        """Public stream messages are accessible to everyone, subscribed
        or not."""
        user = self.example_user("hamlet")
        self.login_user(user)
        # Testing message accessibility including a public stream message
        stream_name = "public_stream"
        self.subscribe(user, stream_name)
        message_one_id = self.send_stream_message(user,
                                                  stream_name, "Message one")
        later_subscribed_user = self.example_user("cordelia")
        self.subscribe(later_subscribed_user, stream_name)
        # Send a message after subscribing a new user to stream
        message_two_id = self.send_stream_message(user,
                                                  stream_name, "Message two")
        message_ids = [message_one_id, message_two_id]
        messages = [Message.objects.select_related().get(id=message_id)
                    for message_id in message_ids]
        # All public stream messages are always accessible
        filtered_messages = bulk_access_messages(later_subscribed_user, messages)
        self.assertEqual(len(filtered_messages), 2)
        unsubscribed_user = self.example_user("ZOE")
        filtered_messages = bulk_access_messages(unsubscribed_user, messages)
        self.assertEqual(len(filtered_messages), 2)
class MessageHasKeywordsTest(ZulipTestCase):
    '''Test for keywords like has_link, has_image, has_attachment.'''
    def setup_dummy_attachments(self, user_profile: UserProfile) -> List[str]:
        """Create three Attachment rows owned by user_profile (no real
        files are written) and return their path_ids."""
        sample_size = 10
        realm_id = user_profile.realm_id
        dummy_files = [
            ('zulip.txt', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt', sample_size),
            ('temp_file.py', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py', sample_size),
            ('abc.py', f'{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py', sample_size),
        ]
        for file_name, path_id, size in dummy_files:
            create_attachment(file_name, path_id, user_profile, size)
        # return path ids
        return [x[1] for x in dummy_files]
    def test_claim_attachment(self) -> None:
        """Only upload links that Markdown actually renders should claim
        the corresponding attachment."""
        user_profile = self.example_user('hamlet')
        dummy_path_ids = self.setup_dummy_attachments(user_profile)
        dummy_urls = [f"http://zulip.testserver/user_uploads/{x}" for x in dummy_path_ids]
        # Send message referring the attachment
        self.subscribe(user_profile, "Denmark")
        def assert_attachment_claimed(path_id: str, claimed: bool) -> None:
            attachment = Attachment.objects.get(path_id=path_id)
            self.assertEqual(attachment.is_claimed(), claimed)
        # This message should claim attachment 1 only because attachment 2
        # is not being parsed as a link by Markdown.
        body = ("Some files here ...[zulip.txt]({})" +
                "{}.... Some more...." +
                "{}").format(dummy_urls[0], dummy_urls[1], dummy_urls[1])
        self.send_stream_message(user_profile, "Denmark", body, "test")
        assert_attachment_claimed(dummy_path_ids[0], True)
        assert_attachment_claimed(dummy_path_ids[1], False)
        # This message tries to claim the third attachment but fails because
        # Markdown would not set has_attachments = True here.
        body = f"Link in code: `{dummy_urls[2]}`"
        self.send_stream_message(user_profile, "Denmark", body, "test")
        assert_attachment_claimed(dummy_path_ids[2], False)
        # Another scenario where we wouldn't parse the link.
        body = f"Link to not parse: .{dummy_urls[2]}.`"
        self.send_stream_message(user_profile, "Denmark", body, "test")
        assert_attachment_claimed(dummy_path_ids[2], False)
        # Finally, claim attachment 3.
        body = f"Link: {dummy_urls[2]}"
        self.send_stream_message(user_profile, "Denmark", body, "test")
        assert_attachment_claimed(dummy_path_ids[2], True)
        assert_attachment_claimed(dummy_path_ids[1], False)
    def test_finds_all_links(self) -> None:
        # Bare domains, Markdown links, and full URLs all set has_link.
        msg_ids = []
        msg_contents = ["foo.org", "[bar](baz.gov)", "http://quux.ca"]
        for msg_content in msg_contents:
            msg_ids.append(self.send_stream_message(self.example_user('hamlet'),
                                                    'Denmark', content=msg_content))
        msgs = [Message.objects.get(id=id) for id in msg_ids]
        self.assertTrue(all([msg.has_link for msg in msgs]))
    def test_finds_only_links(self) -> None:
        # Links wrapped in code/math syntax (or absent) should not all
        # count as links.
        msg_ids = []
        msg_contents = ["`example.org`", '``example.org```', '$$https://example.org$$', "foo"]
        for msg_content in msg_contents:
            msg_ids.append(self.send_stream_message(self.example_user('hamlet'),
                                                    'Denmark', content=msg_content))
        msgs = [Message.objects.get(id=id) for id in msg_ids]
        self.assertFalse(all([msg.has_link for msg in msgs]))
    def update_message(self, msg: Message, content: str) -> None:
        # Helper: edit msg's content in place (as hamlet) so the has_*
        # keyword flags get recomputed by do_update_message.
        hamlet = self.example_user('hamlet')
        realm_id = hamlet.realm.id
        rendered_content = render_markdown(msg, content)
        mention_data = MentionData(realm_id, content)
        do_update_message(hamlet, msg, None, None, "change_one", False, False, content,
                          rendered_content, set(), set(), mention_data=mention_data)
    def test_finds_link_after_edit(self) -> None:
        # has_link should track the message's current content across edits.
        hamlet = self.example_user('hamlet')
        msg_id = self.send_stream_message(hamlet, 'Denmark', content='a')
        msg = Message.objects.get(id=msg_id)
        self.assertFalse(msg.has_link)
        self.update_message(msg, 'a http://foo.com')
        self.assertTrue(msg.has_link)
        self.update_message(msg, 'a')
        self.assertFalse(msg.has_link)
        # Check in blockquotes work
        self.update_message(msg, '> http://bar.com')
        self.assertTrue(msg.has_link)
        self.update_message(msg, 'a `http://foo.com`')
        self.assertFalse(msg.has_link)
    def test_has_image(self) -> None:
        # Only links to image files (.png here, not .pdf) set has_image.
        msg_ids = []
        msg_contents = ["Link: foo.org",
                        "Image: https://www.google.com/images/srpr/logo4w.png",
                        "Image: https://www.google.com/images/srpr/logo4w.pdf",
                        "[Google Link](https://www.google.com/images/srpr/logo4w.png)"]
        for msg_content in msg_contents:
            msg_ids.append(self.send_stream_message(self.example_user('hamlet'),
                                                    'Denmark', content=msg_content))
        msgs = [Message.objects.get(id=id) for id in msg_ids]
        self.assertEqual([False, True, False, True], [msg.has_image for msg in msgs])
        self.update_message(msgs[0], 'https://www.google.com/images/srpr/logo4w.png')
        self.assertTrue(msgs[0].has_image)
        self.update_message(msgs[0], 'No Image Again')
        self.assertFalse(msgs[0].has_image)
    def test_has_attachment(self) -> None:
        # has_attachment and the attachment_set should track edits.
        hamlet = self.example_user('hamlet')
        dummy_path_ids = self.setup_dummy_attachments(hamlet)
        dummy_urls = [f"http://zulip.testserver/user_uploads/{x}" for x in dummy_path_ids]
        self.subscribe(hamlet, "Denmark")
        body = ("Files ...[zulip.txt]({}) {} {}").format(dummy_urls[0], dummy_urls[1], dummy_urls[2])
        msg_id = self.send_stream_message(hamlet, "Denmark", body, "test")
        msg = Message.objects.get(id=msg_id)
        self.assertTrue(msg.has_attachment)
        self.update_message(msg, 'No Attachments')
        self.assertFalse(msg.has_attachment)
        self.update_message(msg, body)
        self.assertTrue(msg.has_attachment)
        self.update_message(msg, f'Link in code: `{dummy_urls[1]}`')
        self.assertFalse(msg.has_attachment)
        # Test blockquotes
        self.update_message(msg, f'> {dummy_urls[1]}')
        self.assertTrue(msg.has_attachment)
        # Additional test to check has_attachment is being set is due to the correct attachment.
        self.update_message(msg, f'Outside: {dummy_urls[0]}. In code: `{dummy_urls[1]}`.')
        self.assertTrue(msg.has_attachment)
        self.assertTrue(msg.attachment_set.filter(path_id=dummy_path_ids[0]))
        self.assertEqual(msg.attachment_set.count(), 1)
        self.update_message(msg, f'Outside: {dummy_urls[1]}. In code: `{dummy_urls[0]}`.')
        self.assertTrue(msg.has_attachment)
        self.assertTrue(msg.attachment_set.filter(path_id=dummy_path_ids[1]))
        self.assertEqual(msg.attachment_set.count(), 1)
        self.update_message(msg, f'Both in code: `{dummy_urls[1]} {dummy_urls[0]}`.')
        self.assertFalse(msg.has_attachment)
        self.assertEqual(msg.attachment_set.count(), 0)
    def test_potential_attachment_path_ids(self) -> None:
        # do_claim_attachments should fire only for rendered links that
        # point at this server's /user_uploads/ paths.
        hamlet = self.example_user('hamlet')
        self.subscribe(hamlet, "Denmark")
        dummy_path_ids = self.setup_dummy_attachments(hamlet)
        body = "Hello"
        msg_id = self.send_stream_message(hamlet, "Denmark", body, "test")
        msg = Message.objects.get(id=msg_id)
        with mock.patch("zerver.lib.actions.do_claim_attachments",
                        wraps=do_claim_attachments) as m:
            self.update_message(msg, f'[link](http://{hamlet.realm.host}/user_uploads/{dummy_path_ids[0]})')
            self.assertTrue(m.called)
            m.reset_mock()
            self.update_message(msg, f'[link](/user_uploads/{dummy_path_ids[1]})')
            self.assertTrue(m.called)
            m.reset_mock()
            # Same upload URL as the previous edit — presumably already
            # claimed, so no new claim call; confirm against implementation.
            self.update_message(msg, f'[new text link](/user_uploads/{dummy_path_ids[1]})')
            self.assertFalse(m.called)
            m.reset_mock()
            # It's not clear this is correct behavior
            self.update_message(msg, f'[link](user_uploads/{dummy_path_ids[2]})')
            self.assertFalse(m.called)
            m.reset_mock()
            # External URLs that merely contain "user_uploads" don't count.
            self.update_message(msg, f'[link](https://github.com/user_uploads/{dummy_path_ids[0]})')
            self.assertFalse(m.called)
            m.reset_mock()
class MissedMessageTest(ZulipTestCase):
    """Tests for get_active_presence_idle_user_ids, which decides which
    recipients count as presence-idle for a new message."""
    def test_presence_idle_user_ids(self) -> None:
        UserPresence.objects.all().delete()
        sender = self.example_user('cordelia')
        realm = sender.realm
        hamlet = self.example_user('hamlet')
        othello = self.example_user('othello')
        recipient_ids = {hamlet.id, othello.id}
        message_type = 'stream'
        user_flags: Dict[int, List[str]] = {}
        def assert_missing(user_ids: List[int]) -> None:
            # Closure over message_type/user_flags, which the test body
            # mutates between calls.
            presence_idle_user_ids = get_active_presence_idle_user_ids(
                realm=realm,
                sender_id=sender.id,
                message_type=message_type,
                active_user_ids=recipient_ids,
                user_flags=user_flags,
            )
            self.assertEqual(sorted(user_ids), sorted(presence_idle_user_ids))
        def set_presence(user: UserProfile, client_name: str, ago: int) -> None:
            # Record a presence event for `user` dated `ago` seconds ago.
            when = timezone_now() - datetime.timedelta(seconds=ago)
            UserPresence.objects.create(
                user_profile_id=user.id,
                realm_id=user.realm_id,
                client=get_client(client_name),
                timestamp=when,
            )
        # With no presence data, every PM recipient counts as idle.
        message_type = 'private'
        assert_missing([hamlet.id, othello.id])
        # For stream messages, only the mentioned user is considered here.
        message_type = 'stream'
        user_flags[hamlet.id] = ['mentioned']
        assert_missing([hamlet.id])
        # Stale presence (5000s old) still counts as idle ...
        set_presence(hamlet, 'iPhone', ago=5000)
        assert_missing([hamlet.id])
        # ... but recent presence (15s old) does not.
        set_presence(hamlet, 'webapp', ago=15)
        assert_missing([])
        message_type = 'private'
        assert_missing([othello.id])
class LogDictTest(ZulipTestCase):
    """Verify that Message.to_log_dict() emits the fields our logging
    pipeline expects, with values mirroring the underlying message."""
    def test_to_log_dict(self) -> None:
        sender = self.example_user('hamlet')
        content = 'find me some good coffee shops'
        # Send a stream message and re-fetch it so we exercise a real
        # database row rather than an in-memory object.
        message_id = self.send_stream_message(sender, 'Denmark',
                                              topic_name='Copenhagen',
                                              content=content)
        message = Message.objects.get(id=message_id)
        log_entry = message.to_log_dict()
        # A timestamp must be present, though its exact value isn't pinned.
        self.assertTrue('timestamp' in log_entry)
        # Every other logged field should mirror the message/sender data.
        expected_fields = {
            'content': 'find me some good coffee shops',
            'id': message.id,
            'recipient': 'Denmark',
            'sender_realm_str': 'zulip',
            'sender_email': sender.email,
            'sender_full_name': 'King Hamlet',
            'sender_id': sender.id,
            'sender_short_name': 'hamlet',
            'sending_client': 'test suite',
            DB_TOPIC_NAME: 'Copenhagen',
            'type': 'stream',
        }
        for field, expected_value in expected_fields.items():
            self.assertEqual(log_entry[field], expected_value)
class CheckMessageTest(ZulipTestCase):
    """Tests for check_message and the bot-owner notification fallback
    when a bot sends to a missing or subscriber-less stream."""
    def test_basic_check_message_call(self) -> None:
        """check_message returns a request dict carrying the original sender."""
        sender = self.example_user('othello')
        client = make_client(name="test suite")
        stream_name = 'España y Francia'
        self.make_stream(stream_name)
        topic_name = 'issue'
        message_content = 'whatever'
        addressee = Addressee.for_stream_name(stream_name, topic_name)
        ret = check_message(sender, client, addressee, message_content)
        self.assertEqual(ret['message'].sender.id, sender.id)
    def test_bot_pm_feature(self) -> None:
        """We send a PM to a bot's owner if their bot sends a message to
        an unsubscribed stream"""
        parent = self.example_user('othello')
        bot = do_create_user(
            email='othello-bot@zulip.com',
            password='',
            realm=parent.realm,
            full_name='',
            short_name='',
            bot_type=UserProfile.DEFAULT_BOT,
            bot_owner=parent,
        )
        # Clear the rate-limit timestamp so the first notification fires.
        bot.last_reminder = None
        sender = bot
        client = make_client(name="test suite")
        stream_name = 'Россия'
        topic_name = 'issue'
        addressee = Addressee.for_stream_name(stream_name, topic_name)
        message_content = 'whatever'
        old_count = message_stream_count(parent)
        # Try sending to stream that doesn't exist sends a reminder to
        # the sender
        with self.assertRaises(JsonableError):
            check_message(sender, client, addressee, message_content)
        new_count = message_stream_count(parent)
        self.assertEqual(new_count, old_count + 1)
        self.assertIn("that stream does not exist.", most_recent_message(parent).content)
        # Try sending to stream that exists with no subscribers soon
        # after; due to rate-limiting, this should send nothing.
        self.make_stream(stream_name)
        ret = check_message(sender, client, addressee, message_content)
        new_count = message_stream_count(parent)
        self.assertEqual(new_count, old_count + 1)
        # Try sending to stream that exists with no subscribers longer
        # after; this should send an error to the bot owner that the
        # stream doesn't exist
        assert(sender.last_reminder is not None)
        # Backdate the rate-limit timestamp by an hour to defeat it.
        sender.last_reminder = sender.last_reminder - datetime.timedelta(hours=1)
        sender.save(update_fields=["last_reminder"])
        ret = check_message(sender, client, addressee, message_content)
        new_count = message_stream_count(parent)
        self.assertEqual(new_count, old_count + 2)
        self.assertEqual(ret['message'].sender.email, 'othello-bot@zulip.com')
        self.assertIn("does not have any subscribers", most_recent_message(parent).content)
    def test_bot_pm_error_handling(self) -> None:
        # This just tests some defensive code.
        cordelia = self.example_user('cordelia')
        test_bot = self.create_test_bot(
            short_name='test',
            user_profile=cordelia,
        )
        content = 'whatever'
        good_realm = test_bot.realm
        wrong_realm = get_realm("zephyr")
        wrong_sender = cordelia
        # Mismatched realm: no reminder should be recorded.
        send_rate_limited_pm_notification_to_bot_owner(test_bot, wrong_realm, content)
        self.assertEqual(test_bot.last_reminder, None)
        # Sender that is not a bot: no reminder should be recorded.
        send_rate_limited_pm_notification_to_bot_owner(wrong_sender, good_realm, content)
        self.assertEqual(test_bot.last_reminder, None)
        # Deactivated realm: no reminder should be recorded.
        test_bot.realm.deactivated = True
        send_rate_limited_pm_notification_to_bot_owner(test_bot, good_realm, content)
        self.assertEqual(test_bot.last_reminder, None)
class DeleteMessageTest(ZulipTestCase):
    """Tests for DELETE /json/messages/<id> and the realm-level
    permission and time-limit settings that govern it."""
    def test_delete_message_invalid_request_format(self) -> None:
        self.login('iago')
        hamlet = self.example_user('hamlet')
        msg_id = self.send_stream_message(hamlet, "Scotland")
        # The message id must come from the URL; a body 'message_id' for a
        # different URL id does not help.
        result = self.client_delete(f'/json/messages/{msg_id + 1}',
                                    {'message_id': msg_id})
        self.assert_json_error(result, "Invalid message(s)")
        result = self.client_delete(f'/json/messages/{msg_id}')
        self.assert_json_success(result)
    def test_delete_message_by_user(self) -> None:
        def set_message_deleting_params(allow_message_deleting: bool,
                                        message_content_delete_limit_seconds: int) -> None:
            # Configure the realm's deletion policy as iago (an admin).
            self.login('iago')
            result = self.client_patch("/json/realm", {
                'allow_message_deleting': ujson.dumps(allow_message_deleting),
                'message_content_delete_limit_seconds': message_content_delete_limit_seconds,
            })
            self.assert_json_success(result)
        def test_delete_message_by_admin(msg_id: int) -> HttpResponse:
            self.login('iago')
            result = self.client_delete(f'/json/messages/{msg_id}')
            return result
        def test_delete_message_by_owner(msg_id: int) -> HttpResponse:
            self.login('hamlet')
            result = self.client_delete(f'/json/messages/{msg_id}')
            return result
        def test_delete_message_by_other_user(msg_id: int) -> HttpResponse:
            self.login('cordelia')
            result = self.client_delete(f'/json/messages/{msg_id}')
            return result
        # Test if message deleting is not allowed(default).
        set_message_deleting_params(False, 0)
        hamlet = self.example_user('hamlet')
        self.login_user(hamlet)
        msg_id = self.send_stream_message(hamlet, "Scotland")
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_other_user(msg_id=msg_id)
        self.assert_json_error(result, "You don't have permission to delete this message")
        # Admins can delete regardless of the setting.
        result = test_delete_message_by_admin(msg_id=msg_id)
        self.assert_json_success(result)
        # Test if message deleting is allowed.
        # Test if time limit is zero(no limit).
        set_message_deleting_params(True, 0)
        msg_id = self.send_stream_message(hamlet, "Scotland")
        message = Message.objects.get(id=msg_id)
        # Backdate the message to prove zero really means "no limit".
        message.date_sent = message.date_sent - datetime.timedelta(seconds=600)
        message.save()
        result = test_delete_message_by_other_user(msg_id=msg_id)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_success(result)
        # Test if time limit is non-zero.
        set_message_deleting_params(True, 240)
        msg_id_1 = self.send_stream_message(hamlet, "Scotland")
        message = Message.objects.get(id=msg_id_1)
        # msg_id_1 is inside the 240s window; msg_id_2 will be past it.
        message.date_sent = message.date_sent - datetime.timedelta(seconds=120)
        message.save()
        msg_id_2 = self.send_stream_message(hamlet, "Scotland")
        message = Message.objects.get(id=msg_id_2)
        message.date_sent = message.date_sent - datetime.timedelta(seconds=360)
        message.save()
        result = test_delete_message_by_other_user(msg_id=msg_id_1)
        self.assert_json_error(result, "You don't have permission to delete this message")
        result = test_delete_message_by_owner(msg_id=msg_id_1)
        self.assert_json_success(result)
        result = test_delete_message_by_owner(msg_id=msg_id_2)
        self.assert_json_error(result, "The time limit for deleting this message has passed")
        # No limit for admin.
        result = test_delete_message_by_admin(msg_id=msg_id_2)
        self.assert_json_success(result)
        # Test multiple delete requests with no latency issues
        msg_id = self.send_stream_message(hamlet, "Scotland")
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_success(result)
        result = test_delete_message_by_owner(msg_id=msg_id)
        self.assert_json_error(result, "Invalid message(s)")
        # Test handling of 500 error caused by multiple delete requests due to latency.
        # see issue #11219.
        with mock.patch("zerver.views.message_edit.do_delete_messages") as m, \
                mock.patch("zerver.views.message_edit.validate_can_delete_message", return_value=None), \
                mock.patch("zerver.views.message_edit.access_message", return_value=(None, None)):
            m.side_effect = IntegrityError()
            result = test_delete_message_by_owner(msg_id=msg_id)
            self.assert_json_error(result, "Message already deleted")
            m.side_effect = Message.DoesNotExist()
            result = test_delete_message_by_owner(msg_id=msg_id)
            self.assert_json_error(result, "Message already deleted")
class SoftDeactivationMessageTest(ZulipTestCase):
    def test_reactivate_user_if_soft_deactivated(self) -> None:
        """Reactivating a soft-deactivated user backfills their missing
        UserMessage rows, clears long_term_idle, and writes an audit-log
        entry."""
        recipient_list  = [self.example_user("hamlet"), self.example_user("iago")]
        for user_profile in recipient_list:
            self.subscribe(user_profile, "Denmark")
        sender = self.example_user('iago')
        stream_name = 'Denmark'
        topic_name = 'foo'
        def last_realm_audit_log_entry(event_type: int) -> RealmAuditLog:
            # Most recent audit-log row of the given event type.
            return RealmAuditLog.objects.filter(
                event_type=event_type,
            ).order_by('-event_time')[0]
        long_term_idle_user = self.example_user('hamlet')
        # We are sending this message to ensure that long_term_idle_user has
        # at least one UserMessage row.
        self.send_stream_message(long_term_idle_user, stream_name)
        do_soft_deactivate_users([long_term_idle_user])
        message = 'Test Message 1'
        message_id = self.send_stream_message(sender, stream_name,
                                              message, topic_name)
        idle_user_msg_list = get_user_messages(long_term_idle_user)
        idle_user_msg_count = len(idle_user_msg_list)
        # While soft-deactivated, no UserMessage row was written for the
        # new message.
        self.assertNotEqual(idle_user_msg_list[-1].content, message)
        with queries_captured() as queries:
            reactivate_user_if_soft_deactivated(long_term_idle_user)
        # Guard against query-count regressions in reactivation.
        self.assert_length(queries, 8)
        self.assertFalse(long_term_idle_user.long_term_idle)
        self.assertEqual(last_realm_audit_log_entry(
            RealmAuditLog.USER_SOFT_ACTIVATED).modified_user, long_term_idle_user)
        # The missing message was backfilled.
        idle_user_msg_list = get_user_messages(long_term_idle_user)
        self.assertEqual(len(idle_user_msg_list), idle_user_msg_count + 1)
        self.assertEqual(idle_user_msg_list[-1].content, message)
        long_term_idle_user.refresh_from_db()
        self.assertEqual(long_term_idle_user.last_active_message_id, message_id)
def test_add_missing_messages(self) -> None:
recipient_list = [self.example_user("hamlet"), self.example_user("iago")]
for user_profile in recipient_list:
self.subscribe(user_profile, "Denmark")
sender = self.example_user('iago')
realm = sender.realm
sending_client = make_client(name="test suite")
stream_name = 'Denmark'
stream = get_stream(stream_name, realm)
topic_name = 'foo'
def send_fake_message(message_content: str, stream: Stream) -> Message:
recipient = stream.recipient
message = Message(sender = sender,
recipient = recipient,
content = message_content,
date_sent = timezone_now(),
sending_client = sending_client)
message.set_topic_name(topic_name)
message.save()
return message
long_term_idle_user = self.example_user('hamlet')
self.send_stream_message(long_term_idle_user, stream_name)
do_soft_deactivate_users([long_term_idle_user])
# Test that add_missing_messages() in simplest case of adding a
# message for which UserMessage row doesn't exist for this user.
sent_message = send_fake_message('Test Message 1', stream)
idle_user_msg_list = get_user_messages(long_term_idle_user)
idle_user_msg_count = len(idle_user_msg_list)
self.assertNotEqual(idle_user_msg_list[-1], sent_message)
with queries_captured() as queries:
add_missing_messages(long_term_idle_user)
self.assert_length(queries, 6)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(len(idle_user_msg_list), idle_user_msg_count + 1)
self.assertEqual(idle_user_msg_list[-1], sent_message)
long_term_idle_user.refresh_from_db()
self.assertEqual(long_term_idle_user.last_active_message_id, sent_message.id)
# Test that add_missing_messages() only adds messages that aren't
# already present in the UserMessage table. This test works on the
# fact that previous test just above this added a message but didn't
# updated the last_active_message_id field for the user.
sent_message = send_fake_message('Test Message 2', stream)
idle_user_msg_list = get_user_messages(long_term_idle_user)
idle_user_msg_count = len(idle_user_msg_list)
self.assertNotEqual(idle_user_msg_list[-1], sent_message)
with queries_captured() as queries:
add_missing_messages(long_term_idle_user)
self.assert_length(queries, 7)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(len(idle_user_msg_list), idle_user_msg_count + 1)
self.assertEqual(idle_user_msg_list[-1], sent_message)
long_term_idle_user.refresh_from_db()
self.assertEqual(long_term_idle_user.last_active_message_id, sent_message.id)
# Test UserMessage rows are created correctly in case of stream
# Subscription was altered by admin while user was away.
# Test for a public stream.
sent_message_list = []
sent_message_list.append(send_fake_message('Test Message 3', stream))
# Alter subscription to stream.
self.unsubscribe(long_term_idle_user, stream_name)
send_fake_message('Test Message 4', stream)
self.subscribe(long_term_idle_user, stream_name)
sent_message_list.append(send_fake_message('Test Message 5', stream))
sent_message_list.reverse()
idle_user_msg_list = get_user_messages(long_term_idle_user)
idle_user_msg_count = len(idle_user_msg_list)
for sent_message in sent_message_list:
self.assertNotEqual(idle_user_msg_list.pop(), sent_message)
with queries_captured() as queries:
add_missing_messages(long_term_idle_user)
self.assert_length(queries, 6)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(len(idle_user_msg_list), idle_user_msg_count + 2)
for sent_message in sent_message_list:
self.assertEqual(idle_user_msg_list.pop(), sent_message)
long_term_idle_user.refresh_from_db()
self.assertEqual(long_term_idle_user.last_active_message_id, sent_message_list[0].id)
# Test consecutive subscribe/unsubscribe in a public stream
sent_message_list = []
sent_message_list.append(send_fake_message('Test Message 6', stream))
# Unsubscribe from stream and then immediately subscribe back again.
self.unsubscribe(long_term_idle_user, stream_name)
self.subscribe(long_term_idle_user, stream_name)
sent_message_list.append(send_fake_message('Test Message 7', stream))
# Again unsubscribe from stream and send a message.
# This will make sure that if initially in a unsubscribed state
# a consecutive subscribe/unsubscribe doesn't misbehave.
self.unsubscribe(long_term_idle_user, stream_name)
send_fake_message('Test Message 8', stream)
# Do a subscribe and unsubscribe immediately.
self.subscribe(long_term_idle_user, stream_name)
self.unsubscribe(long_term_idle_user, stream_name)
sent_message_list.reverse()
idle_user_msg_list = get_user_messages(long_term_idle_user)
idle_user_msg_count = len(idle_user_msg_list)
for sent_message in sent_message_list:
self.assertNotEqual(idle_user_msg_list.pop(), sent_message)
with queries_captured() as queries:
add_missing_messages(long_term_idle_user)
self.assert_length(queries, 6)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(len(idle_user_msg_list), idle_user_msg_count + 2)
for sent_message in sent_message_list:
self.assertEqual(idle_user_msg_list.pop(), sent_message)
long_term_idle_user.refresh_from_db()
self.assertEqual(long_term_idle_user.last_active_message_id, sent_message_list[0].id)
# Test for when user unsubscribes before soft deactivation
# (must reactivate them in order to do this).
do_soft_activate_users([long_term_idle_user])
self.subscribe(long_term_idle_user, stream_name)
# Send a real message to update last_active_message_id
sent_message_id = self.send_stream_message(
sender, stream_name, 'Test Message 9')
self.unsubscribe(long_term_idle_user, stream_name)
# Soft deactivate and send another message to the unsubscribed stream.
do_soft_deactivate_users([long_term_idle_user])
send_fake_message('Test Message 10', stream)
idle_user_msg_list = get_user_messages(long_term_idle_user)
idle_user_msg_count = len(idle_user_msg_list)
self.assertEqual(idle_user_msg_list[-1].id, sent_message_id)
with queries_captured() as queries:
add_missing_messages(long_term_idle_user)
# There are no streams to fetch missing messages from, so
# the Message.objects query will be avoided.
self.assert_length(queries, 4)
idle_user_msg_list = get_user_messages(long_term_idle_user)
# No new UserMessage rows should have been created.
self.assertEqual(len(idle_user_msg_list), idle_user_msg_count)
# Note: At this point in this test we have long_term_idle_user
# unsubscribed from the 'Denmark' stream.
# Test for a Private Stream.
stream_name = "Core"
private_stream = self.make_stream('Core', invite_only=True)
self.subscribe(self.example_user("iago"), stream_name)
sent_message_list = []
send_fake_message('Test Message 11', private_stream)
self.subscribe(self.example_user("hamlet"), stream_name)
sent_message_list.append(send_fake_message('Test Message 12', private_stream))
self.unsubscribe(long_term_idle_user, stream_name)
send_fake_message('Test Message 13', private_stream)
self.subscribe(long_term_idle_user, stream_name)
sent_message_list.append(send_fake_message('Test Message 14', private_stream))
sent_message_list.reverse()
idle_user_msg_list = get_user_messages(long_term_idle_user)
idle_user_msg_count = len(idle_user_msg_list)
for sent_message in sent_message_list:
self.assertNotEqual(idle_user_msg_list.pop(), sent_message)
with queries_captured() as queries:
add_missing_messages(long_term_idle_user)
self.assert_length(queries, 6)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(len(idle_user_msg_list), idle_user_msg_count + 2)
for sent_message in sent_message_list:
self.assertEqual(idle_user_msg_list.pop(), sent_message)
long_term_idle_user.refresh_from_db()
self.assertEqual(long_term_idle_user.last_active_message_id, sent_message_list[0].id)
@mock.patch('zerver.lib.soft_deactivation.BULK_CREATE_BATCH_SIZE', 2)
def test_add_missing_messages_pagination(self) -> None:
recipient_list = [self.example_user("hamlet"), self.example_user("iago")]
stream_name = 'Denmark'
for user_profile in recipient_list:
self.subscribe(user_profile, stream_name)
sender = self.example_user('iago')
long_term_idle_user = self.example_user('hamlet')
self.send_stream_message(long_term_idle_user, stream_name)
do_soft_deactivate_users([long_term_idle_user])
num_new_messages = 5
message_ids = []
for _ in range(num_new_messages):
message_id = self.send_stream_message(sender, stream_name)
message_ids.append(message_id)
idle_user_msg_list = get_user_messages(long_term_idle_user)
idle_user_msg_count = len(idle_user_msg_list)
with queries_captured() as queries:
add_missing_messages(long_term_idle_user)
self.assert_length(queries, 10)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(len(idle_user_msg_list), idle_user_msg_count + num_new_messages)
long_term_idle_user.refresh_from_db()
self.assertEqual(long_term_idle_user.last_active_message_id, message_ids[-1])
def test_user_message_filter(self) -> None:
# In this test we are basically testing out the logic used out in
# do_send_messages() in action.py for filtering the messages for which
# UserMessage rows should be created for a soft-deactivated user.
recipient_list = [
self.example_user("hamlet"),
self.example_user("iago"),
self.example_user('cordelia'),
]
for user_profile in recipient_list:
self.subscribe(user_profile, "Denmark")
cordelia = self.example_user('cordelia')
sender = self.example_user('iago')
stream_name = 'Denmark'
topic_name = 'foo'
def send_stream_message(content: str) -> None:
self.send_stream_message(sender, stream_name,
content, topic_name)
def send_personal_message(content: str) -> None:
self.send_personal_message(sender, self.example_user("hamlet"), content)
long_term_idle_user = self.example_user('hamlet')
self.send_stream_message(long_term_idle_user, stream_name)
do_soft_deactivate_users([long_term_idle_user])
def assert_um_count(user: UserProfile, count: int) -> None:
user_messages = get_user_messages(user)
self.assertEqual(len(user_messages), count)
def assert_last_um_content(user: UserProfile, content: str, negate: bool=False) -> None:
user_messages = get_user_messages(user)
if negate:
self.assertNotEqual(user_messages[-1].content, content)
else:
self.assertEqual(user_messages[-1].content, content)
# Test that sending a message to a stream with soft deactivated user
# doesn't end up creating UserMessage row for deactivated user.
general_user_msg_count = len(get_user_messages(cordelia))
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = 'Test Message 1'
send_stream_message(message)
assert_last_um_content(long_term_idle_user, message, negate=True)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count)
assert_um_count(cordelia, general_user_msg_count + 1)
assert_last_um_content(cordelia, message)
# Test that sending a message to a stream with soft deactivated user
# and push/email notifications on creates a UserMessage row for the
# deactivated user.
sub = get_subscription(stream_name, long_term_idle_user)
sub.push_notifications = True
sub.save()
general_user_msg_count = len(get_user_messages(cordelia))
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = 'Test private stream message'
send_stream_message(message)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count + 1)
assert_last_um_content(long_term_idle_user, message)
sub.push_notifications = False
sub.save()
# Test sending a private message to soft deactivated user creates
# UserMessage row.
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = 'Test PM'
send_personal_message(message)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count + 1)
assert_last_um_content(long_term_idle_user, message)
# Test UserMessage row is created while user is deactivated if
# user itself is mentioned.
general_user_msg_count = len(get_user_messages(cordelia))
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = 'Test @**King Hamlet** mention'
send_stream_message(message)
assert_last_um_content(long_term_idle_user, message)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count + 1)
assert_um_count(cordelia, general_user_msg_count + 1)
assert_last_um_content(cordelia, message)
# Test UserMessage row is not created while user is deactivated if
# anyone is mentioned but the user.
general_user_msg_count = len(get_user_messages(cordelia))
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = 'Test @**Cordelia Lear** mention'
send_stream_message(message)
assert_last_um_content(long_term_idle_user, message, negate=True)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count)
assert_um_count(cordelia, general_user_msg_count + 1)
assert_last_um_content(cordelia, message)
# Test UserMessage row is created while user is deactivated if
# there is a wildcard mention such as @all or @everyone
general_user_msg_count = len(get_user_messages(cordelia))
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = 'Test @**all** mention'
send_stream_message(message)
assert_last_um_content(long_term_idle_user, message)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count + 1)
assert_um_count(cordelia, general_user_msg_count + 1)
assert_last_um_content(cordelia, message)
general_user_msg_count = len(get_user_messages(cordelia))
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = 'Test @**everyone** mention'
send_stream_message(message)
assert_last_um_content(long_term_idle_user, message)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count + 1)
assert_um_count(cordelia, general_user_msg_count + 1)
assert_last_um_content(cordelia, message)
general_user_msg_count = len(get_user_messages(cordelia))
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = 'Test @**stream** mention'
send_stream_message(message)
assert_last_um_content(long_term_idle_user, message)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count + 1)
assert_um_count(cordelia, general_user_msg_count + 1)
assert_last_um_content(cordelia, message)
# Test UserMessage row is not created while user is deactivated if there
# is a alert word in message.
do_add_alert_words(long_term_idle_user, ['test_alert_word'])
general_user_msg_count = len(get_user_messages(cordelia))
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = 'Testing test_alert_word'
send_stream_message(message)
assert_last_um_content(long_term_idle_user, message)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count + 1)
assert_um_count(cordelia, general_user_msg_count + 1)
assert_last_um_content(cordelia, message)
# Test UserMessage row is created while user is deactivated if
# message is a me message.
general_user_msg_count = len(get_user_messages(cordelia))
soft_deactivated_user_msg_count = len(get_user_messages(long_term_idle_user))
message = '/me says test'
send_stream_message(message)
assert_last_um_content(long_term_idle_user, message, negate=True)
assert_um_count(long_term_idle_user, soft_deactivated_user_msg_count)
assert_um_count(cordelia, general_user_msg_count + 1)
assert_last_um_content(cordelia, message)
class MessageHydrationTest(ZulipTestCase):
    """Exercises MessageDict.hydrate_recipient_info() and messages_for_ids()."""

    def test_hydrate_stream_recipient_info(self) -> None:
        """Hydrating a stream message row fills in the stream name as
        display_recipient and sets the type to 'stream'."""
        realm = get_realm('zulip')
        cordelia = self.example_user('cordelia')
        verona_id = get_stream('Verona', realm).id
        message_row = dict(
            recipient_type=Recipient.STREAM,
            recipient_type_id=verona_id,
            sender_is_mirror_dummy=False,
            sender_email=cordelia.email,
            sender_full_name=cordelia.full_name,
            sender_short_name=cordelia.short_name,
            sender_id=cordelia.id,
        )
        MessageDict.hydrate_recipient_info(message_row, 'Verona')
        self.assertEqual(message_row['type'], 'stream')
        self.assertEqual(message_row['display_recipient'], 'Verona')

    def test_hydrate_pm_recipient_info(self) -> None:
        """Hydrating a personal message appends the sender to the
        display_recipient list and sets the type to 'private'."""
        cordelia = self.example_user('cordelia')
        aaron_entry = dict(
            email='aaron@example.com',
            full_name='Aaron Smith',
            short_name='Aaron',
            id=999,
            is_mirror_dummy=False,
        )
        display_recipient: List[UserDisplayRecipient] = [aaron_entry]
        message_row = dict(
            recipient_type=Recipient.PERSONAL,
            recipient_type_id=None,
            sender_is_mirror_dummy=False,
            sender_email=cordelia.email,
            sender_full_name=cordelia.full_name,
            sender_short_name=cordelia.short_name,
            sender_id=cordelia.id,
        )
        MessageDict.hydrate_recipient_info(message_row, display_recipient)
        cordelia_entry = dict(
            email=cordelia.email,
            full_name=cordelia.full_name,
            short_name=cordelia.short_name,
            id=cordelia.id,
            is_mirror_dummy=False,
        )
        self.assertEqual(message_row['display_recipient'], [aaron_entry, cordelia_entry])
        self.assertEqual(message_row['type'], 'private')

    def test_messages_for_ids(self) -> None:
        """messages_for_ids() returns rendered content and the caller's
        per-message flags for each requested message id."""
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        stream_name = 'test stream'
        self.subscribe(cordelia, stream_name)
        old_message_id = self.send_stream_message(cordelia, stream_name, content='foo')
        self.subscribe(hamlet, stream_name)
        content = 'hello @**King Hamlet**'
        new_message_id = self.send_stream_message(cordelia, stream_name, content=content)
        user_message_flags = {
            old_message_id: ['read', 'historical'],
            new_message_id: ['mentioned'],
        }
        fetched = messages_for_ids(
            message_ids=[old_message_id, new_message_id],
            user_message_flags=user_message_flags,
            search_fields={},
            apply_markdown=True,
            client_gravatar=True,
            allow_edit_history=False,
        )
        self.assertEqual(len(fetched), 2)
        by_id = {message['id']: message for message in fetched}
        old_message = by_id[old_message_id]
        new_message = by_id[new_message_id]
        self.assertEqual(old_message['content'], '<p>foo</p>')
        self.assertEqual(old_message['flags'], ['read', 'historical'])
        self.assertIn('class="user-mention"', new_message['content'])
        self.assertEqual(new_message['flags'], ['mentioned'])

    def test_display_recipient_up_to_date(self) -> None:
        """
        This is a test for a bug where due to caching of message_dicts,
        after updating a user's information, fetching those cached messages
        via messages_for_ids would return message_dicts with display_recipient
        still having the old information. The returned message_dicts should have
        up-to-date display_recipients and we check for that here.
        """
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        message_id = self.send_personal_message(hamlet, cordelia, 'test')
        cordelia_recipient = cordelia.recipient
        # Cause the display_recipient to get cached:
        get_display_recipient(cordelia_recipient)
        # Change cordelia's email:
        cordelia_new_email = 'new-cordelia@zulip.com'
        cordelia.email = cordelia_new_email
        cordelia.save()
        # Local display_recipient cache needs to be flushed.
        # flush_per_request_caches() is called after every request,
        # so it makes sense to run it here.
        flush_per_request_caches()
        fetched = messages_for_ids(
            message_ids=[message_id],
            user_message_flags={message_id: ['read']},
            search_fields={},
            apply_markdown=True,
            client_gravatar=True,
            allow_edit_history=False,
        )
        # Find which display_recipient entry is cordelia and make sure
        # her email is up-to-date.
        cordelia_display_recipient = next(
            recipient for recipient in fetched[0]['display_recipient']
            if recipient['short_name'] == 'cordelia'
        )
        self.assertEqual(cordelia_display_recipient['email'], cordelia_new_email)
class TestMessageForIdsDisplayRecipientFetching(ZulipTestCase):
    """Verifies messages_for_ids() produces correct display_recipient values
    for personal, stream, huddle, and mixed batches of messages."""

    def _verify_display_recipient(self, display_recipient: DisplayRecipientT,
                                  expected_recipient_objects: Union[Stream, List[UserProfile]]) -> None:
        # A stream recipient hydrates to just the stream name.
        if isinstance(expected_recipient_objects, Stream):
            self.assertEqual(display_recipient, expected_recipient_objects.name)
            return
        # Otherwise each expected user must appear as a full entry.
        for user_profile in expected_recipient_objects:
            expected_entry: UserDisplayRecipient = {
                'email': user_profile.email,
                'full_name': user_profile.full_name,
                'short_name': user_profile.short_name,
                'id': user_profile.id,
                'is_mirror_dummy': user_profile.is_mirror_dummy,
            }
            self.assertTrue(expected_entry in display_recipient)

    def _fetch(self, message_ids: List[int]):
        # Fetch the given messages with a fixed set of rendering options;
        # every message is marked 'read' for simplicity.
        return messages_for_ids(
            message_ids=message_ids,
            user_message_flags={message_id: ['read'] for message_id in message_ids},
            search_fields={},
            apply_markdown=True,
            client_gravatar=True,
            allow_edit_history=False,
        )

    def test_display_recipient_personal(self) -> None:
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        othello = self.example_user('othello')
        fetched = self._fetch([
            self.send_personal_message(hamlet, cordelia, 'test'),
            self.send_personal_message(cordelia, othello, 'test'),
        ])
        self._verify_display_recipient(fetched[0]['display_recipient'], [hamlet, cordelia])
        self._verify_display_recipient(fetched[1]['display_recipient'], [cordelia, othello])

    def test_display_recipient_stream(self) -> None:
        cordelia = self.example_user('cordelia')
        fetched = self._fetch([
            self.send_stream_message(cordelia, "Verona", content='test'),
            self.send_stream_message(cordelia, "Denmark", content='test'),
        ])
        self._verify_display_recipient(fetched[0]['display_recipient'], get_stream("Verona", cordelia.realm))
        self._verify_display_recipient(fetched[1]['display_recipient'], get_stream("Denmark", cordelia.realm))

    def test_display_recipient_huddle(self) -> None:
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        othello = self.example_user('othello')
        iago = self.example_user('iago')
        fetched = self._fetch([
            self.send_huddle_message(hamlet, [cordelia, othello], 'test'),
            self.send_huddle_message(cordelia, [hamlet, othello, iago], 'test'),
        ])
        self._verify_display_recipient(fetched[0]['display_recipient'], [hamlet, cordelia, othello])
        self._verify_display_recipient(fetched[1]['display_recipient'], [hamlet, cordelia, othello, iago])

    def test_display_recipient_various_types(self) -> None:
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        othello = self.example_user('othello')
        iago = self.example_user('iago')
        fetched = self._fetch([
            self.send_huddle_message(hamlet, [cordelia, othello], 'test'),
            self.send_stream_message(cordelia, "Verona", content='test'),
            self.send_personal_message(hamlet, cordelia, 'test'),
            self.send_stream_message(cordelia, "Denmark", content='test'),
            self.send_huddle_message(cordelia, [hamlet, othello, iago], 'test'),
            self.send_personal_message(cordelia, othello, 'test'),
        ])
        self._verify_display_recipient(fetched[0]['display_recipient'], [hamlet, cordelia, othello])
        self._verify_display_recipient(fetched[1]['display_recipient'], get_stream("Verona", hamlet.realm))
        self._verify_display_recipient(fetched[2]['display_recipient'], [hamlet, cordelia])
        self._verify_display_recipient(fetched[3]['display_recipient'], get_stream("Denmark", hamlet.realm))
        self._verify_display_recipient(fetched[4]['display_recipient'], [hamlet, cordelia, othello, iago])
        self._verify_display_recipient(fetched[5]['display_recipient'], [cordelia, othello])
class MessageVisibilityTest(ZulipTestCase):
    """Covers update_first_visible_message_id() and its scheduled wrapper
    maybe_update_first_visible_message_id()."""

    def test_update_first_visible_message_id(self) -> None:
        Message.objects.all().delete()
        othello = self.example_user("othello")
        message_ids = [self.send_stream_message(othello, "Scotland") for _ in range(15)]
        realm = get_realm("zulip")

        # With no visibility limit, the first visible id must reset to 0.
        # Seed first_visible_message_id with a non-default value so the
        # reset is observable.
        realm.message_visibility_limit = None
        realm.first_visible_message_id = 5
        realm.save()
        update_first_visible_message_id(realm)
        self.assertEqual(get_first_visible_message_id(realm), 0)

        # A limit of 10 leaves only the newest 10 of 15 messages visible,
        # so the 6th message (index 5) becomes the first visible one.
        realm.message_visibility_limit = 10
        realm.save()
        update_first_visible_message_id(realm)
        self.assertEqual(get_first_visible_message_id(realm), message_ids[5])

        # A limit larger than the message count makes everything visible.
        realm.message_visibility_limit = 50
        realm.save()
        update_first_visible_message_id(realm)
        self.assertEqual(get_first_visible_message_id(realm), 0)

    def test_maybe_update_first_visible_message_id(self) -> None:
        realm = get_realm("zulip")
        lookback_hours = 30
        end_time = timezone_now() - datetime.timedelta(hours=lookback_hours - 5)
        stat = COUNT_STATS['messages_sent:is_bot:hour']

        def run_with_mock() -> mock.MagicMock:
            # Run the wrapper with the underlying updater mocked out, and
            # hand back the mock so callers can assert on it.
            with mock.patch("zerver.lib.message.update_first_visible_message_id") as m:
                maybe_update_first_visible_message_id(realm, lookback_hours)
            return m

        # No visibility limit configured: the update must be skipped even
        # with recent message activity recorded.
        realm.message_visibility_limit = None
        realm.save()
        RealmCount.objects.create(realm=realm, property=stat.property,
                                  end_time=end_time, value=5)
        run_with_mock().assert_not_called()

        # Limit configured but no recent activity: still skipped.
        realm.message_visibility_limit = 10
        realm.save()
        RealmCount.objects.all().delete()
        run_with_mock().assert_not_called()

        # Limit configured and recent activity present: update runs once.
        RealmCount.objects.create(realm=realm, property=stat.property,
                                  end_time=end_time, value=5)
        run_with_mock().assert_called_once_with(realm)
class TestBulkGetHuddleUserIds(ZulipTestCase):
    """bulk_get_huddle_user_ids() must agree with calling
    get_huddle_user_ids() once per recipient."""

    def test_bulk_get_huddle_user_ids(self) -> None:
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        othello = self.example_user('othello')
        iago = self.example_user('iago')
        message_ids = [
            self.send_huddle_message(hamlet, [cordelia, othello], 'test'),
            self.send_huddle_message(cordelia, [hamlet, othello, iago], 'test'),
        ]
        recipients = [
            message.recipient
            for message in Message.objects.filter(id__in=message_ids).order_by("id")
        ]
        bulk_result = bulk_get_huddle_user_ids(recipients)
        # Each bulk entry must match the single-recipient lookup.
        for recipient in recipients:
            self.assertEqual(bulk_result[recipient.id], list(get_huddle_user_ids(recipient)))

    def test_bulk_get_huddle_user_ids_empty_list(self) -> None:
        # Degenerate input: no recipients yields an empty mapping.
        self.assertEqual(bulk_get_huddle_user_ids([]), {})
class NoRecipientIDsTest(ZulipTestCase):
    def test_no_recipient_ids(self) -> None:
        """gather_subscriptions_helper() must not crash for a user with no
        stream subscriptions at all (a rare corner case); it should simply
        report zero subscriptions."""
        cordelia = self.example_user('cordelia')
        Subscription.objects.filter(
            user_profile=cordelia, recipient__type=Recipient.STREAM,
        ).delete()
        subscriptions = gather_subscriptions_helper(cordelia)
        self.assertEqual(len(subscriptions[0]), 0)
import logging
from flask import current_app
from drift.core.extensions.jwt import query_current_user
log = logging.getLogger(__name__)
def _update_analytics():
    """Tally the just-finished request in Redis for lightweight usage stats.

    Increments a per-tenant request counter and, when the JWT carries a
    client_id, a per-client counter as well.  Both keys expire after an
    hour, so the counters are a rolling window rather than a running total.
    Does nothing if no Redis session is already available.
    """
    client_id = None
    current_user = query_current_user()
    if current_user:
        user_id = current_user["user_id"]
        client_id = current_user.get("client_id")
        if not client_id:
            # Lazy %-style logger args: the message is only formatted when
            # DEBUG logging is actually enabled (the original eagerly built
            # the string with '%' on every request).
            log.debug("client_id not found in JWT for user %s. Not updating client stats.", user_id)
    # As we are doing this 'after request' we should only acquire the redis session if it's
    # already available. A "hard" reference on g.redis could have side effects in this
    # context.
    redis = current_app.extensions['redis'].get_session_if_available()
    if redis:
        # use redis pipeline to minimize roundtrips
        pipe = redis.conn.pipeline()
        k = redis.make_key('stats:numrequests')
        pipe.incr(k)
        pipe.expire(k, 3600)
        if client_id:
            k = redis.make_key('stats:numrequestsclient:{}'.format(client_id))
            pipe.incr(k)
            pipe.expire(k, 3600)
        pipe.execute()
def after_request(response):
    """Flask after-request hook: record request stats in Redis, then pass
    the response through unmodified."""
    _update_analytics()
    return response
def register_extension(app):
    # Install the analytics hook so every completed request is counted.
    app.after_request(after_request)
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* A unit test for KafkaFuture.
*/
@Timeout(120)
public class KafkaFutureTest {
/** Asserts that the given future is done, didn't fail and wasn't cancelled. */
private void assertIsSuccessful(KafkaFuture<?> future) {
assertTrue(future.isDone());
assertFalse(future.isCompletedExceptionally());
assertFalse(future.isCancelled());
}
/** Asserts that the given future is done, failed and wasn't cancelled. */
private void assertIsFailed(KafkaFuture<?> future) {
assertTrue(future.isDone());
assertFalse(future.isCancelled());
assertTrue(future.isCompletedExceptionally());
}
/** Asserts that the given future is done, didn't fail and was cancelled. */
private void assertIsCancelled(KafkaFuture<?> future) {
assertTrue(future.isDone());
assertTrue(future.isCancelled());
assertTrue(future.isCompletedExceptionally());
assertThrows(CancellationException.class, () -> future.getNow(null));
assertThrows(CancellationException.class, () -> future.get(0, TimeUnit.MILLISECONDS));
}
private <T> void awaitAndAssertResult(KafkaFuture<T> future,
T expectedResult,
T alternativeValue) {
assertNotEquals(expectedResult, alternativeValue);
try {
assertEquals(expectedResult, future.get(5, TimeUnit.MINUTES));
} catch (Exception e) {
throw new AssertionError("Unexpected exception", e);
}
try {
assertEquals(expectedResult, future.get());
} catch (Exception e) {
throw new AssertionError("Unexpected exception", e);
}
try {
assertEquals(expectedResult, future.getNow(alternativeValue));
} catch (Exception e) {
throw new AssertionError("Unexpected exception", e);
}
}
private Throwable awaitAndAssertFailure(KafkaFuture<?> future,
Class<? extends Throwable> expectedException,
String expectedMessage) {
ExecutionException executionException = assertThrows(ExecutionException.class, () -> future.get(5, TimeUnit.MINUTES));
assertEquals(expectedException, executionException.getCause().getClass());
assertEquals(expectedMessage, executionException.getCause().getMessage());
executionException = assertThrows(ExecutionException.class, future::get);
assertEquals(expectedException, executionException.getCause().getClass());
assertEquals(expectedMessage, executionException.getCause().getMessage());
executionException = assertThrows(ExecutionException.class, () -> future.getNow(null));
assertEquals(expectedException, executionException.getCause().getClass());
assertEquals(expectedMessage, executionException.getCause().getMessage());
return executionException.getCause();
}
private void awaitAndAssertCancelled(KafkaFuture<?> future, String expectedMessage) {
CancellationException cancellationException = assertThrows(CancellationException.class, () -> future.get(5, TimeUnit.MINUTES));
assertEquals(expectedMessage, cancellationException.getMessage());
assertEquals(CancellationException.class, cancellationException.getClass());
cancellationException = assertThrows(CancellationException.class, future::get);
assertEquals(expectedMessage, cancellationException.getMessage());
assertEquals(CancellationException.class, cancellationException.getClass());
cancellationException = assertThrows(CancellationException.class, () -> future.getNow(null));
assertEquals(expectedMessage, cancellationException.getMessage());
assertEquals(CancellationException.class, cancellationException.getClass());
}
private Object invokeOrThrow(final Method method, final Object obj, final Object... args) throws Throwable {
try {
return method.invoke(obj, args);
} catch (InvocationTargetException e) {
throw e.getCause();
}
}
@Test
public void testCompleteFutures() throws Exception {
KafkaFutureImpl<Integer> future123 = new KafkaFutureImpl<>();
assertTrue(future123.complete(123));
assertFalse(future123.complete(456));
assertFalse(future123.cancel(true));
assertEquals(Integer.valueOf(123), future123.get());
assertIsSuccessful(future123);
KafkaFuture<Integer> future456 = KafkaFuture.completedFuture(456);
assertFalse(future456.complete(789));
assertFalse(future456.cancel(true));
assertEquals(Integer.valueOf(456), future456.get());
assertIsSuccessful(future456);
}
@Test
public void testCompleteFuturesExceptionally() {
KafkaFutureImpl<Integer> futureFail = new KafkaFutureImpl<>();
assertTrue(futureFail.completeExceptionally(new RuntimeException("We require more vespene gas")));
assertIsFailed(futureFail);
assertFalse(futureFail.completeExceptionally(new RuntimeException("We require more minerals")));
assertFalse(futureFail.cancel(true));
ExecutionException executionException = assertThrows(ExecutionException.class, futureFail::get);
assertEquals(RuntimeException.class, executionException.getCause().getClass());
assertEquals("We require more vespene gas", executionException.getCause().getMessage());
KafkaFutureImpl<Integer> tricky1 = new KafkaFutureImpl<>();
assertTrue(tricky1.completeExceptionally(new CompletionException(new CancellationException())));
assertIsFailed(tricky1);
awaitAndAssertFailure(tricky1, CompletionException.class, "java.util.concurrent.CancellationException");
}
@Test
public void testCompleteFuturesViaCancellation() {
KafkaFutureImpl<Integer> viaCancel = new KafkaFutureImpl<>();
assertTrue(viaCancel.cancel(true));
assertIsCancelled(viaCancel);
awaitAndAssertCancelled(viaCancel, null);
KafkaFutureImpl<Integer> viaCancellationException = new KafkaFutureImpl<>();
assertTrue(viaCancellationException.completeExceptionally(new CancellationException("We require more vespene gas")));
assertIsCancelled(viaCancellationException);
awaitAndAssertCancelled(viaCancellationException, "We require more vespene gas");
}
@Test
public void testToString() {
KafkaFutureImpl<Integer> success = new KafkaFutureImpl<>();
assertEquals("KafkaFuture{value=null,exception=null,done=false}", success.toString());
success.complete(12);
assertEquals("KafkaFuture{value=12,exception=null,done=true}", success.toString());
KafkaFutureImpl<Integer> failure = new KafkaFutureImpl<>();
failure.completeExceptionally(new RuntimeException("foo"));
assertEquals("KafkaFuture{value=null,exception=java.lang.RuntimeException: foo,done=true}", failure.toString());
KafkaFutureImpl<Integer> tricky1 = new KafkaFutureImpl<>();
tricky1.completeExceptionally(new CompletionException(new CancellationException()));
assertEquals("KafkaFuture{value=null,exception=java.util.concurrent.CompletionException: java.util.concurrent.CancellationException,done=true}", tricky1.toString());
KafkaFutureImpl<Integer> cancelled = new KafkaFutureImpl<>();
cancelled.cancel(true);
assertEquals("KafkaFuture{value=null,exception=java.util.concurrent.CancellationException,done=true}", cancelled.toString());
}
@Test
public void testCompletingFutures() throws Exception {
final KafkaFutureImpl<String> future = new KafkaFutureImpl<>();
CompleterThread<String> myThread = new CompleterThread<>(future, "You must construct additional pylons.");
assertIsNotCompleted(future);
assertEquals("I am ready", future.getNow("I am ready"));
myThread.start();
awaitAndAssertResult(future, "You must construct additional pylons.", "I am ready");
assertIsSuccessful(future);
myThread.join();
assertNull(myThread.testException);
}
@Test
public void testCompletingFuturesExceptionally() throws Exception {
final KafkaFutureImpl<String> future = new KafkaFutureImpl<>();
CompleterThread<String> myThread = new CompleterThread<>(future, null,
new RuntimeException("Ultimate efficiency achieved."));
assertIsNotCompleted(future);
assertEquals("I am ready", future.getNow("I am ready"));
myThread.start();
awaitAndAssertFailure(future, RuntimeException.class, "Ultimate efficiency achieved.");
assertIsFailed(future);
myThread.join();
assertNull(myThread.testException);
}
@Test
public void testCompletingFuturesViaCancellation() throws Exception {
final KafkaFutureImpl<String> future = new KafkaFutureImpl<>();
CompleterThread<String> myThread = new CompleterThread<>(future, null,
new CancellationException("Ultimate efficiency achieved."));
assertIsNotCompleted(future);
assertEquals("I am ready", future.getNow("I am ready"));
myThread.start();
awaitAndAssertCancelled(future, "Ultimate efficiency achieved.");
assertIsCancelled(future);
myThread.join();
assertNull(myThread.testException);
}
private void assertIsNotCompleted(KafkaFutureImpl<String> future) {
assertFalse(future.isDone());
assertFalse(future.isCompletedExceptionally());
assertFalse(future.isCancelled());
}
@Test
public void testThenApplyOnSucceededFuture() throws Exception {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
KafkaFuture<Integer> doubledFuture = future.thenApply(integer -> 2 * integer);
assertFalse(doubledFuture.isDone());
KafkaFuture<Integer> tripledFuture = future.thenApply(integer -> 3 * integer);
assertFalse(tripledFuture.isDone());
future.complete(21);
assertEquals(Integer.valueOf(21), future.getNow(-1));
assertEquals(Integer.valueOf(42), doubledFuture.getNow(-1));
assertEquals(Integer.valueOf(63), tripledFuture.getNow(-1));
KafkaFuture<Integer> quadrupledFuture = future.thenApply(integer -> 4 * integer);
assertEquals(Integer.valueOf(84), quadrupledFuture.getNow(-1));
}
@Test
public void testThenApplyOnFailedFuture() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
KafkaFuture<Integer> dependantFuture = future.thenApply(integer -> 2 * integer);
future.completeExceptionally(new RuntimeException("We require more vespene gas"));
assertIsFailed(future);
assertIsFailed(dependantFuture);
awaitAndAssertFailure(future, RuntimeException.class, "We require more vespene gas");
awaitAndAssertFailure(dependantFuture, RuntimeException.class, "We require more vespene gas");
}
@Test
public void testThenApplyOnFailedFutureTricky() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
KafkaFuture<Integer> dependantFuture = future.thenApply(integer -> 2 * integer);
future.completeExceptionally(new CompletionException(new RuntimeException("We require more vespene gas")));
assertIsFailed(future);
assertIsFailed(dependantFuture);
awaitAndAssertFailure(future, CompletionException.class, "java.lang.RuntimeException: We require more vespene gas");
awaitAndAssertFailure(dependantFuture, CompletionException.class, "java.lang.RuntimeException: We require more vespene gas");
}
@Test
public void testThenApplyOnFailedFutureTricky2() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
KafkaFuture<Integer> dependantFuture = future.thenApply(integer -> 2 * integer);
future.completeExceptionally(new CompletionException(new CancellationException()));
assertIsFailed(future);
assertIsFailed(dependantFuture);
awaitAndAssertFailure(future, CompletionException.class, "java.util.concurrent.CancellationException");
awaitAndAssertFailure(dependantFuture, CompletionException.class, "java.util.concurrent.CancellationException");
}
@Test
public void testThenApplyOnSucceededFutureAndFunctionThrows() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
KafkaFuture<Integer> dependantFuture = future.thenApply(integer -> {
throw new RuntimeException("We require more vespene gas");
});
future.complete(21);
assertIsSuccessful(future);
assertIsFailed(dependantFuture);
awaitAndAssertResult(future, 21, null);
awaitAndAssertFailure(dependantFuture, RuntimeException.class, "We require more vespene gas");
}
@Test
public void testThenApplyOnSucceededFutureAndFunctionThrowsCompletionException() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
KafkaFuture<Integer> dependantFuture = future.thenApply(integer -> {
throw new CompletionException(new RuntimeException("We require more vespene gas"));
});
future.complete(21);
assertIsSuccessful(future);
assertIsFailed(dependantFuture);
awaitAndAssertResult(future, 21, null);
Throwable cause = awaitAndAssertFailure(dependantFuture, CompletionException.class, "java.lang.RuntimeException: We require more vespene gas");
assertInstanceOf(RuntimeException.class, cause.getCause());
assertEquals("We require more vespene gas", cause.getCause().getMessage());
}
@Test
public void testThenApplyOnFailedFutureFunctionNotCalled() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
boolean[] ran = {false};
KafkaFuture<Integer> dependantFuture = future.thenApply(integer -> {
// Because the top level future failed, this should never be called.
ran[0] = true;
return null;
});
future.completeExceptionally(new RuntimeException("We require more minerals"));
assertIsFailed(future);
assertIsFailed(dependantFuture);
awaitAndAssertFailure(future, RuntimeException.class, "We require more minerals");
awaitAndAssertFailure(dependantFuture, RuntimeException.class, "We require more minerals");
assertFalse(ran[0]);
}
@Test
public void testThenApplyOnCancelledFuture() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
KafkaFuture<Integer> dependantFuture = future.thenApply(integer -> 2 * integer);
future.cancel(true);
assertIsCancelled(future);
assertIsCancelled(dependantFuture);
awaitAndAssertCancelled(future, null);
awaitAndAssertCancelled(dependantFuture, null);
}
@Test
public void testWhenCompleteOnSucceededFuture() throws Throwable {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
Throwable[] err = new Throwable[1];
boolean[] ran = {false};
KafkaFuture<Integer> dependantFuture = future.whenComplete((integer, ex) -> {
ran[0] = true;
try {
assertEquals(Integer.valueOf(21), integer);
if (ex != null) {
throw ex;
}
} catch (Throwable e) {
err[0] = e;
}
});
assertFalse(dependantFuture.isDone());
assertTrue(future.complete(21));
assertTrue(ran[0]);
if (err[0] != null) {
throw err[0];
}
}
@Test
public void testWhenCompleteOnFailedFuture() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
Throwable[] err = new Throwable[1];
boolean[] ran = {false};
KafkaFuture<Integer> dependantFuture = future.whenComplete((integer, ex) -> {
ran[0] = true;
err[0] = ex;
if (integer != null) {
err[0] = new AssertionError();
}
});
assertFalse(dependantFuture.isDone());
RuntimeException ex = new RuntimeException("We require more vespene gas");
assertTrue(future.completeExceptionally(ex));
assertTrue(ran[0]);
assertEquals(err[0], ex);
}
@Test
public void testWhenCompleteOnSucceededFutureAndConsumerThrows() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
boolean[] ran = {false};
KafkaFuture<Integer> dependantFuture = future.whenComplete((integer, ex) -> {
ran[0] = true;
throw new RuntimeException("We require more minerals");
});
assertFalse(dependantFuture.isDone());
assertTrue(future.complete(21));
assertIsSuccessful(future);
assertTrue(ran[0]);
assertIsFailed(dependantFuture);
awaitAndAssertFailure(dependantFuture, RuntimeException.class, "We require more minerals");
}
@Test
public void testWhenCompleteOnFailedFutureAndConsumerThrows() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
boolean[] ran = {false};
KafkaFuture<Integer> dependantFuture = future.whenComplete((integer, ex) -> {
ran[0] = true;
throw new RuntimeException("We require more minerals");
});
assertFalse(dependantFuture.isDone());
assertTrue(future.completeExceptionally(new RuntimeException("We require more vespene gas")));
assertIsFailed(future);
assertTrue(ran[0]);
assertIsFailed(dependantFuture);
awaitAndAssertFailure(dependantFuture, RuntimeException.class, "We require more vespene gas");
}
@Test
public void testWhenCompleteOnCancelledFuture() {
KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
Throwable[] err = new Throwable[1];
boolean[] ran = {false};
KafkaFuture<Integer> dependantFuture = future.whenComplete((integer, ex) -> {
ran[0] = true;
err[0] = ex;
if (integer != null) {
err[0] = new AssertionError();
}
});
assertFalse(dependantFuture.isDone());
assertTrue(future.cancel(true));
assertTrue(ran[0]);
assertInstanceOf(CancellationException.class, err[0]);
}
private static class CompleterThread<T> extends Thread {
private final KafkaFutureImpl<T> future;
private final T value;
private final Throwable exception;
Throwable testException = null;
CompleterThread(KafkaFutureImpl<T> future, T value) {
this.future = future;
this.value = value;
this.exception = null;
}
CompleterThread(KafkaFutureImpl<T> future, T value, Exception exception) {
this.future = future;
this.value = value;
this.exception = exception;
}
@Override
public void run() {
try {
try {
Thread.sleep(0, 200);
} catch (InterruptedException e) {
}
if (exception == null) {
future.complete(value);
} else {
future.completeExceptionally(exception);
}
} catch (Throwable testException) {
this.testException = testException;
}
}
}
private static class WaiterThread<T> extends Thread {
private final KafkaFutureImpl<T> future;
private final T expected;
Throwable testException = null;
WaiterThread(KafkaFutureImpl<T> future, T expected) {
this.future = future;
this.expected = expected;
}
@Override
public void run() {
try {
T value = future.get();
assertEquals(expected, value);
} catch (Throwable testException) {
this.testException = testException;
}
}
}
@Test
public void testAllOfFutures() throws Exception {
final int numThreads = 5;
final List<KafkaFutureImpl<Integer>> futures = new ArrayList<>();
for (int i = 0; i < numThreads; i++) {
futures.add(new KafkaFutureImpl<>());
}
KafkaFuture<Void> allFuture = KafkaFuture.allOf(futures.toArray(new KafkaFuture[0]));
final List<CompleterThread<Integer>> completerThreads = new ArrayList<>();
final List<WaiterThread<Integer>> waiterThreads = new ArrayList<>();
for (int i = 0; i < numThreads; i++) {
completerThreads.add(new CompleterThread<>(futures.get(i), i));
waiterThreads.add(new WaiterThread<>(futures.get(i), i));
}
assertFalse(allFuture.isDone());
for (int i = 0; i < numThreads; i++) {
waiterThreads.get(i).start();
}
for (int i = 0; i < numThreads - 1; i++) {
completerThreads.get(i).start();
}
assertFalse(allFuture.isDone());
completerThreads.get(numThreads - 1).start();
allFuture.get();
assertIsSuccessful(allFuture);
for (int i = 0; i < numThreads; i++) {
assertEquals(Integer.valueOf(i), futures.get(i).get());
}
for (int i = 0; i < numThreads; i++) {
completerThreads.get(i).join();
waiterThreads.get(i).join();
assertNull(completerThreads.get(i).testException);
assertNull(waiterThreads.get(i).testException);
}
}
@Test
public void testAllOfFuturesWithFailure() throws Exception {
final int numThreads = 5;
final List<KafkaFutureImpl<Integer>> futures = new ArrayList<>();
for (int i = 0; i < numThreads; i++) {
futures.add(new KafkaFutureImpl<>());
}
KafkaFuture<Void> allFuture = KafkaFuture.allOf(futures.toArray(new KafkaFuture[0]));
final List<CompleterThread<Integer>> completerThreads = new ArrayList<>();
final List<WaiterThread<Integer>> waiterThreads = new ArrayList<>();
int lastIndex = numThreads - 1;
for (int i = 0; i < lastIndex; i++) {
completerThreads.add(new CompleterThread<>(futures.get(i), i));
waiterThreads.add(new WaiterThread<>(futures.get(i), i));
}
completerThreads.add(new CompleterThread<>(futures.get(lastIndex), null, new RuntimeException("Last one failed")));
waiterThreads.add(new WaiterThread<>(futures.get(lastIndex), lastIndex));
assertFalse(allFuture.isDone());
for (int i = 0; i < numThreads; i++) {
waiterThreads.get(i).start();
}
for (int i = 0; i < lastIndex; i++) {
completerThreads.get(i).start();
}
assertFalse(allFuture.isDone());
completerThreads.get(lastIndex).start();
awaitAndAssertFailure(allFuture, RuntimeException.class, "Last one failed");
assertIsFailed(allFuture);
for (int i = 0; i < lastIndex; i++) {
assertEquals(Integer.valueOf(i), futures.get(i).get());
}
assertIsFailed(futures.get(lastIndex));
for (int i = 0; i < numThreads; i++) {
completerThreads.get(i).join();
waiterThreads.get(i).join();
assertNull(completerThreads.get(i).testException);
if (i == lastIndex) {
assertEquals(ExecutionException.class, waiterThreads.get(i).testException.getClass());
assertEquals(RuntimeException.class, waiterThreads.get(i).testException.getCause().getClass());
assertEquals("Last one failed", waiterThreads.get(i).testException.getCause().getMessage());
} else {
assertNull(waiterThreads.get(i).testException);
}
}
}
@Test
public void testAllOfFuturesHandlesZeroFutures() throws Exception {
KafkaFuture<Void> allFuture = KafkaFuture.allOf();
assertTrue(allFuture.isDone());
assertFalse(allFuture.isCancelled());
assertFalse(allFuture.isCompletedExceptionally());
allFuture.get();
}
@Test
public void testFutureTimeoutWithZeroWait() {
final KafkaFutureImpl<String> future = new KafkaFutureImpl<>();
assertThrows(TimeoutException.class, () -> future.get(0, TimeUnit.MILLISECONDS));
}
@Test
@SuppressWarnings("unchecked")
public void testLeakCompletableFuture() throws Throwable {
final KafkaFutureImpl<String> kfut = new KafkaFutureImpl<>();
CompletableFuture<String> comfut = kfut.toCompletionStage().toCompletableFuture();
assertThrows(UnsupportedOperationException.class, () -> comfut.complete(""));
assertThrows(UnsupportedOperationException.class, () -> comfut.completeExceptionally(new RuntimeException()));
Method completeOnTimeout = CompletableFuture.class.getDeclaredMethod("completeOnTimeout", Object.class, Long.TYPE, TimeUnit.class);
assertThrows(UnsupportedOperationException.class, () -> invokeOrThrow(completeOnTimeout, comfut, "", 1L, TimeUnit.MILLISECONDS));
Method completeAsync = CompletableFuture.class.getDeclaredMethod("completeAsync", Supplier.class);
assertThrows(UnsupportedOperationException.class, () -> invokeOrThrow(completeAsync, comfut, (Supplier<String>) () -> ""));
Method obtrudeValue = CompletableFuture.class.getDeclaredMethod("obtrudeValue", Object.class);
assertThrows(UnsupportedOperationException.class, () -> invokeOrThrow(obtrudeValue, comfut, ""));
Method obtrudeException = CompletableFuture.class.getDeclaredMethod("obtrudeException", Throwable.class);
assertThrows(UnsupportedOperationException.class, () -> invokeOrThrow(obtrudeException, comfut, new RuntimeException()));
// Check the CF from a minimal CompletionStage doesn't cause completion of the original KafkaFuture
Method minimal = CompletableFuture.class.getDeclaredMethod("minimalCompletionStage");
CompletionStage<String> cs = (CompletionStage<String>) invokeOrThrow(minimal, comfut);
cs.toCompletableFuture().complete("");
assertFalse(kfut.isDone());
assertFalse(comfut.isDone());
}
} | java | github | https://github.com/apache/kafka | clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java |
# -*- coding: utf-8 -*-
"""
***************************************************************************
Grass7Algorithm.py
---------------------
Date : February 2015
Copyright : (C) 2014-2015 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'February 2015'
__copyright__ = '(C) 2012-2015, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
import os
import re
import uuid
import importlib
from qgis.PyQt.QtCore import QCoreApplication, QUrl
from qgis.core import (Qgis,
QgsRasterLayer,
QgsApplication,
QgsMapLayer,
QgsProcessingUtils,
QgsProcessing,
QgsMessageLog,
QgsVectorFileWriter,
QgsProcessingAlgorithm,
QgsProcessingParameterDefinition,
QgsProcessingException,
QgsProcessingParameterExtent,
QgsProcessingParameterEnum,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterField,
QgsProcessingParameterPoint,
QgsProcessingParameterBoolean,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterVectorDestination,
QgsProcessingParameterRasterDestination,
QgsProcessingParameterFileDestination,
QgsProcessingParameterFile,
QgsProcessingParameterFolderDestination,
QgsProcessingOutputHtml,
QgsProcessingUtils,
QgsVectorLayer)
from qgis.utils import iface
from osgeo import ogr
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.parameters import (getParameterFromString)
from .Grass7Utils import Grass7Utils
#from processing.tools import dataobjects, system
from processing.tools.system import isWindows, getTempFilename
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class Grass7Algorithm(QgsProcessingAlgorithm):
    """Processing algorithm wrapping a single GRASS GIS 7 module, whose
    parameters and outputs are read from a text description file."""
    # Names of the GRASS-specific advanced parameters added to every algorithm.
    GRASS_OUTPUT_TYPE_PARAMETER = 'GRASS_OUTPUT_TYPE_PARAMETER'
    GRASS_MIN_AREA_PARAMETER = 'GRASS_MIN_AREA_PARAMETER'
    GRASS_SNAP_TOLERANCE_PARAMETER = 'GRASS_SNAP_TOLERANCE_PARAMETER'
    # NOTE(review): name says EXTENT but the stored key is 'GRASS_REGION_PARAMETER' —
    # presumably kept for backward compatibility with saved models; verify before renaming.
    GRASS_REGION_EXTENT_PARAMETER = 'GRASS_REGION_PARAMETER'
    GRASS_REGION_CELLSIZE_PARAMETER = 'GRASS_REGION_CELLSIZE_PARAMETER'
    GRASS_REGION_ALIGN_TO_RESOLUTION = 'GRASS_REGION_ALIGN_TO_RESOLUTION'
    # Raster export options (GDAL createopt / metaopt).
    GRASS_RASTER_FORMAT_OPT = 'GRASS_RASTER_FORMAT_OPT'
    GRASS_RASTER_FORMAT_META = 'GRASS_RASTER_FORMAT_META'
    # Vector export options (OGR data source / layer creation options).
    GRASS_VECTOR_DSCO = 'GRASS_VECTOR_DSCO'
    GRASS_VECTOR_LCO = 'GRASS_VECTOR_LCO'
    # Geometry type choices accepted by v.out.ogr.
    OUTPUT_TYPES = ['auto', 'point', 'line', 'area']
    # Mapping from QGIS processing vector types to v.out.ogr type names.
    QGIS_OUTPUT_TYPES = {QgsProcessing.TypeVectorAnyGeometry: 'auto',
                         QgsProcessing.TypeVectorPoint: 'point',
                         QgsProcessing.TypeVectorLine: 'line',
                         QgsProcessing.TypeVectorPolygon: 'area'}
def __init__(self, descriptionfile):
super().__init__()
self._name = ''
self._display_name = ''
self._short_description = ''
self._group = ''
self._groupId = ''
self.groupIdRegex = re.compile(r'^[^\s\(]+')
self.grass7Name = ''
self.params = []
self.hardcodedStrings = []
self.inputLayers = []
self.descriptionFile = descriptionfile
# Default GRASS parameters
self.region = None
self.cellSize = None
self.snaptTolerance = None
self.outputType = None
self.minArea = None
self.alignToResolution = None
# Load parameters from a description file
self.defineCharacteristicsFromFile()
self.numExportedLayers = 0
# Do we need this anymore?
self.uniqueSuffix = str(uuid.uuid4()).replace('-', '')
# Use the ext mechanism
name = self.name().replace('.', '_')
try:
self.module = importlib.import_module(
'processing.algs.grass7.ext.{}'.format(name))
except ImportError:
self.module = None
def createInstance(self):
return self.__class__(self.descriptionFile)
    def name(self):
        """Return the algorithm's internal (lowercase) Processing name."""
        return self._name
    def displayName(self):
        """Return the name shown in the Processing toolbox (same as name())."""
        return self._display_name
    def shortDescription(self):
        """Return the short description read from the description file."""
        return self._short_description
    def group(self):
        """Return the translated group label for this algorithm."""
        return self._group
    def groupId(self):
        """Return the stable, lowercase group identifier."""
        return self._groupId
    def icon(self):
        """Return the GRASS provider icon from the current QGIS theme."""
        return QgsApplication.getThemeIcon("/providerGrass.svg")
    def svgIconPath(self):
        """Return the path of the GRASS provider SVG icon."""
        return QgsApplication.iconPath("providerGrass.svg")
    def flags(self):
        """Return the base flags plus no-threading (GRASS sessions are not
        thread-safe here) and literal display name."""
        # TODO - maybe it's safe to background thread this?
        return super().flags() | QgsProcessingAlgorithm.FlagNoThreading | QgsProcessingAlgorithm.FlagDisplayNameIsLiteral
def tr(self, string, context=''):
if context == '':
context = self.__class__.__name__
return QCoreApplication.translate(context, string)
def helpUrl(self):
helpPath = Grass7Utils.grassHelpPath()
if helpPath == '':
return None
if os.path.exists(helpPath):
return QUrl.fromLocalFile(os.path.join(helpPath, '{}.html'.format(self.grass7Name))).toString()
else:
return helpPath + '{}.html'.format(self.grass7Name)
def initAlgorithm(self, config=None):
"""
Algorithm initialization
"""
for p in self.params:
# We use createOutput argument for automatic output creation
res = self.addParameter(p, True)
    def defineCharacteristicsFromFile(self):
        """
        Create algorithm parameters and outputs from a text file.

        Description file layout: line 1 is the GRASS module name, line 2 the
        short description ("name - description"), line 3 the group, then one
        parameter/output definition per line until a blank line.  GRASS-specific
        advanced parameters (region extent/cellsize, v.in.ogr/v.out.ogr and
        raster export options) are appended depending on the kinds of
        inputs/outputs declared.
        """
        with open(self.descriptionFile) as lines:
            # First line of the file is the Grass algorithm name
            line = lines.readline().strip('\n').strip()
            self.grass7Name = line
            # Second line is the algorithm name in Processing
            line = lines.readline().strip('\n').strip()
            self._short_description = line
            if " - " not in line:
                self._name = self.grass7Name
            else:
                # Use the token before the first space as the Processing name.
                self._name = line[:line.find(' ')].lower()
            self._display_name = self._name
            # Read the grass group
            line = lines.readline().strip('\n').strip()
            self._group = QCoreApplication.translate("GrassAlgorithm", line)
            self._groupId = self.groupIdRegex.search(line).group(0).lower()
            # Track input/output kinds to know which advanced parameters to add.
            hasRasterOutput = False
            hasRasterInput = False
            hasVectorInput = False
            vectorOutputs = False
            # Then you have parameters/output definition
            line = lines.readline().strip('\n').strip()
            while line != '':
                try:
                    line = line.strip('\n').strip()
                    # 'Hardcoded|...' lines are raw GRASS command fragments, not parameters.
                    if line.startswith('Hardcoded'):
                        self.hardcodedStrings.append(line[len('Hardcoded|'):])
                    parameter = getParameterFromString(line)
                    if parameter is not None:
                        self.params.append(parameter)
                        if isinstance(parameter, (QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSource)):
                            hasVectorInput = True
                        elif isinstance(parameter, QgsProcessingParameterRasterLayer):
                            hasRasterInput = True
                        elif isinstance(parameter, QgsProcessingParameterMultipleLayers):
                            # NOTE(review): magic layerType() values — presumably the
                            # QgsProcessing.TypeVector*/TypeRaster enum; verify mapping.
                            if parameter.layerType() < 3 or parameter.layerType() == 5:
                                hasVectorInput = True
                            elif parameter.layerType() == 3:
                                hasRasterInput = True
                        elif isinstance(parameter, QgsProcessingParameterVectorDestination):
                            vectorOutputs = True
                        elif isinstance(parameter, QgsProcessingParameterRasterDestination):
                            hasRasterOutput = True
                    line = lines.readline().strip('\n').strip()
                except Exception as e:
                    # Log which file/line could not be parsed, then re-raise.
                    QgsMessageLog.logMessage(self.tr('Could not open GRASS GIS 7 algorithm: {0}\n{1}').format(self.descriptionFile, line), self.tr('Processing'), Qgis.Critical)
                    raise e
        # Advanced region extent parameter, always present.
        param = QgsProcessingParameterExtent(
            self.GRASS_REGION_EXTENT_PARAMETER,
            self.tr('GRASS GIS 7 region extent'),
            optional=True
        )
        param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.params.append(param)
        if hasRasterOutput or hasRasterInput:
            # Add a cellsize parameter
            param = QgsProcessingParameterNumber(
                self.GRASS_REGION_CELLSIZE_PARAMETER,
                self.tr('GRASS GIS 7 region cellsize (leave 0 for default)'),
                type=QgsProcessingParameterNumber.Double,
                minValue=0.0, maxValue=sys.float_info.max + 1, defaultValue=0.0
            )
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.params.append(param)
        if hasRasterOutput:
            # Add a createopt parameter for format export
            param = QgsProcessingParameterString(
                self.GRASS_RASTER_FORMAT_OPT,
                self.tr('Output Rasters format options (createopt)'),
                multiLine=True, optional=True
            )
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.params.append(param)
            # Add a metadata parameter for format export
            param = QgsProcessingParameterString(
                self.GRASS_RASTER_FORMAT_META,
                self.tr('Output Rasters format metadata options (metaopt)'),
                multiLine=True, optional=True
            )
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.params.append(param)
        if hasVectorInput:
            # v.in.ogr import tuning: snapping tolerance and minimum area.
            param = QgsProcessingParameterNumber(self.GRASS_SNAP_TOLERANCE_PARAMETER,
                                                 self.tr('v.in.ogr snap tolerance (-1 = no snap)'),
                                                 type=QgsProcessingParameterNumber.Double,
                                                 minValue=-1.0, maxValue=sys.float_info.max + 1,
                                                 defaultValue=-1.0)
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.params.append(param)
            param = QgsProcessingParameterNumber(self.GRASS_MIN_AREA_PARAMETER,
                                                 self.tr('v.in.ogr min area'),
                                                 type=QgsProcessingParameterNumber.Double,
                                                 minValue=0.0, maxValue=sys.float_info.max + 1,
                                                 defaultValue=0.0001)
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.params.append(param)
        if vectorOutputs:
            # Add an optional output type
            param = QgsProcessingParameterEnum(self.GRASS_OUTPUT_TYPE_PARAMETER,
                                               self.tr('v.out.ogr output type'),
                                               self.OUTPUT_TYPES)
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.params.append(param)
            # Add a DSCO parameter for format export
            param = QgsProcessingParameterString(
                self.GRASS_VECTOR_DSCO,
                self.tr('v.out.ogr output data source options (dsco)'),
                multiLine=True, optional=True
            )
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.params.append(param)
            # Add a LCO parameter for format export
            param = QgsProcessingParameterString(
                self.GRASS_VECTOR_LCO,
                self.tr('v.out.ogr output layer options (lco)'),
                multiLine=True, optional=True
            )
            param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
            self.params.append(param)
def getDefaultCellSize(self):
"""
Determine a default cell size from all the raster layers.
"""
cellsize = 0.0
layers = [l for l in self.inputLayers if isinstance(l, QgsRasterLayer)]
for layer in layers:
cellsize = max(layer.rasterUnitsPerPixelX(), cellsize)
if cellsize == 0.0:
cellsize = 100.0
return cellsize
    def grabDefaultGrassParameters(self, parameters, context):
        """
        Imports default GRASS parameters (EXTENT, etc) into
        object attributes for faster retrieving.

        :param parameters: the algorithm's parameters dict.
        :param context: the QgsProcessingContext of the current run.
        """
        # GRASS region extent
        self.region = self.parameterAsExtent(parameters,
                                             self.GRASS_REGION_EXTENT_PARAMETER,
                                             context)
        # GRASS cell size (only defined when the algorithm has raster inputs/outputs)
        if self.parameterDefinition(self.GRASS_REGION_CELLSIZE_PARAMETER):
            self.cellSize = self.parameterAsDouble(parameters,
                                                   self.GRASS_REGION_CELLSIZE_PARAMETER,
                                                   context)
        # GRASS snap tolerance
        self.snapTolerance = self.parameterAsDouble(parameters,
                                                    self.GRASS_SNAP_TOLERANCE_PARAMETER,
                                                    context)
        # GRASS min area
        self.minArea = self.parameterAsDouble(parameters,
                                              self.GRASS_MIN_AREA_PARAMETER,
                                              context)
        # GRASS output type
        self.outputType = self.parameterAsString(parameters,
                                                 self.GRASS_OUTPUT_TYPE_PARAMETER,
                                                 context)
        # GRASS align to resolution
        # NOTE(review): GRASS_REGION_ALIGN_TO_RESOLUTION is read here but no matching
        # parameter is added in defineCharacteristicsFromFile in this chunk —
        # presumably it defaults to False when absent; verify against callers.
        self.alignToResolution = self.parameterAsBool(parameters,
                                                      self.GRASS_REGION_ALIGN_TO_RESOLUTION,
                                                      context)
def processAlgorithm(self, original_parameters, context, feedback):
    """
    Main entry point: imports inputs into a (possibly pre-existing)
    GRASS session, builds and runs the GRASS commands, exports the
    results and returns the outputs map.
    :raises QgsProcessingException: on Windows when the GRASS folder
        has not been configured.
    """
    if isWindows():
        path = Grass7Utils.grassPath()
        if path == '':
            raise QgsProcessingException(
                self.tr('GRASS GIS 7 folder is not configured. Please '
                        'configure it before running GRASS GIS 7 algorithms.'))
    # make a copy of the original parameters dictionary - it gets modified by grass algorithms
    parameters = {k: v for k, v in original_parameters.items()}
    # Create brand new commands lists
    self.commands = []
    self.outputCommands = []
    self.exportedLayers = {}
    # If GRASS session has been created outside of this algorithm then
    # get the list of layers loaded in GRASS otherwise start a new
    # session
    existingSession = Grass7Utils.sessionRunning
    if existingSession:
        self.exportedLayers = Grass7Utils.getSessionLayers()
    else:
        Grass7Utils.startGrassSession()
    # Handle default GRASS parameters
    self.grabDefaultGrassParameters(parameters, context)
    # Handle ext functions for inputs/command/outputs:
    # a companion 'ext' module may override processInputs /
    # processCommand / processOutputs; default implementations are
    # used otherwise.
    for fName in ['Inputs', 'Command', 'Outputs']:
        fullName = 'process{}'.format(fName)
        if self.module and hasattr(self.module, fullName):
            getattr(self.module, fullName)(self, parameters, context, feedback)
        else:
            getattr(self, fullName)(parameters, context, feedback)
    # Run GRASS
    loglines = []
    loglines.append(self.tr('GRASS GIS 7 execution commands'))
    for line in self.commands:
        feedback.pushCommandInfo(line)
        loglines.append(line)
    if ProcessingConfig.getSetting(Grass7Utils.GRASS_LOG_COMMANDS):
        QgsMessageLog.logMessage("\n".join(loglines), self.tr('Processing'), Qgis.Info)
    Grass7Utils.executeGrass(self.commands, feedback, self.outputCommands)
    # If the session has been created outside of this algorithm, add
    # the new GRASS GIS 7 layers to it otherwise finish the session
    if existingSession:
        Grass7Utils.addSessionLayers(self.exportedLayers)
    else:
        Grass7Utils.endGrassSession()
    # Return outputs map
    outputs = {}
    for out in self.outputDefinitions():
        outName = out.name()
        if outName in parameters:
            outputs[outName] = parameters[outName]
            # HTML outputs come back as plain text; wrap them in HTML
            if isinstance(out, QgsProcessingOutputHtml):
                self.convertToHtml(parameters[outName])
    return outputs
def processInputs(self, parameters, context, feedback):
    """
    Prepare the GRASS import commands.

    Every layer-like input (raster, vector, feature source, multiple
    layers) must be imported into the temporary GRASS DB before the
    algorithm command can reference it.
    :param parameters: algorithm parameters dict.
    :param context: processing context.
    :param feedback: processing feedback, forwarded to vector loading.
    """
    inputs = [p for p in self.parameterDefinitions()
              if isinstance(p, (QgsProcessingParameterVectorLayer,
                                QgsProcessingParameterFeatureSource,
                                QgsProcessingParameterRasterLayer,
                                QgsProcessingParameterMultipleLayers))]
    for param in inputs:
        paramName = param.name()
        if paramName not in parameters:
            continue
        # Handle Null parameter
        if parameters[paramName] is None:
            continue
        elif isinstance(parameters[paramName], str) and len(parameters[paramName]) == 0:
            continue
        # Raster inputs needs to be imported into temp GRASS DB
        if isinstance(param, QgsProcessingParameterRasterLayer):
            if paramName not in self.exportedLayers:
                self.loadRasterLayerFromParameter(
                    paramName, parameters, context)
        # Vector inputs needs to be imported into temp GRASS DB
        elif isinstance(param, (QgsProcessingParameterFeatureSource, QgsProcessingParameterVectorLayer)):
            if paramName not in self.exportedLayers:
                # Attribute tables are also vector inputs
                if QgsProcessing.TypeFile in param.dataTypes():
                    self.loadAttributeTableFromParameter(
                        paramName, parameters, context)
                else:
                    self.loadVectorLayerFromParameter(
                        paramName, parameters, context, feedback, None)
        # For multiple inputs, process each layer
        elif isinstance(param, QgsProcessingParameterMultipleLayers):
            layers = self.parameterAsLayerList(parameters, paramName, context)
            for idx, layer in enumerate(layers):
                layerName = '{}_{}'.format(paramName, idx)
                # Add a raster layer
                if layer.type() == QgsMapLayer.RasterLayer:
                    self.loadRasterLayer(layerName, layer)
                # Add a vector layer
                elif layer.type() == QgsMapLayer.VectorLayer:
                    self.loadVectorLayer(layerName, layer, None)
    self.postInputs()
def postInputs(self):
    """
    After layer imports, we need to update some internal parameters:
    the session projection and the GRASS computational region
    (extent, resolution, alignment).
    """
    # If projection has not already be set, use the project
    self.setSessionProjectionFromProject()
    # Build GRASS region: fall back to the union of all input layer
    # extents when the user did not provide one.
    if self.region.isEmpty():
        self.region = QgsProcessingUtils.combineLayerExtents(self.inputLayers)
    command = 'g.region n={} s={} e={} w={}'.format(
        self.region.yMaximum(), self.region.yMinimum(),
        self.region.xMaximum(), self.region.xMinimum()
    )
    # Handle cell size
    if self.parameterDefinition(self.GRASS_REGION_CELLSIZE_PARAMETER):
        if self.cellSize:
            cellSize = self.cellSize
        else:
            cellSize = self.getDefaultCellSize()
        command += ' res={}'.format(cellSize)
    # Handle align to resolution
    if self.alignToResolution:
        command += ' -a'
    # Add the default parameters commands
    self.commands.append(command)
    QgsMessageLog.logMessage(self.tr('processInputs end. Commands: {}').format(self.commands), 'Grass7', Qgis.Info)
def processCommand(self, parameters, context, feedback, delOutputs=False):
    """
    Prepare the GRASS algorithm command
    :param parameters: algorithm parameters dict.
    :param context: processing context.
    :param feedback: processing feedback object.
    :param delOutputs: do not add outputs to commands.
    """
    # Everything that is not a destination parameter goes on the
    # command line as name=value (or bare name for flags).
    noOutputs = [o for o in self.parameterDefinitions() if o not in self.destinationParameterDefinitions()]
    command = '{} '.format(self.grass7Name)
    # NOTE(review): this joins the hardcoded strings with a literal
    # '{}' separator -- ' '.join(...) looks like the intent; confirm
    # before changing.
    command += '{}'.join(self.hardcodedStrings)
    # Add algorithm command
    for param in noOutputs:
        paramName = param.name()
        value = None
        # Exclude default GRASS parameters
        if paramName in [self.GRASS_REGION_CELLSIZE_PARAMETER,
                         self.GRASS_REGION_EXTENT_PARAMETER,
                         self.GRASS_MIN_AREA_PARAMETER,
                         self.GRASS_SNAP_TOLERANCE_PARAMETER,
                         self.GRASS_OUTPUT_TYPE_PARAMETER,
                         self.GRASS_REGION_ALIGN_TO_RESOLUTION,
                         self.GRASS_RASTER_FORMAT_OPT,
                         self.GRASS_RASTER_FORMAT_META,
                         self.GRASS_VECTOR_DSCO,
                         self.GRASS_VECTOR_LCO]:
            continue
        # Raster and vector layers: use the in-GRASS name when already
        # imported, otherwise a filesystem path GRASS can read.
        if isinstance(param, (QgsProcessingParameterRasterLayer,
                              QgsProcessingParameterVectorLayer,
                              QgsProcessingParameterFeatureSource)):
            if paramName in self.exportedLayers:
                value = self.exportedLayers[paramName]
            else:
                value = self.parameterAsCompatibleSourceLayerPath(
                    parameters, paramName, context,
                    QgsVectorFileWriter.supportedFormatExtensions()
                )
        # MultipleLayers: comma-separated list of in-GRASS names
        elif isinstance(param, QgsProcessingParameterMultipleLayers):
            layers = self.parameterAsLayerList(parameters, paramName, context)
            values = []
            for idx in range(len(layers)):
                layerName = '{}_{}'.format(paramName, idx)
                values.append(self.exportedLayers[layerName])
            value = ','.join(values)
        # For booleans, we just add the parameter name
        elif isinstance(param, QgsProcessingParameterBoolean):
            if self.parameterAsBool(parameters, paramName, context):
                command += ' {}'.format(paramName)
        # For Extents, remove if the value is null
        elif isinstance(param, QgsProcessingParameterExtent):
            if self.parameterAsExtent(parameters, paramName, context):
                value = self.parameterAsString(parameters, paramName, context)
        # For enumeration, we need to grab the string value
        elif isinstance(param, QgsProcessingParameterEnum):
            # Handle multiple values
            if param.allowMultiple():
                indexes = self.parameterAsEnums(parameters, paramName, context)
            else:
                indexes = [self.parameterAsEnum(parameters, paramName, context)]
            if indexes:
                value = '"{}"'.format(','.join([param.options()[i] for i in indexes]))
        # For strings, we just translate as string
        elif isinstance(param, QgsProcessingParameterString):
            data = self.parameterAsString(parameters, paramName, context)
            # if string is empty, we don't add it
            if len(data) > 0:
                value = '"{}"'.format(
                    self.parameterAsString(parameters, paramName, context)
                )
        # For fields, we just translate as string
        elif isinstance(param, QgsProcessingParameterField):
            value = ','.join(
                self.parameterAsFields(parameters, paramName, context)
            )
        elif isinstance(param, QgsProcessingParameterFile):
            if self.parameterAsString(parameters, paramName, context):
                value = '"{}"'.format(
                    self.parameterAsString(parameters, paramName, context)
                )
        elif isinstance(param, QgsProcessingParameterPoint):
            if self.parameterAsString(parameters, paramName, context):
                # parameter specified, evaluate as point
                # TODO - handle CRS transform
                point = self.parameterAsPoint(parameters, paramName, context)
                value = '{},{}'.format(point.x(), point.y())
        # For numbers, we translate as a string
        # NOTE(review): QgsProcessingParameterPoint is already matched
        # by the elif above, so listing it again here is dead code for
        # points.
        elif isinstance(param, (QgsProcessingParameterNumber,
                                QgsProcessingParameterPoint)):
            value = self.parameterAsString(parameters, paramName, context)
        # For everything else, we assume that it is a string
        else:
            value = '"{}"'.format(
                self.parameterAsString(parameters, paramName, context)
            )
        # NOTE(review): truthiness check -- a value of '' or None is
        # skipped by design, but so would a bare '0' string be.
        if value:
            command += ' {}={}'.format(paramName.replace('~', ''), value)
    # Handle outputs
    if not delOutputs:
        for out in self.destinationParameterDefinitions():
            # We exclude hidden parameters
            if out.flags() & QgsProcessingParameterDefinition.FlagHidden:
                continue
            outName = out.name()
            # For File destination
            if isinstance(out, QgsProcessingParameterFileDestination):
                if outName in parameters and parameters[outName] is not None:
                    # for HTML reports, we need to redirect stdout
                    if out.defaultFileExtension().lower() == 'html':
                        command += ' > "{}"'.format(
                            self.parameterAsFileOutput(
                                parameters, outName, context)
                        )
                    else:
                        command += ' {}="{}"'.format(
                            outName,
                            self.parameterAsFileOutput(
                                parameters, outName, context))
            # For folders destination
            elif isinstance(out, QgsProcessingParameterFolderDestination):
                # We need to add a unique temporary basename
                uniqueBasename = outName + self.uniqueSuffix
                command += ' {}={}'.format(outName, uniqueBasename)
            else:
                if outName in parameters and parameters[outName] is not None:
                    # We add an output name to make sure it is unique if the session
                    # uses this algorithm several times.
                    #value = self.parameterAsOutputLayer(parameters, outName, context)
                    uniqueOutputName = outName + self.uniqueSuffix
                    command += ' {}={}'.format(outName, uniqueOutputName)
                    # Add output file to exported layers, to indicate that
                    # they are present in GRASS
                    self.exportedLayers[outName] = uniqueOutputName
    command += ' --overwrite'
    self.commands.append(command)
    QgsMessageLog.logMessage(self.tr('processCommands end. Commands: {}').format(self.commands), 'Grass7', Qgis.Info)
def vectorOutputType(self, parameters, context):
    """Determine vector output types for outputs"""
    # Default to automatic type detection.
    self.outType = 'auto'
    if not self.parameterDefinition(self.GRASS_OUTPUT_TYPE_PARAMETER):
        return
    typeidx = self.parameterAsEnum(parameters,
                                   self.GRASS_OUTPUT_TYPE_PARAMETER,
                                   context)
    if typeidx is not None:
        self.outType = self.OUTPUT_TYPES[typeidx]
def processOutputs(self, parameters, context, feedback):
    """
    Prepare the GRASS v.out.ogr commands.

    Dispatches each destination parameter to the matching export
    helper (raster file, vector file or raster directory).
    :param parameters: algorithm parameters dict.
    :param context: processing context.
    :param feedback: processing feedback object (unused here).
    """
    # Determine general vector output type
    self.vectorOutputType(parameters, context)
    for out in self.destinationParameterDefinitions():
        outName = out.name()
        if outName not in parameters:
            # skipped output
            continue
        if isinstance(out, QgsProcessingParameterRasterDestination):
            self.exportRasterLayerFromParameter(outName, parameters, context)
        elif isinstance(out, QgsProcessingParameterVectorDestination):
            self.exportVectorLayerFromParameter(outName, parameters, context)
        elif isinstance(out, QgsProcessingParameterFolderDestination):
            self.exportRasterLayersIntoDirectory(outName, parameters, context)
def loadRasterLayerFromParameter(self, name, parameters, context, external=True, band=1):
    """
    Creates a dedicated command to load a raster into
    the temporary GRASS DB.
    :param name: name of the parameter.
    :param parameters: algorithm parameters dict.
    :param context: algorithm context.
    :param external: True if using r.external.
    :param band: imports only specified band. None for all bands.
    """
    # Resolve the parameter to a layer object, then delegate.
    layer = self.parameterAsRasterLayer(parameters, name, context)
    self.loadRasterLayer(name, layer, external, band)
def loadRasterLayer(self, name, layer, external=True, band=1, destName=None):
    """
    Creates a dedicated command to load a raster into
    the temporary GRASS DB.
    :param name: name of the parameter.
    :param layer: QgsMapLayer for the raster layer.
    :param external: True if using r.external.
    :param band: imports only specified band. None for all bands.
    :param destName: force the destination name of the raster.
    """
    self.inputLayers.append(layer)
    # Only the first layer actually sets the session projection
    # (guarded by Grass7Utils.projectionSet inside the helper).
    self.setSessionProjectionFromLayer(layer)
    if not destName:
        # Unique in-GRASS name derived from a temporary file name
        destName = 'rast_{}'.format(os.path.basename(getTempFilename()))
    self.exportedLayers[name] = destName
    command = '{0} input="{1}" {2}output="{3}" --overwrite -o'.format(
        'r.external' if external else 'r.in.gdal',
        os.path.normpath(layer.source()),
        'band={} '.format(band) if band else '',
        destName)
    self.commands.append(command)
def exportRasterLayerFromParameter(self, name, parameters, context, colorTable=True):
    """
    Creates a dedicated command to export a raster from
    temporary GRASS DB into a file via gdal.
    :param name: name of the parameter.
    :param parameters: Algorithm parameters dict.
    :param context: Algorithm context.
    :param colorTable: preserve color Table.
    """
    fileName = self.parameterAsOutputLayer(parameters, name, context)
    # Nothing to export when the destination was left empty
    if not fileName:
        return
    fileName = os.path.normpath(fileName)
    # In-GRASS name as built by processCommand (name + unique suffix)
    grassName = '{}{}'.format(name, self.uniqueSuffix)
    outFormat = Grass7Utils.getRasterFormatFromFilename(fileName)
    createOpt = self.parameterAsString(parameters, self.GRASS_RASTER_FORMAT_OPT, context)
    metaOpt = self.parameterAsString(parameters, self.GRASS_RASTER_FORMAT_META, context)
    self.exportRasterLayer(grassName, fileName, colorTable, outFormat, createOpt, metaOpt)
def exportRasterLayer(self, grassName, fileName,
                      colorTable=True, outFormat='GTiff',
                      createOpt=None,
                      metaOpt=None):
    """
    Creates a dedicated command to export a raster from
    temporary GRASS DB into a file via gdal.
    :param grassName: name of the raster to export.
    :param fileName: file path of raster layer.
    :param colorTable: preserve color Table.
    :param outFormat: file format for export.
    :param createOpt: creation options for format.
    :param metaOpt: metadata options for export.
    """
    # Fall back to the per-format default creation options when the
    # caller supplied none.
    if not createOpt:
        if outFormat in Grass7Utils.GRASS_RASTER_FORMATS_CREATEOPTS:
            createOpt = Grass7Utils.GRASS_RASTER_FORMATS_CREATEOPTS[outFormat]
    # Queue the export on both the session command list and the
    # output command list.
    for cmd in [self.commands, self.outputCommands]:
        # Adjust region to layer before exporting
        cmd.append('g.region raster={}'.format(grassName))
        cmd.append(
            'r.out.gdal -t -m{0} input="{1}" output="{2}" format="{3}" {4}{5} --overwrite'.format(
                '' if colorTable else ' -c',
                grassName, fileName,
                outFormat,
                ' createopt="{}"'.format(createOpt) if createOpt else '',
                ' metaopt="{}"'.format(metaOpt) if metaOpt else ''
            )
        )
def exportRasterLayersIntoDirectory(self, name, parameters, context, colorTable=True, wholeDB=False):
    """
    Creates a dedicated loop command to export rasters from
    temporary GRASS DB into a directory via gdal.
    :param name: name of the output directory parameter.
    :param parameters: Algorithm parameters dict.
    :param context: Algorithm context.
    :param colorTable: preserve color Table.
    :param wholeDB: export every raster layer from the GRASSDB
    """
    # Grab directory name and temporary basename
    outDir = os.path.normpath(
        self.parameterAsString(parameters, name, context))
    basename = ''
    if not wholeDB:
        basename = name + self.uniqueSuffix
    # Add a loop export from the basename.  An empty basename makes
    # the g.list pattern '*', i.e. every raster in the GRASSDB.
    for cmd in [self.commands, self.outputCommands]:
        # TODO Windows support
        # TODO Format/options support
        cmd.append("for r in $(g.list type=rast pattern='{}*'); do".format(basename))
        cmd.append(" r.out.gdal -m{0} input=${{r}} output={1}/${{r}}.tif {2}".format(
            ' -t' if colorTable else '', outDir,
            '--overwrite -c createopt="TFW=YES,COMPRESS=LZW"'
        )
        )
        cmd.append("done")
def loadVectorLayerFromParameter(self, name, parameters, context, feedback, external=False):
    """
    Creates a dedicated command to load a vector into
    the temporary GRASS DB.
    :param name: name of the parameter
    :param parameters: Parameters of the algorithm.
    :param context: Processing context
    :param feedback: Processing feedback, forwarded to the conversion.
    :param external: use v.external (v.in.ogr if False).
    """
    layer = self.parameterAsVectorLayer(parameters, name, context)
    if layer is None or layer.dataProvider().name() != 'ogr':
        # parameter is not a vector layer or not an OGR layer - try to convert to a source compatible with
        # grass OGR inputs and extract selection if required
        path = self.parameterAsCompatibleSourceLayerPath(parameters, name, context,
                                                         QgsVectorFileWriter.supportedFormatExtensions(),
                                                         feedback=feedback)
        ogr_layer = QgsVectorLayer(path, '', 'ogr')
        self.loadVectorLayer(name, ogr_layer, external)
    else:
        # already an ogr layer source
        self.loadVectorLayer(name, layer, external)
def loadVectorLayer(self, name, layer, external=False):
    """
    Creates a dedicated command to load a vector into
    temporary GRASS DB.
    :param name: name of the parameter
    :param layer: QgsMapLayer for the vector layer.
    :param external: use v.external (v.in.ogr if False).
        None means "use the user's GRASS_USE_VEXTERNAL setting".
    """
    # TODO: support multiple input formats
    if external is None:
        external = ProcessingConfig.getSetting(
            Grass7Utils.GRASS_USE_VEXTERNAL)
    # safety check: we can only use external for ogr layers which support random read
    if external:
        ds = ogr.Open(layer.source())
        if ds is not None:
            ogr_layer = ds.GetLayer()
            if ogr_layer is None or not ogr_layer.TestCapability(ogr.OLCRandomRead):
                external = False
        else:
            external = False
    self.inputLayers.append(layer)
    self.setSessionProjectionFromLayer(layer)
    destFilename = 'vector_{}'.format(os.path.basename(getTempFilename()))
    self.exportedLayers[name] = destFilename
    # min_area/snap options only apply to a real import (v.in.ogr)
    command = '{0}{1}{2} input="{3}" output="{4}" --overwrite -o'.format(
        'v.external' if external else 'v.in.ogr',
        ' min_area={}'.format(self.minArea) if not external else '',
        ' snap={}'.format(self.snapTolerance) if not external else '',
        os.path.normpath(layer.source()),
        destFilename)
    self.commands.append(command)
def exportVectorLayerFromParameter(self, name, parameters, context, layer=None, nocats=False):
    """
    Creates a dedicated command to export a vector from
    a QgsProcessingParameter.
    :param name: name of the parameter.
    :param parameters: algorithm parameters dict.
    :param context: parameters context.
    :param layer: for vector with multiples layers, exports only one layer.
    :param nocats: do not export GRASS categories.
    """
    fileName = os.path.normpath(
        self.parameterAsOutputLayer(parameters, name, context))
    # In-GRASS name as built by processCommand (name + unique suffix)
    grassName = '{}{}'.format(name, self.uniqueSuffix)
    # Find if there is a dataType
    dataType = self.outType
    if self.outType == 'auto':
        parameter = self.parameterDefinition(name)
        if parameter:
            layerType = parameter.dataType()
            if layerType in self.QGIS_OUTPUT_TYPES:
                dataType = self.QGIS_OUTPUT_TYPES[layerType]
    # OGR driver name inferred from the destination file extension
    outFormat = QgsVectorFileWriter.driverForExtension(os.path.splitext(fileName)[1]).replace(' ', '_')
    dsco = self.parameterAsString(parameters, self.GRASS_VECTOR_DSCO, context)
    lco = self.parameterAsString(parameters, self.GRASS_VECTOR_LCO, context)
    self.exportVectorLayer(grassName, fileName, layer, nocats, dataType, outFormat, dsco, lco)
def exportVectorLayer(self, grassName, fileName, layer=None, nocats=False, dataType='auto',
                      outFormat=None, dsco=None, lco=None):
    """
    Creates a dedicated command to export a vector from
    temporary GRASS DB into a file via OGR.
    :param grassName: name of the vector to export.
    :param fileName: file path of vector layer.
    :param layer: for vector with multiples layers, exports only one layer.
    :param nocats: do not export GRASS categories.
    :param dataType: export only this type of data.
    :param outFormat: file format for export; inferred from the file
        extension when None.
    :param dsco: datasource creation options for format.
    :param lco: layer creation options for format.
    """
    if outFormat is None:
        outFormat = QgsVectorFileWriter.driverForExtension(os.path.splitext(fileName)[1]).replace(' ', '_')
    # Queue the export on both the session command list and the
    # output command list.
    for cmd in [self.commands, self.outputCommands]:
        cmd.append(
            'v.out.ogr{0} type="{1}" input="{2}" output="{3}" format="{4}" {5}{6}{7} --overwrite'.format(
                '' if nocats else ' -c',
                dataType, grassName, fileName,
                outFormat,
                'layer={}'.format(layer) if layer else '',
                ' dsco="{}"'.format(dsco) if dsco else '',
                ' lco="{}"'.format(lco) if lco else ''
            )
        )
def loadAttributeTableFromParameter(self, name, parameters, context):
    """
    Creates a dedicated command to load an attribute table
    into the temporary GRASS DB.
    :param name: name of the parameter
    :param parameters: Parameters of the algorithm.
    :param context: Processing context
    """
    # Attribute tables are resolved like vector layers, then delegated.
    table = self.parameterAsVectorLayer(parameters, name, context)
    self.loadAttributeTable(name, table)
def loadAttributeTable(self, name, layer, destName=None):
    """
    Creates a dedicated command to load an attribute table
    into the temporary GRASS DB.
    :param name: name of the input parameter.
    :param layer: a layer object to import from.
    :param destName: force the name for the table into GRASS DB.
    """
    self.inputLayers.append(layer)
    if not destName:
        # Unique in-GRASS name derived from a temporary file name
        destName = 'table_{}'.format(os.path.basename(getTempFilename()))
    self.exportedLayers[name] = destName
    command = 'db.in.ogr --overwrite input="{0}" output="{1}"'.format(
        os.path.normpath(layer.source()), destName)
    self.commands.append(command)
def exportAttributeTable(self, grassName, fileName, outFormat='CSV', layer=1):
    """
    Creates a dedicated command to export an attribute
    table from the temporary GRASS DB into a file via ogr.
    :param grassName: name of the parameter.
    :param fileName: file path of raster layer.
    :param outFormat: file format for export.
    :param layer: In GRASS a vector can have multiple layers.
    """
    # Build the db.out.ogr call once, then queue it on both the
    # session command list and the output command list.
    command = 'db.out.ogr input="{0}" output="{1}" layer={2} format={3} --overwrite'.format(
        grassName, fileName, layer, outFormat
    )
    self.commands.append(command)
    self.outputCommands.append(command)
def setSessionProjectionFromProject(self):
    """
    Set the projection from the project.
    We creates a PROJ4 definition which is transmitted to Grass
    """
    # Done at most once per GRASS session (projectionSet flag), and
    # only when 'iface' is available -- presumably only inside the
    # QGIS GUI; verify for standalone runs.
    if not Grass7Utils.projectionSet and iface:
        proj4 = iface.mapCanvas().mapSettings().destinationCrs().toProj4()
        command = 'g.proj -c proj4="{}"'.format(proj4)
        self.commands.append(command)
        Grass7Utils.projectionSet = True
def setSessionProjectionFromLayer(self, layer):
    """
    Set the projection from a QgsVectorLayer.
    We creates a PROJ4 definition which is transmitted to Grass
    """
    # Only the first layer sets the session projection; subsequent
    # calls are no-ops thanks to the projectionSet flag.
    if not Grass7Utils.projectionSet:
        proj4 = str(layer.crs().toProj4())
        command = 'g.proj -c proj4="{}"'.format(proj4)
        self.commands.append(command)
        Grass7Utils.projectionSet = True
def convertToHtml(self, fileName):
    """
    Wraps a raw text report in a minimal HTML document, rewriting the
    file in place with one <br>-separated line per source line.

    Files that already start with an '<html>' tag, or that hold at
    most one line, are left untouched.
    :param fileName: path of the report file to rewrite.
    """
    # Read the whole report first: the same file is rewritten below.
    lines = []
    with open(fileName, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    if len(lines) > 1 and '<html>' not in lines[0]:
        # Then write into the HTML file
        with open(fileName, 'w', encoding='utf-8') as f:
            f.write('<html><head>')
            f.write('<meta http-equiv="Content-Type" content="text/html; charset=utf-8" /></head>')
            f.write('<body><p>')
            for line in lines:
                # '<br>' is the valid HTML void element ('</br>' was
                # invalid markup).
                f.write('{}<br>'.format(line))
            f.write('</p></body></html>')
def canExecute(self):
    """
    Checks whether GRASS is installed and usable.
    :return: (ok, message) tuple -- ok is True when no problem was
        reported, and message holds the failure description otherwise.
    """
    message = Grass7Utils.checkGrassIsInstalled()
    return not message, message
def checkParameterValues(self, parameters, context):
    """
    Validates parameter values, delegating to the ext module's
    'checkParameterValuesBeforeExecuting' hook when the wrapped GRASS
    module provides one, and to the default QGIS validation otherwise.
    """
    # Work on a shallow copy: GRASS algorithms may mutate the dict.
    grass_parameters = {k: v for k, v in parameters.items()}
    if self.module:
        if hasattr(self.module, 'checkParameterValuesBeforeExecuting'):
            func = getattr(self.module, 'checkParameterValuesBeforeExecuting')
            return func(self, grass_parameters, context)
    return super().checkParameterValues(grass_parameters, context)
Article detail template. | html | github | https://github.com/django/django | tests/templates/views/article_detail.html |
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
# Locate the input files in the current working directory by filename
# suffix: three grid files (*1.grd, *2.grd, *3.grd) and three
# emission-line tables (*1.txt, *2.txt, *3.txt).
# NOTE(review): when several files share a suffix, the last
# os.listdir() match wins; when none matches, the corresponding
# variable is never defined and later reads raise NameError.
for file in os.listdir('.'):
    if file.endswith("1.grd"):
        gridfile1 = file
for file in os.listdir('.'):
    if file.endswith("2.grd"):
        gridfile2 = file
for file in os.listdir('.'):
    if file.endswith("3.grd"):
        gridfile3 = file
# ------------------------
for file in os.listdir('.'):
    if file.endswith("1.txt"):
        Elines1 = file
for file in os.listdir('.'):
    if file.endswith("2.txt"):
        Elines2 = file
for file in os.listdir('.'):
    if file.endswith("3.txt"):
        Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
    """Draw the literature comparison regions onto the given axes.

    Fix: the function previously ignored its ``ax`` argument and drew
    on the global ``ax1``.  It now draws on the axes it is passed; the
    only call site passes ``ax1``, so its behaviour is unchanged.
    """
    patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
    patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
    patch = patches.PathPatch(path, facecolor='red', lw=0)
    ax.add_patch(patch3)
    ax.add_patch(patch2)
    ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
    """Draw one interpolated contour panel into a 4x4 grid of subplots.

    NOTE(review): the internal special cases (sub_num == 1, 13, 16 and
    the 13..16 bottom-row labelling) assume 1-based subplot numbers,
    but the call site iterates over range(16), i.e. 0..15 -- confirm
    which numbering is intended.
    """
    numplots = 16
    plt.subplot(numplots/4.,4,sub_num)
    # Radial-basis interpolation of column sub_num-1 of z onto the grid
    rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
    zi = rbf(xi, yi)
    contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
    contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
    # Mark and annotate the peak of this emission line
    plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
    plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
    plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
    if sub_num == numplots / 2.:
        print "half the plots are complete"
    #axis limits
    yt_min = 8
    yt_max = 23
    xt_min = 0
    xt_max = 12
    plt.ylim(yt_min,yt_max)
    plt.xlim(xt_min,xt_max)
    plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
    plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
    # Only the left column keeps y tick labels and the y axis label
    if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
        plt.tick_params(labelleft = 'off')
    else:
        plt.tick_params(labelleft = 'on')
        plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
    # Only the bottom row keeps x tick labels and the x axis label
    if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
        plt.tick_params(labelbottom = 'off')
    else:
        plt.tick_params(labelbottom = 'on')
        plt.xlabel('Log($n _{\mathrm{H}} $)')
    if sub_num == 1:
        plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
    if sub_num == 13:
        plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
        plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
    if sub_num == 16 :
        plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
# Each .grd file is a tab-separated table; rows are accumulated into a
# list and converted to a numpy array of strings.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
# The first row of each .txt file is a header, consumed with
# csvReader.next() -- this is Python 2 csv usage (binary mode + .next()).
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers = csvReader.next()
    for row in csvReader:
        dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers2 = csvReader.next()
    for row in csvReader:
        dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers3 = csvReader.next()
    for row in csvReader:
        dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
# NOTE(review): 'incident' is read from concatenated_data before that
# array has been filled, and is never used afterwards -- confirm intent.
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            # Fixed: this previously read 'concatenated_data[i,j] == 0',
            # a no-op comparison; assignment was intended.  The array is
            # zero-initialised, so the visible result is unchanged.
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
#    for j in range(len(Emissionlines[0])):
#        if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
#            concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
#        else:
#            concatenated_data[i,j] = 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
# For every line: its peak ratio value, the row index of the peak, and
# the hdens/phi coordinates at that peak.
for j in range(len(concatenated_data[0])):
    max_values[j,0] = max(concatenated_data[:,j])
    max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
    # NOTE(review): max_values is a float array, so these lookups index
    # numpy arrays with a float -- deprecated/removed in newer numpy;
    # an int cast would be needed there.
    max_values[j,2] = hdens_values[max_values[j,1]]
    max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
# Column indices into the emission-line table; the trailing comment on
# each entry is the line's wavelength in Angstroms.
line = [0, #977
        1, #991
        2, #1026
        5, #1216
        91, #1218
        6, #1239
        7, #1240
        8, #1243
        9, #1263
        10, #1304
        11,#1308
        12, #1397
        13, #1402
        14, #1406
        16, #1486
        17] #1531
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty UV Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_UV_Lines.pdf')
plt.clf()
print "figure saved" | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2011, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
class CrashLogs(object):
    """Finds macOS crash-report files for a given process.

    `host` supplies `.platform` and `.filesystem` abstractions so the class
    can be exercised with mock hosts in tests.  Python 2 code (uses the
    `except Type, name` syntax).
    """
    def __init__(self, host):
        self._host = host
    def find_newest_log(self, process_name, pid=None, include_errors=False, newer_than=None):
        # Only implemented for Mac; every other platform returns None.
        if self._host.platform.is_mac():
            return self._find_newest_log_darwin(process_name, pid, include_errors, newer_than)
        return None
    def _log_directory_darwin(self):
        # Prefer ~/Library/Logs/DiagnosticReports when it exists, falling
        # back to the older ~/Library/Logs/CrashReporter location.
        log_directory = self._host.filesystem.expanduser("~")
        log_directory = self._host.filesystem.join(log_directory, "Library", "Logs")
        if self._host.filesystem.exists(self._host.filesystem.join(log_directory, "DiagnosticReports")):
            log_directory = self._host.filesystem.join(log_directory, "DiagnosticReports")
        else:
            log_directory = self._host.filesystem.join(log_directory, "CrashReporter")
        return log_directory
    def _find_newest_log_darwin(self, process_name, pid, include_errors, newer_than):
        # Crash reports are named "<process>_<...>.crash".
        def is_crash_log(fs, dirpath, basename):
            return basename.startswith(process_name + "_") and basename.endswith(".crash")
        log_directory = self._log_directory_darwin()
        logs = self._host.filesystem.files_under(log_directory, file_filter=is_crash_log)
        # Matches the report header, e.g. "Process:  Foo [1234]".
        first_line_regex = re.compile(r'^Process:\s+(?P<process_name>.*) \[(?P<pid>\d+)\]$')
        errors = ''
        # Reverse-sorted filenames are scanned first; presumably the names
        # embed timestamps so this approximates newest-first — TODO confirm.
        for path in reversed(sorted(logs)):
            try:
                if not newer_than or self._host.filesystem.mtime(path) > newer_than:
                    f = self._host.filesystem.read_text_file(path)
                    match = first_line_regex.match(f[0:f.find('\n')])
                    if match and match.group('process_name') == process_name and (pid is None or int(match.group('pid')) == pid):
                        # Any accumulated read errors are prepended to the log.
                        return errors + f
            except IOError, e:
                if include_errors:
                    errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
            except OSError, e:
                if include_errors:
                    errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
        # No matching log: optionally surface the read errors instead.
        if include_errors and errors:
            return errors
        return None
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer;
import org.apache.kafka.common.TopicPartition;
import java.time.Duration;
import java.util.Collection;
import java.util.Map;
/**
 * A callback interface that the user can implement to trigger custom actions when a commit request completes. The callback
 * may be executed in any thread calling {@link Consumer#poll(java.time.Duration) poll()}.
 *
 * <p>As a {@link FunctionalInterface} it may be supplied as a lambda or method reference.
 */
@FunctionalInterface
public interface OffsetCommitCallback {
    /**
     * A callback method the user can implement to provide asynchronous handling of commit request completion.
     * This method will be called when the commit request sent to the server has been acknowledged.
     *
     * @param offsets A map of the offsets and associated metadata that this callback applies to
     * @param exception The exception thrown during processing of the request, or null if the commit completed successfully
     *
     * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried.
     *             This can only occur if you are using automatic group management with {@link KafkaConsumer#subscribe(Collection)},
     *             or if there is an active group with the same groupId which is using group management.
     * @throws org.apache.kafka.common.errors.RebalanceInProgressException if the commit failed because
     *             it is in the middle of a rebalance. In such cases
     *             commit could be retried after the rebalance is completed with the {@link KafkaConsumer#poll(Duration)} call.
     * @throws org.apache.kafka.common.errors.WakeupException if {@link KafkaConsumer#wakeup()} is called before or while this
     *             function is called
     * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
     *             this function is called
     * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the
     *             configured groupId. See the exception for more details
     * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. if offset metadata
     *             is too large or if the committed offset is invalid).
     */
    void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception);
}
"""Models for API management."""
import logging
from smtplib import SMTPException
from config_models.models import ConfigurationModel
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.urls import reverse
from django.utils.translation import ugettext as _u
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from model_utils.models import TimeStampedModel
from six.moves.urllib.parse import urlunsplit # pylint: disable=import-error
from common.djangoapps.edxmako.shortcuts import render_to_string
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
@python_2_unicode_compatible
class ApiAccessRequest(TimeStampedModel):
    """
    Model to track API access for a user.
    .. pii: Stores a website, company name, company address for this user
    .. pii_types: location, external_service, other
    .. pii_retirement: local_api
    """
    # Workflow states for an access request; every request starts PENDING.
    PENDING = u'pending'
    DENIED = u'denied'
    APPROVED = u'approved'
    STATUS_CHOICES = (
        (PENDING, _('Pending')),
        (DENIED, _('Denied')),
        (APPROVED, _('Approved')),
    )
    # One request per user; deleting the user deletes the request.
    user = models.OneToOneField(User, related_name='api_access_request', on_delete=models.CASCADE)
    status = models.CharField(
        max_length=255,
        choices=STATUS_CHOICES,
        default=PENDING,
        db_index=True,
        help_text=_('Status of this API access request'),
    )
    website = models.URLField(help_text=_('The URL of the website associated with this API user.'))
    reason = models.TextField(help_text=_('The reason this user wants to access the API.'))
    company_name = models.CharField(max_length=255, default=u'')
    company_address = models.CharField(max_length=255, default=u'')
    site = models.ForeignKey(Site, on_delete=models.CASCADE)
    # Set by _send_decision_email once the user has been notified of a
    # status change (see the pre_save receiver below).
    contacted = models.BooleanField(default=False)
    class Meta:
        get_latest_by = 'modified'
        ordering = ('-modified', '-created',)
    @classmethod
    def has_api_access(cls, user):
        """Returns whether or not this user has been granted API access.
        Arguments:
            user (User): The user to check access for.
        Returns:
            bool
        """
        return cls.api_access_status(user) == cls.APPROVED
    @classmethod
    def api_access_status(cls, user):
        """
        Returns the user's API access status, or None if they have not
        requested access.
        Arguments:
            user (User): The user to check access for.
        Returns:
            str or None
        """
        try:
            return cls.objects.get(user=user).status
        except cls.DoesNotExist:
            return None
    @classmethod
    def retire_user(cls, user):
        """
        Retires the user's API access request table for GDPR
        Arguments:
            user (User): The user linked to the data to retire in the model.
        Returns:
            True: If the user has a linked data in the model and retirement is successful
            False: user has no linked data in the model.
        """
        try:
            retire_target = cls.objects.get(user=user)
        except cls.DoesNotExist:
            return False
        else:
            # Blank out all PII fields; the row itself is kept.
            retire_target.website = ''
            retire_target.company_address = ''
            retire_target.company_name = ''
            retire_target.reason = ''
            retire_target.save()
            return True
    def approve(self):
        """Approve this request."""
        log.info(u'Approving API request from user [%s].', self.user.id)
        self.status = self.APPROVED
        self.save()
    def deny(self):
        """Deny this request."""
        log.info(u'Denying API request from user [%s].', self.user.id)
        self.status = self.DENIED
        self.save()
    def __str__(self):
        return u'ApiAccessRequest {website} [{status}]'.format(website=self.website, status=self.status)
@python_2_unicode_compatible
class ApiAccessConfig(ConfigurationModel):
    """
    Configuration for API management.
    .. no_pii:
    """
    # ConfigurationModel supplies the `enabled` flag and history tracking.
    def __str__(self):
        return 'ApiAccessConfig [enabled={}]'.format(self.enabled)
@receiver(post_save, sender=ApiAccessRequest, dispatch_uid="api_access_request_post_save_email")
def send_request_email(sender, instance, created, **kwargs):  # pylint: disable=unused-argument
    """Notify the API access managers when a brand-new request is saved."""
    if not created:
        return
    _send_new_pending_email(instance)
@receiver(pre_save, sender=ApiAccessRequest, dispatch_uid="api_access_request_pre_save_email")
def send_decision_email(sender, instance, **kwargs):  # pylint: disable=unused-argument
    """Email the requesting user when the status of an existing request changes."""
    if not instance.id or instance.contacted:
        return
    old_instance = ApiAccessRequest.objects.get(pk=instance.id)
    if instance.status != old_instance.status:
        _send_decision_email(instance)
def _send_new_pending_email(instance):
    """ Send an email to settings.API_ACCESS_MANAGER_EMAIL with the contents of this API access request. """
    scheme = 'https' if settings.HTTPS == 'on' else 'http'
    approval_path = reverse('admin:api_admin_apiaccessrequest_change', args=(instance.id,))
    context = {
        'approval_url': urlunsplit((scheme, instance.site.domain, approval_path, '', '')),
        'api_request': instance
    }
    message = render_to_string('api_admin/api_access_request_email_new_request.txt', context)
    try:
        send_mail(
            _u(u'API access request from {company}').format(company=instance.company_name),
            message,
            settings.API_ACCESS_FROM_EMAIL,
            [settings.API_ACCESS_MANAGER_EMAIL],
            fail_silently=False
        )
    except SMTPException:
        # Delivery failure is logged but never blocks saving the request.
        log.exception(u'Error sending API user notification email for request [%s].', instance.id)
def _send_decision_email(instance):
    """ Send an email to requesting user with the decision made about their request. """
    scheme = 'https' if settings.HTTPS == 'on' else 'http'
    status_path = reverse('api_admin:api-status')
    context = {
        'name': instance.user.username,
        'api_management_url': urlunsplit((scheme, instance.site.domain, status_path, '', '')),
        'authentication_docs_url': settings.AUTH_DOCUMENTATION_URL,
        'api_docs_url': settings.API_DOCUMENTATION_URL,
        'support_email_address': settings.API_ACCESS_FROM_EMAIL,
        'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
    }
    # Template name is keyed on the request's new status (approved/denied).
    template_name = 'api_admin/api_access_request_email_{status}.txt'.format(status=instance.status)
    message = render_to_string(template_name, context)
    try:
        send_mail(
            _u('API access request'),
            message,
            settings.API_ACCESS_FROM_EMAIL,
            [instance.user.email],
            fail_silently=False
        )
    except SMTPException:
        log.exception(u'Error sending API user notification email for request [%s].', instance.id)
    else:
        # Only mark as contacted when delivery did not raise; the caller's
        # pre_save flow persists this flag.
        instance.contacted = True
@python_2_unicode_compatible
class Catalog(models.Model):
    """
    A (non-Django-managed) model for Catalogs in the course discovery service.
    .. no_pii:
    """
    id = models.IntegerField(primary_key=True)  # pylint: disable=invalid-name
    name = models.CharField(max_length=255, null=False, blank=False)
    query = models.TextField(null=False, blank=False)
    viewers = models.TextField()
    class Meta(object):
        # Catalogs live in course discovery, so we do not create any
        # tables in LMS. Instead we override the save method to not
        # touch the database, and use our API client to communicate
        # with discovery.
        managed = False
    def __init__(self, *args, **kwargs):
        # NOTE(review): when 'attributes' is supplied, super().__init__ is
        # never called, so normal Django model initialization is skipped —
        # presumably acceptable for this DTO-style usage; confirm.
        attributes = kwargs.get('attributes')
        if attributes:
            self.id = attributes['id']  # pylint: disable=invalid-name
            self.name = attributes['name']
            self.query = attributes['query']
            self.viewers = attributes['viewers']
        else:
            super(Catalog, self).__init__(*args, **kwargs)  # lint-amnesty, pylint: disable=super-with-arguments
    def save(self, **kwargs):  # lint-amnesty, pylint: disable=arguments-differ, unused-argument
        # Intentional no-op: persistence happens in the discovery service.
        return None
    @property
    def attributes(self):
        """Return a dictionary representation of this catalog."""
        return {
            'id': self.id,
            'name': self.name,
            'query': self.query,
            'viewers': self.viewers,
        }
    def __str__(self):
        return u'Catalog {name} [{query}]'.format(name=self.name, query=self.query)
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.util.Map;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.core.annotation.AnnotationAttributes;
import org.springframework.core.type.AnnotatedTypeMetadata;
import org.springframework.core.type.AnnotationMetadata;
import org.springframework.stereotype.Component;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Test for {@link Conditional} beans.
*
* @author Phillip Webb
* @author Juergen Hoeller
*/
@SuppressWarnings("resource")
public class ConfigurationClassWithConditionTests {
    @Test
    void conditionalOnMissingBeanMatch() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.register(BeanOneConfiguration.class, BeanTwoConfiguration.class);
        ctx.refresh();
        assertThat(ctx.containsBean("bean1")).isTrue();
        assertThat(ctx.containsBean("bean2")).isFalse();
        assertThat(ctx.containsBean("configurationClassWithConditionTests.BeanTwoConfiguration")).isFalse();
    }
    @Test
    void conditionalOnMissingBeanNoMatch() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.register(BeanTwoConfiguration.class);
        ctx.refresh();
        assertThat(ctx.containsBean("bean1")).isFalse();
        assertThat(ctx.containsBean("bean2")).isTrue();
        assertThat(ctx.containsBean("configurationClassWithConditionTests.BeanTwoConfiguration")).isTrue();
    }
    @Test
    void conditionalOnBeanMatch() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.register(BeanOneConfiguration.class, BeanThreeConfiguration.class);
        ctx.refresh();
        assertThat(ctx.containsBean("bean1")).isTrue();
        assertThat(ctx.containsBean("bean3")).isTrue();
    }
    @Test
    void conditionalOnBeanNoMatch() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.register(BeanThreeConfiguration.class);
        ctx.refresh();
        assertThat(ctx.containsBean("bean1")).isFalse();
        assertThat(ctx.containsBean("bean3")).isFalse();
    }
    @Test
    void metaConditional() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.register(ConfigurationWithMetaCondition.class);
        ctx.refresh();
        assertThat(ctx.containsBean("bean")).isTrue();
    }
    // "WithAsm" variants register by class *name* so metadata is read via
    // ASM instead of reflection.
    @Test
    void metaConditionalWithAsm() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.registerBeanDefinition("config", new RootBeanDefinition(ConfigurationWithMetaCondition.class.getName()));
        ctx.refresh();
        assertThat(ctx.containsBean("bean")).isTrue();
    }
    @Test
    void nonConfigurationClass() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.register(NonConfigurationClass.class);
        ctx.refresh();
        assertThat(ctx.containsBean("bean1")).isFalse();
    }
    @Test
    void nonConfigurationClassWithAsm() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.registerBeanDefinition("config", new RootBeanDefinition(NonConfigurationClass.class.getName()));
        ctx.refresh();
        assertThat(ctx.containsBean("bean1")).isFalse();
    }
    @Test
    void methodConditional() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.register(ConditionOnMethodConfiguration.class);
        ctx.refresh();
        assertThat(ctx.containsBean("bean1")).isFalse();
    }
    @Test
    void methodConditionalWithAsm() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.registerBeanDefinition("config", new RootBeanDefinition(ConditionOnMethodConfiguration.class.getName()));
        ctx.refresh();
        assertThat(ctx.containsBean("bean1")).isFalse();
    }
    // Passes iff none of the @Never-guarded imports are instantiated — their
    // static initializers below throw if the class is ever loaded/created.
    @Test
    void importsNotCreated() {
        AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
        ctx.register(ImportsNotCreated.class);
        ctx.refresh();
    }
    @Test
    void conditionOnOverriddenMethodHonored() {
        AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(ConfigWithBeanSkipped.class);
        assertThat(context.getBeansOfType(ExampleBean.class)).isEmpty();
    }
    @Test
    void noConditionOnOverriddenMethodHonored() {
        AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(ConfigWithBeanReactivated.class);
        Map<String, ExampleBean> beans = context.getBeansOfType(ExampleBean.class);
        assertThat(beans).containsOnlyKeys("baz");
    }
    @Test
    void configWithAlternativeBeans() {
        AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(ConfigWithAlternativeBeans.class);
        Map<String, ExampleBean> beans = context.getBeansOfType(ExampleBean.class);
        assertThat(beans).containsOnlyKeys("baz");
    }
    // --- fixture @Configuration classes ---
    @Configuration
    static class BeanOneConfiguration {
        @Bean
        public ExampleBean bean1() {
            return new ExampleBean();
        }
    }
    @Configuration
    @Conditional(NoBeanOneCondition.class)
    static class BeanTwoConfiguration {
        @Bean
        public ExampleBean bean2() {
            return new ExampleBean();
        }
    }
    @Configuration
    @Conditional(HasBeanOneCondition.class)
    static class BeanThreeConfiguration {
        @Bean
        public ExampleBean bean3() {
            return new ExampleBean();
        }
    }
    @Configuration
    @MetaConditional("test")
    static class ConfigurationWithMetaCondition {
        @Bean
        public ExampleBean bean() {
            return new ExampleBean();
        }
    }
    // --- meta-annotations carrying @Conditional ---
    @Conditional(MetaConditionalFilter.class)
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.TYPE)
    public @interface MetaConditional {
        String value();
    }
    @Conditional(NeverCondition.class)
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE, ElementType.METHOD})
    public @interface Never {
    }
    @Conditional(AlwaysCondition.class)
    @Never
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE, ElementType.METHOD})
    public @interface MetaNever {
    }
    // --- Condition implementations used by the fixtures ---
    static class NoBeanOneCondition implements Condition {
        @Override
        public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
            return !context.getBeanFactory().containsBeanDefinition("bean1");
        }
    }
    static class HasBeanOneCondition implements ConfigurationCondition {
        @Override
        public ConfigurationPhase getConfigurationPhase() {
            return ConfigurationPhase.REGISTER_BEAN;
        }
        @Override
        public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
            return context.getBeanFactory().containsBeanDefinition("bean1");
        }
    }
    static class MetaConditionalFilter implements Condition {
        @Override
        public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
            AnnotationAttributes attributes = AnnotationAttributes.fromMap(metadata.getAnnotationAttributes(MetaConditional.class.getName()));
            assertThat(attributes.getString("value")).isEqualTo("test");
            return true;
        }
    }
    static class NeverCondition implements Condition {
        @Override
        public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
            return false;
        }
    }
    static class AlwaysCondition implements Condition {
        @Override
        public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
            return true;
        }
    }
    @Component
    @MetaNever
    static class NonConfigurationClass {
        @Bean
        public ExampleBean bean1() {
            return new ExampleBean();
        }
    }
    @Configuration
    static class ConditionOnMethodConfiguration {
        @Bean
        @Never
        public ExampleBean bean1() {
            return new ExampleBean();
        }
    }
    // --- classes whose static initializers throw if ever instantiated ---
    @Configuration
    @Never
    @Import({ConfigurationNotCreated.class, RegistrarNotCreated.class, ImportSelectorNotCreated.class})
    static class ImportsNotCreated {
        static {
            if (true) {
                throw new RuntimeException();
            }
        }
    }
    @Configuration
    static class ConfigurationNotCreated {
        static {
            if (true) {
                throw new RuntimeException();
            }
        }
    }
    static class RegistrarNotCreated implements ImportBeanDefinitionRegistrar {
        static {
            if (true) {
                throw new RuntimeException();
            }
        }
        @Override
        public void registerBeanDefinitions(AnnotationMetadata importingClassMetadata,
                BeanDefinitionRegistry registry) {
        }
    }
    static class ImportSelectorNotCreated implements ImportSelector {
        static {
            if (true) {
                throw new RuntimeException();
            }
        }
        @Override
        public String[] selectImports(AnnotationMetadata importingClassMetadata) {
            return new String[] {};
        }
    }
    static class ExampleBean {
    }
    @Configuration
    static class ConfigWithBeanActive {
        @Bean
        public ExampleBean baz() {
            return new ExampleBean();
        }
    }
    static class ConfigWithBeanSkipped extends ConfigWithBeanActive {
        @Override
        @Bean
        @Conditional(NeverCondition.class)
        public ExampleBean baz() {
            return new ExampleBean();
        }
    }
    static class ConfigWithBeanReactivated extends ConfigWithBeanSkipped {
        @Override
        @Bean
        public ExampleBean baz() {
            return new ExampleBean();
        }
    }
    @Configuration
    static class ConfigWithAlternativeBeans {
        @Bean(name = "baz")
        @Conditional(AlwaysCondition.class)
        public ExampleBean baz1() {
            return new ExampleBean();
        }
        @Bean(name = "baz")
        @Conditional(NeverCondition.class)
        public ExampleBean baz2() {
            return new ExampleBean();
        }
    }
}
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package base
import (
"context"
"sync/atomic"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
// ClusterIDContainer is used to share a single Cluster ID instance between
// multiple layers. It allows setting and getting the value. Once a value is
// set, the value cannot change.
//
// A zero-value container is ready to use; Get returns uuid.Nil until Set is
// called.
//
// The cluster ID is determined on startup as follows:
//   - If there are existing stores, their cluster ID is used.
//   - If the node is bootstrapping, a new UUID is generated.
//   - Otherwise, it is determined via gossip with other nodes.
type ClusterIDContainer struct {
	clusterID atomic.Value // uuid.UUID
	// OnSet, if non-nil, is called after the ID is set with the new value.
	OnSet func(uuid.UUID)
}
// String returns the cluster ID, or "?" if it is unset.
func (c *ClusterIDContainer) String() string {
	if id := c.Get(); id != uuid.Nil {
		return id.String()
	}
	return "?"
}
// Get returns the current cluster ID; uuid.Nil if it is unset.
func (c *ClusterIDContainer) Get() uuid.UUID {
	if v := c.clusterID.Load(); v != nil {
		return v.(uuid.UUID)
	}
	return uuid.Nil
}
// Set sets the current cluster ID. If it is already set, the value must match.
func (c *ClusterIDContainer) Set(ctx context.Context, val uuid.UUID) {
	// NOTE: this compare-and-swap is intentionally racy and won't catch all
	// cases where two different cluster IDs are set. That's ok, as this is
	// just a sanity check. But if we decide to care, we can use the new
	// (*atomic.Value).CompareAndSwap API introduced in go1.17.
	cur := c.Get()
	if cur == uuid.Nil {
		c.clusterID.Store(val)
	} else if cur != val {
		// NB: we are avoiding log.Dev.Fatal here because we want to avoid a dependency
		// on the log package. Also, this assertion would denote a serious bug and
		// we may as well panic.
		panic(errors.AssertionFailedf("different ClusterIDs set: %s, then %s", cur, val))
	}
	// OnSet fires on every Set call that doesn't panic, including repeated
	// sets of the same value.
	if c.OnSet != nil {
		c.OnSet(val)
	}
}
// Reset changes the ClusterID regardless of the old value.
//
// Should only be used in testing code.
//
// Note: unlike Set, no validation is performed and OnSet is not invoked.
func (c *ClusterIDContainer) Reset(val uuid.UUID) {
	c.clusterID.Store(val)
}
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
# C-source fragments consumed by cffi to build the OpenSSL DH bindings.
# The string contents are parsed by cffi at build time — do not edit casually.
INCLUDES = """
#include <openssl/dh.h>
"""
# Struct layout exposed to Python; "...;" lets cffi fill in unlisted fields.
TYPES = """
typedef struct dh_st {
    /* Prime number (shared) */
    BIGNUM *p;
    /* Generator of Z_p (shared) */
    BIGNUM *g;
    /* Private DH value x */
    BIGNUM *priv_key;
    /* Public DH value g^x */
    BIGNUM *pub_key;
    ...;
} DH;
"""
# Plain function prototypes available in all supported OpenSSL versions.
FUNCTIONS = """
DH *DH_new(void);
void DH_free(DH *);
int DH_size(const DH *);
DH *DH_generate_parameters(int, int, void (*)(int, int, void *), void *);
int DH_check(const DH *, int *);
int DH_generate_key(DH *);
int DH_compute_key(unsigned char *, const BIGNUM *, DH *);
int DH_set_ex_data(DH *, int, void *);
void *DH_get_ex_data(DH *, int);
DH *d2i_DHparams(DH **, const unsigned char **, long);
int i2d_DHparams(const DH *, unsigned char **);
int DHparams_print_fp(FILE *, const DH *);
int DHparams_print(BIO *, const DH *);
"""
# Declarations that may be macros rather than real functions.
MACROS = """
int DH_generate_parameters_ex(DH *, int, int, BN_GENCB *);
"""
# No version-specific shims needed for DH.
CUSTOMIZATIONS = """
"""
CONDITIONAL_NAMES = {}
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
from pycsw.core import util
from pycsw.core.etree import etree
# Atom / GeoRSS namespace URIs used when serializing output records.
NAMESPACE = 'http://www.w3.org/2005/Atom'
NAMESPACES = {'atom': NAMESPACE, 'georss': 'http://www.georss.org/georss'}
# Mapping from pycsw core queryable names to the Atom elements that carry them.
XPATH_MAPPINGS = {
    'pycsw:Identifier': 'atom:id',
    'pycsw:Title': 'atom:title',
    'pycsw:Creator': 'atom:author',
    'pycsw:Abstract': 'atom:summary',
    'pycsw:PublicationDate': 'atom:published',
    'pycsw:Keywords': 'atom:category',
    'pycsw:Contributor': 'atom:contributor',
    'pycsw:AccessConstraints': 'atom:rights',
    'pycsw:Modified': 'atom:updated',
    'pycsw:Source': 'atom:source',
}
def write_record(result, esn, context, url=None):
    ''' Return csw:SearchResults child as lxml.etree.Element

    result -- a metadata record object queried via context's core mappings
    esn -- element set name ('full' dumps atom:entry records verbatim)
    context -- pycsw context providing namespaces and queryable mappings
    url -- base service URL used to build the GetRepositoryItem link
    '''
    typename = util.getqattr(result, context.md_core_model['mappings']['pycsw:Typename'])
    if esn == 'full' and typename == 'atom:entry':
        # dump record as is and exit
        return etree.fromstring(util.getqattr(result, context.md_core_model['mappings']['pycsw:XML']))
    node = etree.Element(util.nspath_eval('atom:entry', NAMESPACES), nsmap=NAMESPACES)
    node.attrib[util.nspath_eval('xsi:schemaLocation', context.namespaces)] = \
        '%s http://www.kbcafe.com/rss/atom.xsd.xml' % NAMESPACES['atom']
    # author
    val = util.getqattr(result, context.md_core_model['mappings']['pycsw:Creator'])
    if val:
        author = etree.SubElement(node, util.nspath_eval('atom:author', NAMESPACES))
        etree.SubElement(author, util.nspath_eval('atom:name', NAMESPACES)).text = val
    # category: one atom:category per comma-separated keyword
    val = util.getqattr(result, context.md_core_model['mappings']['pycsw:Keywords'])
    if val:
        for kw in val.split(','):
            etree.SubElement(node, util.nspath_eval('atom:category', NAMESPACES), term=kw)
    for qval in ['pycsw:Contributor', 'pycsw:Identifier']:
        val = util.getqattr(result, context.md_core_model['mappings'][qval])
        if val:
            etree.SubElement(node, util.nspath_eval(XPATH_MAPPINGS[qval], NAMESPACES)).text = val
            if qval == 'pycsw:Identifier':
                # identifier is mirrored into dc:identifier as well
                etree.SubElement(node, util.nspath_eval('dc:identifier', context.namespaces)).text = val
    # links are stored as '^'-separated entries of ','-separated fields
    rlinks = util.getqattr(result, context.md_core_model['mappings']['pycsw:Links'])
    if rlinks:
        for link in rlinks.split('^'):
            linkset = link.split(',')
            # cleanup: SubElement attaches the element itself; the previously
            # bound (and unused) local `url2` was removed
            etree.SubElement(node, util.nspath_eval('atom:link', NAMESPACES), href=linkset[-1], type=linkset[2], title=linkset[1])
    # self-referencing GetRepositoryItem link
    etree.SubElement(node, util.nspath_eval('atom:link', NAMESPACES), href='%s?service=CSW&version=2.0.2&request=GetRepositoryItem&id=%s' % (url, util.getqattr(result, context.md_core_model['mappings']['pycsw:Identifier'])))
    # atom:title (element is emitted even when the value is empty)
    el = etree.SubElement(node, util.nspath_eval(XPATH_MAPPINGS['pycsw:Title'], NAMESPACES))
    val = util.getqattr(result, context.md_core_model['mappings']['pycsw:Title'])
    if val:
        el.text = val
    # atom:updated: prefer the record's Modified date, fall back to InsertDate
    el = etree.SubElement(node, util.nspath_eval(XPATH_MAPPINGS['pycsw:Modified'], NAMESPACES))
    val = util.getqattr(result, context.md_core_model['mappings']['pycsw:Modified'])
    if val:
        el.text = val
    else:
        val = util.getqattr(result, context.md_core_model['mappings']['pycsw:InsertDate'])
        el.text = val
    for qval in ['pycsw:PublicationDate', 'pycsw:AccessConstraints', 'pycsw:Source', 'pycsw:Abstract']:
        val = util.getqattr(result, context.md_core_model['mappings'][qval])
        if val:
            etree.SubElement(node, util.nspath_eval(XPATH_MAPPINGS[qval], NAMESPACES)).text = val
    # bbox extent
    val = util.getqattr(result, context.md_core_model['mappings']['pycsw:BoundingBox'])
    bboxel = write_extent(val, context.namespaces)
    if bboxel is not None:
        node.append(bboxel)
    return node
def write_extent(bbox, nsmap):
''' Generate BBOX extent '''
if bbox is not None:
try:
bbox2 = util.wkt2geom(bbox)
except:
return None
where = etree.Element(util.nspath_eval('georss:where', NAMESPACES))
envelope = etree.SubElement(where, util.nspath_eval('gml:Envelope', nsmap), srsName='http://www.opengis.net/def/crs/EPSG/0/4326')
etree.SubElement(envelope, util.nspath_eval('gml:lowerCorner', nsmap)).text = '%s %s' % (bbox2[1], bbox2[0])
etree.SubElement(envelope, util.nspath_eval('gml:upperCorner', nsmap)).text = '%s %s' % (bbox2[3], bbox2[2])
return where
return None | unknown | codeparrot/codeparrot-clean | ||
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy, ugettext as _
ERROR_MESSAGE = ugettext_lazy("Please enter a correct username and password. "
"Note that both fields are case-sensitive.")
class AdminAuthenticationForm(AuthenticationForm):
"""
A custom authentication form used in the admin app.
"""
this_is_the_login_form = forms.BooleanField(widget=forms.HiddenInput, initial=1,
error_messages={'required': ugettext_lazy("Please log in again, because your session has expired.")})
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
message = ERROR_MESSAGE
if username and password:
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
if username is not None and u'@' in username:
# Mistakenly entered e-mail address instead of username? Look it up.
try:
user = User.objects.get(email=username)
except (User.DoesNotExist, User.MultipleObjectsReturned):
# Nothing to do here, moving along.
pass
else:
if user.check_password(password):
message = _("Your e-mail address is not your username."
" Try '%s' instead.") % user.username
raise forms.ValidationError(message)
elif not self.user_cache.is_active or not self.user_cache.is_staff:
raise forms.ValidationError(message)
self.check_for_test_cookie()
return self.cleaned_data | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
import locale
import sys
try:
# reduce moved to functools in python3.
reduce
except NameError:
from functools import reduce
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].items()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
default_encoding = locale.getdefaultlocale()[1]
if default_encoding and default_encoding.upper() != encoding.upper():
try:
xml_string = xml_string.decode(default_encoding).encode(encoding)
except AttributeError:
pass
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value) | unknown | codeparrot/codeparrot-clean | ||
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from rest_framework.test import APITestCase, APIClient
from rest_framework.reverse import reverse
from rest_framework import status
from students.models import Class, Subject, Student, Teacher
from .serializers import HomeworkSerializer, SubmissionSerializer
from .models import Homework, Submission
class HomeworksViewSetTestCase(APITestCase):
def setUp(self):
self.client = APIClient()
self.list_view_name = 'homeworks:homeworks-list'
self.detail_view_name = 'homeworks:homeworks-detail'
self.serializer_class = HomeworkSerializer
self.clazz = Class.objects.create(number=10, letter='A')
self.subject = Subject.objects.create(title='test_subject')
self.student_user = User.objects.create(username='test', password='pass')
self.teacher_user = User.objects.create(username='author', password='pass123')
self.teacher = Teacher.objects.create(user=self.teacher_user, subject=self.subject)
self.student = Student.objects.create(user=self.student_user, clazz=self.clazz)
self.homework = Homework.objects.create(
subject=self.subject,
clazz=self.clazz,
deadline=datetime.now().date(),
details='detailed explanation',
author=self.teacher
)
def test_homeworks_list_with_anonymous_user(self):
response = self.client.get(reverse(self.list_view_name))
self.assertEqual(
response.data['detail'],
'Authentication credentials were not provided.'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_homeworks_detail_with_anonymous_user(self):
response = self.client.get(
reverse(self.detail_view_name, kwargs={'pk': self.homework.id})
)
self.assertEqual(
response.data['detail'],
'Authentication credentials were not provided.'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_homeworks_list_with_authenticated_user(self):
self.client.force_authenticate(user=self.student_user)
response = self.client.get(reverse(self.list_view_name))
self.assertIsNotNone(response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_homeworks_detail_with_authenticated_user(self):
self.client.force_authenticate(user=self.student_user)
response = self.client.get(reverse(self.detail_view_name, kwargs={'pk': self.homework.id}))
self.assertEqual(response.data['clazz']['number'], self.student.clazz.number)
self.assertEqual(response.data['clazz']['letter'], self.student.clazz.letter)
self.assertEqual(response.data['details'], self.homework.details)
self.assertEqual(response.data['subject']['title'], self.subject.title)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_homeworks_list_with_expired_date(self):
self.client.force_authenticate(user=self.student_user)
self.homework.deadline -= timedelta(days=5)
self.homework.save()
response = self.client.get(reverse(self.list_view_name))
self.assertEqual(response.data['results'], [])
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_homeworks_detail_with_invalid_id(self):
self.client.force_authenticate(user=self.student_user)
response = self.client.get(
reverse(self.detail_view_name, kwargs={'pk': self.homework.id + 1})
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_homeworks_creation_with_student_account(self):
self.client.force_authenticate(user=self.student_user)
self.homework.details = 'details'
post_data = self.serializer_class(self.homework).data
response = self.client.post(
reverse(self.list_view_name), post_data, format='json'
)
self.assertEqual(
response.data['detail'],
'Only teachers are allowed to view and modify this content.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_homeworks_creation_with_too_long_details(self):
self.client.force_authenticate(user=self.teacher_user)
self.homework.details = 'details' * 256
post_data = self.serializer_class(self.homework).data
response = self.client.post(reverse(self.list_view_name), post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data['details'],
['Ensure this field has no more than 256 characters.']
)
def test_homeworks_creation_with_valid_details(self):
self.client.force_authenticate(user=self.teacher_user)
self.homework.details = 'details'
post_data = self.serializer_class(self.homework).data
response = self.client.post(reverse(self.list_view_name), post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_homeworks_update_with_student_account(self):
self.client.force_authenticate(user=self.student_user)
self.homework.details = 'details'
put_data = self.serializer_class(self.homework).data
response = self.client.put(
reverse(self.detail_view_name, kwargs={'pk': self.homework.id}), put_data, format='json'
)
self.assertEqual(
response.data['detail'],
'Only teachers are allowed to view and modify this content.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_homeworks_update_with_too_long_details(self):
self.client.force_authenticate(user=self.teacher_user)
self.homework.details = 'details' * 256
put_data = self.serializer_class(self.homework).data
response = self.client.put(
reverse(self.detail_view_name, kwargs={'pk': self.homework.id}), put_data, format='json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data['details'],
['Ensure this field has no more than 256 characters.']
)
def test_homeworks_update_with_valid_details(self):
self.client.force_authenticate(user=self.teacher_user)
self.homework.details = 'details'
put_data = self.serializer_class(self.homework).data
response = self.client.put(
reverse(self.detail_view_name, kwargs={'pk': self.homework.id}), put_data, format='json'
)
self.assertEqual(response.data['details'], self.homework.details)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_homeworks_update_of_another_user(self):
self.client.force_authenticate(user=self.teacher_user)
new_user = User.objects.create(username='test2', password='pass')
new_teacher = Teacher.objects.create(user=new_user, subject=self.subject)
self.homework.author = new_teacher
self.homework.save()
response = self.client.put(
reverse(self.detail_view_name, kwargs={'pk': self.homework.id}),
{'details': 'detailed information'},
format='json'
)
self.assertEqual(
response.data['detail'],
'You should be the author of this content in order to modify it.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_homeworks_deletion_of_another_user(self):
self.client.force_authenticate(user=self.teacher_user)
new_user = User.objects.create(username='test2', password='pass')
new_teacher = Teacher.objects.create(user=new_user, subject=self.subject)
self.homework.author = new_teacher
self.homework.save()
response = self.client.delete(
reverse(self.detail_view_name, kwargs={'pk': self.homework.id})
)
self.assertEqual(
response.data['detail'],
'You should be the author of this content in order to modify it.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_homeworks_deletion(self):
self.client.force_authenticate(user=self.teacher_user)
response = self.client.delete(
reverse(self.detail_view_name, kwargs={'pk': self.homework.id})
)
self.assertEqual(Homework.objects.count(), 0)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class SubmissionsViewSetTestCase(APITestCase):
def setUp(self):
self.client = APIClient()
self.list_view_name = 'homeworks:submissions-list'
self.detail_view_name = 'homeworks:submissions-detail'
self.serializer_class = SubmissionSerializer
self.clazz = Class.objects.create(number=10, letter='A')
self.subject = Subject.objects.create(title='test_subject')
self.student_user1 = User.objects.create(username='test', password='pass')
self.student_user2 = User.objects.create(username='test1', password='password')
self.teacher_user = User.objects.create(username='author', password='pass123')
self.teacher = Teacher.objects.create(user=self.teacher_user, subject=self.subject)
self.student1 = Student.objects.create(user=self.student_user1, clazz=self.clazz)
self.student2 = Student.objects.create(user=self.student_user2, clazz=self.clazz)
self.homework = Homework.objects.create(
subject=self.subject,
clazz=self.clazz,
deadline=datetime.now().date(),
details='detailed explanation',
author=self.teacher
)
self.student1_submission = Submission.objects.create(
homework=self.homework,
student=self.student1,
content='solution'
)
self.student2_submission = Submission.objects.create(
homework=self.homework,
student=self.student2,
content='test'
)
def test_submissions_list_with_anonymous_user(self):
response = self.client.get(
reverse(self.list_view_name, kwargs={'homeworks_pk': self.homework.id})
)
self.assertEqual(response.data['detail'], 'Authentication credentials were not provided.')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_submissions_detail_with_anonymous_user(self):
response = self.client.get(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student1_submission.id
}
)
)
self.assertEqual(response.data['detail'], 'Authentication credentials were not provided.')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_submissions_list_with_student_user(self):
self.client.force_authenticate(user=self.student_user1)
response = self.client.get(
reverse(self.list_view_name, kwargs={'homeworks_pk': self.homework.id})
)
self.assertNotEqual(response.data, SubmissionSerializer(self.student2_submission).data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_submissions_detail_with_student_user(self):
self.client.force_authenticate(user=self.student_user1)
response = self.client.get(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student1_submission.id
}
)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_submissions_detail_of_another_student(self):
self.client.force_authenticate(user=self.student_user1)
response = self.client.get(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student2_submission.id
}
)
)
self.assertEqual(
response.data['detail'], 'You do not have permission to perform this action.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_submissions_list_with_teacher_user(self):
self.client.force_authenticate(user=self.teacher_user)
response = self.client.get(
reverse(self.list_view_name, kwargs={'homeworks_pk': self.homework.id})
)
self.assertEqual(response.data[1]['id'], self.student1_submission.id)
self.assertEqual(response.data[0]['id'], self.student2_submission.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_submissions_detail_with_teacher_user(self):
self.client.force_authenticate(user=self.teacher_user)
response = self.client.get(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student1_submission.id
}
)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_checked_submissions_list(self):
self.client.force_authenticate(user=self.teacher_user)
self.client.put(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student1_submission.id
}
),
{'checked': True},
format='json'
)
self.client.put(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student2_submission.id
}
),
{'checked': True},
format='json'
)
response = self.client.get(
reverse(self.list_view_name, kwargs={'homeworks_pk': self.homework.id})
)
self.assertEqual(response.data, [])
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_submission_creation_with_teacher_user(self):
self.client.force_authenticate(user=self.teacher_user)
response = self.client.post(
reverse(self.list_view_name, kwargs={'homeworks_pk': self.homework.id}),
{'content': 'test'},
format='json'
)
self.assertEqual(
response.data['detail'],
'Only students are allowed to view and modify this content.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_second_submission_creation(self):
self.client.force_authenticate(user=self.student_user1)
response = self.client.post(
reverse(self.list_view_name, kwargs={'homeworks_pk': self.homework.id}),
{'content': 'test'},
format='json'
)
self.assertEqual(response.data['detail'], 'You can submit only one submission.')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_checked_submission_update(self):
self.client.force_authenticate(user=self.teacher_user)
self.client.put(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student1_submission.id
}
),
{'checked': True},
format='json'
)
self.client.force_authenticate(user=self.student_user1)
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student1_submission.id
}
),
{'content': 'testing'},
format='json'
)
self.assertEqual(response.data['detail'], 'Submission is already checked.')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_submission_update(self):
self.client.force_authenticate(user=self.student_user1)
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student1_submission.id
}
),
{'content': 'testing'},
format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_submission_update_of_another_student(self):
self.client.force_authenticate(user=self.student_user2)
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={
'homeworks_pk': self.homework.id,
'pk': self.student1_submission.id
}
),
{'content': 'testing'},
format='json'
)
self.assertEqual(
response.data['detail'],
'You do not have permission to perform this action.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) | unknown | codeparrot/codeparrot-clean | ||
- block:
- name: check baseline
command: ansible-inventory -i '{{ role_path }}/files/valid_sample.yml' --list
register: limited
- name: ensure non empty host list
assert:
that:
- "'something' in inv['_meta']['hostvars']"
- name: check that limit removes host
command: ansible-inventory -i '{{ role_path }}/files/valid_sample.yml' --limit '!something' --list
register: limited
- name: ensure empty host list
assert:
that:
- "'something' not in inv['_meta']['hostvars']"
- name: check dupes
command: ansible-inventory -i '{{ role_path }}/files/complex.ini' --list
register: limited
- name: ensure host only appears on directly assigned
assert:
that:
- "'hosts' not in inv['parent_1']"
- "'hosts' not in inv['parent_2']"
- "'hosts' in inv['parent_3']"
- "'test1' in inv['test_group1']['hosts']"
vars:
inv: '{{limited.stdout|from_json(profile="inventory_legacy") }}'
delegate_to: localhost | unknown | github | https://github.com/ansible/ansible | test/integration/targets/ansible-inventory/tasks/json_output.yml |
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
#
# Authors:
# Brian Paul
"""The TileDialog class is a dialog used to edit a list of tiles for
a server/network node. If the server node is an N-instance node the
dialog will display a spin control [1 .. N] to edit the tile list for
any of the N instances.
"""
from wxPython.wx import *
from wxPython.gizmos import *
import crutils
class TileDialog(wxDialog):
def __init__(self, parent, id, title, numLists, hosts=[""], message=""):
"""parent, id, and title are the standard wxDialog parameters.
"""
assert numLists >= 1
wxDialog.__init__(self, parent, id, title, pos=wxPoint(-1,-1),
style = wxDEFAULT_DIALOG_STYLE|wxRESIZE_BORDER)
id_OK = 1
id_CANCEL = 2
id_INSTANCE = 3
outerSizer = wxBoxSizer(wxVERTICAL)
if numLists > 1:
# spin box to choose node instance
box = wxStaticBox(parent=self, id=-1, label="Node Instance")
innerSizer = wxStaticBoxSizer(box, wxHORIZONTAL)
outerSizer.Add(innerSizer, 0, wxGROW|wxALL, 4)
label = wxStaticText(parent=self, id=-1, label="Instance:")
innerSizer.Add(label, flag=wxALIGN_CENTRE_VERTICAL|wxALL, border=2)
self.instanceCtrl = wxSpinCtrl(parent=self, id=id_INSTANCE,
size=wxSize(50,25),
min=1, max=numLists, value="1")
EVT_SPINCTRL(self.instanceCtrl, id_INSTANCE, self._onInstance)
self.hostLabel = wxStaticText(parent=self, id=-1,
label="Hostname: %s" % hosts[0])
innerSizer.Add(self.instanceCtrl,
flag=wxALIGN_CENTRE_VERTICAL|wxALL, border=2)
innerSizer.Add(self.hostLabel, flag=wxALIGN_CENTRE_VERTICAL|wxALL,
border=6)
# editable list of tile tuples
box = wxStaticBox(parent=self, id=-1, label="Edit Tile List")
innerSizer = wxStaticBoxSizer(box, wxVERTICAL)
outerSizer.Add(innerSizer, 1, wxALL|wxGROW, 4)
self.listBox = wxEditableListBox(parent=self, id=-1,
label="Tiles (x, y, width, height)",
size=(300, 200))
innerSizer.Add(self.listBox, 1, wxGROW|wxALL, 2)
# OK / Cancel buttons
rowSizer = wxGridSizer(rows=1, cols=2, vgap=4, hgap=20)
self.OkButton = wxButton(parent=self, id=id_OK, label="OK")
rowSizer.Add(self.OkButton, 0, wxALIGN_CENTER)
self.CancelButton = wxButton(parent=self, id=id_CANCEL, label="Cancel")
rowSizer.Add(self.CancelButton, 0, wxALIGN_CENTER)
outerSizer.Add(rowSizer, 0, wxGROW|wxALL, 4)
EVT_BUTTON(self.OkButton, id_OK, self._onOK)
EVT_BUTTON(self.CancelButton, id_CANCEL, self._onCancel)
min = outerSizer.GetMinSize()
self.SetSizer(outerSizer)
self.SetAutoLayout(true)
self.SetSizeHints(minW=min[0], minH=min[1])
self.SetSize(min)
self.TileListList = [] # array [numLists] of array of (x, y, w, h)
self.NumLists = numLists
for i in range(numLists):
self.TileListList.append( [] )
self.OldInstance = 1
self.Hosts = hosts
def __LoadWidget(self, i):
"""Load the widget with the ith tile list."""
strings = []
if i < len(self.TileListList):
for tile in self.TileListList[i]:
tileString = "(%d, %d, %d, %d)" % tile
strings.append(tileString)
self.listBox.SetStrings(strings)
def __ReadWidget(self, i):
"""Get the strings from the listBox and update the ith tile list."""
assert i >= 0
assert i < self.NumLists
strings = self.listBox.GetStrings()
tiles = []
for s in strings:
# parse "(x,y,w,h)" to get tuple (x,y,w,h)
# XXX probably need an exception handler
tile = eval(s)
if tile and len(tile) == 4:
tiles.append(tile)
self.TileListList[i] = tiles
def _onInstance(self, event):
"""Called when the instance spin control changes."""
self.__ReadWidget(self.OldInstance - 1)
i = self.instanceCtrl.GetValue()
assert i >= 1
self.__LoadWidget(i - 1)
if i - 1 < len(self.Hosts):
self.hostLabel.SetLabel("Hostname: %s" % self.Hosts[i - 1])
else:
# use last hostname
self.hostLabel.SetLabel("Hostname: %s" % self.Hosts[-1])
self.OldInstance = i
def _onOK(self, event):
"""Called by OK button"""
self.EndModal(wxID_OK)
def _onCancel(self, event):
"""Called by Cancel button"""
self.EndModal(wxID_CANCEL)
def SetTileLists(self, tiles):
"""Specify list of list of tiles (x,y,w,h) to edit."""
self.TileListList = tiles
while len(self.TileListList) < self.NumLists:
self.TileListList.append( [] )
self.__LoadWidget(0)
if self.NumLists > 1:
self.instanceCtrl.SetValue(1)
def GetTileLists(self):
"""Return list of list of tiles (x,y,w,h)."""
if self.NumLists > 1:
i = self.instanceCtrl.GetValue() - 1
else:
i = 0
self.__ReadWidget(i)
return self.TileListList | unknown | codeparrot/codeparrot-clean | ||
import {
createCodeFixAction,
createCombinedCodeActions,
eachDiagnostic,
registerCodeFix,
} from "../_namespaces/ts.codefix.js";
import {
AnyImportOrRequire,
AnyImportOrRequireStatement,
AnyImportSyntax,
arrayFrom,
BindingElement,
CancellationToken,
cast,
changeAnyExtension,
CodeAction,
CodeFixAction,
CodeFixContextBase,
combine,
compareBooleans,
compareNumberOfDirectorySeparators,
compareValues,
Comparison,
CompilerOptions,
createFutureSourceFile,
createModuleSpecifierResolutionHost,
createMultiMap,
createPackageJsonImportFilter,
Debug,
DiagnosticOrDiagnosticAndArguments,
Diagnostics,
DiagnosticWithLocation,
emptyArray,
every,
ExportKind,
ExportMapInfoKey,
factory,
findAncestor,
first,
firstDefined,
flatMap,
flatMapIterator,
forEachExternalModuleToImportFrom,
forEachNameOfDefaultExport,
formatting,
FutureSourceFile,
FutureSymbolExportInfo,
getAllowSyntheticDefaultImports,
getBaseFileName,
getDeclarationOfKind,
getDefaultLikeExportInfo,
getDirectoryPath,
getEmitModuleFormatOfFileWorker,
getEmitModuleKind,
getEmitModuleResolutionKind,
getEmitScriptTarget,
getExportInfoMap,
getImpliedNodeFormatForEmitWorker,
getIsFileExcluded,
getMeaningFromLocation,
getNameForExportedSymbol,
getOutputExtension,
getQuoteFromPreference,
getQuotePreference,
getSourceFileOfNode,
getSymbolId,
getSynthesizedDeepClone,
getTokenAtPosition,
getTokenPosOfNode,
getTypeKeywordOfTypeOnlyImport,
getUniqueSymbolId,
hasJSFileExtension,
hostGetCanonicalFileName,
Identifier,
identity,
ImportClause,
ImportEqualsDeclaration,
importFromModuleSpecifier,
ImportKind,
ImportSpecifier,
insertImports,
InternalSymbolName,
isDefaultImport,
isExternalModuleReference,
isFullSourceFile,
isIdentifier,
isImportable,
isImportClause,
isImportDeclaration,
isImportEqualsDeclaration,
isImportSpecifier,
isIntrinsicJsxName,
isJSDocImportTag,
isJsxClosingElement,
isJsxOpeningFragment,
isJsxOpeningLikeElement,
isJSXTagName,
isNamedImports,
isNamespaceImport,
isRequireVariableStatement,
isSourceFileJS,
isStringLiteral,
isStringLiteralLike,
isTypeOnlyImportDeclaration,
isTypeOnlyImportOrExportDeclaration,
isUMDExportSymbol,
isValidTypeOnlyAliasUseSite,
isVariableDeclarationInitializedToRequire,
jsxModeNeedsExplicitImport,
LanguageServiceHost,
last,
makeImport,
makeStringLiteral,
mapDefined,
memoizeOne,
ModuleKind,
moduleResolutionUsesNodeModules,
moduleSpecifiers,
moduleSymbolToValidIdentifier,
MultiMap,
Mutable,
NamedImports,
NamespaceImport,
Node,
NodeFlags,
nodeIsMissing,
ObjectBindingPattern,
OrganizeImports,
PackageJsonImportFilter,
Path,
pathContainsNodeModules,
pathIsBareSpecifier,
Program,
QuotePreference,
RequireOrImportCall,
RequireVariableStatement,
sameMap,
SemanticMeaning,
shouldUseUriStyleNodeCoreModules,
single,
skipAlias,
some,
SourceFile,
startsWith,
StringLiteral,
stripQuotes,
Symbol,
SymbolExportInfo,
SymbolFlags,
SymbolId,
SyntaxKind,
textChanges,
toPath,
toSorted,
tryCast,
tryGetModuleSpecifierFromDeclaration,
TypeChecker,
TypeOnlyAliasDeclaration,
UserPreferences,
VariableDeclarationInitializedTo,
} from "../_namespaces/ts.js";
/** @internal */
export const importFixName = "import";
const importFixId = "fixMissingImport";
const errorCodes: readonly number[] = [
Diagnostics.Cannot_find_name_0.code,
Diagnostics.Cannot_find_name_0_Did_you_mean_1.code,
Diagnostics.Cannot_find_name_0_Did_you_mean_the_instance_member_this_0.code,
Diagnostics.Cannot_find_name_0_Did_you_mean_the_static_member_1_0.code,
Diagnostics.Cannot_find_namespace_0.code,
Diagnostics._0_refers_to_a_UMD_global_but_the_current_file_is_a_module_Consider_adding_an_import_instead.code,
Diagnostics._0_only_refers_to_a_type_but_is_being_used_as_a_value_here.code,
Diagnostics.No_value_exists_in_scope_for_the_shorthand_property_0_Either_declare_one_or_provide_an_initializer.code,
Diagnostics._0_cannot_be_used_as_a_value_because_it_was_imported_using_import_type.code,
Diagnostics.Cannot_find_name_0_Do_you_need_to_install_type_definitions_for_jQuery_Try_npm_i_save_dev_types_Slashjquery.code,
Diagnostics.Cannot_find_name_0_Do_you_need_to_change_your_target_library_Try_changing_the_lib_compiler_option_to_1_or_later.code,
Diagnostics.Cannot_find_name_0_Do_you_need_to_change_your_target_library_Try_changing_the_lib_compiler_option_to_include_dom.code,
Diagnostics.Cannot_find_name_0_Do_you_need_to_install_type_definitions_for_a_test_runner_Try_npm_i_save_dev_types_Slashjest_or_npm_i_save_dev_types_Slashmocha_and_then_add_jest_or_mocha_to_the_types_field_in_your_tsconfig.code,
Diagnostics.Cannot_find_name_0_Did_you_mean_to_write_this_in_an_async_function.code,
Diagnostics.Cannot_find_name_0_Do_you_need_to_install_type_definitions_for_jQuery_Try_npm_i_save_dev_types_Slashjquery_and_then_add_jquery_to_the_types_field_in_your_tsconfig.code,
Diagnostics.Cannot_find_name_0_Do_you_need_to_install_type_definitions_for_a_test_runner_Try_npm_i_save_dev_types_Slashjest_or_npm_i_save_dev_types_Slashmocha.code,
Diagnostics.Cannot_find_name_0_Do_you_need_to_install_type_definitions_for_node_Try_npm_i_save_dev_types_Slashnode.code,
Diagnostics.Cannot_find_name_0_Do_you_need_to_install_type_definitions_for_node_Try_npm_i_save_dev_types_Slashnode_and_then_add_node_to_the_types_field_in_your_tsconfig.code,
Diagnostics.Cannot_find_namespace_0_Did_you_mean_1.code,
Diagnostics.Cannot_extend_an_interface_0_Did_you_mean_implements.code,
Diagnostics.This_JSX_tag_requires_0_to_be_in_scope_but_it_could_not_be_found.code,
];
// Registers the auto-import code fix for every "cannot find name"-style diagnostic
// listed in `errorCodes` above. Individual actions are produced per fix candidate;
// fix-all funnels every matching diagnostic through a single ImportAdder so that
// additions to the same module specifier are merged into one import statement.
registerCodeFix({
    errorCodes,
    getCodeActions(context) {
        const { errorCode, preferences, sourceFile, span, program } = context;
        const info = getFixInfos(context, errorCode, span.start, /*useAutoImportProvider*/ true);
        if (!info) return undefined;
        // Only mention the symbol name in the action description when it differs
        // from the identifier text at the error location (i.e. an alias is involved).
        return info.map(({ fix, symbolName, errorIdentifierText }) =>
            codeActionForFix(
                context,
                sourceFile,
                symbolName,
                fix,
                /*includeSymbolNameInDescription*/ symbolName !== errorIdentifierText,
                program,
                preferences,
            )
        );
    },
    fixIds: [importFixId],
    getAllCodeActions: context => {
        const { sourceFile, program, preferences, host, cancellationToken } = context;
        const importAdder = createImportAdderWorker(sourceFile, program, /*useAutoImportProvider*/ true, preferences, host, cancellationToken);
        // Accumulate a fix per diagnostic, then write them all at once so imports get combined.
        eachDiagnostic(context, errorCodes, diag => importAdder.addImportFromDiagnostic(diag, context));
        return createCombinedCodeActions(textChanges.ChangeTracker.with(context, importAdder.writeFixes));
    },
});
/**
 * The node kinds that may be the declaration of an alias symbol imported/required from an external module.
 * `ImportClause` is the declaration for a syntactic default import. `VariableDeclaration` is the declaration
 * for a non-destructured `require` call.
 * @internal
 */
export type ImportOrRequireAliasDeclaration = ImportEqualsDeclaration | ImportClause | ImportSpecifier | NamespaceImport | VariableDeclarationInitializedTo<RequireOrImportCall> | BindingElement;
/**
 * Computes multiple import additions to a file and writes them to a ChangeTracker.
 *
 * @internal
 */
export interface ImportAdder {
    /** Returns true if any additions/removals have been recorded and `writeFixes` would produce changes. */
    hasFixes(): boolean;
    /** Records the best import fix (if any) resolving the given "cannot find name"-style diagnostic. */
    addImportFromDiagnostic: (diagnostic: DiagnosticWithLocation, context: CodeFixContextBase) => void;
    /** Records an import of `exportedSymbol`; `referenceImport` supplies a local alias / type-only status to copy. */
    addImportFromExportedSymbol: (exportedSymbol: Symbol, isValidTypeOnlyUseSite?: boolean, referenceImport?: ImportOrRequireAliasDeclaration) => void;
    /** Records an import of `exportName` from `exportingFileName`, even if that file/export does not exist yet. */
    addImportForNonExistentExport: (exportName: string, exportingFileName: string, exportKind: ExportKind, exportedMeanings: SymbolFlags, isImportUsageValidAsTypeOnly: boolean) => void;
    /** Records an import of a whole module, mirroring the shape (default/namespace/named) of `referenceImport`. */
    addImportForModuleSymbol: (symbolAlias: Symbol, isValidTypeOnlyUseSite: boolean, referenceImport: ImportOrRequireAliasDeclaration) => void;
    /** Computes and records the best fix for an identifier that failed to resolve at `symbolToken`. */
    addImportForUnresolvedIdentifier: (context: CodeFixContextBase, symbolToken: Identifier, useAutoImportProvider: boolean) => void;
    /** Records an existing import statement (or part of one) to be copied over exactly as written. */
    addVerbatimImport: (declaration: AnyImportOrRequireStatement | ImportOrRequireAliasDeclaration) => void;
    /** Marks an existing import alias declaration for deletion when fixes are written. */
    removeExistingImport: (declaration: ImportOrRequireAliasDeclaration) => void;
    /** Applies all recorded fixes to `changeTracker`; `oldFileQuotePreference` is used when the target file has no imports to infer quotes from. */
    writeFixes: (changeTracker: textChanges.ChangeTracker, oldFileQuotePreference?: QuotePreference) => void;
}
/**
 * Creates an {@link ImportAdder} for `sourceFile` that resolves symbols against the
 * main program only (the package.json auto-import provider is not consulted).
 * @internal
 */
export function createImportAdder(sourceFile: SourceFile | FutureSourceFile, program: Program, preferences: UserPreferences, host: LanguageServiceHost, cancellationToken?: CancellationToken): ImportAdder {
    return createImportAdderWorker(sourceFile, program, /*useAutoImportProvider*/ false, preferences, host, cancellationToken);
}
/** Accumulated additions targeting a single existing import clause or `require` binding pattern. */
interface AddToExistingState {
    // The existing ESM import clause, or object binding pattern of a `require`, being augmented.
    readonly importClauseOrBindingPattern: ImportClause | ObjectBindingPattern;
    // Default import to add, if any; merged across repeated addImport calls (mutable on purpose).
    defaultImport: Import | undefined;
    // Named imports to add, keyed by local name; `propertyName` is set when the export name differs.
    readonly namedImports: Map<string, { addAsTypeOnly: AddAsTypeOnly; propertyName?: string | undefined; }>;
}
// Shared implementation behind createImportAdder; accumulates fixes in closure state and
// merges/writes them in writeFixes. `useAutoImportProvider` additionally consults the
// package.json auto-import provider program when resolving fixes.
function createImportAdderWorker(sourceFile: SourceFile | FutureSourceFile, program: Program, useAutoImportProvider: boolean, preferences: UserPreferences, host: LanguageServiceHost, cancellationToken: CancellationToken | undefined): ImportAdder {
    const compilerOptions = program.getCompilerOptions();
    // Namespace fixes don't conflict, so just build a list.
    const addToNamespace: FixUseNamespaceImport[] = [];
    const importType: FixAddJsdocTypeImport[] = [];
    // Additions to existing import statements, keyed by the clause/pattern they modify.
    const addToExisting = new Map<ImportClause | ObjectBindingPattern, AddToExistingState>();
    // Existing import aliases to delete.
    const removeExisting = new Set<ImportOrRequireAliasDeclaration>();
    // Imports to clone into the file exactly as written elsewhere.
    const verbatimImports = new Set<AnyImportOrRequireStatement | ImportOrRequireAliasDeclaration>();
    // Key encodes top-level type-only-ness plus module specifier; see getNewImportEntry.
    type NewImportsKey = `${0 | 1}|${string}`;
    /** Use `getNewImportEntry` for access */
    const newImports = new Map<NewImportsKey, Mutable<ImportsCollection & { useRequire: boolean; }>>();
    return { addImportFromDiagnostic, addImportFromExportedSymbol, addImportForModuleSymbol, writeFixes, hasFixes, addImportForUnresolvedIdentifier, addImportForNonExistentExport, removeExistingImport, addVerbatimImport };
    function addVerbatimImport(declaration: AnyImportOrRequireStatement | ImportOrRequireAliasDeclaration) {
        verbatimImports.add(declaration);
    }
/** Computes fix candidates for an unresolved identifier and records the best one, if any. */
function addImportForUnresolvedIdentifier(context: CodeFixContextBase, symbolToken: Identifier, useAutoImportProvider: boolean) {
    const fixInfos = getFixInfosWithoutDiagnostic(context, symbolToken, useAutoImportProvider);
    if (fixInfos?.length) {
        addImport(first(fixInfos));
    }
}
function addImportFromDiagnostic(diagnostic: DiagnosticWithLocation, context: CodeFixContextBase) {
const info = getFixInfos(context, diagnostic.code, diagnostic.start, useAutoImportProvider);
if (!info || !info.length) return;
addImport(first(info));
}
// Records an import of `exportedSymbol`. When `referenceImport` is provided, its local
// alias name and type-only status are carried over onto the computed fix.
function addImportFromExportedSymbol(exportedSymbol: Symbol, isValidTypeOnlyUseSite?: boolean, referenceImport?: ImportOrRequireAliasDeclaration) {
    const moduleSymbol = Debug.checkDefined(exportedSymbol.parent, "Expected exported symbol to have module symbol as parent");
    const symbolName = getNameForExportedSymbol(exportedSymbol, getEmitScriptTarget(compilerOptions));
    const checker = program.getTypeChecker();
    const symbol = checker.getMergedSymbol(skipAlias(exportedSymbol, checker));
    const exportInfo = getAllExportInfoForSymbol(sourceFile, symbol, symbolName, moduleSymbol, /*preferCapitalized*/ false, program, host, preferences, cancellationToken);
    if (!exportInfo) {
        // If no exportInfo is found, this means export could not be resolved when we have filtered for autoImportFileExcludePatterns,
        // so we should not generate an import.
        Debug.assert(preferences.autoImportFileExcludePatterns?.length);
        return;
    }
    const useRequire = shouldUseRequire(sourceFile, program);
    let fix = getImportFixForSymbol(sourceFile, exportInfo, program, /*position*/ undefined, !!isValidTypeOnlyUseSite, useRequire, host, preferences);
    if (fix) {
        // Prefer the (possibly aliased) local name used at the reference import over the export name.
        const localName = tryCast(referenceImport?.name, isIdentifier)?.text ?? symbolName;
        let addAsTypeOnly: AddAsTypeOnly | undefined;
        let propertyName: string | undefined;
        if (
            referenceImport
            && isTypeOnlyImportDeclaration(referenceImport)
            && (fix.kind === ImportFixKind.AddNew || fix.kind === ImportFixKind.AddToExisting)
            && fix.addAsTypeOnly === AddAsTypeOnly.Allowed
        ) {
            // Copy the type-only status from the reference import
            addAsTypeOnly = AddAsTypeOnly.Required;
        }
        if (exportedSymbol.name !== localName) {
            // checks if the symbol was aliased at the referenced import
            propertyName = exportedSymbol.name;
        }
        // Overlay the alias/type-only adjustments onto the computed fix without
        // clobbering fields when no adjustment applies.
        fix = {
            ...fix,
            ...(addAsTypeOnly === undefined ? {} : { addAsTypeOnly }),
            ...(propertyName === undefined ? {} : { propertyName }),
        };
        addImport({ fix, symbolName: localName ?? symbolName, errorIdentifierText: undefined });
    }
}
// Records an import of a whole module; the local name and import shape
// (default / namespace / named) are copied from `referenceImport`.
function addImportForModuleSymbol(symbolAlias: Symbol, isValidTypeOnlyUseSite: boolean, referenceImport: ImportOrRequireAliasDeclaration) {
    // Adds import for module, import alias will be symbolAlias.name
    const checker = program.getTypeChecker();
    const moduleSymbol = checker.getAliasedSymbol(symbolAlias);
    Debug.assert(moduleSymbol.flags & SymbolFlags.Module, "Expected symbol to be a module");
    const moduleSpecifierResolutionHost = createModuleSpecifierResolutionHost(program, host);
    const moduleSpecifierResult = moduleSpecifiers.getModuleSpecifiersWithCacheInfo(moduleSymbol, checker, compilerOptions, sourceFile, moduleSpecifierResolutionHost, preferences, /*options*/ undefined, /*forAutoImport*/ true);
    const useRequire = shouldUseRequire(sourceFile, program);
    // Copy the type-only status from the reference import
    let addAsTypeOnly = getAddAsTypeOnly(
        isValidTypeOnlyUseSite,
        /*isForNewImportDeclaration*/ true,
        /*symbol*/ undefined,
        symbolAlias.flags,
        program.getTypeChecker(),
        compilerOptions,
    );
    // NOTE(review): this collapses every non-`Allowed` result (including `Required`/`NotAllowed`)
    // back to `Allowed` unless the reference import is type-only — presumably intentional since
    // the reference import's status wins here, but worth confirming.
    addAsTypeOnly = addAsTypeOnly === AddAsTypeOnly.Allowed && isTypeOnlyImportDeclaration(referenceImport) ? AddAsTypeOnly.Required : AddAsTypeOnly.Allowed;
    // Copy the kind of import
    const importKind = isImportDeclaration(referenceImport) ?
        isDefaultImport(referenceImport) ? ImportKind.Default : ImportKind.Namespace :
        isImportSpecifier(referenceImport) ? ImportKind.Named :
        isImportClause(referenceImport) && !!referenceImport.name ? ImportKind.Default : ImportKind.Namespace;
    const exportInfo = [{
        symbol: symbolAlias,
        moduleSymbol,
        moduleFileName: moduleSymbol.declarations?.[0]?.getSourceFile()?.fileName,
        exportKind: ExportKind.Module,
        targetFlags: symbolAlias.flags,
        isFromPackageJson: false,
    }];
    const existingFix = getImportFixForSymbol(
        sourceFile,
        exportInfo,
        program,
        /*position*/ undefined,
        !!isValidTypeOnlyUseSite,
        useRequire,
        host,
        preferences,
    );
    let fix: FixAddNewImport | ImportFixWithModuleSpecifier;
    // Reuse an existing-import fix when its shape is compatible with the desired import kind;
    // otherwise synthesize a brand-new import declaration.
    if (existingFix && importKind !== ImportKind.Namespace && existingFix.kind !== ImportFixKind.UseNamespace && existingFix.kind !== ImportFixKind.JsdocTypeImport) {
        fix = {
            ...existingFix,
            addAsTypeOnly,
            importKind,
        };
    }
    else {
        fix = {
            kind: ImportFixKind.AddNew,
            moduleSpecifierKind: existingFix !== undefined ? existingFix.moduleSpecifierKind : moduleSpecifierResult.kind,
            moduleSpecifier: existingFix !== undefined ? existingFix.moduleSpecifier : first(moduleSpecifierResult.moduleSpecifiers),
            importKind,
            addAsTypeOnly,
            useRequire,
        };
    }
    addImport({ fix, symbolName: symbolAlias.name, errorIdentifierText: undefined });
}
// Records an import of `exportName` from `exportingFileName`, supporting targets that
// do not exist yet (or export nothing) by falling back to a synthesized "future" file.
function addImportForNonExistentExport(exportName: string, exportingFileName: string, exportKind: ExportKind, exportedMeanings: SymbolFlags, isImportUsageValidAsTypeOnly: boolean) {
    const exportingSourceFile = program.getSourceFile(exportingFileName);
    const useRequire = shouldUseRequire(sourceFile, program);
    if (exportingSourceFile && exportingSourceFile.symbol) {
        // Target file exists and is a module: compute fixes normally and take the best one.
        const { fixes } = getImportFixes(
            [{
                exportKind,
                isFromPackageJson: false,
                moduleFileName: exportingFileName,
                moduleSymbol: exportingSourceFile.symbol,
                targetFlags: exportedMeanings,
            }],
            /*usagePosition*/ undefined,
            isImportUsageValidAsTypeOnly,
            useRequire,
            program,
            sourceFile,
            host,
            preferences,
        );
        if (fixes.length) {
            addImport({ fix: fixes[0], symbolName: exportName, errorIdentifierText: exportName });
        }
    }
    else {
        // File does not exist yet or has no exports, so all imports added will be "new"
        const futureExportingSourceFile = createFutureSourceFile(exportingFileName, ModuleKind.ESNext, program, host);
        // A nonexistent file can only be referenced via a relative specifier.
        const moduleSpecifier = moduleSpecifiers.getLocalModuleSpecifierBetweenFileNames(
            sourceFile,
            exportingFileName,
            compilerOptions,
            createModuleSpecifierResolutionHost(program, host),
            preferences,
        );
        const importKind = getImportKind(futureExportingSourceFile, exportKind, program);
        const addAsTypeOnly = getAddAsTypeOnly(
            isImportUsageValidAsTypeOnly,
            /*isForNewImportDeclaration*/ true,
            /*symbol*/ undefined,
            exportedMeanings,
            program.getTypeChecker(),
            compilerOptions,
        );
        const fix: FixAddNewImport = {
            kind: ImportFixKind.AddNew,
            moduleSpecifierKind: "relative",
            moduleSpecifier,
            importKind,
            addAsTypeOnly,
            useRequire,
        };
        addImport({ fix, symbolName: exportName, errorIdentifierText: exportName });
    }
}
/** Marks an existing import alias declaration for deletion when fixes are written. */
function removeExistingImport(declaration: ImportOrRequireAliasDeclaration) {
    switch (declaration.kind) {
        case SyntaxKind.ImportClause:
            // A removable clause must carry a default-import name; bare clauses are not expected here.
            Debug.assertIsDefined(declaration.name, "ImportClause should have a name if it's being removed");
            break;
    }
    removeExisting.add(declaration);
}
/**
 * Records a single computed fix into the appropriate accumulator (namespace
 * qualifications, JSDoc type imports, additions to existing imports, or brand-new
 * imports) so `writeFixes` can later merge everything into minimal text changes.
 * Fix: corrected "shoudl" -> "should" in the two namespace-like-import assertion messages.
 */
function addImport(info: FixInfo) {
    const { fix, symbolName } = info;
    switch (fix.kind) {
        case ImportFixKind.UseNamespace:
            addToNamespace.push(fix);
            break;
        case ImportFixKind.JsdocTypeImport:
            importType.push(fix);
            break;
        case ImportFixKind.AddToExisting: {
            const { importClauseOrBindingPattern, importKind, addAsTypeOnly, propertyName } = fix;
            let entry = addToExisting.get(importClauseOrBindingPattern);
            if (!entry) {
                addToExisting.set(importClauseOrBindingPattern, entry = { importClauseOrBindingPattern, defaultImport: undefined, namedImports: new Map() });
            }
            if (importKind === ImportKind.Named) {
                // Merge with any previously recorded type-only-ness for the same name.
                const prevTypeOnly = entry?.namedImports.get(symbolName)?.addAsTypeOnly;
                entry.namedImports.set(symbolName, { addAsTypeOnly: reduceAddAsTypeOnlyValues(prevTypeOnly, addAsTypeOnly), propertyName });
            }
            else {
                Debug.assert(entry.defaultImport === undefined || entry.defaultImport.name === symbolName, "(Add to Existing) Default import should be missing or match symbolName");
                entry.defaultImport = {
                    name: symbolName,
                    addAsTypeOnly: reduceAddAsTypeOnlyValues(entry.defaultImport?.addAsTypeOnly, addAsTypeOnly),
                };
            }
            break;
        }
        case ImportFixKind.AddNew: {
            const { moduleSpecifier, importKind, useRequire, addAsTypeOnly, propertyName } = fix;
            const entry = getNewImportEntry(moduleSpecifier, importKind, useRequire, addAsTypeOnly);
            Debug.assert(entry.useRequire === useRequire, "(Add new) Tried to add an `import` and a `require` for the same module");
            switch (importKind) {
                case ImportKind.Default:
                    Debug.assert(entry.defaultImport === undefined || entry.defaultImport.name === symbolName, "(Add new) Default import should be missing or match symbolName");
                    entry.defaultImport = { name: symbolName, addAsTypeOnly: reduceAddAsTypeOnlyValues(entry.defaultImport?.addAsTypeOnly, addAsTypeOnly) };
                    break;
                case ImportKind.Named:
                    const prevValue = (entry.namedImports ||= new Map()).get(symbolName);
                    entry.namedImports.set(symbolName, [reduceAddAsTypeOnlyValues(prevValue, addAsTypeOnly), propertyName]);
                    break;
                case ImportKind.CommonJS:
                    if (compilerOptions.verbatimModuleSyntax) {
                        // Under verbatimModuleSyntax a CJS-style access must become a named import.
                        const prevValue = (entry.namedImports ||= new Map()).get(symbolName);
                        entry.namedImports.set(symbolName, [reduceAddAsTypeOnlyValues(prevValue, addAsTypeOnly), propertyName]);
                    }
                    else {
                        Debug.assert(entry.namespaceLikeImport === undefined || entry.namespaceLikeImport.name === symbolName, "Namespacelike import should be missing or match symbolName");
                        entry.namespaceLikeImport = { importKind, name: symbolName, addAsTypeOnly };
                    }
                    break;
                case ImportKind.Namespace:
                    Debug.assert(entry.namespaceLikeImport === undefined || entry.namespaceLikeImport.name === symbolName, "Namespacelike import should be missing or match symbolName");
                    entry.namespaceLikeImport = { importKind, name: symbolName, addAsTypeOnly };
                    break;
            }
            break;
        }
        case ImportFixKind.PromoteTypeOnly:
            // Excluding from fix-all
            break;
        default:
            Debug.assertNever(fix, `fix wasn't never - got kind ${(fix as ImportFix).kind}`);
    }
    /** Merges two type-only requirements recorded for the same import; see comment for rationale. */
    function reduceAddAsTypeOnlyValues(prevValue: AddAsTypeOnly | undefined, newValue: AddAsTypeOnly): AddAsTypeOnly {
        // `NotAllowed` overrides `Required` because one addition of a new import might be required to be type-only
        // because of `--importsNotUsedAsValues=error`, but if a second addition of the same import is `NotAllowed`
        // to be type-only, the reason the first one was `Required` - the unused runtime dependency - is now moot.
        // Alternatively, if one addition is `Required` because it has no value meaning under `--preserveValueImports`
        // and `--isolatedModules`, it should be impossible for another addition to be `NotAllowed` since that would
        // mean a type is being referenced in a value location.
        return Math.max(prevValue ?? 0, newValue);
    }
    /** Finds or creates the pending new-import entry for `moduleSpecifier`, keyed by top-level type-only-ness. */
    function getNewImportEntry(moduleSpecifier: string, importKind: ImportKind, useRequire: boolean, addAsTypeOnly: AddAsTypeOnly): Mutable<ImportsCollection & { useRequire: boolean; }> {
        // A default import that requires type-only makes the whole import type-only.
        // (We could add `default` as a named import, but that style seems undesirable.)
        // Under `--preserveValueImports` and `--importsNotUsedAsValues=error`, if a
        // module default-exports a type but named-exports some values (weird), you would
        // have to use a type-only default import and non-type-only named imports. These
        // require two separate import declarations, so we build this into the map key.
        const typeOnlyKey = newImportsKey(moduleSpecifier, /*topLevelTypeOnly*/ true);
        const nonTypeOnlyKey = newImportsKey(moduleSpecifier, /*topLevelTypeOnly*/ false);
        const typeOnlyEntry = newImports.get(typeOnlyKey);
        const nonTypeOnlyEntry = newImports.get(nonTypeOnlyKey);
        const newEntry: ImportsCollection & { useRequire: boolean; } = {
            defaultImport: undefined,
            namedImports: undefined,
            namespaceLikeImport: undefined,
            useRequire,
        };
        if (importKind === ImportKind.Default && addAsTypeOnly === AddAsTypeOnly.Required) {
            if (typeOnlyEntry) return typeOnlyEntry;
            newImports.set(typeOnlyKey, newEntry);
            return newEntry;
        }
        if (addAsTypeOnly === AddAsTypeOnly.Allowed && (typeOnlyEntry || nonTypeOnlyEntry)) {
            return (typeOnlyEntry || nonTypeOnlyEntry)!;
        }
        if (nonTypeOnlyEntry) {
            return nonTypeOnlyEntry;
        }
        newImports.set(nonTypeOnlyKey, newEntry);
        return newEntry;
    }
    /** Builds the `${0 | 1}|${moduleSpecifier}` key used by `newImports`. */
    function newImportsKey(moduleSpecifier: string, topLevelTypeOnly: boolean): NewImportsKey {
        return `${topLevelTypeOnly ? 1 : 0}|${moduleSpecifier}`;
    }
}
// Applies every accumulated fix to `changeTracker`: deletes/trims imports marked for
// removal, augments existing import statements, then inserts newly synthesized imports.
function writeFixes(changeTracker: textChanges.ChangeTracker, oldFileQuotePreference?: QuotePreference) {
    let quotePreference: QuotePreference;
    if (sourceFile.imports !== undefined && sourceFile.imports.length === 0 && oldFileQuotePreference !== undefined) {
        // If the target file (including future files) has no imports, we must use the same quote preference as the file we are importing from.
        quotePreference = oldFileQuotePreference;
    }
    else {
        quotePreference = getQuotePreference(sourceFile, preferences);
    }
    for (const fix of addToNamespace) {
        // Any modifications to existing syntax imply SourceFile already exists
        addNamespaceQualifier(changeTracker, sourceFile as SourceFile, fix);
    }
    for (const fix of importType) {
        // Any modifications to existing syntax imply SourceFile already exists
        addImportType(changeTracker, sourceFile as SourceFile, fix, quotePreference);
    }
    // Specifiers whose removal must be coordinated with simultaneous insertions into the
    // same import; handled inside `doAddExistingFix` rather than deleted directly.
    let importSpecifiersToRemoveWhileAdding: Set<ImportSpecifier | BindingElement> | undefined;
    if (removeExisting.size) {
        Debug.assert(isFullSourceFile(sourceFile), "Cannot remove imports from a future source file");
        const importDeclarationsWithRemovals = new Set(mapDefined([...removeExisting], d => findAncestor(d, isImportDeclaration)!));
        const variableDeclarationsWithRemovals = new Set(mapDefined([...removeExisting], d => findAncestor(d, isVariableDeclarationInitializedToRequire)!));
        // Import declarations that will have nothing left after removals (and gain nothing) —
        // delete the whole statement.
        const emptyImportDeclarations = [...importDeclarationsWithRemovals].filter(d =>
            // nothing added to the import declaration
            !addToExisting.has(d.importClause!) &&
            // no default, or default is being removed
            (!d.importClause?.name || removeExisting.has(d.importClause)) &&
            // no namespace import, or namespace import is being removed
            (!tryCast(d.importClause?.namedBindings, isNamespaceImport) || removeExisting.has(d.importClause!.namedBindings as NamespaceImport)) &&
            // no named imports, or all named imports are being removed
            (!tryCast(d.importClause?.namedBindings, isNamedImports) || every((d.importClause!.namedBindings as NamedImports).elements, e => removeExisting.has(e)))
        );
        // Same idea for `const { ... } = require(...)` statements.
        const emptyVariableDeclarations = [...variableDeclarationsWithRemovals].filter(d =>
            // no binding elements being added to the variable declaration
            (d.name.kind !== SyntaxKind.ObjectBindingPattern || !addToExisting.has(d.name)) &&
            // no binding elements, or all binding elements are being removed
            (d.name.kind !== SyntaxKind.ObjectBindingPattern || every(d.name.elements, e => removeExisting.has(e)))
        );
        // Imports that survive but lose their entire named-bindings portion — rewrite the clause
        // without namedBindings instead of deleting each specifier.
        const namedBindingsToDelete = [...importDeclarationsWithRemovals].filter(d =>
            // has named bindings
            d.importClause?.namedBindings &&
            // is not being fully removed
            emptyImportDeclarations.indexOf(d) === -1 &&
            // is not gaining named imports
            !addToExisting.get(d.importClause)?.namedImports &&
            // all named imports are being removed
            (d.importClause.namedBindings.kind === SyntaxKind.NamespaceImport || every(d.importClause.namedBindings.elements, e => removeExisting.has(e)))
        );
        for (const declaration of [...emptyImportDeclarations, ...emptyVariableDeclarations]) {
            changeTracker.delete(sourceFile, declaration);
        }
        for (const declaration of namedBindingsToDelete) {
            changeTracker.replaceNode(
                sourceFile,
                declaration.importClause!,
                factory.updateImportClause(
                    declaration.importClause!,
                    declaration.importClause!.phaseModifier,
                    declaration.importClause!.name,
                    /*namedBindings*/ undefined,
                ),
            );
        }
        // Handle the remaining removals individually, skipping anything already covered by a
        // whole-statement deletion or a namedBindings rewrite above.
        for (const declaration of removeExisting) {
            const importDeclaration = findAncestor(declaration, isImportDeclaration);
            if (
                importDeclaration &&
                emptyImportDeclarations.indexOf(importDeclaration) === -1 &&
                namedBindingsToDelete.indexOf(importDeclaration) === -1
            ) {
                if (declaration.kind === SyntaxKind.ImportClause) {
                    changeTracker.delete(sourceFile, declaration.name!);
                }
                else {
                    Debug.assert(declaration.kind === SyntaxKind.ImportSpecifier, "NamespaceImport should have been handled earlier");
                    if (addToExisting.get(importDeclaration.importClause!)?.namedImports) {
                        // Handle combined inserts/deletes in `doAddExistingFix`
                        (importSpecifiersToRemoveWhileAdding ??= new Set()).add(declaration);
                    }
                    else {
                        changeTracker.delete(sourceFile, declaration);
                    }
                }
            }
            else if (declaration.kind === SyntaxKind.BindingElement) {
                if (addToExisting.get(declaration.parent as ObjectBindingPattern)?.namedImports) {
                    // Handle combined inserts/deletes in `doAddExistingFix`
                    (importSpecifiersToRemoveWhileAdding ??= new Set()).add(declaration);
                }
                else {
                    changeTracker.delete(sourceFile, declaration);
                }
            }
            else if (declaration.kind === SyntaxKind.ImportEqualsDeclaration) {
                changeTracker.delete(sourceFile, declaration);
            }
        }
    }
    // Augment surviving existing imports with the recorded default/named additions.
    addToExisting.forEach(({ importClauseOrBindingPattern, defaultImport, namedImports }) => {
        doAddExistingFix(
            changeTracker,
            sourceFile as SourceFile,
            importClauseOrBindingPattern,
            defaultImport,
            arrayFrom(namedImports.entries(), ([name, { addAsTypeOnly, propertyName }]) => ({ addAsTypeOnly, propertyName, name })),
            importSpecifiersToRemoveWhileAdding,
            preferences,
        );
    });
    // Synthesize brand-new import/require statements, one batch per module-specifier key.
    let newDeclarations: AnyImportOrRequireStatement | readonly AnyImportOrRequireStatement[] | undefined;
    newImports.forEach(({ useRequire, defaultImport, namedImports, namespaceLikeImport }, key) => {
        const moduleSpecifier = key.slice(2); // From `${0 | 1}|${moduleSpecifier}` format
        const getDeclarations = useRequire ? getNewRequires : getNewImports;
        const declarations = getDeclarations(
            moduleSpecifier,
            quotePreference,
            defaultImport,
            namedImports && arrayFrom(namedImports.entries(), ([name, [addAsTypeOnly, propertyName]]) => ({ addAsTypeOnly, propertyName, name })),
            namespaceLikeImport,
            compilerOptions,
            preferences,
        );
        newDeclarations = combine(newDeclarations, declarations);
    });
    newDeclarations = combine(newDeclarations, getCombinedVerbatimImports());
    if (newDeclarations) {
        insertImports(changeTracker, sourceFile, newDeclarations, /*blankLineBetween*/ true, preferences);
    }
}
// Clones the recorded verbatim imports into fresh statements. Whole statements marked
// verbatim are deep-cloned as-is; otherwise a partial clone is built containing only
// the clause parts / specifiers / binding elements that were individually marked.
function getCombinedVerbatimImports(): AnyImportOrRequireStatement[] | undefined {
    if (!verbatimImports.size) return undefined;
    const importDeclarations = new Set(mapDefined([...verbatimImports], d => findAncestor(d, isImportDeclaration)));
    const requireStatements = new Set(mapDefined([...verbatimImports], d => findAncestor(d, isRequireVariableStatement)));
    return [
        // `import x = require(...)` statements are always cloned whole.
        ...mapDefined([...verbatimImports], d =>
            d.kind === SyntaxKind.ImportEqualsDeclaration
                ? getSynthesizedDeepClone(d, /*includeTrivia*/ true)
                : undefined),
        ...[...importDeclarations].map(d => {
            if (verbatimImports.has(d)) {
                return getSynthesizedDeepClone(d, /*includeTrivia*/ true);
            }
            // Rebuild the clause keeping only the marked default name / namespace / specifiers.
            return getSynthesizedDeepClone(
                factory.updateImportDeclaration(
                    d,
                    d.modifiers,
                    d.importClause && factory.updateImportClause(
                        d.importClause,
                        d.importClause.phaseModifier,
                        verbatimImports.has(d.importClause) ? d.importClause.name : undefined,
                        verbatimImports.has(d.importClause.namedBindings as NamespaceImport)
                            ? d.importClause.namedBindings as NamespaceImport :
                            tryCast(d.importClause.namedBindings, isNamedImports)?.elements.some(e => verbatimImports.has(e))
                            ? factory.updateNamedImports(
                                d.importClause.namedBindings as NamedImports,
                                (d.importClause.namedBindings as NamedImports).elements.filter(e => verbatimImports.has(e)),
                            )
                            : undefined,
                    ),
                    d.moduleSpecifier,
                    d.attributes,
                ),
                /*includeTrivia*/ true,
            );
        }),
        ...[...requireStatements].map(s => {
            if (verbatimImports.has(s)) {
                return getSynthesizedDeepClone(s, /*includeTrivia*/ true);
            }
            // Rebuild the require statement keeping only the marked declarations / binding elements.
            return getSynthesizedDeepClone(
                factory.updateVariableStatement(
                    s,
                    s.modifiers,
                    factory.updateVariableDeclarationList(
                        s.declarationList,
                        mapDefined(s.declarationList.declarations, d => {
                            if (verbatimImports.has(d)) {
                                return d;
                            }
                            return factory.updateVariableDeclaration(
                                d,
                                d.name.kind === SyntaxKind.ObjectBindingPattern
                                    ? factory.updateObjectBindingPattern(
                                        d.name,
                                        d.name.elements.filter(e => verbatimImports.has(e)),
                                    ) : d.name,
                                d.exclamationToken,
                                d.type,
                                d.initializer,
                            );
                        }),
                    ),
                ),
                /*includeTrivia*/ true,
            ) as RequireVariableStatement;
        }),
    ];
}
/** Whether any import additions, removals, or verbatim copies have been recorded. */
function hasFixes() {
    const pendingCounts = [
        addToNamespace.length,
        importType.length,
        addToExisting.size,
        newImports.size,
        verbatimImports.size,
        removeExisting.size,
    ];
    return pendingCounts.some(count => count > 0);
}
}
/**
 * Computes module specifiers for multiple import additions to a file.
 *
 * @internal
 */
export interface ImportSpecifierResolver {
    /**
     * Picks the best of `exportInfo` for importing at `position` and returns its module
     * specifier; `fromCacheOnly` skips specifier computation that would miss the cache.
     */
    getModuleSpecifierForBestExportInfo(
        exportInfo: readonly SymbolExportInfo[],
        position: number,
        isValidTypeOnlyUseSite: boolean,
        fromCacheOnly?: boolean,
    ): { exportInfo?: SymbolExportInfo | FutureSymbolExportInfo; moduleSpecifier: string; computedWithoutCacheCount: number; } | undefined;
}
/**
 * Creates an {@link ImportSpecifierResolver} for `importingFile`, precomputing the
 * package.json dependency filter and the map of the file's existing imports so that
 * repeated best-fix queries (e.g. during completions) are cheap.
 * @internal
 */
export function createImportSpecifierResolver(importingFile: SourceFile, program: Program, host: LanguageServiceHost, preferences: UserPreferences): ImportSpecifierResolver {
    const packageJsonImportFilter = createPackageJsonImportFilter(importingFile, preferences, host);
    const importMap = createExistingImportMap(importingFile, program);
    return { getModuleSpecifierForBestExportInfo };
    function getModuleSpecifierForBestExportInfo(
        exportInfo: readonly SymbolExportInfo[],
        position: number,
        isValidTypeOnlyUseSite: boolean,
        fromCacheOnly?: boolean,
    ): { exportInfo?: SymbolExportInfo | FutureSymbolExportInfo; moduleSpecifier: string; computedWithoutCacheCount: number; } | undefined {
        const { fixes, computedWithoutCacheCount } = getImportFixes(
            exportInfo,
            position,
            isValidTypeOnlyUseSite,
            /*useRequire*/ false,
            program,
            importingFile,
            host,
            preferences,
            importMap,
            fromCacheOnly,
        );
        const result = getBestFix(fixes, importingFile, program, packageJsonImportFilter, host, preferences);
        // Surface how many specifiers had to be computed without the cache for telemetry/limits.
        return result && { ...result, computedWithoutCacheCount };
    }
}
// Sorted with the preferred fix coming first.
const enum ImportFixKind {
    UseNamespace,
    JsdocTypeImport,
    AddToExisting,
    AddNew,
    PromoteTypeOnly,
}
// These should not be combined as bitflags, but are given powers of 2 values to
// easily detect conflicts between `NotAllowed` and `Required` by giving them a unique sum.
// They're also ordered in terms of increasing priority for a fix-all scenario (see
// `reduceAddAsTypeOnlyValues`).
const enum AddAsTypeOnly {
    Allowed = 1 << 0,
    Required = 1 << 1,
    NotAllowed = 1 << 2,
}
type ImportFix = FixUseNamespaceImport | FixAddJsdocTypeImport | FixAddToExistingImport | FixAddNewImport | FixPromoteTypeOnlyImport;
// Every fix kind except PromoteTypeOnly carries a module specifier.
type ImportFixWithModuleSpecifier = FixUseNamespaceImport | FixAddJsdocTypeImport | FixAddToExistingImport | FixAddNewImport;
// Properties may be undefined if fix is derived from an existing import
interface ImportFixBase {
    readonly isReExport?: boolean;
    readonly exportInfo?: SymbolExportInfo | FutureSymbolExportInfo;
    readonly moduleSpecifierKind: moduleSpecifiers.ModuleSpecifierResult["kind"];
    readonly moduleSpecifier: string;
}
// Describes qualifying a usage with an existing namespace import (`ns.foo`).
interface Qualification {
    readonly usagePosition: number;
    readonly namespacePrefix: string;
}
interface FixUseNamespaceImport extends ImportFixBase, Qualification {
    readonly kind: ImportFixKind.UseNamespace;
}
// Adds a JSDoc `import(...)` type reference at the usage position.
interface FixAddJsdocTypeImport extends ImportFixBase {
    readonly kind: ImportFixKind.JsdocTypeImport;
    readonly usagePosition: number;
    readonly isReExport: boolean;
    readonly exportInfo: SymbolExportInfo | FutureSymbolExportInfo;
}
// Adds a default/named import to an existing import clause or require binding pattern.
interface FixAddToExistingImport extends ImportFixBase {
    readonly kind: ImportFixKind.AddToExisting;
    readonly importClauseOrBindingPattern: ImportClause | ObjectBindingPattern;
    readonly importKind: ImportKind.Default | ImportKind.Named;
    readonly addAsTypeOnly: AddAsTypeOnly;
    // Export name when the local name is an alias of it.
    readonly propertyName?: string;
}
// Synthesizes a brand-new import or require statement.
interface FixAddNewImport extends ImportFixBase {
    readonly kind: ImportFixKind.AddNew;
    readonly importKind: ImportKind;
    readonly addAsTypeOnly: AddAsTypeOnly;
    readonly propertyName?: string;
    readonly useRequire: boolean;
    readonly qualification?: Qualification;
}
// Converts an existing type-only import to a regular import so it can be used as a value.
interface FixPromoteTypeOnlyImport {
    readonly kind: ImportFixKind.PromoteTypeOnly;
    readonly typeOnlyAliasDeclaration: TypeOnlyAliasDeclaration;
}
/** Information needed to augment an existing import declaration. */
interface FixAddToExistingImportInfo {
    readonly declaration: AnyImportOrRequire;
    readonly importKind: ImportKind;
    readonly targetFlags: SymbolFlags;
    readonly symbol?: Symbol;
}
/**
 * Computes the code action (and the module specifier to display) for an auto-import
 * completion entry. `exportMapKey` is round-tripped through completion `data` by modern
 * editors to look up the cached export info; older editors fall back to re-resolving
 * from the symbol pair.
 * @internal
 */
export function getImportCompletionAction(
    targetSymbol: Symbol,
    moduleSymbol: Symbol,
    exportMapKey: ExportMapInfoKey | undefined,
    sourceFile: SourceFile,
    symbolName: string,
    isJsxTagName: boolean,
    host: LanguageServiceHost,
    program: Program,
    formatContext: formatting.FormatContext,
    position: number,
    preferences: UserPreferences,
    cancellationToken: CancellationToken,
): { readonly moduleSpecifier: string; readonly codeAction: CodeAction; } {
    let exportInfos;
    if (exportMapKey) {
        // The new way: `exportMapKey` should be in the `data` of each auto-import completion entry and
        // sent back when asking for details.
        exportInfos = getExportInfoMap(sourceFile, host, program, preferences, cancellationToken).get(sourceFile.path, exportMapKey);
        Debug.assertIsDefined(exportInfos, "Some exportInfo should match the specified exportMapKey");
    }
    else {
        // The old way, kept alive for super old editors that don't give us `data` back.
        exportInfos = pathIsBareSpecifier(stripQuotes(moduleSymbol.name))
            ? [getSingleExportInfoForSymbol(targetSymbol, symbolName, moduleSymbol, program, host)]
            : getAllExportInfoForSymbol(sourceFile, targetSymbol, symbolName, moduleSymbol, isJsxTagName, program, host, preferences, cancellationToken);
        Debug.assertIsDefined(exportInfos, "Some exportInfo should match the specified symbol / moduleSymbol");
    }
    const useRequire = shouldUseRequire(sourceFile, program);
    const isValidTypeOnlyUseSite = isValidTypeOnlyAliasUseSite(getTokenAtPosition(sourceFile, position));
    const fix = Debug.checkDefined(getImportFixForSymbol(sourceFile, exportInfos, program, position, isValidTypeOnlyUseSite, useRequire, host, preferences));
    return {
        moduleSpecifier: fix.moduleSpecifier,
        codeAction: codeFixActionToCodeAction(codeActionForFix(
            { host, formatContext, preferences },
            sourceFile,
            symbolName,
            fix,
            /*includeSymbolNameInDescription*/ false,
            program,
            preferences,
        )),
    };
}
/**
 * Computes the code action that promotes a type-only import of the symbol at
 * `symbolToken` to a regular import, or returns undefined if no promotion fix applies.
 * @internal
 */
export function getPromoteTypeOnlyCompletionAction(sourceFile: SourceFile, symbolToken: Identifier, program: Program, host: LanguageServiceHost, formatContext: formatting.FormatContext, preferences: UserPreferences): CodeAction | undefined {
    const symbolName = single(getSymbolNamesToImport(sourceFile, program.getTypeChecker(), symbolToken, program.getCompilerOptions()));
    const fix = getTypeOnlyPromotionFix(sourceFile, symbolToken, symbolName, program);
    if (!fix) {
        return undefined;
    }
    return codeFixActionToCodeAction(codeActionForFix(
        { host, formatContext, preferences },
        sourceFile,
        symbolName,
        fix,
        // Mention the symbol name only when it differs from the identifier text (alias case).
        /*includeSymbolNameInDescription*/ symbolName !== symbolToken.text,
        program,
        preferences,
    ));
}
/** Computes every candidate fix for the given export infos and returns the best one, respecting package.json dependencies. */
function getImportFixForSymbol(sourceFile: SourceFile | FutureSourceFile, exportInfos: readonly SymbolExportInfo[], program: Program, position: number | undefined, isValidTypeOnlyUseSite: boolean, useRequire: boolean, host: LanguageServiceHost, preferences: UserPreferences) {
    const packageJsonImportFilter = createPackageJsonImportFilter(sourceFile, preferences, host);
    const { fixes } = getImportFixes(exportInfos, position, isValidTypeOnlyUseSite, useRequire, program, sourceFile, host, preferences);
    return getBestFix(fixes, sourceFile, program, packageJsonImportFilter, host, preferences);
}
/**
 * Strips code-fix-specific fields (e.g. `fixName`, `fixId`), keeping only the plain
 * `CodeAction` shape of description, changes, and commands.
 */
function codeFixActionToCodeAction(action: CodeFixAction): CodeAction {
    const { description, changes, commands } = action;
    return { description, changes, commands };
}
/**
 * Finds every way `symbol` is exported (original export plus re-exports) via the
 * program's export-info map, restricted to entries whose export name matches
 * `symbolName`. Returns `undefined` when no entry matches.
 */
function getAllExportInfoForSymbol(importingFile: SourceFile | FutureSourceFile, symbol: Symbol, symbolName: string, moduleSymbol: Symbol, preferCapitalized: boolean, program: Program, host: LanguageServiceHost, preferences: UserPreferences, cancellationToken: CancellationToken | undefined): readonly SymbolExportInfo[] | undefined {
    const getChecker = createGetChecker(program, host);
    const isFileExcluded = preferences.autoImportFileExcludePatterns && getIsFileExcluded(host, preferences);
    const mergedModuleSymbol = program.getTypeChecker().getMergedSymbol(moduleSymbol);
    // Determine up front whether the preferred module's source file is excluded by
    // `autoImportFileExcludePatterns`; if so, any exporting module is acceptable below.
    const moduleSourceFile = isFileExcluded && mergedModuleSymbol.declarations && getDeclarationOfKind(mergedModuleSymbol, SyntaxKind.SourceFile);
    const moduleSymbolExcluded = moduleSourceFile && isFileExcluded(moduleSourceFile as SourceFile);
    return getExportInfoMap(importingFile, host, program, preferences, cancellationToken)
        .search(importingFile.path, preferCapitalized, name => name === symbolName, info => {
            // Entries may come from the AutoImportProvider program, so pick the matching checker.
            const checker = getChecker(info[0].isFromPackageJson);
            if (
                checker.getMergedSymbol(skipAlias(info[0].symbol, checker)) === symbol
                // Unless the preferred module is excluded, require at least one entry to
                // come from (or have its symbol parented by) `moduleSymbol`.
                && (moduleSymbolExcluded || info.some(i => checker.getMergedSymbol(i.moduleSymbol) === moduleSymbol || i.symbol.parent === moduleSymbol))
            ) {
                return info;
            }
        });
}
/**
 * Resolves a single export-info record for `symbol` as exported from `moduleSymbol`,
 * consulting the main program's checker first and then the AutoImportProvider's.
 * Fails (via `Debug.checkDefined`) when the symbol cannot be found in either.
 */
function getSingleExportInfoForSymbol(symbol: Symbol, symbolName: string, moduleSymbol: Symbol, program: Program, host: LanguageServiceHost): SymbolExportInfo {
    const mainProgramInfo = getInfoWithChecker(program.getTypeChecker(), /*isFromPackageJson*/ false);
    if (mainProgramInfo) {
        return mainProgramInfo;
    }
    const autoImportProvider = host.getPackageJsonAutoImportProvider?.()?.getTypeChecker();
    return Debug.checkDefined(autoImportProvider && getInfoWithChecker(autoImportProvider, /*isFromPackageJson*/ true), `Could not find symbol in specified module for code actions`);
    // Looks for `symbol` first as a default-like export of `moduleSymbol`, then as a
    // named export or exported property.
    function getInfoWithChecker(checker: TypeChecker, isFromPackageJson: boolean): SymbolExportInfo | undefined {
        const defaultInfo = getDefaultLikeExportInfo(moduleSymbol, checker);
        if (defaultInfo && skipAlias(defaultInfo.symbol, checker) === symbol) {
            return { symbol: defaultInfo.symbol, moduleSymbol, moduleFileName: undefined, exportKind: defaultInfo.exportKind, targetFlags: skipAlias(symbol, checker).flags, isFromPackageJson };
        }
        const named = checker.tryGetMemberInModuleExportsAndProperties(symbolName, moduleSymbol);
        if (named && skipAlias(named, checker) === symbol) {
            return { symbol: named, moduleSymbol, moduleFileName: undefined, exportKind: ExportKind.Named, targetFlags: skipAlias(symbol, checker).flags, isFromPackageJson };
        }
    }
}
/**
 * Computes the candidate import fixes for a set of export infos: a namespace
 * qualification (when a matching namespace import already exists), an addition to an
 * existing import declaration, or brand-new import declarations.
 *
 * When an add-to-existing fix is found, new-import fixes are not computed at all.
 * `computedWithoutCacheCount` reports how many module specifiers were computed rather
 * than served from the module-specifier cache.
 */
function getImportFixes(
    exportInfos: readonly SymbolExportInfo[] | readonly FutureSymbolExportInfo[],
    usagePosition: number | undefined,
    isValidTypeOnlyUseSite: boolean,
    useRequire: boolean,
    program: Program,
    sourceFile: SourceFile | FutureSourceFile,
    host: LanguageServiceHost,
    preferences: UserPreferences,
    importMap = isFullSourceFile(sourceFile) ? createExistingImportMap(sourceFile, program) : undefined,
    fromCacheOnly?: boolean,
): { computedWithoutCacheCount: number; fixes: readonly ImportFixWithModuleSpecifier[]; } {
    const checker = program.getTypeChecker();
    // `importMap` is undefined for not-yet-created files, which cannot have existing imports.
    const existingImports = importMap ? flatMap(exportInfos, importMap.getImportsForExportInfo) : emptyArray;
    // A namespace fix requires a concrete usage position to qualify.
    const useNamespace = usagePosition !== undefined && tryUseExistingNamespaceImport(existingImports, usagePosition);
    const addToExisting = tryAddToExistingImport(existingImports, isValidTypeOnlyUseSite, checker, program.getCompilerOptions());
    if (addToExisting) {
        // Don't bother providing an action to add a new import if we can add to an existing one.
        return {
            computedWithoutCacheCount: 0,
            fixes: [...(useNamespace ? [useNamespace] : emptyArray), addToExisting],
        };
    }
    const { fixes, computedWithoutCacheCount = 0 } = getFixesForAddImport(
        exportInfos,
        existingImports,
        program,
        sourceFile,
        usagePosition,
        isValidTypeOnlyUseSite,
        useRequire,
        host,
        preferences,
        fromCacheOnly,
    );
    return {
        computedWithoutCacheCount,
        fixes: [...(useNamespace ? [useNamespace] : emptyArray), ...fixes],
    };
}
/**
 * If a namespace-like import of the target module already exists, offers a fix that
 * qualifies the usage (`member` → `ns.member`) instead of adding a new import.
 * Only applies when the missing binding would otherwise be a named import.
 */
function tryUseExistingNamespaceImport(existingImports: readonly FixAddToExistingImportInfo[], position: number): FixUseNamespaceImport | undefined {
    // It is possible that multiple import statements with the same specifier exist in the file.
    // e.g.
    //
    // import * as ns from "foo";
    // import { member1, member2 } from "foo";
    //
    // member3/**/ <-- cursor here
    //
    // in this case we should provide 2 actions:
    // 1. change "member3" to "ns.member3"
    // 2. add "member3" to the second import statement's import list
    // and it is up to the user to decide which one fits best.
    return firstDefined(existingImports, ({ declaration, importKind }): FixUseNamespaceImport | undefined => {
        if (importKind !== ImportKind.Named) return undefined;
        const namespacePrefix = getNamespaceLikeImportText(declaration);
        const moduleSpecifier = namespacePrefix && tryGetModuleSpecifierFromDeclaration(declaration)?.text;
        if (moduleSpecifier) {
            return { kind: ImportFixKind.UseNamespace, namespacePrefix, usagePosition: position, moduleSpecifierKind: undefined, moduleSpecifier };
        }
    });
}
/** Returns the local identifier a namespace-like import binds, if the declaration has one. */
function getNamespaceLikeImportText(declaration: AnyImportOrRequire) {
    if (declaration.kind === SyntaxKind.VariableDeclaration) {
        // `const ns = require("...")` — usable only when bound to a plain identifier,
        // not a binding pattern.
        return tryCast(declaration.name, isIdentifier)?.text;
    }
    if (declaration.kind === SyntaxKind.ImportEqualsDeclaration) {
        return declaration.name.text;
    }
    if (declaration.kind === SyntaxKind.JSDocImportTag || declaration.kind === SyntaxKind.ImportDeclaration) {
        return tryCast(declaration.importClause?.namedBindings, isNamespaceImport)?.name.text;
    }
    return Debug.assertNever(declaration);
}
/**
 * Decides whether an import of the target symbol may, must, or must not be type-only.
 *
 * NOTE(review): `isForNewImportDeclaration` is not read in this body — presumably kept
 * for call-site symmetry or future use; confirm before removing.
 */
function getAddAsTypeOnly(
    isValidTypeOnlyUseSite: boolean,
    isForNewImportDeclaration: boolean,
    symbol: Symbol | undefined,
    targetFlags: SymbolFlags,
    checker: TypeChecker,
    compilerOptions: CompilerOptions,
) {
    if (!isValidTypeOnlyUseSite) {
        // Can't use a type-only import if the usage is an emitting position
        return AddAsTypeOnly.NotAllowed;
    }
    if (
        symbol &&
        compilerOptions.verbatimModuleSyntax &&
        (!(targetFlags & SymbolFlags.Value) || !!checker.getTypeOnlyAliasDeclaration(symbol))
    ) {
        // A type-only import is required for this symbol if under these settings if the symbol will
        // be erased, which will happen if the target symbol is purely a type or if it was exported/imported
        // as type-only already somewhere between this import and the target.
        return AddAsTypeOnly.Required;
    }
    return AddAsTypeOnly.Allowed;
}
/**
 * Tries to add the missing binding to one of the file's existing import declarations.
 * Prefers a fix that does not require converting an import to or from type-only;
 * otherwise returns the first viable fix found.
 */
function tryAddToExistingImport(existingImports: readonly FixAddToExistingImportInfo[], isValidTypeOnlyUseSite: boolean, checker: TypeChecker, compilerOptions: CompilerOptions): FixAddToExistingImport | undefined {
    let best: FixAddToExistingImport | undefined;
    for (const existingImport of existingImports) {
        const fix = getAddToExistingImportFix(existingImport);
        if (!fix) continue;
        const isTypeOnly = isTypeOnlyImportDeclaration(fix.importClauseOrBindingPattern);
        if (
            fix.addAsTypeOnly !== AddAsTypeOnly.NotAllowed && isTypeOnly ||
            fix.addAsTypeOnly === AddAsTypeOnly.NotAllowed && !isTypeOnly
        ) {
            // Give preference to putting types in existing type-only imports and avoiding conversions
            // of import statements to/from type-only.
            return fix;
        }
        best ??= fix;
    }
    return best;
    // Builds a fix for one candidate declaration, or undefined when the binding cannot
    // be merged into it.
    function getAddToExistingImportFix({ declaration, importKind, symbol, targetFlags }: FixAddToExistingImportInfo): FixAddToExistingImport | undefined {
        if (importKind === ImportKind.CommonJS || importKind === ImportKind.Namespace || declaration.kind === SyntaxKind.ImportEqualsDeclaration) {
            // These kinds of imports are not combinable with anything
            return undefined;
        }
        if (declaration.kind === SyntaxKind.VariableDeclaration) {
            // `const { a, b } = require("...")` — a name can be added to the object binding pattern.
            return (importKind === ImportKind.Named || importKind === ImportKind.Default) && declaration.name.kind === SyntaxKind.ObjectBindingPattern
                ? { kind: ImportFixKind.AddToExisting, importClauseOrBindingPattern: declaration.name, importKind, moduleSpecifierKind: undefined, moduleSpecifier: declaration.initializer.arguments[0].text, addAsTypeOnly: AddAsTypeOnly.NotAllowed }
                : undefined;
        }
        const { importClause } = declaration;
        if (!importClause || !isStringLiteralLike(declaration.moduleSpecifier)) {
            return undefined;
        }
        const { name, namedBindings } = importClause;
        // A type-only import may not have both a default and named imports, so the only way a name can
        // be added to an existing type-only import is adding a named import to existing named bindings.
        if (importClause.isTypeOnly && !(importKind === ImportKind.Named && namedBindings)) {
            return undefined;
        }
        // N.B. we don't have to figure out whether to use the main program checker
        // or the AutoImportProvider checker because we're adding to an existing import; the existence of
        // the import guarantees the symbol came from the main program.
        const addAsTypeOnly = getAddAsTypeOnly(isValidTypeOnlyUseSite, /*isForNewImportDeclaration*/ false, symbol, targetFlags, checker, compilerOptions);
        if (
            importKind === ImportKind.Default && (
                name || // Cannot add a default import to a declaration that already has one
                addAsTypeOnly === AddAsTypeOnly.Required && namedBindings // Cannot add a default import as type-only if the import already has named bindings
            )
        ) {
            return undefined;
        }
        if (
            importKind === ImportKind.Named &&
            namedBindings?.kind === SyntaxKind.NamespaceImport // Cannot add a named import to a declaration that has a namespace import
        ) {
            return undefined;
        }
        return {
            kind: ImportFixKind.AddToExisting,
            importClauseOrBindingPattern: importClause,
            importKind,
            moduleSpecifierKind: undefined,
            moduleSpecifier: declaration.moduleSpecifier.text,
            addAsTypeOnly,
        };
    }
}
/**
 * Indexes the file's existing imports (ES imports, import-equals, JSDoc imports, and
 * `require` variable declarations) by the symbol id of the module they reference, and
 * returns an accessor that maps an export-info record to the matching declarations.
 */
function createExistingImportMap(importingFile: SourceFile, program: Program) {
    const checker = program.getTypeChecker();
    // Built lazily — stays undefined when the file has no resolvable imports.
    let importMap: MultiMap<SymbolId, AnyImportOrRequire> | undefined;
    for (const moduleSpecifier of importingFile.imports) {
        const i = importFromModuleSpecifier(moduleSpecifier);
        if (isVariableDeclarationInitializedToRequire(i.parent)) {
            const moduleSymbol = checker.resolveExternalModuleName(moduleSpecifier);
            if (moduleSymbol) {
                (importMap ||= createMultiMap()).add(getSymbolId(moduleSymbol), i.parent);
            }
        }
        else if (i.kind === SyntaxKind.ImportDeclaration || i.kind === SyntaxKind.ImportEqualsDeclaration || i.kind === SyntaxKind.JSDocImportTag) {
            const moduleSymbol = checker.getSymbolAtLocation(moduleSpecifier);
            if (moduleSymbol) {
                (importMap ||= createMultiMap()).add(getSymbolId(moduleSymbol), i);
            }
        }
    }
    return {
        getImportsForExportInfo: ({ moduleSymbol, exportKind, targetFlags, symbol }: SymbolExportInfo | FutureSymbolExportInfo): readonly FixAddToExistingImportInfo[] => {
            const matchingDeclarations = importMap?.get(getSymbolId(moduleSymbol));
            if (!matchingDeclarations) return emptyArray;
            // Can't use an es6 import for a type in JS.
            if (
                isSourceFileJS(importingFile)
                && !(targetFlags & SymbolFlags.Value)
                && !every(matchingDeclarations, isJSDocImportTag)
            ) return emptyArray;
            const importKind = getImportKind(importingFile, exportKind, program);
            return matchingDeclarations.map(declaration => ({ declaration, importKind, symbol, targetFlags }));
        },
    };
}
function shouldUseRequire(sourceFile: SourceFile | FutureSourceFile, program: Program): boolean {
// 1. TypeScript files don't use require variable declarations
if (!hasJSFileExtension(sourceFile.fileName)) {
return false;
}
// 2. If the current source file is unambiguously CJS or ESM, go with that
if (sourceFile.commonJsModuleIndicator && !sourceFile.externalModuleIndicator) return true;
if (sourceFile.externalModuleIndicator && !sourceFile.commonJsModuleIndicator) return false;
// 3. If there's a tsconfig/jsconfig, use its module setting
const compilerOptions = program.getCompilerOptions();
if (compilerOptions.configFile) {
return getEmitModuleKind(compilerOptions) < ModuleKind.ES2015;
}
// 4. In --module nodenext, assume we're not emitting JS -> JS, so use
// whatever syntax Node expects based on the detected module kind
// TODO: consider removing `impliedNodeFormatForEmit`
if (getImpliedNodeFormatForEmit(sourceFile, program) === ModuleKind.CommonJS) return true;
if (getImpliedNodeFormatForEmit(sourceFile, program) === ModuleKind.ESNext) return false;
// 5. Match the first other JS file in the program that's unambiguously CJS or ESM
for (const otherFile of program.getSourceFiles()) {
if (otherFile === sourceFile || !isSourceFileJS(otherFile) || program.isSourceFileFromExternalLibrary(otherFile)) continue;
if (otherFile.commonJsModuleIndicator && !otherFile.externalModuleIndicator) return true;
if (otherFile.externalModuleIndicator && !otherFile.commonJsModuleIndicator) return false;
}
// 6. Literally nothing to go on
return true;
}
/**
 * Returns a memoized accessor that yields the AutoImportProvider's type checker for
 * package.json-sourced info and the main program's checker otherwise.
 */
function createGetChecker(program: Program, host: LanguageServiceHost) {
    return memoizeOne((isFromPackageJson: boolean) => {
        if (isFromPackageJson) {
            return host.getPackageJsonAutoImportProvider!()!.getTypeChecker();
        }
        return program.getTypeChecker();
    });
}
/**
 * Computes fixes that add a brand-new import (or JSDoc `@import`-style type import in
 * JS) for each export info, resolving candidate module specifiers either from the
 * cache only (`fromCacheOnly`) or by full computation.
 *
 * `computedWithoutCacheCount` counts the export infos whose specifiers were computed
 * fresh rather than read from the module-specifier cache.
 */
function getNewImportFixes(
    program: Program,
    sourceFile: SourceFile | FutureSourceFile,
    usagePosition: number | undefined,
    isValidTypeOnlyUseSite: boolean,
    useRequire: boolean,
    exportInfo: readonly (SymbolExportInfo | FutureSymbolExportInfo)[],
    host: LanguageServiceHost,
    preferences: UserPreferences,
    fromCacheOnly?: boolean,
): { computedWithoutCacheCount: number; fixes: readonly (FixAddNewImport | FixAddJsdocTypeImport)[]; } {
    const isJs = hasJSFileExtension(sourceFile.fileName);
    const compilerOptions = program.getCompilerOptions();
    const moduleSpecifierResolutionHost = createModuleSpecifierResolutionHost(program, host);
    const getChecker = createGetChecker(program, host);
    const moduleResolution = getEmitModuleResolutionKind(compilerOptions);
    // Under node-style resolution, specifiers that reach into node_modules via a
    // relative path are rejected below.
    const rejectNodeModulesRelativePaths = moduleResolutionUsesNodeModules(moduleResolution);
    const getModuleSpecifiers = fromCacheOnly
        ? (exportInfo: SymbolExportInfo | FutureSymbolExportInfo) => moduleSpecifiers.tryGetModuleSpecifiersFromCache(exportInfo.moduleSymbol, sourceFile, moduleSpecifierResolutionHost, preferences)
        : (exportInfo: SymbolExportInfo | FutureSymbolExportInfo, checker: TypeChecker) => moduleSpecifiers.getModuleSpecifiersWithCacheInfo(exportInfo.moduleSymbol, checker, compilerOptions, sourceFile, moduleSpecifierResolutionHost, preferences, /*options*/ undefined, /*forAutoImport*/ true);
    let computedWithoutCacheCount = 0;
    // `i > 0` marks re-exports: index 0 is the original export location.
    const fixes = flatMap(exportInfo, (exportInfo, i) => {
        const checker = getChecker(exportInfo.isFromPackageJson);
        const { computedWithoutCache, moduleSpecifiers, kind: moduleSpecifierKind } = getModuleSpecifiers(exportInfo, checker) ?? {};
        const importedSymbolHasValueMeaning = !!(exportInfo.targetFlags & SymbolFlags.Value);
        const addAsTypeOnly = getAddAsTypeOnly(isValidTypeOnlyUseSite, /*isForNewImportDeclaration*/ true, exportInfo.symbol, exportInfo.targetFlags, checker, compilerOptions);
        computedWithoutCacheCount += computedWithoutCache ? 1 : 0;
        return mapDefined(moduleSpecifiers, (moduleSpecifier): FixAddNewImport | FixAddJsdocTypeImport | undefined => {
            if (rejectNodeModulesRelativePaths && pathContainsNodeModules(moduleSpecifier)) {
                return undefined;
            }
            if (!importedSymbolHasValueMeaning && isJs && usagePosition !== undefined) {
                // `position` should only be undefined at a missing jsx namespace, in which case we shouldn't be looking for pure types.
                return { kind: ImportFixKind.JsdocTypeImport, moduleSpecifierKind, moduleSpecifier, usagePosition, exportInfo, isReExport: i > 0 };
            }
            const importKind = getImportKind(sourceFile, exportInfo.exportKind, program);
            let qualification: Qualification | undefined;
            if (usagePosition !== undefined && importKind === ImportKind.CommonJS && exportInfo.exportKind === ExportKind.Named) {
                // Compiler options are restricting our import options to a require, but we need to access
                // a named export or property of the exporting module. We need to import the entire module
                // and insert a property access, e.g. `writeFile` becomes
                //
                // import fs = require("fs"); // or const in JS
                // fs.writeFile
                const exportEquals = checker.resolveExternalModuleSymbol(exportInfo.moduleSymbol);
                let namespacePrefix;
                if (exportEquals !== exportInfo.moduleSymbol) {
                    namespacePrefix = forEachNameOfDefaultExport(exportEquals, checker, getEmitScriptTarget(compilerOptions), identity)!;
                }
                // Fall back to an identifier derived from the module name.
                namespacePrefix ||= moduleSymbolToValidIdentifier(
                    exportInfo.moduleSymbol,
                    getEmitScriptTarget(compilerOptions),
                    /*forceCapitalize*/ false,
                );
                qualification = { namespacePrefix, usagePosition };
            }
            return {
                kind: ImportFixKind.AddNew,
                moduleSpecifierKind,
                moduleSpecifier,
                importKind,
                useRequire,
                addAsTypeOnly,
                exportInfo,
                isReExport: i > 0,
                qualification,
            };
        });
    });
    return { computedWithoutCacheCount, fixes };
}
/**
 * Computes new-import fixes, preferring to re-use the module specifier of an existing
 * (non-combinable) import declaration; otherwise resolves fresh specifiers.
 */
function getFixesForAddImport(
    exportInfos: readonly SymbolExportInfo[] | readonly FutureSymbolExportInfo[],
    existingImports: readonly FixAddToExistingImportInfo[],
    program: Program,
    sourceFile: SourceFile | FutureSourceFile,
    usagePosition: number | undefined,
    isValidTypeOnlyUseSite: boolean,
    useRequire: boolean,
    host: LanguageServiceHost,
    preferences: UserPreferences,
    fromCacheOnly?: boolean,
): { computedWithoutCacheCount?: number; fixes: readonly (FixAddNewImport | FixAddJsdocTypeImport)[]; } {
    const fromExistingSpecifier = firstDefined(existingImports, info => newImportInfoFromExistingSpecifier(info, isValidTypeOnlyUseSite, useRequire, program.getTypeChecker(), program.getCompilerOptions()));
    if (fromExistingSpecifier) {
        return { fixes: [fromExistingSpecifier] };
    }
    return getNewImportFixes(program, sourceFile, usagePosition, isValidTypeOnlyUseSite, useRequire, exportInfos, host, preferences, fromCacheOnly);
}
/**
 * Builds a new-import fix that reuses the module specifier of an existing import
 * declaration, or `undefined` when the declaration's specifier cannot be read.
 */
function newImportInfoFromExistingSpecifier(
    { declaration, importKind, symbol, targetFlags }: FixAddToExistingImportInfo,
    isValidTypeOnlyUseSite: boolean,
    useRequire: boolean,
    checker: TypeChecker,
    compilerOptions: CompilerOptions,
): FixAddNewImport | undefined {
    const moduleSpecifier = tryGetModuleSpecifierFromDeclaration(declaration)?.text;
    if (!moduleSpecifier) {
        return undefined;
    }
    // `require` calls have no type-only form.
    const addAsTypeOnly = useRequire
        ? AddAsTypeOnly.NotAllowed
        : getAddAsTypeOnly(isValidTypeOnlyUseSite, /*isForNewImportDeclaration*/ true, symbol, targetFlags, checker, compilerOptions);
    return { kind: ImportFixKind.AddNew, moduleSpecifierKind: undefined, moduleSpecifier, importKind, addAsTypeOnly, useRequire };
}
/** An import fix paired with the symbol name it imports and context from the diagnostic location. */
interface FixInfo {
    readonly fix: ImportFix;
    readonly symbolName: string;
    /** Text of the identifier at the error location, when the error token was an identifier. */
    readonly errorIdentifierText: string | undefined;
    /** True when the fix imports a JSX namespace (e.g. `React`) rather than the identifier itself. */
    readonly isJsxNamespaceFix?: boolean;
}
/**
 * Entry point for computing import fixes at a diagnostic: dispatches on the error code
 * (UMD global, value-use of a type-only import, or a generic unresolved name) and
 * returns the candidate fixes sorted best-first.
 */
function getFixInfos(context: CodeFixContextBase, errorCode: number, pos: number, useAutoImportProvider: boolean): readonly FixInfo[] | undefined {
    const symbolToken = getTokenAtPosition(context.sourceFile, pos);
    let info;
    if (errorCode === Diagnostics._0_refers_to_a_UMD_global_but_the_current_file_is_a_module_Consider_adding_an_import_instead.code) {
        info = getFixesInfoForUMDImport(context, symbolToken);
    }
    else if (!isIdentifier(symbolToken)) {
        // All remaining error codes require an identifier at the error position.
        return undefined;
    }
    else if (errorCode === Diagnostics._0_cannot_be_used_as_a_value_because_it_was_imported_using_import_type.code) {
        // Promote the existing type-only import rather than adding a new import.
        const symbolName = single(getSymbolNamesToImport(context.sourceFile, context.program.getTypeChecker(), symbolToken, context.program.getCompilerOptions()));
        const fix = getTypeOnlyPromotionFix(context.sourceFile, symbolToken, symbolName, context.program);
        return fix && [{ fix, symbolName, errorIdentifierText: symbolToken.text }];
    }
    else {
        info = getFixesInfoForNonUMDImport(context, symbolToken, useAutoImportProvider);
    }
    const packageJsonImportFilter = createPackageJsonImportFilter(context.sourceFile, context.preferences, context.host);
    return info && sortFixInfo(info, context.sourceFile, context.program, packageJsonImportFilter, context.host, context.preferences);
}
function sortFixInfo(fixes: readonly (FixInfo & { fix: ImportFixWithModuleSpecifier; })[], sourceFile: SourceFile, program: Program, packageJsonImportFilter: PackageJsonImportFilter, host: LanguageServiceHost, preferences: UserPreferences): readonly (FixInfo & { fix: ImportFixWithModuleSpecifier; })[] {
const _toPath = (fileName: string) => toPath(fileName, host.getCurrentDirectory(), hostGetCanonicalFileName(host));
return toSorted(fixes, (a, b) =>
compareBooleans(!!a.isJsxNamespaceFix, !!b.isJsxNamespaceFix) ||
compareValues(a.fix.kind, b.fix.kind) ||
compareModuleSpecifiers(a.fix, b.fix, sourceFile, program, preferences, packageJsonImportFilter.allowsImportingSpecifier, _toPath));
}
/**
 * Computes sorted import fixes for an identifier that has no associated diagnostic
 * (e.g. fixes driven by completions rather than an error).
 */
function getFixInfosWithoutDiagnostic(context: CodeFixContextBase, symbolToken: Identifier, useAutoImportProvider: boolean): readonly FixInfo[] | undefined {
    const { sourceFile, program, host, preferences } = context;
    const info = getFixesInfoForNonUMDImport(context, symbolToken, useAutoImportProvider);
    const packageJsonImportFilter = createPackageJsonImportFilter(sourceFile, preferences, host);
    return info === undefined ? undefined : sortFixInfo(info, sourceFile, program, packageJsonImportFilter, host, preferences);
}
/**
 * Picks the single best fix. Namespace-use and add-to-existing fixes (which sort
 * first) win outright; otherwise the fix with the best module specifier is chosen.
 */
function getBestFix(fixes: readonly ImportFixWithModuleSpecifier[], sourceFile: SourceFile | FutureSourceFile, program: Program, packageJsonImportFilter: PackageJsonImportFilter, host: LanguageServiceHost, preferences: UserPreferences): ImportFixWithModuleSpecifier | undefined {
    if (!some(fixes)) return;
    // These will always be placed first if available, and are better than other kinds
    const [first] = fixes;
    if (first.kind === ImportFixKind.UseNamespace || first.kind === ImportFixKind.AddToExisting) {
        return first;
    }
    const toPathForHost = (fileName: string) => toPath(fileName, host.getCurrentDirectory(), hostGetCanonicalFileName(host));
    let best = first;
    for (const candidate of fixes.slice(1)) {
        // `LessThan` means `candidate` is better than the current best.
        const comparison = compareModuleSpecifiers(
            candidate,
            best,
            sourceFile,
            program,
            preferences,
            packageJsonImportFilter.allowsImportingSpecifier,
            toPathForHost,
        );
        if (comparison === Comparison.LessThan) {
            best = candidate;
        }
    }
    return best;
}
/**
 * Ranks two fixes by the quality of their module specifiers, in priority order:
 * package.json-allowed specifiers, relativity preference, node core module style,
 * avoiding likely barrel-re-export cycles, and finally fewer directory separators.
 * Namespace-use fixes are never re-ranked here.
 *
 * @returns `Comparison.LessThan` if `a` is better than `b`.
 */
function compareModuleSpecifiers(
    a: ImportFixWithModuleSpecifier,
    b: ImportFixWithModuleSpecifier,
    importingFile: SourceFile | FutureSourceFile,
    program: Program,
    preferences: UserPreferences,
    allowsImportingSpecifier: (specifier: string) => boolean,
    toPath: (fileName: string) => Path,
): Comparison {
    if (a.kind !== ImportFixKind.UseNamespace && b.kind !== ImportFixKind.UseNamespace) {
        return compareBooleans(
            b.moduleSpecifierKind !== "node_modules" || allowsImportingSpecifier(b.moduleSpecifier),
            a.moduleSpecifierKind !== "node_modules" || allowsImportingSpecifier(a.moduleSpecifier),
        )
            || compareModuleSpecifierRelativity(a, b, preferences)
            || compareNodeCoreModuleSpecifiers(a.moduleSpecifier, b.moduleSpecifier, importingFile, program)
            || compareBooleans(
                isFixPossiblyReExportingImportingFile(a, importingFile.path, toPath),
                isFixPossiblyReExportingImportingFile(b, importingFile.path, toPath),
            )
            || compareNumberOfDirectorySeparators(a.moduleSpecifier, b.moduleSpecifier);
    }
    return Comparison.EqualTo;
}
/**
 * Under the "non-relative" or "project-relative" preference, ranks non-relative
 * specifiers ahead of relative ones; otherwise the fixes compare equal.
 */
function compareModuleSpecifierRelativity(a: ImportFixWithModuleSpecifier, b: ImportFixWithModuleSpecifier, preferences: UserPreferences): Comparison {
    const preference = preferences.importModuleSpecifierPreference;
    if (preference !== "non-relative" && preference !== "project-relative") {
        return Comparison.EqualTo;
    }
    return compareBooleans(a.moduleSpecifierKind === "relative", b.moduleSpecifierKind === "relative");
}
// Simple heuristic to avoid creating an import cycle through a barrel re-export:
// prefer `import { Foo } from "../Foo"` over `import { Foo } from ".."` when the
// latter's index file sits above the importing file. Can produce false positives or
// negatives when re-exports cross into sibling directories (e.g.
// `export * from "../whatever"`) or live in files not named "index".
function isFixPossiblyReExportingImportingFile(fix: ImportFixWithModuleSpecifier, importingFilePath: Path, toPath: (fileName: string) => Path): boolean {
    if (!fix.isReExport) return false;
    const moduleFileName = fix.exportInfo?.moduleFileName;
    if (!moduleFileName || !isIndexFileName(moduleFileName)) return false;
    // Suspicious when the importing file lives under the index file's directory.
    const reExportDir = toPath(getDirectoryPath(moduleFileName));
    return startsWith(importingFilePath, reExportDir);
}
/** True when `fileName` is an "index" file once a JS/TS extension is stripped (case-insensitive). */
function isIndexFileName(fileName: string) {
    const strippableExtensions = [".js", ".jsx", ".d.ts", ".ts", ".tsx"];
    const baseName = getBaseFileName(fileName, strippableExtensions, /*ignoreCase*/ true);
    return baseName === "index";
}
/**
 * Ranks `node:`-prefixed core module specifiers above or below bare ones depending on
 * whether the importing file should use URI-style node core modules.
 */
function compareNodeCoreModuleSpecifiers(a: string, b: string, importingFile: SourceFile | FutureSourceFile, program: Program): Comparison {
    const aIsUriStyle = startsWith(a, "node:");
    const bIsUriStyle = startsWith(b, "node:");
    if (aIsUriStyle === bIsUriStyle) {
        return Comparison.EqualTo;
    }
    const preferUriStyle = shouldUseUriStyleNodeCoreModules(importingFile, program);
    if (aIsUriStyle) {
        return preferUriStyle ? Comparison.LessThan : Comparison.GreaterThan;
    }
    return preferUriStyle ? Comparison.GreaterThan : Comparison.LessThan;
}
/**
 * Computes fixes for the "refers to a UMD global" diagnostic: offers to import the
 * UMD export (an `export =`) explicitly. Returns `undefined` when the token does not
 * resolve to a UMD export symbol.
 */
function getFixesInfoForUMDImport({ sourceFile, program, host, preferences }: CodeFixContextBase, token: Node): (FixInfo & { fix: ImportFixWithModuleSpecifier; })[] | undefined {
    const checker = program.getTypeChecker();
    const umdSymbol = getUmdSymbol(token, checker);
    if (!umdSymbol) return undefined;
    const symbol = checker.getAliasedSymbol(umdSymbol);
    const symbolName = umdSymbol.name;
    const exportInfo: readonly SymbolExportInfo[] = [{ symbol: umdSymbol, moduleSymbol: symbol, moduleFileName: undefined, exportKind: ExportKind.UMD, targetFlags: symbol.flags, isFromPackageJson: false }];
    const useRequire = shouldUseRequire(sourceFile, program);
    // `usagePosition` is undefined because `token` may not actually be a usage of the symbol we're importing.
    // For example, we might need to import `React` in order to use an arbitrary JSX tag. We could send a position
    // for other UMD imports, but `usagePosition` is currently only used to insert a namespace qualification
    // before a named import, like converting `writeFile` to `fs.writeFile` (whether `fs` is already imported or
    // not), and this function will only be called for UMD symbols, which are necessarily an `export =`, not a
    // named export.
    const fixes = getImportFixes(exportInfo, /*usagePosition*/ undefined, /*isValidTypeOnlyUseSite*/ false, useRequire, program, sourceFile, host, preferences).fixes;
    return fixes.map(fix => ({ fix, symbolName, errorIdentifierText: tryCast(token, isIdentifier)?.text }));
}
/**
 * Resolves the UMD export symbol relevant at `token`: either the token itself names a
 * UMD export, or the token is a JSX tag/fragment whose JSX namespace (e.g. `React`)
 * resolves to one. Returns `undefined` otherwise.
 */
function getUmdSymbol(token: Node, checker: TypeChecker): Symbol | undefined {
    // try the identifier to see if it is the umd symbol
    const umdSymbol = isIdentifier(token) ? checker.getSymbolAtLocation(token) : undefined;
    if (isUMDExportSymbol(umdSymbol)) return umdSymbol;
    // The error wasn't for the symbolAtLocation, it was for the JSX tag itself, which needs access to e.g. `React`.
    const { parent } = token;
    if ((isJsxOpeningLikeElement(parent) && parent.tagName === token) || isJsxOpeningFragment(parent)) {
        const parentSymbol = checker.resolveName(checker.getJsxNamespace(parent), isJsxOpeningLikeElement(parent) ? token : parent, SymbolFlags.Value, /*excludeGlobals*/ false);
        if (isUMDExportSymbol(parentSymbol)) {
            return parentSymbol;
        }
    }
    return undefined;
}
/**
 * Determines which import syntax to use when importing a symbol with the given export
 * kind into `importingFile`.
 *
 * @param forceImportKeyword Indicates that the user has already typed `import`, so the result must start with `import`.
 * (In other words, do not allow `const x = require("...")` for JS files.)
 *
 * @internal
 */
export function getImportKind(importingFile: SourceFile | FutureSourceFile, exportKind: ExportKind, program: Program, forceImportKeyword?: boolean): ImportKind {
    // Under verbatimModuleSyntax, a CommonJS-format file must use `import x = require(...)`.
    if (program.getCompilerOptions().verbatimModuleSyntax && getEmitModuleFormatOfFile(importingFile, program) === ModuleKind.CommonJS) {
        // TODO: if the exporting file is ESM under nodenext, or `forceImport` is given in a JS file, this is impossible
        return ImportKind.CommonJS;
    }
    if (exportKind === ExportKind.Named) {
        return ImportKind.Named;
    }
    if (exportKind === ExportKind.Default) {
        return ImportKind.Default;
    }
    if (exportKind === ExportKind.ExportEquals) {
        return getExportEqualsImportKind(importingFile, program.getCompilerOptions(), !!forceImportKeyword);
    }
    if (exportKind === ExportKind.UMD) {
        return getUmdImportKind(importingFile, program, !!forceImportKeyword);
    }
    if (exportKind === ExportKind.Module) {
        return ImportKind.Namespace;
    }
    return Debug.assertNever(exportKind);
}
/**
 * Chooses the import syntax for a UMD (`export =`) module based on whether synthetic
 * default imports are allowed and on the target module kind.
 */
function getUmdImportKind(importingFile: SourceFile | FutureSourceFile, program: Program, forceImportKeyword: boolean): ImportKind {
    // Import a synthetic `default` if enabled.
    if (getAllowSyntheticDefaultImports(program.getCompilerOptions())) {
        return ImportKind.Default;
    }
    // When a synthetic `default` is unavailable, use `import..require` if the module kind supports it.
    const moduleKind = getEmitModuleKind(program.getCompilerOptions());
    switch (moduleKind) {
        case ModuleKind.AMD:
        case ModuleKind.CommonJS:
        case ModuleKind.UMD:
            // JS files that are already ES modules (or where the user typed `import`)
            // can't use `import..require`; fall back to a namespace import.
            if (hasJSFileExtension(importingFile.fileName)) {
                return importingFile.externalModuleIndicator || forceImportKeyword ? ImportKind.Namespace : ImportKind.CommonJS;
            }
            return ImportKind.CommonJS;
        case ModuleKind.System:
        case ModuleKind.ES2015:
        case ModuleKind.ES2020:
        case ModuleKind.ES2022:
        case ModuleKind.ESNext:
        case ModuleKind.None:
        case ModuleKind.Preserve:
            // Fall back to the `import * as ns` style import.
            return ImportKind.Namespace;
        case ModuleKind.Node16:
        case ModuleKind.Node18:
        case ModuleKind.Node20:
        case ModuleKind.NodeNext:
            // Per-file format decides under node-style module kinds.
            return getImpliedNodeFormatForEmit(importingFile, program) === ModuleKind.ESNext ? ImportKind.Namespace : ImportKind.CommonJS;
        default:
            return Debug.assertNever(moduleKind, `Unexpected moduleKind ${moduleKind}`);
    }
}
/**
 * Computes fixes for an unresolved identifier that is not a UMD global: gathers all
 * modules exporting a matching name and produces import fixes for each. For JSX tags
 * this may also produce fixes that import the JSX namespace.
 */
function getFixesInfoForNonUMDImport({ sourceFile, program, cancellationToken, host, preferences }: CodeFixContextBase, symbolToken: Identifier, useAutoImportProvider: boolean): readonly (FixInfo & { fix: ImportFixWithModuleSpecifier; })[] | undefined {
    const checker = program.getTypeChecker();
    const compilerOptions = program.getCompilerOptions();
    return flatMap(getSymbolNamesToImport(sourceFile, checker, symbolToken, compilerOptions), symbolName => {
        // "default" is a keyword and not a legal identifier for the import, but appears as an identifier.
        if (symbolName === InternalSymbolName.Default) {
            return undefined;
        }
        const isValidTypeOnlyUseSite = isValidTypeOnlyAliasUseSite(symbolToken);
        const useRequire = shouldUseRequire(sourceFile, program);
        const exportInfo = getExportInfos(symbolName, isJSXTagName(symbolToken), getMeaningFromLocation(symbolToken), cancellationToken, sourceFile, program, useAutoImportProvider, host, preferences);
        return arrayFrom(
            flatMapIterator(exportInfo.values(), exportInfos => getImportFixes(exportInfos, symbolToken.getStart(sourceFile), isValidTypeOnlyUseSite, useRequire, program, sourceFile, host, preferences).fixes),
            // `isJsxNamespaceFix` marks fixes importing the JSX namespace rather than the token's own name.
            fix => ({ fix, symbolName, errorIdentifierText: symbolToken.text, isJsxNamespaceFix: symbolName !== symbolToken.text }),
        );
    });
}
/**
 * Finds the type-only alias declaration in `sourceFile` that must be promoted so
 * `symbolName` can be used as a value, or `undefined` when there is none.
 */
function getTypeOnlyPromotionFix(sourceFile: SourceFile, symbolToken: Identifier, symbolName: string, program: Program): FixPromoteTypeOnlyImport | undefined {
    const checker = program.getTypeChecker();
    const symbol = checker.resolveName(symbolName, symbolToken, SymbolFlags.Value, /*excludeGlobals*/ true);
    if (!symbol) {
        return undefined;
    }
    const typeOnlyAliasDeclaration = checker.getTypeOnlyAliasDeclaration(symbol);
    if (!typeOnlyAliasDeclaration) {
        return undefined;
    }
    // Only promotable when the type-only declaration lives in this same file.
    if (getSourceFileOfNode(typeOnlyAliasDeclaration) !== sourceFile) {
        return undefined;
    }
    return { kind: ImportFixKind.PromoteTypeOnly, typeOnlyAliasDeclaration };
}
/**
 * Lists the symbol names that may need importing at `symbolToken`. Usually just the
 * token's own text, but for a JSX tag under a JSX mode that needs an explicit factory
 * import, the JSX namespace (e.g. `React`) may be added or substituted.
 */
function getSymbolNamesToImport(sourceFile: SourceFile, checker: TypeChecker, symbolToken: Identifier, compilerOptions: CompilerOptions): string[] {
    const { parent } = symbolToken;
    const isJsxTag = (isJsxOpeningLikeElement(parent) || isJsxClosingElement(parent)) && parent.tagName === symbolToken;
    if (isJsxTag && jsxModeNeedsExplicitImport(compilerOptions.jsx)) {
        const jsxNamespace = checker.getJsxNamespace(sourceFile);
        if (needsJsxNamespaceFix(jsxNamespace, symbolToken, checker)) {
            // A non-intrinsic, unresolvable component needs its own import in addition
            // to the JSX namespace import.
            const needsComponentNameFix = !isIntrinsicJsxName(symbolToken.text) && !checker.resolveName(symbolToken.text, symbolToken, SymbolFlags.Value, /*excludeGlobals*/ false);
            return needsComponentNameFix ? [symbolToken.text, jsxNamespace] : [jsxNamespace];
        }
    }
    return [symbolToken.text];
}
/** Whether the JSX namespace itself must be imported for this tag to work. */
function needsJsxNamespaceFix(jsxNamespace: string, symbolToken: Identifier, checker: TypeChecker) {
    // If we were triggered by a matching error code on an intrinsic, the error must have been about missing the JSX factory
    if (isIntrinsicJsxName(symbolToken.text)) {
        return true;
    }
    const namespaceSymbol = checker.resolveName(jsxNamespace, symbolToken, SymbolFlags.Value, /*excludeGlobals*/ true);
    if (!namespaceSymbol) {
        return true;
    }
    // The name resolved, but only to type-only declarations with no value side.
    return some(namespaceSymbol.declarations, isTypeOnlyImportOrExportDeclaration) && !(namespaceSymbol.flags & SymbolFlags.Value);
}
// Returns a map from an exported symbol's ID to a list of every way it's (re-)exported.
function getExportInfos(
    symbolName: string,
    isJsxTagName: boolean,
    currentTokenMeaning: SemanticMeaning,
    cancellationToken: CancellationToken,
    fromFile: SourceFile,
    program: Program,
    useAutoImportProvider: boolean,
    host: LanguageServiceHost,
    preferences: UserPreferences,
): ReadonlyMap<string, readonly SymbolExportInfo[]> {
    // For each original symbol, keep all re-exports of that symbol together so we can call `getCodeActionsForImport` on the whole group at once.
    // Maps symbol id to info for modules providing that symbol (original export + re-exports).
    const originalSymbolToExportInfos = createMultiMap<string, SymbolExportInfo>();
    const packageJsonFilter = createPackageJsonImportFilter(fromFile, preferences, host);
    const moduleSpecifierCache = host.getModuleSpecifierCache?.();
    // Memoized so at most two hosts are built: one for the main program and one
    // for the package.json auto-import provider program.
    const getModuleSpecifierResolutionHost = memoizeOne((isFromPackageJson: boolean) => {
        return createModuleSpecifierResolutionHost(isFromPackageJson ? host.getPackageJsonAutoImportProvider!()! : program, host);
    });
    // Records `exportedSymbol` (exported from `moduleSymbol`) if that module is importable from `fromFile`.
    function addSymbol(moduleSymbol: Symbol, toFile: SourceFile | undefined, exportedSymbol: Symbol, exportKind: ExportKind, program: Program, isFromPackageJson: boolean): void {
        const moduleSpecifierResolutionHost = getModuleSpecifierResolutionHost(isFromPackageJson);
        if (isImportable(program, fromFile, toFile, moduleSymbol, preferences, packageJsonFilter, moduleSpecifierResolutionHost, moduleSpecifierCache)) {
            const checker = program.getTypeChecker();
            originalSymbolToExportInfos.add(getUniqueSymbolId(exportedSymbol, checker).toString(), { symbol: exportedSymbol, moduleSymbol, moduleFileName: toFile?.fileName, exportKind, targetFlags: skipAlias(exportedSymbol, checker).flags, isFromPackageJson });
        }
    }
    forEachExternalModuleToImportFrom(program, host, preferences, useAutoImportProvider, (moduleSymbol, sourceFile, program, isFromPackageJson) => {
        const checker = program.getTypeChecker();
        cancellationToken.throwIfCancellationRequested();
        const compilerOptions = program.getCompilerOptions();
        // Check the module's default-like export (default / export=) for a matching name.
        // For JSX tag names, a capitalized variant of the default-export name also counts.
        const defaultInfo = getDefaultLikeExportInfo(moduleSymbol, checker);
        if (
            defaultInfo
            && symbolFlagsHaveMeaning(checker.getSymbolFlags(defaultInfo.symbol), currentTokenMeaning)
            && forEachNameOfDefaultExport(defaultInfo.symbol, checker, getEmitScriptTarget(compilerOptions), (name, capitalizedName) => (isJsxTagName ? capitalizedName ?? name : name) === symbolName)
        ) {
            addSymbol(moduleSymbol, sourceFile, defaultInfo.symbol, defaultInfo.exportKind, program, isFromPackageJson);
        }
        // check exports with the same name
        const exportSymbolWithIdenticalName = checker.tryGetMemberInModuleExportsAndProperties(symbolName, moduleSymbol);
        if (exportSymbolWithIdenticalName && symbolFlagsHaveMeaning(checker.getSymbolFlags(exportSymbolWithIdenticalName), currentTokenMeaning)) {
            addSymbol(moduleSymbol, sourceFile, exportSymbolWithIdenticalName, ExportKind.Named, program, isFromPackageJson);
        }
    });
    return originalSymbolToExportInfos;
}
/**
 * Decides how an `export =` module should be imported from `importingFile`:
 * default import, namespace import, or CommonJS-style (`import =` / require).
 */
function getExportEqualsImportKind(importingFile: SourceFile | FutureSourceFile, compilerOptions: CompilerOptions, forceImportKeyword: boolean): ImportKind {
    const allowSyntheticDefaults = getAllowSyntheticDefaultImports(compilerOptions);
    const isJS = hasJSFileExtension(importingFile.fileName);
    // 'import =' is unavailable in es2015+ TS files, so choose between a default
    // and a namespace import based on allowSyntheticDefaultImports/esModuleInterop.
    if (!isJS && getEmitModuleKind(compilerOptions) >= ModuleKind.ES2015) {
        return allowSyntheticDefaults ? ImportKind.Default : ImportKind.Namespace;
    }
    // 'import =' is unavailable in JavaScript, so choose between a default import,
    // a namespace import, and const/require.
    if (isJS) {
        if (importingFile.externalModuleIndicator || forceImportKeyword) {
            return allowSyntheticDefaults ? ImportKind.Default : ImportKind.Namespace;
        }
        return ImportKind.CommonJS;
    }
    // The most correct choice here is probably 'import =', but people really hate
    // that, so look for precedent in the importing file first.
    for (const statement of importingFile.statements ?? emptyArray) {
        // `import foo` parses as an ImportEqualsDeclaration even though it could be an ImportDeclaration
        if (isImportEqualsDeclaration(statement) && !nodeIsMissing(statement.moduleReference)) {
            return ImportKind.CommonJS;
        }
    }
    // No precedent: use a default import when allowSyntheticDefaultImports/esModuleInterop allows it.
    return allowSyntheticDefaults ? ImportKind.Default : ImportKind.CommonJS;
}
/**
 * Runs `fix` through a change tracker and packages the resulting text changes
 * plus the worker's diagnostic into a CodeFixAction.
 */
function codeActionForFix(
    context: textChanges.TextChangesContext,
    sourceFile: SourceFile,
    symbolName: string,
    fix: ImportFix,
    includeSymbolNameInDescription: boolean,
    program: Program,
    preferences: UserPreferences,
): CodeFixAction {
    let description!: DiagnosticOrDiagnosticAndArguments;
    const changes = textChanges.ChangeTracker.with(context, tracker => {
        description = codeActionForFixWorker(tracker, sourceFile, symbolName, fix, includeSymbolNameInDescription, program, preferences);
    });
    return createCodeFixAction(importFixName, changes, description, importFixId, Diagnostics.Add_all_missing_imports);
}
/**
 * Applies `fix` via the change tracker and returns the diagnostic (with its
 * arguments) used as the code action's description.
 */
function codeActionForFixWorker(
    changes: textChanges.ChangeTracker,
    sourceFile: SourceFile,
    symbolName: string,
    fix: ImportFix,
    includeSymbolNameInDescription: boolean,
    program: Program,
    preferences: UserPreferences,
): DiagnosticOrDiagnosticAndArguments {
    const quotePreference = getQuotePreference(sourceFile, preferences);
    switch (fix.kind) {
        case ImportFixKind.UseNamespace:
            // Qualify the usage site with an already-imported namespace.
            addNamespaceQualifier(changes, sourceFile, fix);
            return [Diagnostics.Change_0_to_1, symbolName, `${fix.namespacePrefix}.${symbolName}`];
        case ImportFixKind.JsdocTypeImport:
            // Prefix the JSDoc type reference with `import("mod").`.
            addImportType(changes, sourceFile, fix, quotePreference);
            return [Diagnostics.Change_0_to_1, symbolName, getImportTypePrefix(fix.moduleSpecifier, quotePreference) + symbolName];
        case ImportFixKind.AddToExisting: {
            // Extend an existing import clause or require'd binding pattern.
            const { importClauseOrBindingPattern, importKind, addAsTypeOnly, moduleSpecifier } = fix;
            doAddExistingFix(
                changes,
                sourceFile,
                importClauseOrBindingPattern,
                importKind === ImportKind.Default ? { name: symbolName, addAsTypeOnly } : undefined,
                importKind === ImportKind.Named ? [{ name: symbolName, addAsTypeOnly }] : emptyArray,
                /*removeExistingImportSpecifiers*/ undefined,
                preferences,
            );
            const moduleSpecifierWithoutQuotes = stripQuotes(moduleSpecifier);
            return includeSymbolNameInDescription
                ? [Diagnostics.Import_0_from_1, symbolName, moduleSpecifierWithoutQuotes]
                : [Diagnostics.Update_import_from_0, moduleSpecifierWithoutQuotes];
        }
        case ImportFixKind.AddNew: {
            // Create a brand-new import declaration or require statement.
            const { importKind, moduleSpecifier, addAsTypeOnly, useRequire, qualification } = fix;
            const getDeclarations = useRequire ? getNewRequires : getNewImports;
            const defaultImport: Import | undefined = importKind === ImportKind.Default ? { name: symbolName, addAsTypeOnly } : undefined;
            const namedImports: Import[] | undefined = importKind === ImportKind.Named ? [{ name: symbolName, addAsTypeOnly }] : undefined;
            const namespaceLikeImport = importKind === ImportKind.Namespace || importKind === ImportKind.CommonJS
                ? { importKind, name: qualification?.namespacePrefix || symbolName, addAsTypeOnly }
                : undefined;
            insertImports(
                changes,
                sourceFile,
                getDeclarations(
                    moduleSpecifier,
                    quotePreference,
                    defaultImport,
                    namedImports,
                    namespaceLikeImport,
                    program.getCompilerOptions(),
                    preferences,
                ),
                /*blankLineBetween*/ true,
                preferences,
            );
            if (qualification) {
                // The new namespace-like import also requires qualifying the usage site.
                addNamespaceQualifier(changes, sourceFile, qualification);
            }
            return includeSymbolNameInDescription
                ? [Diagnostics.Import_0_from_1, symbolName, moduleSpecifier]
                : [Diagnostics.Add_import_from_0, moduleSpecifier];
        }
        case ImportFixKind.PromoteTypeOnly: {
            // Strip a `type` modifier so an existing import can be used as a value.
            const { typeOnlyAliasDeclaration } = fix;
            const promotedDeclaration = promoteFromTypeOnly(changes, typeOnlyAliasDeclaration, program, sourceFile, preferences);
            return promotedDeclaration.kind === SyntaxKind.ImportSpecifier
                ? [Diagnostics.Remove_type_from_import_of_0_from_1, symbolName, getModuleSpecifierText(promotedDeclaration.parent.parent)]
                : [Diagnostics.Remove_type_from_import_declaration_from_0, getModuleSpecifierText(promotedDeclaration)];
        }
        default:
            return Debug.assertNever(fix, `Unexpected fix kind ${(fix as ImportFix).kind}`);
    }
}
function getModuleSpecifierText(promotedDeclaration: ImportClause | ImportEqualsDeclaration): string {
return promotedDeclaration.kind === SyntaxKind.ImportEqualsDeclaration
? tryCast(tryCast(promotedDeclaration.moduleReference, isExternalModuleReference)?.expression, isStringLiteralLike)?.text || promotedDeclaration.moduleReference.getText()
: cast(promotedDeclaration.parent.moduleSpecifier, isStringLiteral).text;
}
/**
 * Edits `aliasDeclaration` so the imported name is usable as a value: removes the
 * relevant `type` keyword and, under `verbatimModuleSyntax`, keeps sibling
 * specifiers type-only. Returns the declaration whose text was promoted.
 */
function promoteFromTypeOnly(
    changes: textChanges.ChangeTracker,
    aliasDeclaration: TypeOnlyAliasDeclaration,
    program: Program,
    sourceFile: SourceFile,
    preferences: UserPreferences,
) {
    const compilerOptions = program.getCompilerOptions();
    // See comment in `doAddExistingFix` on constant with the same name.
    const convertExistingToTypeOnly = compilerOptions.verbatimModuleSyntax;
    switch (aliasDeclaration.kind) {
        case SyntaxKind.ImportSpecifier:
            if (aliasDeclaration.isTypeOnly) {
                // Specifier-level `type`: removing it may also require re-sorting the specifier.
                if (aliasDeclaration.parent.elements.length > 1) {
                    const newSpecifier = factory.updateImportSpecifier(aliasDeclaration, /*isTypeOnly*/ false, aliasDeclaration.propertyName, aliasDeclaration.name);
                    const { specifierComparer } = OrganizeImports.getNamedImportSpecifierComparerWithDetection(aliasDeclaration.parent.parent.parent, preferences, sourceFile);
                    const insertionIndex = OrganizeImports.getImportSpecifierInsertionIndex(aliasDeclaration.parent.elements, newSpecifier, specifierComparer);
                    // If the promoted specifier would sort elsewhere, move it there.
                    if (insertionIndex !== aliasDeclaration.parent.elements.indexOf(aliasDeclaration)) {
                        changes.delete(sourceFile, aliasDeclaration);
                        changes.insertImportSpecifierAtIndex(sourceFile, newSpecifier, aliasDeclaration.parent, insertionIndex);
                        return aliasDeclaration;
                    }
                }
                // In place: delete from the specifier's first token up to its name
                // (removes the leading `type` keyword).
                changes.deleteRange(sourceFile, { pos: getTokenPosOfNode(aliasDeclaration.getFirstToken()!), end: getTokenPosOfNode(aliasDeclaration.propertyName ?? aliasDeclaration.name) });
                return aliasDeclaration;
            }
            else {
                // The `type` keyword is on the containing clause, not the specifier.
                Debug.assert(aliasDeclaration.parent.parent.isTypeOnly);
                promoteImportClause(aliasDeclaration.parent.parent);
                return aliasDeclaration.parent.parent;
            }
        case SyntaxKind.ImportClause:
            promoteImportClause(aliasDeclaration);
            return aliasDeclaration;
        case SyntaxKind.NamespaceImport:
            promoteImportClause(aliasDeclaration.parent);
            return aliasDeclaration.parent;
        case SyntaxKind.ImportEqualsDeclaration:
            // Delete the `type` keyword (second child) of `import type x = require(...)`.
            changes.deleteRange(sourceFile, aliasDeclaration.getChildAt(1));
            return aliasDeclaration;
        default:
            Debug.failBadSyntaxKind(aliasDeclaration);
    }
    // Removes the clause-level `type` keyword, fixes a `.ts` specifier extension
    // if needed, and optionally marks sibling specifiers as type-only.
    function promoteImportClause(importClause: ImportClause) {
        changes.delete(sourceFile, getTypeKeywordOfTypeOnlyImport(importClause, sourceFile));
        // Change .ts extension to .js if necessary
        if (!compilerOptions.allowImportingTsExtensions) {
            const moduleSpecifier = tryGetModuleSpecifierFromDeclaration(importClause.parent);
            const resolvedModule = moduleSpecifier && program.getResolvedModuleFromModuleSpecifier(moduleSpecifier, sourceFile)?.resolvedModule;
            if (resolvedModule?.resolvedUsingTsExtension) {
                const changedExtension = changeAnyExtension(moduleSpecifier!.text, getOutputExtension(moduleSpecifier!.text, compilerOptions));
                changes.replaceNode(sourceFile, moduleSpecifier!, factory.createStringLiteral(changedExtension));
            }
        }
        if (convertExistingToTypeOnly) {
            const namedImports = tryCast(importClause.namedBindings, isNamedImports);
            if (namedImports && namedImports.elements.length > 1) {
                const sortState = OrganizeImports.getNamedImportSpecifierComparerWithDetection(importClause.parent, preferences, sourceFile);
                if (
                    (sortState.isSorted !== false) &&
                    aliasDeclaration.kind === SyntaxKind.ImportSpecifier &&
                    namedImports.elements.indexOf(aliasDeclaration) !== 0
                ) {
                    // The import specifier being promoted will be the only non-type-only,
                    // import in the NamedImports, so it should be moved to the front.
                    changes.delete(sourceFile, aliasDeclaration);
                    changes.insertImportSpecifierAtIndex(sourceFile, aliasDeclaration, namedImports, 0);
                }
                // Keep every other specifier type-only now that the clause no longer is.
                for (const element of namedImports.elements) {
                    if (element !== aliasDeclaration && !element.isTypeOnly) {
                        changes.insertModifierBefore(sourceFile, SyntaxKind.TypeKeyword, element);
                    }
                }
            }
        }
    }
}
/**
 * Adds `defaultImport` and `namedImports` to an existing import clause or to a
 * require'd object binding pattern, optionally removing the specifiers listed in
 * `removeExistingImportSpecifiers`.
 */
function doAddExistingFix(
    changes: textChanges.ChangeTracker,
    sourceFile: SourceFile,
    clause: ImportClause | ObjectBindingPattern,
    defaultImport: Import | undefined,
    namedImports: readonly Import[],
    removeExistingImportSpecifiers: Set<ImportSpecifier | BindingElement> | undefined,
    preferences: UserPreferences,
): void {
    if (clause.kind === SyntaxKind.ObjectBindingPattern) {
        // `const { ... } = require(...)` case: add binding elements rather than import specifiers.
        if (removeExistingImportSpecifiers && clause.elements.some(e => removeExistingImportSpecifiers.has(e))) {
            // If we're both adding and removing elements, just replace and reprint the whole
            // node. The change tracker doesn't understand all the operations and can insert or
            // leave behind stray commas.
            changes.replaceNode(
                sourceFile,
                clause,
                factory.createObjectBindingPattern([
                    ...clause.elements.filter(e => !removeExistingImportSpecifiers.has(e)),
                    ...defaultImport ? [factory.createBindingElement(/*dotDotDotToken*/ undefined, /*propertyName*/ "default", defaultImport.name)] : emptyArray,
                    ...namedImports.map(i => factory.createBindingElement(/*dotDotDotToken*/ undefined, i.propertyName, i.name)),
                ]),
            );
            return;
        }
        if (defaultImport) {
            // The module's default export is available as the `default` property.
            addElementToBindingPattern(clause, defaultImport.name, "default");
        }
        for (const specifier of namedImports) {
            addElementToBindingPattern(clause, specifier.name, specifier.propertyName);
        }
        return;
    }
    // promoteFromTypeOnly = true if we need to promote the entire original clause from type only
    const promoteFromTypeOnly = clause.isTypeOnly && some([defaultImport, ...namedImports], i => i?.addAsTypeOnly === AddAsTypeOnly.NotAllowed);
    const existingSpecifiers = clause.namedBindings && tryCast(clause.namedBindings, isNamedImports)?.elements;
    if (defaultImport) {
        Debug.assert(!clause.name, "Cannot add a default import to an import clause that already has one");
        changes.insertNodeAt(sourceFile, clause.getStart(sourceFile), factory.createIdentifier(defaultImport.name), { suffix: ", " });
    }
    if (namedImports.length) {
        const { specifierComparer, isSorted } = OrganizeImports.getNamedImportSpecifierComparerWithDetection(clause.parent, preferences, sourceFile);
        const newSpecifiers = toSorted(
            namedImports.map(namedImport =>
                factory.createImportSpecifier(
                    // Specifier-level `type` is omitted when the whole clause stays type-only.
                    (!clause.isTypeOnly || promoteFromTypeOnly) && shouldUseTypeOnly(namedImport, preferences),
                    namedImport.propertyName === undefined ? undefined : factory.createIdentifier(namedImport.propertyName),
                    factory.createIdentifier(namedImport.name),
                )
            ),
            specifierComparer,
        );
        if (removeExistingImportSpecifiers) {
            // If we're both adding and removing specifiers, just replace and reprint the whole
            // node. The change tracker doesn't understand all the operations and can insert or
            // leave behind stray commas.
            changes.replaceNode(
                sourceFile,
                clause.namedBindings!,
                factory.updateNamedImports(
                    clause.namedBindings as NamedImports,
                    toSorted([...existingSpecifiers!.filter(s => !removeExistingImportSpecifiers.has(s)), ...newSpecifiers], specifierComparer),
                ),
            );
        }
        // The sorting preference computed earlier may or may not have validated that these particular
        // import specifiers are sorted. If they aren't, `getImportSpecifierInsertionIndex` will return
        // nonsense. So if there are existing specifiers, even if we know the sorting preference, we
        // need to ensure that the existing specifiers are sorted according to the preference in order
        // to do a sorted insertion.
        else if (existingSpecifiers?.length && isSorted !== false) {
            // if we're promoting the clause from type-only, we need to transform the existing imports before attempting to insert the new named imports
            const transformedExistingSpecifiers = (promoteFromTypeOnly && existingSpecifiers) ? factory.updateNamedImports(
                clause.namedBindings as NamedImports,
                sameMap(existingSpecifiers, e => factory.updateImportSpecifier(e, /*isTypeOnly*/ true, e.propertyName, e.name)),
            ).elements : existingSpecifiers;
            for (const spec of newSpecifiers) {
                const insertionIndex = OrganizeImports.getImportSpecifierInsertionIndex(transformedExistingSpecifiers, spec, specifierComparer);
                changes.insertImportSpecifierAtIndex(sourceFile, spec, clause.namedBindings as NamedImports, insertionIndex);
            }
        }
        else if (existingSpecifiers?.length) {
            // Existing specifiers are known-unsorted; just append at the end.
            for (const spec of newSpecifiers) {
                changes.insertNodeInListAfter(sourceFile, last(existingSpecifiers), spec, existingSpecifiers);
            }
        }
        else {
            if (newSpecifiers.length) {
                const namedImports = factory.createNamedImports(newSpecifiers);
                if (clause.namedBindings) {
                    // NOTE(review): namedBindings here has no usable named specifiers
                    // (e.g. a namespace import) and is replaced wholesale — confirm.
                    changes.replaceNode(sourceFile, clause.namedBindings, namedImports);
                }
                else {
                    changes.insertNodeAfter(sourceFile, Debug.checkDefined(clause.name, "Import clause must have either named imports or a default import"), namedImports);
                }
            }
        }
    }
    if (promoteFromTypeOnly) {
        // Remove the clause-level `type` keyword.
        changes.delete(sourceFile, getTypeKeywordOfTypeOnlyImport(clause, sourceFile));
        if (existingSpecifiers) {
            // We used to convert existing specifiers to type-only only if compiler options indicated that
            // would be meaningful (see the `importNameElisionDisabled` utility function), but user
            // feedback indicated a preference for preserving the type-onlyness of existing specifiers
            // regardless of whether it would make a difference in emit.
            for (const specifier of existingSpecifiers) {
                changes.insertModifierBefore(sourceFile, SyntaxKind.TypeKeyword, specifier);
            }
        }
    }
    // Appends `element` to the binding pattern, or replaces an empty pattern outright.
    function addElementToBindingPattern(bindingPattern: ObjectBindingPattern, name: string, propertyName: string | undefined) {
        const element = factory.createBindingElement(/*dotDotDotToken*/ undefined, propertyName, name);
        if (bindingPattern.elements.length) {
            changes.insertNodeInListAfter(sourceFile, last(bindingPattern.elements), element);
        }
        else {
            changes.replaceNode(sourceFile, bindingPattern, factory.createObjectBindingPattern([element]));
        }
    }
}
/** Inserts `namespacePrefix.` immediately before the usage position. */
function addNamespaceQualifier(changes: textChanges.ChangeTracker, sourceFile: SourceFile, qualification: Qualification): void {
    const { namespacePrefix, usagePosition } = qualification;
    changes.insertText(sourceFile, usagePosition, `${namespacePrefix}.`);
}
/** Inserts an `import("mod").` prefix at the JSDoc type usage position. */
function addImportType(changes: textChanges.ChangeTracker, sourceFile: SourceFile, fix: FixAddJsdocTypeImport, quotePreference: QuotePreference): void {
    const prefix = getImportTypePrefix(fix.moduleSpecifier, quotePreference);
    changes.insertText(sourceFile, fix.usagePosition, prefix);
}
/** Builds the `import("mod").` prefix used in JSDoc import types. */
function getImportTypePrefix(moduleSpecifier: string, quotePreference: QuotePreference): string {
    const quote = getQuoteFromPreference(quotePreference);
    return "import(" + quote + moduleSpecifier + quote + ").";
}
/** A single named binding to add to an import: `name`, or `propertyName as name`. */
interface Import {
    readonly name: string;
    readonly addAsTypeOnly: AddAsTypeOnly;
    readonly propertyName?: string; // Use when needing to generate an `ImportSpecifier` with a `propertyName`; the name preceding the "as" keyword (undefined when "as" is absent)
}
/** Aggregated imports to add for a single module specifier. */
interface ImportsCollection {
    readonly defaultImport?: Import;
    /** Imported name -> [addAsTypeOnly, optional propertyName preceding "as"]. */
    readonly namedImports?: Map<string, [AddAsTypeOnly, /*propertyName*/ string?]>;
    /** Either `import * as name` or `import name = require(...)`. */
    readonly namespaceLikeImport?: {
        readonly importKind: ImportKind.CommonJS | ImportKind.Namespace;
        readonly name: string;
        readonly addAsTypeOnly: AddAsTypeOnly;
    };
}
/** True when the binding must be imported with a `type` modifier. */
function needsTypeOnly(info: { addAsTypeOnly: AddAsTypeOnly; }): boolean {
    return info.addAsTypeOnly === AddAsTypeOnly.Required;
}
/** True when the binding must, or per user preference should, be imported type-only. */
function shouldUseTypeOnly(info: { addAsTypeOnly: AddAsTypeOnly; }, preferences: UserPreferences): boolean {
    if (needsTypeOnly(info)) {
        return true;
    }
    return !!preferences.preferTypeOnlyAutoImports && info.addAsTypeOnly !== AddAsTypeOnly.NotAllowed;
}
/**
 * Builds new ES import statement(s) for `moduleSpecifier`: an ImportDeclaration
 * for default/named imports and, if requested, a namespace-like import
 * (namespace import or `import =`).
 */
function getNewImports(
    moduleSpecifier: string,
    quotePreference: QuotePreference,
    defaultImport: Import | undefined,
    namedImports: readonly Import[] | undefined,
    namespaceLikeImport: Import & { importKind: ImportKind.CommonJS | ImportKind.Namespace; } | undefined,
    compilerOptions: CompilerOptions,
    preferences: UserPreferences,
): AnyImportSyntax | readonly AnyImportSyntax[] {
    const quotedModuleSpecifier = makeStringLiteral(moduleSpecifier, quotePreference);
    let statements: AnyImportSyntax | readonly AnyImportSyntax[] | undefined;
    if (defaultImport !== undefined || namedImports?.length) {
        // `verbatimModuleSyntax` should prefer top-level `import type` -
        // even though it's not an error, it would add unnecessary runtime emit.
        // Clause-level `type` applies when every binding needs it, or when verbatim
        // syntax / user preference requests it and no binding forbids it.
        const topLevelTypeOnly = (!defaultImport || needsTypeOnly(defaultImport)) && every(namedImports, needsTypeOnly) ||
            (compilerOptions.verbatimModuleSyntax || preferences.preferTypeOnlyAutoImports) &&
                defaultImport?.addAsTypeOnly !== AddAsTypeOnly.NotAllowed &&
                !some(namedImports, i => i.addAsTypeOnly === AddAsTypeOnly.NotAllowed);
        statements = combine(
            statements,
            makeImport(
                defaultImport && factory.createIdentifier(defaultImport.name),
                namedImports?.map(namedImport =>
                    factory.createImportSpecifier(
                        // Specifier-level `type` would be redundant under a type-only clause.
                        !topLevelTypeOnly && shouldUseTypeOnly(namedImport, preferences),
                        namedImport.propertyName === undefined ? undefined : factory.createIdentifier(namedImport.propertyName),
                        factory.createIdentifier(namedImport.name),
                    )
                ),
                moduleSpecifier,
                quotePreference,
                topLevelTypeOnly,
            ),
        );
    }
    if (namespaceLikeImport) {
        // CommonJS target: `import name = require("mod")`; otherwise `import * as name from "mod"`.
        const declaration = namespaceLikeImport.importKind === ImportKind.CommonJS
            ? factory.createImportEqualsDeclaration(
                /*modifiers*/ undefined,
                shouldUseTypeOnly(namespaceLikeImport, preferences),
                factory.createIdentifier(namespaceLikeImport.name),
                factory.createExternalModuleReference(quotedModuleSpecifier),
            )
            : factory.createImportDeclaration(
                /*modifiers*/ undefined,
                factory.createImportClause(
                    shouldUseTypeOnly(namespaceLikeImport, preferences) ? SyntaxKind.TypeKeyword : undefined,
                    /*name*/ undefined,
                    factory.createNamespaceImport(factory.createIdentifier(namespaceLikeImport.name)),
                ),
                quotedModuleSpecifier,
                /*attributes*/ undefined,
            );
        statements = combine(statements, declaration);
    }
    return Debug.checkDefined(statements);
}
/**
 * Builds new `const ... = require("mod")` statement(s): a destructured require
 * for default/named bindings and a plain one for a namespace-like binding.
 */
function getNewRequires(moduleSpecifier: string, quotePreference: QuotePreference, defaultImport: Import | undefined, namedImports: readonly Import[] | undefined, namespaceLikeImport: Import | undefined): RequireVariableStatement | readonly RequireVariableStatement[] {
    const quotedModuleSpecifier = makeStringLiteral(moduleSpecifier, quotePreference);
    let statements: RequireVariableStatement | readonly RequireVariableStatement[] | undefined;
    // const { default: foo, bar, etc } = require('./mod');
    if (defaultImport || namedImports?.length) {
        const bindingElements = namedImports?.map(({ name, propertyName }) => factory.createBindingElement(/*dotDotDotToken*/ undefined, propertyName, name)) || [];
        if (defaultImport) {
            // The default export surfaces as the `default` property of the required module.
            bindingElements.unshift(factory.createBindingElement(/*dotDotDotToken*/ undefined, "default", defaultImport.name));
        }
        const declaration = createConstEqualsRequireDeclaration(factory.createObjectBindingPattern(bindingElements), quotedModuleSpecifier);
        statements = combine(statements, declaration);
    }
    // const foo = require('./mod');
    if (namespaceLikeImport) {
        const declaration = createConstEqualsRequireDeclaration(namespaceLikeImport.name, quotedModuleSpecifier);
        statements = combine(statements, declaration);
    }
    return Debug.checkDefined(statements);
}
/** Builds `const <name> = require(<quotedModuleSpecifier>);`. */
function createConstEqualsRequireDeclaration(name: string | ObjectBindingPattern, quotedModuleSpecifier: StringLiteral): RequireVariableStatement {
    const bindingName = typeof name === "string" ? factory.createIdentifier(name) : name;
    const requireCall = factory.createCallExpression(factory.createIdentifier("require"), /*typeArguments*/ undefined, [quotedModuleSpecifier]);
    const declaration = factory.createVariableDeclaration(bindingName, /*exclamationToken*/ undefined, /*type*/ undefined, requireCall);
    return factory.createVariableStatement(
        /*modifiers*/ undefined,
        factory.createVariableDeclarationList([declaration], NodeFlags.Const),
    ) as RequireVariableStatement;
}
/** Whether the symbol's flags satisfy the semantic meaning required at the use site. */
function symbolFlagsHaveMeaning(flags: SymbolFlags, meaning: SemanticMeaning): boolean {
    if (meaning === SemanticMeaning.All) {
        return true;
    }
    if (meaning & SemanticMeaning.Value) {
        return !!(flags & SymbolFlags.Value);
    }
    if (meaning & SemanticMeaning.Type) {
        return !!(flags & SymbolFlags.Type);
    }
    if (meaning & SemanticMeaning.Namespace) {
        return !!(flags & SymbolFlags.Namespace);
    }
    return false;
}
/** Implied node format for emit; falls back to the worker for not-yet-created files. */
function getImpliedNodeFormatForEmit(file: SourceFile | FutureSourceFile, program: Program) {
    if (isFullSourceFile(file)) {
        return program.getImpliedNodeFormatForEmit(file);
    }
    return getImpliedNodeFormatForEmitWorker(file, program.getCompilerOptions());
}
function getEmitModuleFormatOfFile(file: SourceFile | FutureSourceFile, program: Program) {
return isFullSourceFile(file) ? program.getEmitModuleFormatOfFile(file) : getEmitModuleFormatOfFileWorker(file, program.getCompilerOptions());
} | typescript | github | https://github.com/microsoft/TypeScript | src/services/codefixes/importFixes.ts |
from ..message_server import Message, ClosingMessage
from ..interaction import instantiate
from ..network_error import NetworkError
class HelloMessage(Message):
    """Handshake message announcing the sender's self-assigned name."""

    def __init__(self, name=""):
        Message.__init__(self)
        self.name = name

    def __setstate__(self, d):
        # Restore only the keys actually present in the serialized state.
        if "name" in d:
            self.name = d["name"]
class LogMessage(Message):
    """
    Error to be saved to log.
    """

    def __init__(self, event=None):
        Message.__init__(self)
        # The error/event to record; may arrive as a dict when deserialized.
        self.event = event

    def __setstate__(self, d):
        if "event" in d:
            self.event = d["event"]
            # A dict here is a serialized error; rebuild it and replay its state.
            # NOTE(review): assumes every dict-shaped event is a NetworkError — confirm.
            if isinstance(self.event, dict):
                self.event = NetworkError()
                self.event.__setstate__(d["event"])
class ReportQuery(Message):
    """Asks the peer for its log files rendered in the format ``fmt``."""

    def __init__(self, fmt="pure"):
        Message.__init__(self)
        self.fmt = fmt

    def __setstate__(self, d):
        # Restore only the keys actually present in the serialized state.
        if "fmt" in d:
            self.fmt = d["fmt"]
class ReportReply(Message):
    """Carries log files back to the requester, encoded in format ``fmt``."""

    def __init__(self, report=None, fmt="pure"):
        Message.__init__(self)
        self.report = report
        self.fmt = fmt

    def __setstate__(self, d):
        # Restore only the keys actually present in the serialized state.
        if "report" in d:
            self.report = d["report"]
        if "fmt" in d:
            self.fmt = d["fmt"]
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance import context
from glance.tests.unit import utils as unit_utils
from glance.tests import utils
def _fake_image(owner, is_public):
return {
'id': None,
'owner': owner,
'is_public': is_public,
}
def _fake_membership(can_share=False):
return {'can_share': can_share}
class TestContext(utils.BaseTestCase):
    """Exercises image-visibility rules via RequestContext and the fake DB's
    is_image_visible() check, plus a few RequestContext attribute tests.
    """

    def setUp(self):
        super(TestContext, self).setUp()
        # Fake DB API; only is_image_visible() is exercised here.
        self.db_api = unit_utils.FakeDB()

    def do_visible(self, exp_res, img_owner, img_public, **kwargs):
        """
        Perform a context visibility test. Creates a (fake) image
        with the specified owner and is_public attributes, then
        creates a context with the given keyword arguments and expects
        exp_res as the result of an is_image_visible() call on the
        context.
        """
        img = _fake_image(img_owner, img_public)
        ctx = context.RequestContext(**kwargs)
        self.assertEqual(exp_res, self.db_api.is_image_visible(ctx, img))

    def test_empty_public(self):
        """
        Tests that an empty context (with is_admin set to True) can
        access an image with is_public set to True.
        """
        self.do_visible(True, None, True, is_admin=True)

    def test_empty_public_owned(self):
        """
        Tests that an empty context (with is_admin set to True) can
        access an owned image with is_public set to True.
        """
        self.do_visible(True, 'pattieblack', True, is_admin=True)

    def test_empty_private(self):
        """
        Tests that an empty context (with is_admin set to True) can
        access an image with is_public set to False.
        """
        self.do_visible(True, None, False, is_admin=True)

    def test_empty_private_owned(self):
        """
        Tests that an empty context (with is_admin set to True) can
        access an owned image with is_public set to False.
        """
        self.do_visible(True, 'pattieblack', False, is_admin=True)

    def test_anon_public(self):
        """
        Tests that an anonymous context (with is_admin set to False)
        can access an image with is_public set to True.
        """
        self.do_visible(True, None, True)

    def test_anon_public_owned(self):
        """
        Tests that an anonymous context (with is_admin set to False)
        can access an owned image with is_public set to True.
        """
        self.do_visible(True, 'pattieblack', True)

    def test_anon_private(self):
        """
        Tests that an anonymous context (with is_admin set to False)
        can access an unowned image with is_public set to False.
        """
        self.do_visible(True, None, False)

    def test_anon_private_owned(self):
        """
        Tests that an anonymous context (with is_admin set to False)
        cannot access an owned image with is_public set to False.
        """
        self.do_visible(False, 'pattieblack', False)

    def test_auth_public(self):
        """
        Tests that an authenticated context (with is_admin set to
        False) can access an image with is_public set to True.
        """
        self.do_visible(True, None, True, tenant='froggy')

    def test_auth_public_unowned(self):
        """
        Tests that an authenticated context (with is_admin set to
        False) can access an image (which it does not own) with
        is_public set to True.
        """
        self.do_visible(True, 'pattieblack', True, tenant='froggy')

    def test_auth_public_owned(self):
        """
        Tests that an authenticated context (with is_admin set to
        False) can access an image (which it does own) with is_public
        set to True.
        """
        self.do_visible(True, 'pattieblack', True, tenant='pattieblack')

    def test_auth_private(self):
        """
        Tests that an authenticated context (with is_admin set to
        False) can access an image with is_public set to False.
        """
        self.do_visible(True, None, False, tenant='froggy')

    def test_auth_private_unowned(self):
        """
        Tests that an authenticated context (with is_admin set to
        False) cannot access an image (which it does not own) with
        is_public set to False.
        """
        self.do_visible(False, 'pattieblack', False, tenant='froggy')

    def test_auth_private_owned(self):
        """
        Tests that an authenticated context (with is_admin set to
        False) can access an image (which it does own) with is_public
        set to False.
        """
        self.do_visible(True, 'pattieblack', False, tenant='pattieblack')

    def test_request_id(self):
        # Each new context must get its own unique request id.
        contexts = [context.RequestContext().request_id for _ in range(5)]
        # Check for uniqueness -- set() will normalize its argument
        self.assertEqual(5, len(set(contexts)))

    def test_service_catalog(self):
        # The service catalog passed in must be stored verbatim.
        ctx = context.RequestContext(service_catalog=['foo'])
        self.assertEqual(['foo'], ctx.service_catalog)

    def test_user_identity(self):
        # to_dict() must flatten identity fields into a space-separated string.
        ctx = context.RequestContext(user="user",
                                     tenant="tenant",
                                     domain="domain",
                                     user_domain="user-domain",
                                     project_domain="project-domain")
        self.assertEqual('user tenant domain user-domain project-domain',
                         ctx.to_dict()["user_identity"])
import type { NextApiRequest, NextApiResponse } from "next";
const regenerate = async (req: NextApiRequest, res: NextApiResponse) => {
if (req.query.secret !== process.env.NEXT_PRIVATE_REGENERATION_SECRET) {
console.error("Error regenerating: Invalid Token.");
return res
.status(401)
.json({ message: "Error regenerating: Invalid token" });
}
if (typeof req.query.path === "string") {
try {
const url = new URL(req.query.path, process.env.NEXT_PUBLIC_APP_URL);
const { pathname: pathToRegenerate } = url;
await res.revalidate(pathToRegenerate);
return res.json({ regenerated: true });
} catch (err) {
console.error("Error regenerating: Cannot parse url.");
return res.status(500).send("Error regenerating: Cannot parse url.");
}
}
return res.status(400).send("No path to regenerate");
};
export default regenerate; | typescript | github | https://github.com/vercel/next.js | examples/cms-payload/pages/api/regenerate.ts |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import redirect, render_to_response
from django.views.decorators.http import require_http_methods
from collector.forms import CollectorForm
from collector.models import Blob
from collector.utils.email import send_email
from collector.utils.http import JSONResponse201
@require_http_methods(['POST'])
def create(request):
    """Validate the submitted collector form, persist a Blob for the
    supplied e-mail address, notify the owner, and answer 201 with the
    Blob's JSON representation (400 on an invalid form)."""
    form = CollectorForm(request.POST)
    if not form.is_valid():
        return HttpResponse(status=400)

    blob = Blob()
    blob.email = form.cleaned_data['collectorEmail']
    blob.save()

    send_email(request, blob)
    return JSONResponse201(blob.to_json())
@require_http_methods(['GET'])
def delete(request, uid):
    """Delete the Blob identified by ``uid``.

    Redirects to the ``deleted`` confirmation view on success, or to
    the ``blob404`` view when no Blob with that uid exists.
    """
    try:
        blob = Blob.objects.get(uid=uid)
    except Blob.DoesNotExist:
        return redirect(blob404)
    blob.delete()
    return redirect(deleted)
@require_http_methods(['GET'])
def blob404(request):
    """Render the "blob not found" page.

    Uses ``settings.COLLECTOR_BLOB404_TEMPLATE`` when configured,
    otherwise the bundled default template.  ``getattr`` with a default
    replaces the previous bare ``except:``, which silently swallowed
    every exception (including typos inside the try block).
    """
    template_name = getattr(settings, 'COLLECTOR_BLOB404_TEMPLATE',
                            'collector/blob404.html')
    return render_to_response(template_name)
@require_http_methods(['GET'])
def deleted(request):
    """Render the "blob deleted" confirmation page.

    Uses ``settings.COLLECTOR_DELETED_TEMPLATE`` when configured,
    otherwise the bundled default template.  ``getattr`` with a default
    replaces the previous bare ``except:``, which silently swallowed
    every exception.
    """
    template_name = getattr(settings, 'COLLECTOR_DELETED_TEMPLATE',
                            'collector/deleted.html')
    return render_to_response(template_name)
# Local Variables:
# indent-tabs-mode: nil
# End:
# vim: ai et sw=4 ts=4 | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
"""
Synopsis:
Generate Django model and form definitions.
Write to forms.py and models.py.
Usage:
python gen_model.py [options]
Options:
-f, --force
Overwrite models.py and forms.py without asking.
-h, --help
Show this help message.
"""
import sys
import os
import getopt
import importlib
#import nexmllib as supermod
#
# Globals
#
supermod = None
#
# Classes
#
class ProgramOptions(object):
    """Bag of command-line options; currently only the ``force`` flag,
    exposed through a property so both ``options.force`` and the
    explicit accessors work."""

    def get_force_(self):
        # Accessor backing the ``force`` property.
        return self.force_

    def set_force_(self, force):
        # Mutator backing the ``force`` property.
        self.force_ = force

    force = property(get_force_, set_force_)
class Writer(object):
    """File writer that counts the newlines it emits and can mirror
    everything to stdout."""

    def __init__(self, outfilename, stdout_also=False):
        self.outfilename = outfilename
        self.outfile = open(outfilename, 'w')
        self.stdout_also = stdout_also
        self.line_count = 0

    def get_count(self):
        """Return how many newline characters have been written."""
        return self.line_count

    def write(self, content):
        """Write ``content`` to the file (and stdout when enabled),
        updating the newline tally."""
        self.outfile.write(content)
        if self.stdout_also:
            sys.stdout.write(content)
        self.line_count += content.count('\n')

    def close(self):
        """Close the underlying output file."""
        self.outfile.close()
#
# Functions
#
def generate_model(options, module_name):
    """Import ``module_name`` (a generateDS-produced superclass module)
    and write Django ``models.py``, ``forms.py`` and ``admin.py`` for
    every class named in its ``__all__``.

    Refuses to overwrite existing output files unless ``options.force``
    is set.  NOTE: this is Python 2 code (print statements below).
    """
    global supermod
    supermod = importlib.import_module(module_name)
    models_file_name = 'models.py'
    forms_file_name = 'forms.py'
    admin_file_name = 'admin.py'
    # Bail out rather than clobber existing generated files.
    if ( (os.path.exists(models_file_name) or
        os.path.exists(forms_file_name) or
        os.path.exists(admin_file_name)
        )
        and not options.force):
        sys.stderr.write('\nmodels.py or forms.py or admin.py exists. Use -f/--force to overwrite.\n\n')
        sys.exit(1)
    globals_dict = globals()  # NOTE(review): unused — left for compatibility.
    models_writer = Writer(models_file_name)
    forms_writer = Writer(forms_file_name)
    admin_writer = Writer(admin_file_name)
    wrtmodels = models_writer.write
    wrtforms = forms_writer.write
    wrtadmin = admin_writer.write
    wrtmodels('from django.db import models\n\n')
    wrtforms('from django import forms\n\n')
    # Each generated class is expected to provide generate_model_()
    # (a generateDS convention); warn on stderr if it is missing.
    for class_name in supermod.__all__:
        if hasattr(supermod, class_name):
            cls = getattr(supermod, class_name)
            cls.generate_model_(wrtmodels, wrtforms)
        else:
            sys.stderr.write('class %s not defined\n' % (class_name, ))
    # admin.py: one continuation-line import, then one register() per class.
    wrtadmin('from django.contrib import admin\n')
    wrtadmin('from models import \\\n')
    first_time = True
    for class_name in supermod.__all__:
        if first_time:
            wrtadmin('    %s_model' % (class_name, ))
            first_time = False
        else:
            wrtadmin(', \\\n    %s_model' % (class_name, ))
    wrtadmin('\n\n')
    for class_name in supermod.__all__:
        wrtadmin('admin.site.register(%s_model)\n' % (class_name, ))
    wrtadmin('\n')
    models_writer.close()
    forms_writer.close()
    admin_writer.close()
    print 'Wrote %d lines to models.py' % (models_writer.get_count(), )
    print 'Wrote %d lines to forms.py' % (forms_writer.get_count(), )
    print 'Wrote %d lines to admin.py' % (admin_writer.get_count(), )
# The module docstring doubles as the command-line help text.
USAGE_TEXT = __doc__

def usage():
    """Print the usage text and exit with status 1."""
    print USAGE_TEXT
    sys.exit(1)
def main():
    """Parse command-line arguments and drive generate_model().

    Options: -h/--help show usage; -f/--force overwrite outputs.
    Exactly one positional argument (the module name) is required.
    """
    args = sys.argv[1:]
    try:
        opts, args = getopt.getopt(args, 'hfs:', ['help', 'force',
                                                  'suffix=', ])
    except getopt.GetoptError:
        # Catch only argument errors; the previous bare ``except:`` also
        # hid programming errors, KeyboardInterrupt and SystemExit.
        usage()
    options = ProgramOptions()
    options.force = False
    for opt, val in opts:
        if opt in ('-h', '--help'):
            usage()
        elif opt in ('-f', '--force'):
            options.force = True
    # NOTE(review): -s/--suffix is accepted but never consumed — confirm
    # whether it is vestigial.
    if len(args) != 1:
        usage()
    module_name = args[0]
    generate_model(options, module_name)
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main() | unknown | codeparrot/codeparrot-clean | ||
# sshrepo.py - ssh repository proxy class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex
from i18n import _
import repo, util, error, encoding
import re, urllib
class remotelock(object):
    """Lock token for a remote repository.

    ``release()`` (run explicitly or from the finalizer) calls
    ``unlock()`` on the wrapped repository and drops the reference so
    the unlock happens at most once.
    """

    def __init__(self, repo):
        self.repo = repo

    def release(self):
        self.repo.unlock()
        self.repo = None

    def __del__(self):
        # Only unlock if release() was never called.
        if self.repo:
            self.release()
class sshrepository(repo.repository):
    """Repository proxy that talks to a remote ``hg ... serve --stdio``
    process over an ssh pipe (Mercurial wire protocol v1).

    Commands are written to the remote's stdin (``pipeo``), replies are
    read from its stdout (``pipei``), and anything on stderr
    (``pipee``) is relayed to the local ui as "remote: " lines.
    """

    def __init__(self, ui, path, create=0):
        self._url = path
        self.ui = ui

        # ssh://[user@]host[:port][/path]
        m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
        if not m:
            self.abort(error.RepoError(_("couldn't parse location %s") % path))

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7) or "."

        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")

        args = util.sshargs(sshcmd, self.host, self.user, self.port)

        if create:
            # Run "hg init <path>" remotely before connecting.
            cmd = '%s %s "%s init %s"'
            cmd = cmd % (sshcmd, args, remotecmd, self.path)

            ui.note(_('running %s\n') % cmd)
            res = util.system(cmd)
            if res != 0:
                self.abort(error.RepoError(_("could not create remote repo")))

        self.validate_repo(ui, sshcmd, args, remotecmd)

    def url(self):
        return self._url

    def validate_repo(self, ui, sshcmd, args, remotecmd):
        """Spawn the remote serve process and handshake with it,
        collecting the remote's capability set."""
        # cleanup up previous run
        self.cleanup()

        cmd = '%s %s "%s -R %s serve --stdio"'
        cmd = cmd % (sshcmd, args, remotecmd, self.path)
        cmd = util.quotecommand(cmd)
        ui.note(_('running %s\n') % cmd)
        self.pipeo, self.pipei, self.pipee = util.popen3(cmd)

        # skip any noise generated by remote shell
        self.do_cmd("hello")
        r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
        lines = ["", "dummy"]
        max_noise = 500
        # Read until the expected "1\n\n" reply to "between" appears,
        # tolerating up to max_noise lines of login-shell chatter.
        while lines[-1] and max_noise:
            l = r.readline()
            self.readerr()
            if lines[-1] == "1\n" and l == "\n":
                break
            if l:
                ui.debug("remote: ", l)
            lines.append(l)
            max_noise -= 1
        else:
            self.abort(error.RepoError(_("no suitable response from remote hg")))

        # The "hello" reply (somewhere in the captured noise) carries a
        # "capabilities: ..." line; scan backwards for the latest one.
        self.capabilities = set()
        for l in reversed(lines):
            if l.startswith("capabilities:"):
                self.capabilities.update(l[:-1].split(":")[1].split())
                break

    def readerr(self):
        """Drain any pending remote stderr, echoing it to the local ui."""
        while 1:
            size = util.fstat(self.pipee).st_size
            if size == 0:
                break
            l = self.pipee.readline()
            if not l:
                break
            self.ui.status(_("remote: "), l)

    def abort(self, exception):
        self.cleanup()
        raise exception

    def cleanup(self):
        """Close all pipes to the remote, best-effort."""
        try:
            self.pipeo.close()
            self.pipei.close()
            # read the error descriptor until EOF
            for l in self.pipee:
                self.ui.status(_("remote: "), l)
            self.pipee.close()
        except:
            # Deliberate best-effort teardown: pipes may not exist yet.
            pass

    __del__ = cleanup

    def do_cmd(self, cmd, **args):
        """Send ``cmd`` with keyword arguments encoded as
        "<name> <len>\\n<value>"; returns the reply pipe."""
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.iteritems():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """do_cmd() plus reading one length-prefixed reply."""
        self.do_cmd(cmd, **args)
        return self._recv()

    def _recv(self):
        # Replies are length-prefixed: "<decimal length>\n<payload>".
        l = self.pipei.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            self.abort(error.ResponseError(_("unexpected response:"), l))
        return self.pipei.read(l)

    def _send(self, data, flush=False):
        # Mirror of _recv: write a length prefix, then the payload.
        self.pipeo.write("%d\n" % len(data))
        if data:
            self.pipeo.write(data)
        if flush:
            self.pipeo.flush()
        self.readerr()

    def lock(self):
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def lookup(self, key):
        """Resolve ``key`` to a binary node id on the remote."""
        self.requirecap('lookup', _('look up remote revision'))
        d = self.call("lookup", key=key)
        success, data = d[:-1].split(" ", 1)
        if int(success):
            return bin(data)
        else:
            self.abort(error.RepoError(data))

    def heads(self):
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            self.abort(error.ResponseError(_("unexpected response:"), d))

    def branchmap(self):
        d = self.call("branchmap")
        try:
            branchmap = {}
            for branchpart in d.splitlines():
                branchheads = branchpart.split(' ')
                branchname = urllib.unquote(branchheads[0])
                # Earlier servers (1.3.x) send branch names in (their) local
                # charset. The best we can do is assume it's identical to our
                # own local charset, in case it's not utf-8.
                try:
                    branchname.decode('utf-8')
                except UnicodeDecodeError:
                    branchname = encoding.fromlocal(branchname)
                branchheads = [bin(x) for x in branchheads[1:]]
                branchmap[branchname] = branchheads
            return branchmap
        except:
            raise error.ResponseError(_("unexpected response:"), d)

    def branches(self, nodes):
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [tuple(map(bin, b.split(" "))) for b in d.splitlines()]
            return br
        except:
            self.abort(error.ResponseError(_("unexpected response:"), d))

    def between(self, pairs):
        n = " ".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [l and map(bin, l.split(" ")) or [] for l in d.splitlines()]
            return p
        except:
            self.abort(error.ResponseError(_("unexpected response:"), d))

    def changegroup(self, nodes, kind):
        n = " ".join(map(hex, nodes))
        return self.do_cmd("changegroup", roots=n)

    def changegroupsubset(self, bases, heads, kind):
        self.requirecap('changegroupsubset', _('look up remote changes'))
        bases = " ".join(map(hex, bases))
        heads = " ".join(map(hex, heads))
        return self.do_cmd("changegroupsubset", bases=bases, heads=heads)

    def unbundle(self, cg, heads, source):
        """Push changegroup ``cg`` to the remote; streams it in 4k
        chunks framed with length prefixes, terminated by a zero-length
        chunk."""
        d = self.call("unbundle", heads=' '.join(map(hex, heads)))
        if d:
            # remote may send "unsynced changes"
            self.abort(error.RepoError(_("push refused: %s") % d))
        while 1:
            d = cg.read(4096)
            if not d:
                break
            self._send(d)

        self._send("", flush=True)

        r = self._recv()
        if r:
            # remote may send "unsynced changes"
            self.abort(error.RepoError(_("push failed: %s") % r))

        r = self._recv()
        try:
            return int(r)
        except:
            self.abort(error.ResponseError(_("unexpected response:"), r))

    def addchangegroup(self, cg, source, url):
        """Older (unframed) push path: stream raw chunks directly."""
        d = self.call("addchangegroup")
        if d:
            self.abort(error.RepoError(_("push refused: %s") % d))
        while 1:
            d = cg.read(4096)
            if not d:
                break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        r = self._recv()
        if not r:
            return 1
        try:
            return int(r)
        except:
            self.abort(error.ResponseError(_("unexpected response:"), r))

    def stream_out(self):
        return self.do_cmd('stream_out')

# Factory hook used by hg.repository() to instantiate ssh:// URLs.
instance = sshrepository
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "LIEF/Visitor.hpp"
#include "LIEF/PE/signature/ContentInfo.hpp"
#include "LIEF/PE/signature/GenericContent.hpp"
#include "LIEF/PE/EnumToString.hpp"
#include "LIEF/PE/signature/SpcIndirectData.hpp"
#include "Object.tcc"
#include "internal_utils.hpp"
#include <ostream>
namespace LIEF {
namespace PE {
// Default-construct with an empty GenericContent payload so value() is
// never null.
ContentInfo::ContentInfo() :
  value_(std::make_unique<GenericContent>())
{}
// Copy constructor: deep-copies the polymorphic payload via clone().
ContentInfo::ContentInfo(const ContentInfo& other) :
  Object::Object(other),
  value_{other.value_->clone()}
{}
// Copy-and-swap assignment: `other` is taken by value, so the copy
// happens at the call site and swap() provides the strong guarantee.
ContentInfo& ContentInfo::operator=(ContentInfo other) {
  swap(other);
  return *this;
}
// Exchange payloads; noexcept so it is usable from operator=.
void ContentInfo::swap(ContentInfo& other) noexcept {
  std::swap(value_, other.value_);
}
// Visitor-pattern entry point.
void ContentInfo::accept(Visitor& visitor) const {
  visitor.visit(*this);
}
// Return the digest bytes when the payload is SpcIndirectData;
// an empty vector for any other payload type.
std::vector<uint8_t> ContentInfo::digest() const {
  if (const auto* spc_ind_data = value_->cast<SpcIndirectData>()) {
    return as_vector(spc_ind_data->digest());
  }
  return {};
}
// Return the digest algorithm when the payload is SpcIndirectData;
// ALGORITHMS::UNKNOWN for any other payload type.
ALGORITHMS ContentInfo::digest_algorithm() const {
  if (const auto* spc_ind_data = value_->cast<SpcIndirectData>()) {
    return spc_ind_data->digest_algorithm();
  }
  return ALGORITHMS::UNKNOWN;
}
// Stream output delegates to the payload's virtual print().
std::ostream& operator<<(std::ostream& os, const ContentInfo& content_info) {
  content_info.value().print(os);
  return os;
}
}
} | cpp | github | https://github.com/nodejs/node | deps/LIEF/src/PE/signature/ContentInfo.cpp |
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
//! Transform Extensions provide information on content transformations in a given locale.
//!
//! The main struct for this extension is [`Transform`] which contains [`Fields`] and an
//! optional [`LanguageIdentifier`].
//!
//! [`LanguageIdentifier`]: super::super::LanguageIdentifier
//!
//! # Examples
//!
//! ```
//! use icu::locale::extensions::transform::{Fields, Key, Transform, Value};
//! use icu::locale::{LanguageIdentifier, Locale};
//!
//! let mut loc: Locale =
//! "en-US-t-es-ar-h0-hybrid".parse().expect("Parsing failed.");
//!
//! let lang: LanguageIdentifier =
//! "es-AR".parse().expect("Parsing LanguageIdentifier failed.");
//!
//! let key: Key = "h0".parse().expect("Parsing key failed.");
//! let value: Value = "hybrid".parse().expect("Parsing value failed.");
//!
//! assert_eq!(loc.extensions.transform.lang, Some(lang));
//! assert!(loc.extensions.transform.fields.contains_key(&key));
//! assert_eq!(loc.extensions.transform.fields.get(&key), Some(&value));
//!
//! assert_eq!(&loc.extensions.transform.to_string(), "t-es-ar-h0-hybrid");
//! ```
mod fields;
mod key;
mod value;
use core::cmp::Ordering;
#[cfg(feature = "alloc")]
use core::str::FromStr;
pub use fields::Fields;
#[doc(inline)]
pub use key::{key, Key};
pub use value::Value;
#[cfg(feature = "alloc")]
use super::ExtensionType;
#[cfg(feature = "alloc")]
use crate::parser::SubtagIterator;
#[cfg(feature = "alloc")]
use crate::parser::{parse_language_identifier_from_iter, ParseError, ParserMode};
#[cfg(feature = "alloc")]
use crate::shortvec::ShortBoxSlice;
use crate::subtags;
#[cfg(feature = "alloc")]
use crate::subtags::Language;
use crate::LanguageIdentifier;
#[cfg(feature = "alloc")]
use litemap::LiteMap;
/// Subtag marking the Transform extension in a locale string (`-t-`).
pub(crate) const TRANSFORM_EXT_CHAR: char = 't';
/// Same marker as [`TRANSFORM_EXT_CHAR`], as a string slice.
pub(crate) const TRANSFORM_EXT_STR: &str = "t";
/// A list of [`Unicode BCP47 T Extensions`] as defined in [`Unicode Locale
/// Identifier`] specification.
///
/// Transform extension carries information about source language or script of
/// transformed content, including content that has been transliterated, transcribed,
/// or translated, or in some other way influenced by the source (See [`RFC 6497`] for details).
///
/// An empty `Transform` (no `lang`, no `fields`) writes as the empty string
/// (see the `Writeable` impl below).
///
/// # Examples
///
/// ```
/// use icu::locale::extensions::transform::{Key, Value};
/// use icu::locale::{LanguageIdentifier, Locale};
///
/// let mut loc: Locale =
///     "de-t-en-us-h0-hybrid".parse().expect("Parsing failed.");
///
/// let en_us: LanguageIdentifier = "en-US".parse().expect("Parsing failed.");
///
/// assert_eq!(loc.extensions.transform.lang, Some(en_us));
/// let key: Key = "h0".parse().expect("Parsing key failed.");
/// let value: Value = "hybrid".parse().expect("Parsing value failed.");
/// assert_eq!(loc.extensions.transform.fields.get(&key), Some(&value));
/// ```
/// [`Unicode BCP47 T Extensions`]: https://unicode.org/reports/tr35/#t_Extension
/// [`RFC 6497`]: https://www.ietf.org/rfc/rfc6497.txt
/// [`Unicode Locale Identifier`]: https://unicode.org/reports/tr35/#Unicode_locale_identifier
#[derive(Clone, PartialEq, Eq, Debug, Default, Hash)]
#[allow(clippy::exhaustive_structs)] // spec-backed stable datastructure
pub struct Transform {
    /// The [`LanguageIdentifier`] specified with this locale extension, or `None` if not present.
    pub lang: Option<LanguageIdentifier>,
    /// The key-value pairs present in this locale extension, with each extension key subtag
    /// associated to its provided value subtag.
    pub fields: Fields,
}
impl Transform {
    /// Returns a new empty map of Transform extensions. Same as [`default()`](Default::default()), but is `const`.
    ///
    /// # Examples
    ///
    /// ```
    /// use icu::locale::extensions::transform::Transform;
    ///
    /// assert_eq!(Transform::new(), Transform::default());
    /// ```
    #[inline]
    pub const fn new() -> Self {
        Self {
            lang: None,
            fields: Fields::new(),
        }
    }

    /// A constructor which takes a str slice, parses it and
    /// produces a well-formed [`Transform`].
    #[inline]
    #[cfg(feature = "alloc")]
    pub fn try_from_str(s: &str) -> Result<Self, ParseError> {
        Self::try_from_utf8(s.as_bytes())
    }

    /// See [`Self::try_from_str`]
    ///
    /// Expects the input to start with the extension marker subtag ("t").
    #[cfg(feature = "alloc")]
    pub fn try_from_utf8(code_units: &[u8]) -> Result<Self, ParseError> {
        let mut iter = SubtagIterator::new(code_units);

        let ext = iter.next().ok_or(ParseError::InvalidExtension)?;
        if let ExtensionType::Transform = ExtensionType::try_from_byte_slice(ext)? {
            return Self::try_from_iter(&mut iter);
        }

        Err(ParseError::InvalidExtension)
    }

    /// Returns `true` if there are no tfields and no tlang in the `TransformExtensionList`.
    ///
    /// # Examples
    ///
    /// ```
    /// use icu::locale::Locale;
    ///
    /// let mut loc: Locale = "en-US-t-es-ar".parse().expect("Parsing failed.");
    ///
    /// assert!(!loc.extensions.transform.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.lang.is_none() && self.fields.is_empty()
    }

    /// Clears the transform extension, effectively removing it from the locale.
    ///
    /// # Examples
    ///
    /// ```
    /// use icu::locale::Locale;
    ///
    /// let mut loc: Locale = "en-US-t-es-ar".parse().unwrap();
    /// loc.extensions.transform.clear();
    /// assert_eq!(loc, "en-US".parse().unwrap());
    /// ```
    pub fn clear(&mut self) {
        self.lang = None;
        self.fields.clear();
    }

    // Flatten into comparable tuples; used by total_cmp below.
    #[allow(clippy::type_complexity)]
    pub(crate) fn as_tuple(
        &self,
    ) -> (
        Option<(
            subtags::Language,
            Option<subtags::Script>,
            Option<subtags::Region>,
            &subtags::Variants,
        )>,
        &Fields,
    ) {
        (self.lang.as_ref().map(|l| l.as_tuple()), &self.fields)
    }

    /// Returns an ordering suitable for use in [`BTreeSet`].
    ///
    /// The ordering may or may not be equivalent to string ordering, and it
    /// may or may not be stable across ICU4X releases.
    ///
    /// [`BTreeSet`]: alloc::collections::BTreeSet
    pub fn total_cmp(&self, other: &Self) -> Ordering {
        self.as_tuple().cmp(&other.as_tuple())
    }

    // Parses the subtags after the leading "t": an optional partial
    // language identifier followed by tkey/tvalue field runs.
    #[cfg(feature = "alloc")]
    pub(crate) fn try_from_iter(iter: &mut SubtagIterator) -> Result<Self, ParseError> {
        let mut tlang = None;
        let mut tfields = LiteMap::new();

        // A leading subtag that parses as a language starts the tlang.
        if let Some(subtag) = iter.peek() {
            if Language::try_from_utf8(subtag).is_ok() {
                tlang = Some(parse_language_identifier_from_iter(
                    iter,
                    ParserMode::Partial,
                )?);
            }
        }

        // State machine: accumulate value subtags for the current tkey
        // until a subtag that is not a valid value (e.g. the next tkey)
        // terminates the run.
        let mut current_tkey = None;
        let mut current_tvalue = ShortBoxSlice::new();
        let mut has_current_tvalue = false;

        while let Some(subtag) = iter.peek() {
            if let Some(tkey) = current_tkey {
                if let Ok(val) = Value::parse_subtag(subtag) {
                    has_current_tvalue = true;
                    if let Some(val) = val {
                        current_tvalue.push(val);
                    }
                } else {
                    // A tkey with no value subtags is malformed.
                    if !has_current_tvalue {
                        return Err(ParseError::InvalidExtension);
                    }
                    // NOTE(review): try_insert does not overwrite — a
                    // repeated tkey keeps its first value; confirm intended.
                    tfields.try_insert(tkey, Value::from_short_slice_unchecked(current_tvalue));
                    current_tkey = None;
                    current_tvalue = ShortBoxSlice::new();
                    has_current_tvalue = false;
                    continue;
                }
            } else if let Ok(tkey) = Key::try_from_utf8(subtag) {
                current_tkey = Some(tkey);
            } else {
                // Not a tkey: end of the transform extension.
                break;
            }

            iter.next();
        }

        // Flush the trailing tkey/tvalue pair, if any.
        if let Some(tkey) = current_tkey {
            if !has_current_tvalue {
                return Err(ParseError::InvalidExtension);
            }
            tfields.try_insert(tkey, Value::from_short_slice_unchecked(current_tvalue));
        }

        // "t" alone (no tlang, no tfields) is invalid per the spec.
        if tlang.is_none() && tfields.is_empty() {
            Err(ParseError::InvalidExtension)
        } else {
            Ok(Self {
                lang: tlang,
                fields: tfields.into(),
            })
        }
    }

    // Invoke `f` on each subtag (lowercased tlang first), optionally
    // preceded by the "t" marker itself.
    pub(crate) fn for_each_subtag_str<E, F>(&self, f: &mut F, with_ext: bool) -> Result<(), E>
    where
        F: FnMut(&str) -> Result<(), E>,
    {
        if self.is_empty() {
            return Ok(());
        }
        if with_ext {
            f(TRANSFORM_EXT_STR)?;
        }
        if let Some(lang) = &self.lang {
            lang.for_each_subtag_str_lowercased(f)?;
        }
        self.fields.for_each_subtag_str(f)
    }
}
// `"t-..."`.parse::<Transform>() — delegates to try_from_str.
#[cfg(feature = "alloc")]
impl FromStr for Transform {
    type Err = ParseError;

    #[inline]
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::try_from_str(s)
    }
}

// Derive Display from the Writeable impl below.
writeable::impl_display_with_writeable!(Transform);
impl writeable::Writeable for Transform {
    // Serializes as `t[-tlang][-tfields]`; an empty extension writes nothing.
    fn write_to<W: core::fmt::Write + ?Sized>(&self, sink: &mut W) -> core::fmt::Result {
        if self.is_empty() {
            return Ok(());
        }
        sink.write_char(TRANSFORM_EXT_CHAR)?;
        if let Some(lang) = &self.lang {
            sink.write_char('-')?;
            lang.write_lowercased_to(sink)?;
        }
        if !self.fields.is_empty() {
            sink.write_char('-')?;
            writeable::Writeable::write_to(&self.fields, sink)?;
        }
        Ok(())
    }

    // Mirrors write_to: 1 byte for 't', plus each part with its '-' separator.
    fn writeable_length_hint(&self) -> writeable::LengthHint {
        if self.is_empty() {
            return writeable::LengthHint::exact(0);
        }
        let mut result = writeable::LengthHint::exact(1);
        if let Some(lang) = &self.lang {
            result += writeable::Writeable::writeable_length_hint(lang) + 1;
        }
        if !self.fields.is_empty() {
            result += writeable::Writeable::writeable_length_hint(&self.fields) + 1;
        }
        result
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_transform_extension_fromstr() {
        // Round-trip a well-formed T extension through parse + Display.
        let ext: Transform = "t-en-us-h0-hybrid"
            .parse()
            .expect("Failed to parse Transform");
        assert_eq!(ext.to_string(), "t-en-us-h0-hybrid");

        // A bare "t" (no tlang, no tfields) must be rejected.
        let bare: Result<Transform, _> = "t".parse();
        assert!(bare.is_err());
    }
}
} | rust | github | https://github.com/nodejs/node | deps/crates/vendor/icu_locale_core/src/extensions/transform/mod.rs |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsException;
/**
* Experimental interface to extend metrics dynamically
*/
@InterfaceAudience.Private
public enum DefaultMetricsFactory {
INSTANCE; // the singleton
private MutableMetricsFactory mmfImpl;
public static MutableMetricsFactory getAnnotatedMetricsFactory() {
return INSTANCE.getInstance(MutableMetricsFactory.class);
}
@SuppressWarnings("unchecked")
public synchronized <T> T getInstance(Class<T> cls) {
if (cls == MutableMetricsFactory.class) {
if (mmfImpl == null) {
mmfImpl = new MutableMetricsFactory();
}
return (T) mmfImpl;
}
throw new MetricsException("Unknown metrics factory type: "+ cls.getName());
}
public synchronized void setInstance(MutableMetricsFactory factory) {
mmfImpl = factory;
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsFactory.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
/**
 * Symlink tests for the local filesystem accessed through
 * {@link FileContext} (the inherited suite runs against the wrapper
 * installed in {@link #testSetup()}).
 */
public class TestSymlinkLocalFSFileContext extends TestSymlinkLocalFS {

  @BeforeAll
  public static void testSetup() throws Exception {
    FileContext context = FileContext.getLocalFSFileContext();
    wrapper = new FileContextTestWrapper(context);
  }

  @Override
  public void testRenameFileWithDestParentSymlink() throws IOException {
    // Skipped on Windows; only run where the platform is not Windows.
    assumeNotWindows();
    super.testRenameFileWithDestParentSymlink();
  }
}
An attempted implementation of a trait method has the wrong number of function
parameters.
Erroneous code example:
```compile_fail,E0050
trait Foo {
fn foo(&self, x: u8) -> bool;
}
struct Bar;
// error: method `foo` has 1 parameter but the declaration in trait `Foo::foo`
// has 2
impl Foo for Bar {
fn foo(&self) -> bool { true }
}
```
For example, the `Foo` trait has a method `foo` with two function parameters
(`&self` and `u8`), but the implementation of `foo` for the type `Bar` omits
the `u8` parameter. To fix this error, the implementation must declare the
same parameters as the trait declaration:
```
trait Foo {
fn foo(&self, x: u8) -> bool;
}
struct Bar;
impl Foo for Bar {
fn foo(&self, x: u8) -> bool { // ok!
true
}
}
``` | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0050.md |
/*
* min.c -- a minimal Lua interpreter
* loads stdin only with minimal error handling.
* no interaction, and no standard library, only a "print" function.
*/
#include <stdio.h>
#include "lua.h"
#include "lauxlib.h"
/* Minimal replacement for Lua's standard "print": writes all arguments
 * tab-separated to stdout followed by a newline.  Strings/numbers go
 * through lua_tostring; nil and booleans print symbolically; any other
 * value prints as "typename:pointer".  Returns 0 Lua results. */
static int print(lua_State *L)
{
    int n=lua_gettop(L);
    int i;
    for (i=1; i<=n; i++)
    {
        if (i>1) printf("\t");
        if (lua_isstring(L,i))
            printf("%s",lua_tostring(L,i));
        else if (lua_isnil(L,i))
            printf("%s","nil");
        else if (lua_isboolean(L,i))
            printf("%s",lua_toboolean(L,i) ? "true" : "false");
        else
            printf("%s:%p",luaL_typename(L,i),lua_topointer(L,i));
    }
    printf("\n");
    return 0;
}
/* Create a bare Lua state (no standard libraries), register "print",
 * run the chunk read from stdin (luaL_dofile with NULL filename), and
 * report any error to stderr.  Always exits 0. */
int main(void)
{
    lua_State *L=lua_open();
    lua_register(L,"print",print);
    if (luaL_dofile(L,NULL)!=0) fprintf(stderr,"%s\n",lua_tostring(L,-1));
    lua_close(L);
    return 0;
}
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_VIEW_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_VIEW_H_
#include <memory>
#include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class Device;
class Graph;
class Node;
class OpKernel;
class Tensor;
// Represents a single data edge in a `NodeItem`.
struct EdgeInfo {
  // The node ID of the destination in the containing `GraphView`.
  int dst_id;
  // The index of the output that produces values on this edge.
  // Declared as a 31-bit field so it shares storage with `is_last`.
  int output_slot : 31;
  // true if this is the last info for output_slot in the EdgeInfo list.
  bool is_last : 1;
  // The index of the input that consumes values on this edge.
  int input_slot;
};
// Represents a single control edge in a `NodeItem`.
// Only the destination is recorded: unlike EdgeInfo there are no
// output/input slot indices.
struct ControlEdgeInfo {
  // The node ID of the destination in the containing `GraphView`.
  int dst_id;
};
// Compact structure representing a graph node and its associated kernel.
//
// Each NodeItem is an element of exactly one GraphView.
struct NodeItem {
// The index of this node's item in its GraphView.
int node_id = -1;
// Cached attributes of this node for fast lookup.
bool kernel_is_async : 1; // True iff kernel->AsAsync() != nullptr
bool is_merge : 1; // True iff IsMerge(node)
bool is_enter : 1; // True iff IsEnter(node)
bool is_constant_enter : 1; // True iff IsEnter(node) and
// node->GetAttr("is_constant") == true.
bool is_exit : 1; // True iff IsExit(node)
bool is_control_trigger : 1; // True iff IsControlTrigger(node)
bool is_source : 1; // True iff IsSource(node)
// True iff IsEnter(node) || IsExit(node) || IsNextIteration(node)
bool is_enter_exit_or_next_iter : 1;
bool is_transfer_node : 1; // True iff IsTransferNode(node)
bool is_initialization_op : 1; // True iff IsInitializationOp(node)
bool is_recv_or_switch : 1; // True iff IsRecv(node) || IsSwitch(node)
bool is_next_iteration : 1; // True iff IsNextIteration(node)
bool is_noop : 1; // True iff item->kernel->type_string_view() == "NoOp")
bool
is_any_consumer_merge_or_control_trigger : 1; // True iff the destination
// of any output edge is a
// merge or control trigger
// node.
bool is_any_input_ref_typed : 1; // True iff any IsRefType(dt) for dt in this
// node's input types.
bool is_distributed_communication : 1; // True iff the op is registered to
// use distributed communication.
// The kernel for this node.
OpKernel* kernel = nullptr;
// If the kernel is a Const op, this containts points to the constant tensor.
const Tensor* const_tensor = nullptr;
// Cached values of node->num_inputs() and node->num_outputs(), to
// avoid levels of indirection.
int num_inputs;
int num_outputs;
// ExecutorImpl::tensors_[input_start] is the 1st positional input
// for this node.
int input_start = 0;
// Number of output edges, excluding control edges.
int32_t num_output_edges;
// Number of output control edges.
int32_t num_output_control_edges;
// If non-null, contains an array of num_outputs bools, where the ith bool
// is true if and only if the ith output is consumed by another node.
std::unique_ptr<bool[]> outputs_required;
absl::Span<EdgeInfo> mutable_output_edges() {
return absl::Span<EdgeInfo>(output_edge_base(), num_output_edges);
}
absl::Span<const EdgeInfo> output_edges() const {
return absl::Span<const EdgeInfo>(output_edge_base(), num_output_edges);
}
gtl::ArraySlice<ControlEdgeInfo> output_control_edges() const {
return gtl::ArraySlice<const ControlEdgeInfo>(output_control_edge_base(),
num_output_control_edges);
}
DataType input_type(int i) const {
DCHECK_LT(i, num_inputs);
return static_cast<DataType>(input_type_base()[i]);
}
DataType output_type(int i) const {
DCHECK_LT(i, num_outputs);
return static_cast<DataType>(output_type_base()[i]);
}
// Return array of per-output allocator attributes (num_outputs entries,
// stored in the trailing variable-length section).
const AllocatorAttributes* output_attrs() const { return output_attr_base(); }
// Return array (num_outputs entries) of the expected input index from
// which each output should be forwarded:
//  kNeverForward (-2) for DO NOT FORWARD (must allocate).
//  kNoReservation (-1) for no expected forwarding.
//  0... for forward from that input.
const int* forward_from() const { return forward_from_base(); }
std::string DebugString() const;
private:
friend class GraphView;
// Private default constructor: NodeItems are only created by GraphView,
// which placement-constructs them into preallocated storage.
NodeItem() {}
// Variable length section starts immediately after *this
// (uint8 is enough for DataType).
// EdgeInfo out_edges[num_output_edges];
// ControlEdgeInfo out_control_edges[num_output_control_edges];
// AllocatorAttributes output_attr[num_outputs];
// int forward_from[num_outputs];
// uint8 input_type[num_inputs];
// uint8 output_type[num_outputs];
// Return pointer to variable length section, which starts immediately
// after the fixed-size NodeItem struct. const_cast is safe here because
// the underlying storage (GraphView::space_) is mutable.
char* var() const {
  return const_cast<char*>(reinterpret_cast<const char*>(this) +
                           sizeof(NodeItem));
}
// First segment of the variable-length section: EdgeInfo[num_output_edges].
EdgeInfo* output_edge_base() const {
  return reinterpret_cast<EdgeInfo*>(var());
}
// Second segment: ControlEdgeInfo[num_output_control_edges], placed
// right after the EdgeInfo array.
ControlEdgeInfo* output_control_edge_base() const {
  return reinterpret_cast<ControlEdgeInfo*>(var() + sizeof(EdgeInfo) *
                                                        num_output_edges);
}
// Third segment: AllocatorAttributes[num_outputs], after both edge arrays.
AllocatorAttributes* output_attr_base() const {
  return reinterpret_cast<AllocatorAttributes*>(
      var() + sizeof(EdgeInfo) * num_output_edges +
      sizeof(ControlEdgeInfo) * num_output_control_edges);
}
// Fourth segment: int forward_from[num_outputs].
int* forward_from_base() const {
  return reinterpret_cast<int*>(var() + sizeof(EdgeInfo) * num_output_edges +
                                sizeof(ControlEdgeInfo) *
                                    num_output_control_edges +
                                sizeof(AllocatorAttributes) * num_outputs);
}
// Fifth segment: uint8 input_type[num_inputs] (DataType narrowed to uint8).
uint8_t* input_type_base() const {
  return reinterpret_cast<uint8_t*>(
      var() + sizeof(EdgeInfo) * num_output_edges +
      sizeof(ControlEdgeInfo) * num_output_control_edges +
      sizeof(AllocatorAttributes) * num_outputs + sizeof(int) * num_outputs);
}
// Sixth (final) segment: uint8 output_type[num_outputs], after the
// input-type array.
uint8_t* output_type_base() const {
  return reinterpret_cast<uint8_t*>(
      var() + sizeof(EdgeInfo) * num_output_edges +
      sizeof(ControlEdgeInfo) * num_output_control_edges +
      sizeof(AllocatorAttributes) * num_outputs + sizeof(int) * num_outputs +
      sizeof(uint8_t) * num_inputs);
}
NodeItem(const NodeItem&) = delete;
void operator=(const NodeItem&) = delete;
};
// Immutable view of a Graph organized for efficient execution.
//
// NodeItems are stored contiguously in a single byte buffer (space_) and
// located via a per-node byte-offset table (node_offsets_).
//
// TODO(b/152651962): Add independent unit tests for this class.
class GraphView {
 public:
  GraphView() : space_(nullptr) {}
  ~GraphView();
  // Builds the packed NodeItem representation from `g`.
  absl::Status Initialize(const Graph* g);
  // Fills in per-output AllocatorAttributes for every node, based on `device`.
  absl::Status SetAllocAttrs(const Graph* g, const Device* device);
  void SetScopedAllocatorAttrs(const std::vector<const Node*>& sa_nodes);
  // Returns a mutable pointer to the `NodeItem` with the given `id` if it
  // exists in the graph, or `nullptr` if it does not.
  NodeItem* node(int32_t id) const {
    DCHECK_GE(id, 0);
    DCHECK_LT(id, num_nodes_);
    uint32_t offset = node_offsets_[id];
    // uint32 max is the sentinel for "no NodeItem stored for this id".
    return ((offset == std::numeric_limits<uint32_t>::max())
                ? nullptr
                : reinterpret_cast<NodeItem*>(space_ + node_offsets_[id]));
  }
  // Returns the `NodeItem` with the given `id`.
  //
  // REQUIRES: `id` must be the ID of a valid node in the graph.
  const NodeItem& node_ref(int32_t id) const {
    DCHECK_GE(id, 0);
    DCHECK_LT(id, num_nodes_);
    uint32_t offset = node_offsets_[id];
    DCHECK_NE(offset, std::numeric_limits<uint32_t>::max());
    return *reinterpret_cast<NodeItem*>(space_ + node_offsets_[id]);
  }
  int32_t num_nodes() const { return num_nodes_; }
 private:
  // Placement-initializes one NodeItem at `ptr`; returns the next free byte.
  char* InitializeNode(char* ptr, const Node* n);
  // Total bytes needed for node `n`'s NodeItem plus its variable section.
  size_t NodeItemBytes(const Node* n);
  int32_t num_nodes_ = 0;
  uint32_t* node_offsets_ = nullptr;  // array of size "num_nodes_"
  // node_offsets_[id] holds the byte offset for node w/ "id" in space_
  char* space_;  // NodeItem objects are allocated here
  GraphView(const GraphView&) = delete;
  void operator=(const GraphView&) = delete;
};
} // namespace tensorflow
#endif // TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_VIEW_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/common_runtime/graph_view.h |
# Eventually the mechanism used by this suite will be moved into read_concern_majority_passthrough,
# and the other similar existing suites that use CWRWC compatible RC.
test_kind: js_test
selector:
roots:
- jstests/core/**/*.js
- jstests/fle2/**/*.js
- src/mongo/db/modules/*/jstests/fle2/**/*.js
exclude_files:
# Transactions only use a readConcern argument on the first command.
- jstests/core/txns/**/*.js
# These tests use benchRun(), which isn't configured to use the overridden writeConcern.
- jstests/core/**/bench_test*.js
- jstests/core/**/benchrun_pipeline_updates.js # benchRun() used for writes
exclude_with_any_tags:
- assumes_standalone_mongod
##
# The next three tags correspond to the special errors thrown by the
# set_read_and_write_concerns.js override when it refuses to replace the readConcern or
# writeConcern of a particular command. Above each tag are the message(s) that cause the tag to be
# warranted.
##
# "Cowardly refusing to override read concern of command: ..."
- assumes_read_concern_unchanged
# "Cowardly refusing to override write concern of command: ..."
- assumes_write_concern_unchanged
executor:
archive:
hooks:
- CheckReplDBHash
- CheckReplOplogs
- ValidateCollections
config:
shell_options:
eval: >-
globalThis.testingReplication = true;
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
# on the secondaries, so we run the ValidateCollections hook after it to ensure we're
# validating the entire contents of the collection.
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
- class: CleanEveryN
n: 20
fixture:
class: ReplicaSetFixture
mongod_options:
set_parameters:
enableTestCommands: 1
writePeriodicNoops: 1
# This suite requires w="majority" writes to be applied on all secondaries. By using a 2-node
# replica set and having secondaries vote, the majority of the replica set is all nodes.
num_nodes: 2
default_read_concern:
level: majority
default_write_concern: true | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/cwrwc_rc_majority_passthrough.yml |
use std::collections::BTreeSet;
use anyhow::Result;
use bincode::{Decode, Encode};
use turbo_rcstr::{RcStr, rcstr};
use turbo_tasks::{ResolvedVc, TaskInput, Vc, trace::TraceRawVcs};
use turbo_tasks_fs::FileSystemPath;
use turbopack::module_options::{
CssOptionsContext, EcmascriptOptionsContext, JsxTransformOptions, ModuleRule,
TypescriptTransformOptions, module_options_context::ModuleOptionsContext,
side_effect_free_packages_glob,
};
use turbopack_browser::{
BrowserChunkingContext, ContentHashing, CurrentChunkMethod,
react_refresh::assert_can_resolve_react_refresh,
};
use turbopack_core::{
chunk::{
AssetSuffix, ChunkingConfig, ChunkingContext, MangleType, MinifyType, SourceMapSourceType,
SourceMapsType, UnusedReferences, UrlBehavior, chunk_id_strategy::ModuleIdStrategy,
},
compile_time_info::{CompileTimeDefines, CompileTimeInfo, FreeVarReference, FreeVarReferences},
environment::{BrowserEnvironment, Environment, ExecutionEnvironment},
free_var_references,
issue::IssueSeverity,
module_graph::binding_usage_info::OptionBindingUsageInfo,
resolve::{parse::Request, pattern::Pattern},
};
use turbopack_css::chunk::CssChunkType;
use turbopack_ecmascript::{
AnalyzeMode, TypeofWindow, chunk::EcmascriptChunkType, references::esm::UrlRewriteBehavior,
};
use turbopack_node::{
execution_context::ExecutionContext,
transforms::postcss::{PostCssConfigLocation, PostCssTransformOptions},
};
use turbopack_resolve::resolve_options_context::{ResolveOptionsContext, TsConfigHandling};
use crate::{
mode::NextMode,
next_build::get_postcss_package_mapping,
next_client::{
runtime_entry::{RuntimeEntries, RuntimeEntry},
transforms::get_next_client_transforms_rules,
},
next_config::NextConfig,
next_font::local::NextFontLocalResolvePlugin,
next_import_map::{
get_next_client_fallback_import_map, get_next_client_import_map,
get_next_client_resolved_map,
},
next_shared::{
resolve::{
ModuleFeatureReportResolvePlugin, NextSharedRuntimeResolvePlugin,
get_invalid_server_only_resolve_plugin,
},
transforms::{
emotion::get_emotion_transform_rule,
react_remove_properties::get_react_remove_properties_transform_rule,
relay::get_relay_transform_rule, remove_console::get_remove_console_transform_rule,
styled_components::get_styled_components_transform_rule,
styled_jsx::get_styled_jsx_transform_rule,
swc_ecma_transform_plugins::get_swc_ecma_transform_plugin_rule,
},
webpack_rules::{WebpackLoaderBuiltinCondition, webpack_loader_options},
},
transform_options::{
get_decorators_transform_options, get_jsx_transform_options,
get_typescript_transform_options,
},
util::{
OptionEnvMap, defines, foreign_code_context_condition,
free_var_references_with_vercel_system_env_warnings, internal_assets_conditions,
module_styles_rule_condition, worker_forwarded_globals,
},
};
/// Builds the client-side compile-time define replacements from the
/// provided environment map.
#[turbo_tasks::function]
async fn next_client_defines(define_env: Vc<OptionEnvMap>) -> Result<Vc<CompileTimeDefines>> {
    let env_map = define_env.await?;
    let client_defines = defines(&env_map);
    Ok(client_defines.cell())
}
/// Computes the client-side free-variable replacements: the shared define
/// replacements (with warnings for inlined Vercel system env vars) plus
/// polyfill bindings for the Node-style `Buffer` and `process` globals.
#[turbo_tasks::function]
async fn next_client_free_vars(
    define_env: Vc<OptionEnvMap>,
    report_system_env_inlining: Vc<IssueSeverity>,
) -> Result<Vc<FreeVarReferences>> {
    Ok(free_var_references!(
        ..free_var_references_with_vercel_system_env_warnings(
            defines(&*define_env.await?),
            *report_system_env_inlining.await?
        ),
        // Map bare `Buffer`/`process` usages onto node polyfill modules so
        // browser bundles don't break on Node-isms.
        Buffer = FreeVarReference::EcmaScriptModule {
            request: rcstr!("node:buffer"),
            lookup_path: None,
            export: Some(rcstr!("Buffer")),
        },
        process = FreeVarReference::EcmaScriptModule {
            request: rcstr!("node:process"),
            lookup_path: None,
            export: Some(rcstr!("default")),
        }
    )
    .cell())
}
/// Assembles the client (browser) compile-time info: a DOM browser
/// execution environment for the given browserslist query, together with
/// the client defines and free-variable replacements.
#[turbo_tasks::function]
pub async fn get_client_compile_time_info(
    browserslist_query: RcStr,
    define_env: Vc<OptionEnvMap>,
    report_system_env_inlining: Vc<IssueSeverity>,
) -> Result<Vc<CompileTimeInfo>> {
    CompileTimeInfo::builder(
        Environment::new(ExecutionEnvironment::Browser(
            BrowserEnvironment {
                dom: true,
                web_worker: false,
                service_worker: false,
                browserslist_query: browserslist_query.to_owned(),
            }
            .resolved_cell(),
        ))
        .to_resolved()
        .await?,
    )
    .defines(next_client_defines(define_env).to_resolved().await?)
    .free_var_references(
        next_client_free_vars(define_env, report_system_env_inlining)
            .to_resolved()
            .await?,
    )
    .cell()
    .await
}
/// Which part of the Next.js client build a module/context belongs to.
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Hash, TaskInput)]
pub enum ClientContextType {
    /// Pages-router client code, rooted at `pages_dir`.
    Pages { pages_dir: FileSystemPath },
    /// App-router client code, rooted at `app_dir`.
    App { app_dir: FileSystemPath },
    /// Fallback client context (no specific router root).
    Fallback,
    /// Any other client context.
    Other,
}
/// Builds the resolve options for client-side compilation: import maps,
/// resolve plugins, custom conditions, and tsconfig handling. Code matched
/// by the foreign-code condition (node_modules) gets a reduced context
/// without the TypeScript/React toggles applied at the end.
#[turbo_tasks::function]
pub async fn get_client_resolve_options_context(
    project_path: FileSystemPath,
    ty: ClientContextType,
    mode: Vc<NextMode>,
    next_config: Vc<NextConfig>,
    execution_context: Vc<ExecutionContext>,
) -> Result<Vc<ResolveOptionsContext>> {
    let next_client_import_map = get_next_client_import_map(
        project_path.clone(),
        ty.clone(),
        next_config,
        mode,
        execution_context,
    )
    .to_resolved()
    .await?;
    let next_client_fallback_import_map = get_next_client_fallback_import_map(ty.clone())
        .to_resolved()
        .await?;
    let next_client_resolved_map =
        get_next_client_resolved_map(project_path.clone(), project_path.clone(), *mode.await?)
            .to_resolved()
            .await?;
    let mut custom_conditions: Vec<_> = mode.await?.custom_resolve_conditions().collect();
    if *next_config.enable_cache_components().await? {
        custom_conditions.push(rcstr!("next-js"));
    };
    // Base context, shared by both user code and foreign (node_modules) code.
    let resolve_options_context = ResolveOptionsContext {
        enable_node_modules: Some(project_path.root().owned().await?),
        custom_conditions,
        import_map: Some(next_client_import_map),
        fallback_import_map: Some(next_client_fallback_import_map),
        resolved_map: Some(next_client_resolved_map),
        browser: true,
        module: true,
        before_resolve_plugins: vec![
            ResolvedVc::upcast(
                get_invalid_server_only_resolve_plugin(project_path.clone())
                    .to_resolved()
                    .await?,
            ),
            ResolvedVc::upcast(
                ModuleFeatureReportResolvePlugin::new(project_path.clone())
                    .to_resolved()
                    .await?,
            ),
            ResolvedVc::upcast(
                NextFontLocalResolvePlugin::new(project_path.clone())
                    .to_resolved()
                    .await?,
            ),
        ],
        after_resolve_plugins: vec![ResolvedVc::upcast(
            NextSharedRuntimeResolvePlugin::new(project_path.clone())
                .to_resolved()
                .await?,
        )],
        ..Default::default()
    };
    let tsconfig_path = next_config.typescript_tsconfig_path().await?;
    let tsconfig_path = project_path.join(
        tsconfig_path
            .as_ref()
            // Fall back to tsconfig only for resolving. This is because we don't want Turbopack to
            // resolve tsconfig.json relative to the file being compiled.
            .unwrap_or(&rcstr!("tsconfig.json")),
    )?;
    // User-code context: base context plus TS/React resolution; foreign code
    // keeps the plain base context via the rules entry.
    Ok(ResolveOptionsContext {
        enable_typescript: true,
        enable_react: true,
        enable_mjs_extension: true,
        custom_extensions: next_config.resolve_extension().owned().await?,
        tsconfig_path: TsConfigHandling::Fixed(tsconfig_path),
        rules: vec![(
            foreign_code_context_condition(next_config, project_path).await?,
            resolve_options_context.clone().resolved_cell(),
        )],
        ..resolve_options_context
    }
    .cell())
}
/// Builds the module options for client-side compilation: transforms
/// (TypeScript, JSX, decorators, SWC plugins), webpack loader rules,
/// PostCSS, tree shaking, and source maps. Produces three layered
/// contexts: user code, foreign code (node_modules), and Next.js-internal
/// assets.
#[turbo_tasks::function]
pub async fn get_client_module_options_context(
    project_path: FileSystemPath,
    execution_context: ResolvedVc<ExecutionContext>,
    env: ResolvedVc<Environment>,
    ty: ClientContextType,
    mode: Vc<NextMode>,
    next_config: Vc<NextConfig>,
    encryption_key: ResolvedVc<RcStr>,
) -> Result<Vc<ModuleOptionsContext>> {
    let next_mode = mode.await?;
    let resolve_options_context = get_client_resolve_options_context(
        project_path.clone(),
        ty.clone(),
        mode,
        next_config,
        *execution_context,
    );
    let tsconfig_path = next_config
        .typescript_tsconfig_path()
        .await?
        .as_ref()
        .map(|p| project_path.join(p))
        .transpose()?;
    let tsconfig = get_typescript_transform_options(project_path.clone(), tsconfig_path.clone())
        .to_resolved()
        .await?;
    let decorators_options =
        get_decorators_transform_options(project_path.clone(), tsconfig_path.clone());
    let enable_mdx_rs = *next_config.mdx_rs().await?;
    let jsx_runtime_options = get_jsx_transform_options(
        project_path.clone(),
        mode,
        Some(resolve_options_context),
        false,
        next_config,
        tsconfig_path,
    )
    .to_resolved()
    .await?;
    let mut loader_conditions = BTreeSet::new();
    loader_conditions.insert(WebpackLoaderBuiltinCondition::Browser);
    loader_conditions.extend(mode.await?.webpack_loader_conditions());
    // A separate webpack rules will be applied to codes matching foreign_code_context_condition.
    // This allows to import codes from node_modules that requires webpack loaders, which next-dev
    // implicitly does by default.
    let mut foreign_conditions = loader_conditions.clone();
    foreign_conditions.insert(WebpackLoaderBuiltinCondition::Foreign);
    let foreign_enable_webpack_loaders =
        *webpack_loader_options(project_path.clone(), next_config, foreign_conditions).await?;
    // Now creates a webpack rules that applies to all code.
    let enable_webpack_loaders =
        *webpack_loader_options(project_path.clone(), next_config, loader_conditions).await?;
    let tree_shaking_mode_for_user_code = *next_config
        .tree_shaking_mode_for_user_code(next_mode.is_development())
        .await?;
    let tree_shaking_mode_for_foreign_code = *next_config
        .tree_shaking_mode_for_foreign_code(next_mode.is_development())
        .await?;
    let target_browsers = env.runtime_versions();
    // Transform rules differ between user code and foreign code (the `false`
    // vs `true` flag below).
    let mut next_client_rules =
        get_next_client_transforms_rules(next_config, ty.clone(), mode, false, encryption_key)
            .await?;
    let foreign_next_client_rules =
        get_next_client_transforms_rules(next_config, ty.clone(), mode, true, encryption_key)
            .await?;
    // Optional transform rules; each helper returns None when its feature is
    // disabled in next.config, hence the flatten().
    let additional_rules: Vec<ModuleRule> = vec![
        get_swc_ecma_transform_plugin_rule(next_config, project_path.clone()).await?,
        get_relay_transform_rule(next_config, project_path.clone()).await?,
        get_emotion_transform_rule(next_config).await?,
        get_styled_components_transform_rule(next_config).await?,
        get_styled_jsx_transform_rule(next_config, target_browsers).await?,
        get_react_remove_properties_transform_rule(next_config).await?,
        get_remove_console_transform_rule(next_config).await?,
    ]
    .into_iter()
    .flatten()
    .collect();
    next_client_rules.extend(additional_rules);
    let postcss_transform_options = PostCssTransformOptions {
        postcss_package: Some(
            get_postcss_package_mapping(project_path.clone())
                .to_resolved()
                .await?,
        ),
        config_location: PostCssConfigLocation::ProjectPathOrLocalPath,
        ..Default::default()
    };
    let postcss_foreign_transform_options = PostCssTransformOptions {
        // For node_modules we don't want to resolve postcss config relative to the file being
        // compiled, instead it only uses the project root postcss config.
        config_location: PostCssConfigLocation::ProjectPath,
        ..postcss_transform_options.clone()
    };
    let enable_postcss_transform = Some(postcss_transform_options.resolved_cell());
    let enable_foreign_postcss_transform = Some(postcss_foreign_transform_options.resolved_cell());
    let source_maps = *next_config.client_source_maps(mode).await?;
    // Base context for user code; the foreign and internal contexts below are
    // derived from it.
    let module_options_context = ModuleOptionsContext {
        ecmascript: EcmascriptOptionsContext {
            esm_url_rewrite_behavior: Some(UrlRewriteBehavior::Relative),
            enable_typeof_window_inlining: Some(TypeofWindow::Object),
            enable_import_as_bytes: *next_config.turbopack_import_type_bytes().await?,
            enable_import_as_text: *next_config.turbopack_import_type_text().await?,
            source_maps,
            infer_module_side_effects: *next_config.turbopack_infer_module_side_effects().await?,
            ..Default::default()
        },
        css: CssOptionsContext {
            source_maps,
            module_css_condition: Some(module_styles_rule_condition()),
            ..Default::default()
        },
        static_url_tag: Some(rcstr!("client")),
        environment: Some(env),
        execution_context: Some(execution_context),
        tree_shaking_mode: tree_shaking_mode_for_user_code,
        enable_postcss_transform,
        side_effect_free_packages: Some(
            side_effect_free_packages_glob(next_config.optimize_package_imports())
                .to_resolved()
                .await?,
        ),
        keep_last_successful_parse: next_mode.is_development(),
        analyze_mode: if next_mode.is_development() {
            AnalyzeMode::CodeGeneration
        } else {
            // Technically, this doesn't need to tracing for the client context. But this will
            // result in more cache hits for the analysis for modules which are loaded for both ssr
            // and client
            AnalyzeMode::CodeGenerationAndTracing
        },
        ..Default::default()
    };
    // node_modules context
    let foreign_codes_options_context = ModuleOptionsContext {
        ecmascript: EcmascriptOptionsContext {
            enable_typeof_window_inlining: None,
            // Ignore e.g. import(`${url}`) requests in node_modules.
            ignore_dynamic_requests: true,
            ..module_options_context.ecmascript
        },
        enable_webpack_loaders: foreign_enable_webpack_loaders,
        enable_postcss_transform: enable_foreign_postcss_transform,
        module_rules: foreign_next_client_rules,
        tree_shaking_mode: tree_shaking_mode_for_foreign_code,
        // NOTE(WEB-1016) PostCSS transforms should also apply to foreign code.
        ..module_options_context.clone()
    };
    // Context for Next.js-internal assets: default TS/JSX transforms, no
    // user PostCSS config.
    let internal_context = ModuleOptionsContext {
        ecmascript: EcmascriptOptionsContext {
            enable_typescript_transform: Some(
                TypescriptTransformOptions::default().resolved_cell(),
            ),
            enable_jsx: Some(JsxTransformOptions::default().resolved_cell()),
            ..module_options_context.ecmascript.clone()
        },
        enable_postcss_transform: None,
        ..module_options_context.clone()
    };
    let module_options_context = ModuleOptionsContext {
        // We don't need to resolve React Refresh for each module. Instead,
        // we try resolve it once at the root and pass down a context to all
        // the modules.
        ecmascript: EcmascriptOptionsContext {
            enable_jsx: Some(jsx_runtime_options),
            enable_typescript_transform: Some(tsconfig),
            enable_decorators: Some(decorators_options.to_resolved().await?),
            ..module_options_context.ecmascript.clone()
        },
        enable_webpack_loaders,
        enable_mdx_rs,
        rules: vec![
            (
                foreign_code_context_condition(next_config, project_path).await?,
                foreign_codes_options_context.resolved_cell(),
            ),
            (
                internal_assets_conditions().await?,
                internal_context.resolved_cell(),
            ),
        ],
        module_rules: next_client_rules,
        ..module_options_context
    }
    .cell();
    Ok(module_options_context)
}
/// Inputs for `get_client_chunking_context`, bundled into one TaskInput
/// struct to keep the task signature manageable.
#[derive(Clone, Debug, PartialEq, Eq, Hash, TaskInput, TraceRawVcs, Encode, Decode)]
pub struct ClientChunkingContextOptions {
    pub mode: Vc<NextMode>,
    pub root_path: FileSystemPath,
    pub client_root: FileSystemPath,
    pub client_root_to_root_path: RcStr,
    /// Prefix prepended to emitted chunk/asset URLs (e.g. assetPrefix).
    pub asset_prefix: Vc<RcStr>,
    pub environment: Vc<Environment>,
    pub module_id_strategy: Vc<ModuleIdStrategy>,
    pub export_usage: Vc<OptionBindingUsageInfo>,
    pub unused_references: Vc<UnusedReferences>,
    pub minify: Vc<bool>,
    pub source_maps: Vc<SourceMapsType>,
    pub no_mangling: Vc<bool>,
    pub scope_hoisting: Vc<bool>,
    pub nested_async_chunking: Vc<bool>,
    pub debug_ids: Vc<bool>,
    pub should_use_absolute_url_references: Vc<bool>,
    pub css_url_suffix: Vc<Option<RcStr>>,
}
/// Constructs the browser chunking context for client builds. Development
/// mode enables HMR and dynamic chunk loading; production mode enables
/// chunk-size-based grouping, content hashing, and optional scope hoisting.
#[turbo_tasks::function]
pub async fn get_client_chunking_context(
    options: ClientChunkingContextOptions,
) -> Result<Vc<Box<dyn ChunkingContext>>> {
    let ClientChunkingContextOptions {
        mode,
        root_path,
        client_root,
        client_root_to_root_path,
        asset_prefix,
        environment,
        module_id_strategy,
        export_usage,
        unused_references,
        minify,
        source_maps,
        no_mangling,
        scope_hoisting,
        nested_async_chunking,
        debug_ids,
        should_use_absolute_url_references,
        css_url_suffix,
    } = options;
    let next_mode = mode.await?;
    let asset_prefix = asset_prefix.owned().await?;
    let mut builder = BrowserChunkingContext::builder(
        root_path,
        client_root.clone(),
        client_root_to_root_path,
        client_root.clone(),
        client_root.join("static/chunks")?,
        get_client_assets_path(client_root.clone()).owned().await?,
        environment.to_resolved().await?,
        next_mode.runtime_type(),
    )
    .chunk_base_path(Some(asset_prefix.clone()))
    .asset_suffix(AssetSuffix::Inferred.resolved_cell())
    // Mangling is only applied when minifying and not explicitly disabled.
    .minify_type(if *minify.await? {
        MinifyType::Minify {
            mangle: (!*no_mangling.await?).then_some(MangleType::OptimalSize),
        }
    } else {
        MinifyType::NoMinify
    })
    .source_maps(*source_maps.await?)
    .asset_base_path(Some(asset_prefix))
    .current_chunk_method(CurrentChunkMethod::DocumentCurrentScript)
    .export_usage(*export_usage.await?)
    .unused_references(unused_references.to_resolved().await?)
    .module_id_strategy(module_id_strategy.to_resolved().await?)
    .debug_ids(*debug_ids.await?)
    .should_use_absolute_url_references(*should_use_absolute_url_references.await?)
    .nested_async_availability(*nested_async_chunking.await?)
    .worker_forwarded_globals(worker_forwarded_globals())
    .default_url_behavior(UrlBehavior {
        suffix: AssetSuffix::Inferred,
        static_suffix: css_url_suffix.to_resolved().await?,
    });
    if next_mode.is_development() {
        builder = builder
            .hot_module_replacement()
            .source_map_source_type(SourceMapSourceType::AbsoluteFileUri)
            .dynamic_chunk_content_loading(true);
    } else {
        builder = builder
            .chunking_config(
                Vc::<EcmascriptChunkType>::default().to_resolved().await?,
                ChunkingConfig {
                    min_chunk_size: 50_000,
                    max_chunk_count_per_group: 40,
                    max_merge_chunk_size: 200_000,
                    ..Default::default()
                },
            )
            .chunking_config(
                Vc::<CssChunkType>::default().to_resolved().await?,
                ChunkingConfig {
                    max_merge_chunk_size: 100_000,
                    ..Default::default()
                },
            )
            .use_content_hashing(ContentHashing::Direct { length: 16 })
            .module_merging(*scope_hoisting.await?);
    }
    Ok(Vc::upcast(builder.build()))
}
/// Directory beneath the client root where static media assets are emitted.
#[turbo_tasks::function]
pub fn get_client_assets_path(client_root: FileSystemPath) -> Result<Vc<FileSystemPath>> {
    let media_dir = client_root.join("static/media")?;
    Ok(media_dir.cell())
}
/// Collects the runtime entry modules injected into every client bundle:
/// React Refresh in development (when resolvable), and the app-router
/// bootstrap script for App contexts.
#[turbo_tasks::function]
pub async fn get_client_runtime_entries(
    project_root: FileSystemPath,
    ty: ClientContextType,
    mode: Vc<NextMode>,
    next_config: Vc<NextConfig>,
    execution_context: Vc<ExecutionContext>,
) -> Result<Vc<RuntimeEntries>> {
    let mut runtime_entries = vec![];
    let resolve_options_context = get_client_resolve_options_context(
        project_root.clone(),
        ty.clone(),
        mode,
        next_config,
        execution_context,
    );
    if mode.await?.is_development() {
        let enable_react_refresh =
            assert_can_resolve_react_refresh(project_root.clone(), resolve_options_context)
                .await?
                .as_request();
        // It's important that React Refresh come before the regular bootstrap file,
        // because the bootstrap contains JSX which requires Refresh's global
        // functions to be available.
        if let Some(request) = enable_react_refresh {
            runtime_entries.push(
                RuntimeEntry::Request(request.to_resolved().await?, project_root.join("_")?)
                    .resolved_cell(),
            )
        };
    }
    if matches!(ty, ClientContextType::App { .. },) {
        runtime_entries.push(
            RuntimeEntry::Request(
                Request::parse(Pattern::Constant(rcstr!(
                    "next/dist/client/app-next-turbopack.js"
                )))
                .to_resolved()
                .await?,
                project_root.join("_")?,
            )
            .resolved_cell(),
        );
    }
    Ok(Vc::cell(runtime_entries))
}
#
# Copyright 2013, 2015 Red Hat, Inc.
# Copyright(c) FUJITSU Limited 2007.
#
# Cloning a virtual machine module.
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
import re
import os
import libvirt
from . import generatename
from . import progress
from . import xmlutil
from .guest import Guest
from .devices import DeviceInterface
from .devices import DeviceDisk
from .logger import log
from .devices import DeviceChannel
def _replace_vm(conn, name):
    """
    Destroy and undefine any existing VM named `name` so the clone can
    reuse that name. A no-op if no such VM exists.
    """
    try:
        vm = conn.lookupByName(name)
    except libvirt.libvirtError:
        # No VM with that name exists; nothing to replace
        return
    try:
        log.debug("Explicitly replacing guest '%s'", name)
        if vm.ID() != -1:
            # ID() != -1 means the domain is currently running
            log.debug("Destroying guest '%s'", name)
            vm.destroy()
        log.debug("Undefining guest '%s'", name)
        vm.undefine()
    except libvirt.libvirtError as e:  # pragma: no cover
        msg = (_("Could not remove old vm '%(vm)s': %(error)s") % {
            "vm": name, "error": str(e)})
        raise RuntimeError(msg) from None
def _generate_clone_name(conn, basename):
    """
    If the orig name is "foo-clone", we don't want the clone to be
    "foo-clone-clone", we want "foo-clone1"
    """
    # NOTE(review): the character class [1-9] excludes 0, so a name like
    # "foo-clone10" will not match and gets a fresh "-clone" appended
    # instead of continuing the numbering -- confirm this is intended.
    regex = r"-clone[1-9]*$"
    match = re.search(regex, basename)
    start_num = 1
    force_num = False
    if match:
        num_match = re.search("[1-9]+$", match.group())
        force_num = True
        if num_match:
            # Continue numbering after the existing "-cloneN" suffix
            start_num = int(str(num_match.group())) + 1
        # Strip the old "-clone[N]" suffix before rebuilding it below
        basename = basename[:match.start()]
    def cb(n):
        # True if a libvirt domain with the candidate name already exists
        return generatename.check_libvirt_collision(
            conn.lookupByName, n)
    basename = basename + "-clone"
    return generatename.generate_name(basename, cb,
            sep="", start_num=start_num, force_num=force_num)
def _generate_clone_disk_path(conn, origname, newname, origpath):
    """
    Build a default storage path for a cloned disk, derived from the
    original disk path plus the original and proposed new VM names.
    """
    if origpath is None:
        return None
    # Split off a short file extension (<= 7 chars) so the clone marker
    # lands before it: foobar.img -> foobar-clone.img. A longer
    # "extension" is assumed to be part of the disk name itself, in
    # which case '-clone' is simply appended to the whole path.
    stem = origpath
    ext = ""
    if "." in origpath:
        candidate_stem, candidate_ext = origpath.rsplit(".", 1)
        if len(candidate_ext) <= 7:
            stem = candidate_stem
            ext = "." + candidate_ext
    dirname = os.path.dirname(stem)
    basename = os.path.basename(stem)
    # If the disk file was named after the VM itself, name the clone
    # after the new VM; otherwise just tack on "-clone".
    if origname and basename == origname:
        newbase = newname
    else:
        newbase = basename + "-clone"
    clonebase = os.path.join(dirname, newbase)
    def cb(p):
        # True if storage already exists at the candidate path
        return DeviceDisk.path_definitely_exists(conn, p)
    return generatename.generate_name(clonebase, cb, suffix=ext)
def _lookup_vm(conn, name):
    """
    Fetch the libvirt domain object for `name`, converting the libvirt
    lookup failure into a user-friendly ValueError.
    """
    try:
        dom = conn.lookupByName(name)
    except libvirt.libvirtError:
        msg = _("Domain '%s' was not found.") % str(name)
        raise ValueError(msg) from None
    return dom
def _build_clone_vol_install(orig_disk, new_disk):
    """
    Build a volume-install object that will clone orig_disk's storage
    volume into new_disk's parent pool.
    """
    # Stub size/sparse values just to create the object; set_input_vol
    # below overwrites them from the source volume.
    stub_size = .000001
    stub_sparse = False
    volname = os.path.basename(new_disk.get_source_path())
    parent_pool = new_disk.get_parent_pool()
    vol_install = DeviceDisk.build_vol_install(
        orig_disk.conn, volname, parent_pool, stub_size, stub_sparse)
    vol_install.set_input_vol(orig_disk.get_vol_object())
    return vol_install
def _build_clone_disk(orig_disk, clonepath, allow_create, sparse):
    """
    Build a new DeviceDisk for the clone destination `clonepath`.
    If allow_create is False the path is only validated (preserve case);
    otherwise storage creation/cloning is configured on the returned disk.
    """
    conn = orig_disk.conn
    device = DeviceDisk.DEVICE_DISK
    if not clonepath:
        # Empty path: treat as a removable cdrom device
        device = DeviceDisk.DEVICE_CDROM
    new_disk = DeviceDisk(conn)
    new_disk.set_source_path(clonepath)
    new_disk.device = device
    if not allow_create:
        # Preserve mode: caller wants the existing path used as-is
        new_disk.validate()
        return new_disk
    if new_disk.get_vol_object():
        # Special case: non remote cloning of a guest using
        # managed block devices: fall back to local cloning if
        # we have permissions to do so. This validation check
        # caused a few bug reports in a short period of time,
        # so must be a common case.
        if (conn.is_remote() or
                new_disk.type != new_disk.TYPE_BLOCK or
                not orig_disk.get_source_path() or
                not os.access(orig_disk.get_source_path(), os.R_OK) or
                not new_disk.get_source_path() or
                not os.access(new_disk.get_source_path(), os.W_OK)):
            raise RuntimeError(
                _("Clone onto existing storage volume is not "
                  "currently supported: '%s'") % new_disk.get_source_path())
    if (orig_disk.get_vol_object() and
            new_disk.wants_storage_creation()):
        # Managed storage: clone via a libvirt volume install
        vol_install = _build_clone_vol_install(orig_disk, new_disk)
        if not sparse:
            vol_install.allocation = vol_install.capacity
        new_disk.set_vol_install(vol_install)
    elif orig_disk.get_source_path():
        # Unmanaged storage: plain local file copy
        new_disk.set_local_disk_to_clone(orig_disk, sparse)
    new_disk.validate()
    return new_disk
def _get_cloneable_msg(disk):
    """
    If the disk storage is not cloneable, return a string explaining why.
    Returns None (implicitly) when the disk is cloneable.
    """
    if disk.wants_storage_creation():
        # Don't have the original storage present, don't attempt to
        # clone a non-existent path
        return _("Disk path '%s' does not exist.") % disk.get_source_path()
    if disk.type == "network":
        proto = disk.source.protocol
        if proto not in ["rbd"]:
            return _("Disk network type '%s' is not cloneable.") % proto
        disk.set_backend_for_existing_path()
        if not disk.get_vol_object():
            return _("Cloning disk network type '%s' requires "
                     "managed storage.") % proto
        else:
            # This case, rbd with managed storage, is implementable. It
            # requires open coding a bunch of work in cloner, or reworking
            # other disk code to add unique URIs for rbd volumes and pools
            return _("Cloning rbd volumes is not yet supported.")
def _get_shareable_msg(disk):
    """
    If the disk should be shared with the clone rather than copied,
    return a string explaining why; otherwise return None.
    """
    if disk.is_empty():
        return _("No storage to clone.")
    if disk.read_only:
        return _("Read Only")
    if disk.shareable or disk.transient_shareBacking:
        return _("Marked as shareable")
    return None
class _CloneDiskInfo:
    """
    Class that tracks some additional information about how we want
    to default handle each disk of the source VM

    For any source disk there's 3 main scenarios:

    * clone: Copy contents from src to dst. If dst path doesn't
        exist we attempt to create it. If it exists we overwrite it
    * preserve: Destination path is an existing path, and no copying
        is performed.
    * share: Original disk XML is used unchanged for the new disk
    """
    _ACTION_SHARE = 1
    _ACTION_CLONE = 2
    _ACTION_PRESERVE = 3

    def __init__(self, srcdisk):
        # Private parsed copy of the source disk XML, so edits here never
        # touch the caller's Guest object
        self.disk = DeviceDisk(srcdisk.conn, parsexml=srcdisk.get_xml())
        self.disk.set_backend_for_existing_path()
        self.new_disk = None
        self._share_msg = _get_shareable_msg(self.disk)
        # -1 == "not computed yet"; see get_cloneable_msg()
        self._cloneable_msg = -1
        self._newpath_msg = None
        self._action = None
        self.set_clone_requested()
        if self.get_share_msg():
            # Shareable disks (readonly, shareable, ...) default to sharing
            self.set_share_requested()

    def is_clone_requested(self):
        return self._action in [self._ACTION_CLONE]

    def is_share_requested(self):
        return self._action in [self._ACTION_SHARE]

    def is_preserve_requested(self):
        return self._action in [self._ACTION_PRESERVE]

    def _set_action(self, action):
        if action != self._action:
            self._action = action

    def set_clone_requested(self):
        self._set_action(self._ACTION_CLONE)

    def set_share_requested(self):
        self._set_action(self._ACTION_SHARE)

    def set_preserve_requested(self):
        self._set_action(self._ACTION_PRESERVE)

    def set_new_path(self, path, sparse):
        """
        Set the destination path for this disk, building the clone
        DeviceDisk. Errors are stashed in get_newpath_msg() rather
        than raised.
        """
        allow_create = not self.is_preserve_requested()
        if allow_create:
            msg = self.get_cloneable_msg()
            if msg:
                # Not cloneable; raise_error() will report it later
                return
        try:
            self.new_disk = Cloner.build_clone_disk(
                self.disk, path, allow_create, sparse)
        except Exception as e:
            log.debug("Error setting clone path.", exc_info=True)
            err = (_("Could not use path '%(path)s' for cloning: %(error)s") %
                   {"path": path, "error": str(e)})
            self._newpath_msg = err

    def get_share_msg(self):
        return self._share_msg

    def get_cloneable_msg(self):
        # Lazily computed and cached, since the check may hit storage APIs
        if self._cloneable_msg == -1:
            self._cloneable_msg = _get_cloneable_msg(self.disk)
        return self._cloneable_msg

    def get_newpath_msg(self):
        return self._newpath_msg

    def raise_error(self):
        """Raise ValueError if the requested action cannot proceed."""
        if self.is_clone_requested() and self.get_cloneable_msg():
            msg = self.get_cloneable_msg()
            # Bug fix: translate the message template first, THEN
            # interpolate. The old code did `_("... %s" % msg)`, which
            # interpolated before the gettext lookup and therefore could
            # never find a translation.
            err = _("Could not determine original disk information: %s") % msg
            raise ValueError(err)
        if self.is_share_requested():
            return
        if self.get_newpath_msg():
            msg = self.get_newpath_msg()
            raise ValueError(msg)
class Cloner(object):
    """
    Drives the clone process: builds new guest XML from a source VM
    and duplicates its storage via _CloneDiskInfo helpers.
    """
    @staticmethod
    def generate_clone_name(conn, basename):
        # Thin public wrapper over the module-level helper.
        return _generate_clone_name(conn, basename)
@staticmethod
def generate_clone_disk_path(conn, origname, newname, origpath):
    # Thin public wrapper over the module-level helper.
    return _generate_clone_disk_path(conn, origname, newname, origpath)
@staticmethod
def build_clone_disk(orig_disk, clonepath, allow_create, sparse):
    # Thin public wrapper over the module-level helper.
    return _build_clone_disk(orig_disk, clonepath, allow_create, sparse)
def __init__(self, conn, src_name=None, src_xml=None):
    """Set up cloning state from either a VM name or raw domain XML."""
    self.conn = conn
    # Parsed Guest objects: the pristine source and the clone-in-progress.
    self._src_guest = None
    self._new_guest = None
    # One _CloneDiskInfo per disk device of the source VM.
    self._diskinfos = []
    self._nvram_diskinfo = None
    self._init_src(src_name, src_xml)
    # Optional override for the cloned nvram destination path.
    self._new_nvram_path = None
    self._sparse = True
    self._replace = False
    self._reflink = False
#################
# Init routines #
#################
def _init_src(self, src_name, src_xml):
    """
    Set up the source VM info we are cloning, from passed in VM name
    or full XML
    """
    if not src_xml:
        dom = _lookup_vm(self.conn, src_name)
        status = dom.info()[0]
        if status not in [libvirt.VIR_DOMAIN_SHUTOFF]:
            raise RuntimeError(_("Domain to clone must be shutoff."))
        # SECURE flag: include security-sensitive data in the XML dump
        # so the clone is a faithful copy.
        flags = libvirt.VIR_DOMAIN_XML_SECURE
        src_xml = dom.XMLDesc(flags)
    log.debug("Original XML:\n%s", src_xml)
    # Parse two independent copies of the XML: one kept pristine as
    # the source, one mutated into the clone definition.
    self._src_guest = Guest(self.conn, parsexml=src_xml)
    self._new_guest = Guest(self.conn, parsexml=src_xml)
    self._init_new_guest()
    # Collect disk info for every disk to determine if we will
    # default to cloning or not
    for disk in self._src_guest.devices.disk:
        self._diskinfos.append(_CloneDiskInfo(disk))
    for diskinfo in [d for d in self._diskinfos if d.is_clone_requested()]:
        disk = diskinfo.disk
        log.debug("Wants cloning: size=%s path=%s",
            disk.get_size(), disk.get_source_path())
    if self._src_guest.os.nvram:
        # Track nvram as an extra pseudo-disk so it can be cloned too.
        old_nvram = DeviceDisk(self.conn)
        old_nvram.set_source_path(self._new_guest.os.nvram)
        self._nvram_diskinfo = _CloneDiskInfo(old_nvram)
def _init_new_guest(self):
    """
    Perform the series of unconditional new VM changes we always make
    """
    # Drop identity fields so the clone gets fresh ones.
    self._new_guest.id = None
    self._new_guest.title = None
    self._new_guest.uuid = None
    self._new_guest.uuid = Guest.generate_uuid(self.conn)
    for dev in self._new_guest.devices.graphics:
        if dev.port and dev.port != -1:
            log.warning(_("Setting the graphics device port to autoport, "
                "in order to avoid conflicting."))
            dev.port = -1
    for iface in self._new_guest.devices.interface:
        # Clear the host-side device name and generate a new MAC so
        # the clone doesn't collide with the source VM.
        iface.target_dev = None
        iface.macaddr = DeviceInterface.generate_mac(self.conn)
    # For guest agent channel, remove a path to generate a new one with
    # new guest name
    for channel in self._new_guest.devices.channel:
        if (channel.type == DeviceChannel.TYPE_UNIX and
            channel.target_name and channel.source.path and
            channel.target_name in channel.source.path):
            channel.source.path = None
    new_name = Cloner.generate_clone_name(self.conn, self.src_name)
    log.debug("Auto-generated clone name '%s'", new_name)
    self.set_clone_name(new_name)
##############
# Properties #
##############
@property
def src_name(self):
    """
    The name of the original VM we are cloning
    """
    # Read-only view onto the parsed source Guest.
    return self._src_guest.name
@property
def new_guest(self):
    """
    The Guest instance of the new XML we will create
    """
    # Mutated in place by _init_new_guest() and prepare().
    return self._new_guest
@property
def nvram_diskinfo(self):
    # _CloneDiskInfo for the VM's nvram file, or None if the VM has
    # no nvram (or it was dropped during _prepare_nvram()).
    return self._nvram_diskinfo
def set_clone_name(self, name):
    # Override the auto-generated clone VM name.
    self._new_guest.name = name
def set_clone_uuid(self, uuid):
    """
    Override the new VM's generated UUID
    """
    self._new_guest.uuid = uuid
def set_replace(self, val):
    """
    If True, don't check for clone name collision, simply undefine
    any conflicting guest.
    """
    # Coerce to a strict bool so later truth checks are unambiguous.
    self._replace = bool(val)
def set_reflink(self, reflink):
    """
    If true, use COW lightweight copy
    """
    # Propagated to each cloned volume's vol_install during prepare().
    self._reflink = reflink
def set_sparse(self, flg):
    """
    If True, attempt sparse allocation during cloning
    """
    # Forwarded to build_clone_disk via set_new_path() calls.
    self._sparse = flg
def get_diskinfos(self):
    """
    Return a shallow copy of the list of _CloneDiskInfo instances
    """
    return list(self._diskinfos)
def get_nonshare_diskinfos(self):
    """
    Return the _CloneDiskInfo instances that are tagged for cloning
    (i.e. everything whose action is not 'share').
    """
    result = []
    for diskinfo in self.get_diskinfos():
        if diskinfo.is_share_requested():
            continue
        result.append(diskinfo)
    return result
def set_nvram_path(self, val):
    """
    If the VM needs to have nvram content cloned, this overrides the
    destination path
    """
    # Consumed by _prepare_nvram(); None means auto-generate a path.
    self._new_nvram_path = val
######################
# Functional methods #
######################
def _prepare_nvram(self):
    """Set up cloning of the VM's nvram file, if the VM has one."""
    if not self._nvram_diskinfo:
        return
    new_nvram_path = self._new_nvram_path
    if new_nvram_path is None:
        # No override given: place <newname>_VARS.fd next to the
        # original nvram file.
        nvram_dir = os.path.dirname(self._new_guest.os.nvram)
        new_nvram_path = os.path.join(
            nvram_dir, "%s_VARS.fd" % self._new_guest.name)
    diskinfo = self._nvram_diskinfo
    new_nvram = DeviceDisk(self.conn)
    new_nvram.set_source_path(new_nvram_path)
    old_nvram = DeviceDisk(self.conn)
    old_nvram.set_source_path(diskinfo.disk.get_source_path())
    if (diskinfo.is_clone_requested() and
        new_nvram.wants_storage_creation() and
        diskinfo.disk.get_vol_object()):
        # We only run validation if there's some existing nvram we
        # can copy. It's valid for nvram to not exist at VM define
        # time, libvirt will create it for us
        diskinfo.set_new_path(new_nvram_path, self._sparse)
        diskinfo.raise_error()
        diskinfo.new_disk.get_vol_install().reflink = self._reflink
    else:
        # There's no action to perform for this case, so drop it
        self._nvram_diskinfo = None
    self._new_guest.os.nvram = new_nvram.get_source_path()
def prepare(self):
    """
    Validate and set up all parameters needed for the new (clone) VM
    """
    try:
        Guest.validate_name(self.conn, self._new_guest.name,
            check_collision=not self._replace,
            validate=False)
    except ValueError as e:
        msg = _("Invalid name for new guest: %s") % e
        raise ValueError(msg) from None
    for diskinfo in self.get_nonshare_diskinfos():
        orig_disk = diskinfo.disk
        if not diskinfo.new_disk:
            # User didn't set a path, generate one
            newpath = Cloner.generate_clone_disk_path(
                self.conn, self.src_name,
                self.new_guest.name,
                orig_disk.get_source_path())
            diskinfo.set_new_path(newpath, self._sparse)
        if not diskinfo.new_disk:
            # We hit an error, clients will raise it later
            continue
        new_disk = diskinfo.new_disk
        assert new_disk
        log.debug("Cloning srcpath=%s dstpath=%s",
            orig_disk.get_source_path(), new_disk.get_source_path())
        if self._reflink:
            vol_install = new_disk.get_vol_install()
            vol_install.reflink = self._reflink
        # NOTE(review): this assumes every cloned disk's target matches
        # a disk in the new guest XML; if none matched, xmldisk would
        # be unbound (or stale from a prior iteration) -- confirm.
        for disk in self._new_guest.devices.disk:
            if disk.target == orig_disk.target:
                xmldisk = disk
        # Change the XML
        xmldisk.set_source_path(None)
        xmldisk.type = new_disk.type
        xmldisk.driver_name = orig_disk.driver_name
        xmldisk.driver_type = orig_disk.driver_type
        xmldisk.set_source_path(new_disk.get_source_path())
    self._prepare_nvram()
    # Save altered clone xml
    diff = xmlutil.diff(self._src_guest.get_xml(),
        self._new_guest.get_xml())
    log.debug("Clone guest xml diff:\n%s", diff)
def start_duplicate(self, meter=None):
    """
    Actually perform the duplication: cloning disks if needed and defining
    the new clone xml.

    :param meter: optional progress meter; a default is created if None
    """
    log.debug("Starting duplicate.")
    meter = progress.ensure_meter(meter)
    dom = None
    try:
        # Replace orig VM if required
        if self._replace:
            _replace_vm(self.conn, self._new_guest.name)
        # Define domain early to catch any xml errors before duping storage
        dom = self.conn.defineXML(self._new_guest.get_xml())
        diskinfos = self.get_diskinfos()
        if self._nvram_diskinfo:
            diskinfos.append(self._nvram_diskinfo)
        for diskinfo in diskinfos:
            if not diskinfo.is_clone_requested():
                continue
            diskinfo.new_disk.build_storage(meter)
    except Exception as e:
        log.debug("Duplicate failed: %s", str(e))
        # Roll back the just-defined domain so a failed clone doesn't
        # leave a half-configured VM behind.
        if dom:
            dom.undefine()
        raise
    log.debug("Duplicating finished.")
/*
* jfdctint.c
*
* Copyright (C) 1991-1996, Thomas G. Lane.
* Modification developed 2003-2018 by Guido Vollbeding.
* This file is part of the Independent JPEG Group's software.
* For conditions of distribution and use, see the accompanying README file.
*
* This file contains a slow-but-accurate integer implementation of the
* forward DCT (Discrete Cosine Transform).
*
* A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT
* on each column. Direct algorithms are also available, but they are
* much more complex and seem not to be any faster when reduced to code.
*
* This implementation is based on an algorithm described in
* C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT
* Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics,
* Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991.
* The primary algorithm described there uses 11 multiplies and 29 adds.
* We use their alternate method with 12 multiplies and 32 adds.
* The advantage of this method is that no data path contains more than one
* multiplication; this allows a very simple and accurate implementation in
* scaled fixed-point arithmetic, with a minimal number of shifts.
*
* We also provide FDCT routines with various input sample block sizes for
* direct resolution reduction or enlargement and for direct resolving the
* common 2x1 and 1x2 subsampling cases without additional resampling: NxN
* (N=1...16), 2NxN, and Nx2N (N=1...8) pixels for one 8x8 output DCT block.
*
* For N<8 we fill the remaining block coefficients with zero.
* For N>8 we apply a partial N-point FDCT on the input samples, computing
* just the lower 8 frequency coefficients and discarding the rest.
*
* We must scale the output coefficients of the N-point FDCT appropriately
* to the standard 8-point FDCT level by 8/N per 1-D pass. This scaling
* is folded into the constant multipliers (pass 2) and/or final/initial
* shifting.
*
* CAUTION: We rely on the FIX() macro except for the N=1,2,4,8 cases
* since there would be too many additional constants to pre-calculate.
*/
/* Request access to internal library declarations from jpeglib.h. */
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jdct.h" /* Private declarations for DCT subsystem */

#ifdef DCT_ISLOW_SUPPORTED

/*
 * This module is specialized to the case DCTSIZE = 8.
 */

#if DCTSIZE != 8
Sorry, this code only copes with 8x8 DCT blocks. /* deliberate syntax err */
#endif

/*
 * The poop on this scaling stuff is as follows:
 *
 * Each 1-D DCT step produces outputs which are a factor of sqrt(N)
 * larger than the true DCT outputs. The final outputs are therefore
 * a factor of N larger than desired; since N=8 this can be cured by
 * a simple right shift at the end of the algorithm. The advantage of
 * this arrangement is that we save two multiplications per 1-D DCT,
 * because the y0 and y4 outputs need not be divided by sqrt(N).
 * In the IJG code, this factor of 8 is removed by the quantization step
 * (in jcdctmgr.c), NOT in this module.
 *
 * We have to do addition and subtraction of the integer inputs, which
 * is no problem, and multiplication by fractional constants, which is
 * a problem to do in integer arithmetic. We multiply all the constants
 * by CONST_SCALE and convert them to integer constants (thus retaining
 * CONST_BITS bits of precision in the constants). After doing a
 * multiplication we have to divide the product by CONST_SCALE, with proper
 * rounding, to produce the correct output. This division can be done
 * cheaply as a right shift of CONST_BITS bits. We postpone shifting
 * as long as possible so that partial sums can be added together with
 * full fractional precision.
 *
 * The outputs of the first pass are scaled up by PASS1_BITS bits so that
 * they are represented to better-than-integral precision. These outputs
 * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
 * with the recommended scaling. (For 12-bit sample data, the intermediate
 * array is INT32 anyway.)
 *
 * To avoid overflow of the 32-bit intermediate results in pass 2, we must
 * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis
 * shows that the values given below are the most effective.
 */

#if BITS_IN_JSAMPLE == 8
#define CONST_BITS 13
#define PASS1_BITS 2
#else
#define CONST_BITS 13
#define PASS1_BITS 1 /* lose a little precision to avoid overflow */
#endif

/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
 * causing a lot of useless floating-point operations at run time.
 * To get around this we use the following pre-calculated constants.
 * If you change CONST_BITS you may want to add appropriate values.
 * (With a reasonable C compiler, you can just rely on the FIX() macro...)
 */

#if CONST_BITS == 13
#define FIX_0_298631336 ((INT32) 2446) /* FIX(0.298631336) */
#define FIX_0_390180644 ((INT32) 3196) /* FIX(0.390180644) */
#define FIX_0_541196100 ((INT32) 4433) /* FIX(0.541196100) */
#define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */
#define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */
#define FIX_1_175875602 ((INT32) 9633) /* FIX(1.175875602) */
#define FIX_1_501321110 ((INT32) 12299) /* FIX(1.501321110) */
#define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */
#define FIX_1_961570560 ((INT32) 16069) /* FIX(1.961570560) */
#define FIX_2_053119869 ((INT32) 16819) /* FIX(2.053119869) */
#define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */
#define FIX_3_072711026 ((INT32) 25172) /* FIX(3.072711026) */
#else
#define FIX_0_298631336 FIX(0.298631336)
#define FIX_0_390180644 FIX(0.390180644)
#define FIX_0_541196100 FIX(0.541196100)
#define FIX_0_765366865 FIX(0.765366865)
#define FIX_0_899976223 FIX(0.899976223)
#define FIX_1_175875602 FIX(1.175875602)
#define FIX_1_501321110 FIX(1.501321110)
#define FIX_1_847759065 FIX(1.847759065)
#define FIX_1_961570560 FIX(1.961570560)
#define FIX_2_053119869 FIX(2.053119869)
#define FIX_2_562915447 FIX(2.562915447)
#define FIX_3_072711026 FIX(3.072711026)
#endif

/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result.
 * For 8-bit samples with the recommended scaling, all the variable
 * and constant values involved are no more than 16 bits wide, so a
 * 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
 * For 12-bit samples, a full 32-bit multiplication will be needed.
 */

#if BITS_IN_JSAMPLE == 8
#define MULTIPLY(var,const) MULTIPLY16C16(var,const)
#else
#define MULTIPLY(var,const) ((var) * (const))
#endif
/*
* Perform the forward DCT on one block of samples.
*/
GLOBAL(void)
jpeg_fdct_islow (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* Forward 8x8 DCT, accurate fixed-point version.
   * data: receives the 64 output coefficients (row-major); also used
   *       as scratch between the row pass and the column pass.
   * sample_data[0..7][start_col..start_col+7]: input sample block.
   */
  INT32 tmp0, tmp1, tmp2, tmp3;
  INT32 tmp10, tmp11, tmp12, tmp13;
  INT32 z1;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * cK represents sqrt(2) * cos(K*pi/16).
   */
  dataptr = data;
  for (ctr = 0; ctr < DCTSIZE; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part per LL&M figure 1 --- note that published figure is faulty;
     * rotator "c1" should be "c6".
     */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[7]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[6]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[5]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[4]);

    tmp10 = tmp0 + tmp3;
    tmp12 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp13 = tmp1 - tmp2;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[7]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[5]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[4]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM) ((tmp10 + tmp11 - 8 * CENTERJSAMPLE) << PASS1_BITS);
    dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);

    z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); /* c6 */
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS-PASS1_BITS-1);
    dataptr[2] = (DCTELEM)
      RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865), /* c2-c6 */
		  CONST_BITS-PASS1_BITS);
    dataptr[6] = (DCTELEM)
      RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065), /* c2+c6 */
		  CONST_BITS-PASS1_BITS);

    /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
     * i0..i3 in the paper are tmp0..tmp3 here.
     */
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp1 + tmp3;

    z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS-PASS1_BITS-1);

    tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* -c3+c5 */
    tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */
    tmp12 += z1;
    tmp13 += z1;

    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
    tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */
    tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */
    tmp0 += z1 + tmp12;
    tmp3 += z1 + tmp13;

    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
    tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */
    tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */
    tmp1 += z1 + tmp13;
    tmp2 += z1 + tmp12;

    dataptr[1] = (DCTELEM) RIGHT_SHIFT(tmp0, CONST_BITS-PASS1_BITS);
    dataptr[3] = (DCTELEM) RIGHT_SHIFT(tmp1, CONST_BITS-PASS1_BITS);
    dataptr[5] = (DCTELEM) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS);
    dataptr[7] = (DCTELEM) RIGHT_SHIFT(tmp3, CONST_BITS-PASS1_BITS);

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * cK represents sqrt(2) * cos(K*pi/16).
   */
  dataptr = data;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part per LL&M figure 1 --- note that published figure is faulty;
     * rotator "c1" should be "c6".
     */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4];

    /* Add fudge factor here for final descale. */
    tmp10 = tmp0 + tmp3 + (ONE << (PASS1_BITS-1));
    tmp12 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp13 = tmp1 - tmp2;

    tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4];

    dataptr[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp10 + tmp11, PASS1_BITS);
    dataptr[DCTSIZE*4] = (DCTELEM) RIGHT_SHIFT(tmp10 - tmp11, PASS1_BITS);

    z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); /* c6 */
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS+PASS1_BITS-1);
    dataptr[DCTSIZE*2] = (DCTELEM)
      RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865), /* c2-c6 */
		  CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*6] = (DCTELEM)
      RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065), /* c2+c6 */
		  CONST_BITS+PASS1_BITS);

    /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
     * i0..i3 in the paper are tmp0..tmp3 here.
     */
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp1 + tmp3;

    z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS+PASS1_BITS-1);

    tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* -c3+c5 */
    tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */
    tmp12 += z1;
    tmp13 += z1;

    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
    tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */
    tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */
    tmp0 += z1 + tmp12;
    tmp3 += z1 + tmp13;

    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
    tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */
    tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */
    tmp1 += z1 + tmp13;
    tmp2 += z1 + tmp12;

    dataptr[DCTSIZE*1] = (DCTELEM) RIGHT_SHIFT(tmp0, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM) RIGHT_SHIFT(tmp1, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*5] = (DCTELEM) RIGHT_SHIFT(tmp2, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*7] = (DCTELEM) RIGHT_SHIFT(tmp3, CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
  }
}
#ifdef DCT_SCALING_SUPPORTED
/*
* Perform the forward DCT on a 7x7 sample block.
*/
GLOBAL(void)
jpeg_fdct_7x7 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* Forward DCT on a 7x7 input block, producing the low 7x7 of an
   * 8x8 coefficient block; the remaining coefficients stay zero.
   */
  INT32 tmp0, tmp1, tmp2, tmp3;
  INT32 tmp10, tmp11, tmp12;
  INT32 z1, z2, z3;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * cK represents sqrt(2) * cos(K*pi/14).
   */
  dataptr = data;
  for (ctr = 0; ctr < 7; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[6]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[5]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[4]);
    tmp3 = GETJSAMPLE(elemptr[3]);

    tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[6]);
    tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[5]);
    tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[4]);

    z1 = tmp0 + tmp2;
    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((z1 + tmp1 + tmp3 - 7 * CENTERJSAMPLE) << PASS1_BITS);
    tmp3 += tmp3;
    z1 -= tmp3;
    z1 -= tmp3;
    z1 = MULTIPLY(z1, FIX(0.353553391)); /* (c2+c6-c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp2, FIX(0.920609002)); /* (c2+c4-c6)/2 */
    z3 = MULTIPLY(tmp1 - tmp2, FIX(0.314692123)); /* c6 */
    dataptr[2] = (DCTELEM) DESCALE(z1 + z2 + z3, CONST_BITS-PASS1_BITS);
    z1 -= z2;
    z2 = MULTIPLY(tmp0 - tmp1, FIX(0.881747734)); /* c4 */
    dataptr[4] = (DCTELEM)
      DESCALE(z2 + z3 - MULTIPLY(tmp1 - tmp3, FIX(0.707106781)), /* c2+c6-c4 */
	      CONST_BITS-PASS1_BITS);
    dataptr[6] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS-PASS1_BITS);

    /* Odd part */
    tmp1 = MULTIPLY(tmp10 + tmp11, FIX(0.935414347)); /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(tmp10 - tmp11, FIX(0.170262339)); /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(tmp11 + tmp12, - FIX(1.378756276)); /* -c1 */
    tmp1 += tmp2;
    tmp3 = MULTIPLY(tmp10 + tmp12, FIX(0.613604268)); /* c5 */
    tmp0 += tmp3;
    tmp2 += tmp3 + MULTIPLY(tmp12, FIX(1.870828693)); /* c3+c1-c5 */

    dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS-PASS1_BITS);
    dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS-PASS1_BITS);
    dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS-PASS1_BITS);

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/7)**2 = 64/49, which we fold
   * into the constant multipliers:
   * cK now represents sqrt(2) * cos(K*pi/14) * 64/49.
   */
  dataptr = data;
  for (ctr = 0; ctr < 7; ctr++) {
    /* Even part */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*6];
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*5];
    tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*4];
    tmp3 = dataptr[DCTSIZE*3];

    tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*6];
    tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*5];
    tmp12 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*4];

    z1 = tmp0 + tmp2;
    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(z1 + tmp1 + tmp3, FIX(1.306122449)), /* 64/49 */
	      CONST_BITS+PASS1_BITS);
    tmp3 += tmp3;
    z1 -= tmp3;
    z1 -= tmp3;
    z1 = MULTIPLY(z1, FIX(0.461784020)); /* (c2+c6-c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp2, FIX(1.202428084)); /* (c2+c4-c6)/2 */
    z3 = MULTIPLY(tmp1 - tmp2, FIX(0.411026446)); /* c6 */
    dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + z2 + z3, CONST_BITS+PASS1_BITS);
    z1 -= z2;
    z2 = MULTIPLY(tmp0 - tmp1, FIX(1.151670509)); /* c4 */
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(z2 + z3 - MULTIPLY(tmp1 - tmp3, FIX(0.923568041)), /* c2+c6-c4 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS+PASS1_BITS);

    /* Odd part */
    tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.221765677)); /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(tmp10 - tmp11, FIX(0.222383464)); /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(tmp11 + tmp12, - FIX(1.800824523)); /* -c1 */
    tmp1 += tmp2;
    tmp3 = MULTIPLY(tmp10 + tmp12, FIX(0.801442310)); /* c5 */
    tmp0 += tmp3;
    tmp2 += tmp3 + MULTIPLY(tmp12, FIX(2.443531355)); /* c3+c1-c5 */

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 6x6 sample block.
*/
GLOBAL(void)
jpeg_fdct_6x6 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* Forward DCT on a 6x6 input block, producing the low 6x6 of an
   * 8x8 coefficient block; the remaining coefficients stay zero.
   */
  INT32 tmp0, tmp1, tmp2;
  INT32 tmp10, tmp11, tmp12;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * cK represents sqrt(2) * cos(K*pi/12).
   */
  dataptr = data;
  for (ctr = 0; ctr < 6; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[5]);
    tmp11 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[4]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[3]);

    tmp10 = tmp0 + tmp2;
    tmp12 = tmp0 - tmp2;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[5]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[4]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[3]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 - 6 * CENTERJSAMPLE) << PASS1_BITS);
    dataptr[2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp12, FIX(1.224744871)), /* c2 */
	      CONST_BITS-PASS1_BITS);
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(0.707106781)), /* c4 */
	      CONST_BITS-PASS1_BITS);

    /* Odd part */
    tmp10 = DESCALE(MULTIPLY(tmp0 + tmp2, FIX(0.366025404)), /* c5 */
		    CONST_BITS-PASS1_BITS);

    dataptr[1] = (DCTELEM) (tmp10 + ((tmp0 + tmp1) << PASS1_BITS));
    dataptr[3] = (DCTELEM) ((tmp0 - tmp1 - tmp2) << PASS1_BITS);
    dataptr[5] = (DCTELEM) (tmp10 + ((tmp2 - tmp1) << PASS1_BITS));

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/6)**2 = 16/9, which we fold
   * into the constant multipliers:
   * cK now represents sqrt(2) * cos(K*pi/12) * 16/9.
   */
  dataptr = data;
  for (ctr = 0; ctr < 6; ctr++) {
    /* Even part */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*5];
    tmp11 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3];

    tmp10 = tmp0 + tmp2;
    tmp12 = tmp0 - tmp2;

    tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*5];
    tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp11, FIX(1.777777778)), /* 16/9 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp12, FIX(2.177324216)), /* c2 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(1.257078722)), /* c4 */
	      CONST_BITS+PASS1_BITS);

    /* Odd part */
    tmp10 = MULTIPLY(tmp0 + tmp2, FIX(0.650711829)); /* c5 */

    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp0 + tmp1, FIX(1.777777778)), /* 16/9 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 - tmp1 - tmp2, FIX(1.777777778)), /* 16/9 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*5] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp2 - tmp1, FIX(1.777777778)), /* 16/9 */
	      CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 5x5 sample block.
*/
GLOBAL(void)
jpeg_fdct_5x5 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* Forward DCT on a 5x5 input block, producing the low 5x5 of an
   * 8x8 coefficient block; the remaining coefficients stay zero.
   */
  INT32 tmp0, tmp1, tmp2;
  INT32 tmp10, tmp11;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * We scale the results further by 2 as part of output adaption
   * scaling for different DCT size.
   * cK represents sqrt(2) * cos(K*pi/10).
   */
  dataptr = data;
  for (ctr = 0; ctr < 5; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[4]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[3]);
    tmp2 = GETJSAMPLE(elemptr[2]);

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[4]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[3]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp2 - 5 * CENTERJSAMPLE) << (PASS1_BITS+1));
    tmp11 = MULTIPLY(tmp11, FIX(0.790569415)); /* (c2+c4)/2 */
    tmp10 -= tmp2 << 2;
    tmp10 = MULTIPLY(tmp10, FIX(0.353553391)); /* (c2-c4)/2 */
    dataptr[2] = (DCTELEM) DESCALE(tmp11 + tmp10, CONST_BITS-PASS1_BITS-1);
    dataptr[4] = (DCTELEM) DESCALE(tmp11 - tmp10, CONST_BITS-PASS1_BITS-1);

    /* Odd part */
    tmp10 = MULTIPLY(tmp0 + tmp1, FIX(0.831253876)); /* c3 */

    dataptr[1] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp0, FIX(0.513743148)), /* c1-c3 */
	      CONST_BITS-PASS1_BITS-1);
    dataptr[3] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp1, FIX(2.176250899)), /* c1+c3 */
	      CONST_BITS-PASS1_BITS-1);

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/5)**2 = 64/25, which we partially
   * fold into the constant multipliers (other part was done in pass 1):
   * cK now represents sqrt(2) * cos(K*pi/10) * 32/25.
   */
  dataptr = data;
  for (ctr = 0; ctr < 5; ctr++) {
    /* Even part */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*4];
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*3];
    tmp2 = dataptr[DCTSIZE*2];

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;

    tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*4];
    tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*3];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp2, FIX(1.28)), /* 32/25 */
	      CONST_BITS+PASS1_BITS);
    tmp11 = MULTIPLY(tmp11, FIX(1.011928851)); /* (c2+c4)/2 */
    tmp10 -= tmp2 << 2;
    tmp10 = MULTIPLY(tmp10, FIX(0.452548340)); /* (c2-c4)/2 */
    dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(tmp11 + tmp10, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp11 - tmp10, CONST_BITS+PASS1_BITS);

    /* Odd part */
    tmp10 = MULTIPLY(tmp0 + tmp1, FIX(1.064004961)); /* c3 */

    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp0, FIX(0.657591230)), /* c1-c3 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp1, FIX(2.785601151)), /* c1+c3 */
	      CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 4x4 sample block.
*/
GLOBAL(void)
jpeg_fdct_4x4 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* Forward DCT on a 4x4 input block, producing the low 4x4 of an
   * 8x8 coefficient block; the remaining coefficients stay zero.
   */
  INT32 tmp0, tmp1;
  INT32 tmp10, tmp11;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * We must also scale the output by (8/4)**2 = 2**2, which we add here.
   * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point FDCT].
   */
  dataptr = data;
  for (ctr = 0; ctr < 4; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[3]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[2]);

    tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[3]);
    tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[2]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp0 + tmp1 - 4 * CENTERJSAMPLE) << (PASS1_BITS+2));
    dataptr[2] = (DCTELEM) ((tmp0 - tmp1) << (PASS1_BITS+2));

    /* Odd part */
    tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100); /* c6 */
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-3);

    dataptr[1] = (DCTELEM)
      RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */
		  CONST_BITS-PASS1_BITS-2);
    dataptr[3] = (DCTELEM)
      RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */
		  CONST_BITS-PASS1_BITS-2);

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point FDCT].
   */
  dataptr = data;
  for (ctr = 0; ctr < 4; ctr++) {
    /* Even part */
    /* Add fudge factor here for final descale. */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*3] + (ONE << (PASS1_BITS-1));
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*2];

    tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*3];
    tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*2];

    dataptr[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp0 + tmp1, PASS1_BITS);
    dataptr[DCTSIZE*2] = (DCTELEM) RIGHT_SHIFT(tmp0 - tmp1, PASS1_BITS);

    /* Odd part */
    tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100); /* c6 */
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS+PASS1_BITS-1);

    dataptr[DCTSIZE*1] = (DCTELEM)
      RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */
		  CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM)
      RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */
		  CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 3x3 sample block.
*/
GLOBAL(void)
jpeg_fdct_3x3 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* Forward DCT on a 3x3 input block, producing the low 3x3 of an
   * 8x8 coefficient block; the remaining coefficients stay zero.
   */
  INT32 tmp0, tmp1, tmp2;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * We scale the results further by 2**2 as part of output adaption
   * scaling for different DCT size.
   * cK represents sqrt(2) * cos(K*pi/6).
   */
  dataptr = data;
  for (ctr = 0; ctr < 3; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[2]);
    tmp1 = GETJSAMPLE(elemptr[1]);

    tmp2 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[2]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp0 + tmp1 - 3 * CENTERJSAMPLE) << (PASS1_BITS+2));
    dataptr[2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 - tmp1 - tmp1, FIX(0.707106781)), /* c2 */
	      CONST_BITS-PASS1_BITS-2);

    /* Odd part */
    dataptr[1] = (DCTELEM)
      DESCALE(MULTIPLY(tmp2, FIX(1.224744871)), /* c1 */
	      CONST_BITS-PASS1_BITS-2);

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/3)**2 = 64/9, which we partially
   * fold into the constant multipliers (other part was done in pass 1):
   * cK now represents sqrt(2) * cos(K*pi/6) * 16/9.
   */
  dataptr = data;
  for (ctr = 0; ctr < 3; ctr++) {
    /* Even part */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*2];
    tmp1 = dataptr[DCTSIZE*1];

    tmp2 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*2];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 + tmp1, FIX(1.777777778)), /* 16/9 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 - tmp1 - tmp1, FIX(1.257078722)), /* c2 */
	      CONST_BITS+PASS1_BITS);

    /* Odd part */
    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(MULTIPLY(tmp2, FIX(2.177324216)), /* c1 */
	      CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 2x2 sample block.
*/
GLOBAL(void)
jpeg_fdct_2x2 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* Forward DCT of a 2x2 block: just two 2-point butterflies per pass.
   * data: output 8x8 coefficient block (only the top-left 2x2 corner is
   * nonzero); sample_data + start_col locate the 2x2 input samples.
   */
  DCTELEM sum0, dif0, sum1, dif1;
  JSAMPROW elemptr;

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT.
   */

  /* Row 0: 2-point butterfly */
  elemptr = sample_data[0] + start_col;
  sum0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[1]);
  dif0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[1]);

  /* Row 1: 2-point butterfly */
  elemptr = sample_data[1] + start_col;
  sum1 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[1]);
  dif1 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[1]);

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/2)**2 = 2**4.
   */

  /* Column 0 */
  /* Apply unsigned->signed conversion (DC term only needs centering). */
  data[DCTSIZE*0] = (sum0 + sum1 - 4 * CENTERJSAMPLE) << 4;
  data[DCTSIZE*1] = (sum0 - sum1) << 4;

  /* Column 1 */
  data[DCTSIZE*0+1] = (dif0 + dif1) << 4;
  data[DCTSIZE*1+1] = (dif0 - dif1) << 4;
}
/*
* Perform the forward DCT on a 1x1 sample block.
*/
GLOBAL(void)
jpeg_fdct_1x1 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* Degenerate 1x1 "DCT": the single sample *is* the DC coefficient.
   * data: output 8x8 coefficient block; everything but data[0] stays zero.
   */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* We leave the result scaled up by an overall factor of 8,
   * and additionally by (8/1)**2 = 2**6 for output adaption,
   * giving a total left shift of 6.  Subtracting CENTERJSAMPLE
   * performs the unsigned->signed conversion.
   */
  data[0] = (DCTELEM)
    ((GETJSAMPLE(sample_data[0][start_col]) - CENTERJSAMPLE) << 6);
}
/*
* Perform the forward DCT on a 9x9 sample block.
*/
GLOBAL(void)
jpeg_fdct_9x9 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: output 8x8 coefficient block; sample_data + start_col locate the
   * 9x9 input samples.  Since the input has one more row than the 8-row
   * output array, pass 1 writes the 9th row of intermediate results into
   * a small local workspace, which pass 2 reads back.
   */
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4;
  INT32 tmp10, tmp11, tmp12, tmp13;
  INT32 z1, z2;
  DCTELEM workspace[8];		/* holds pass-1 results of input row 8 */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * we scale the results further by 2 as part of output adaption
   * scaling for different DCT size.
   * cK represents sqrt(2) * cos(K*pi/18).
   */

  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    /* Sums of mirrored sample pairs feed the even coefficients ... */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[8]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[7]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[6]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[5]);
    tmp4 = GETJSAMPLE(elemptr[4]);

    /* ... and differences feed the odd coefficients. */
    tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[8]);
    tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[7]);
    tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[6]);
    tmp13 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[5]);

    z1 = tmp0 + tmp2 + tmp3;
    z2 = tmp1 + tmp4;

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM) ((z1 + z2 - 9 * CENTERJSAMPLE) << 1);

    dataptr[6] = (DCTELEM)
      DESCALE(MULTIPLY(z1 - z2 - z2, FIX(0.707106781)), /* c6 */
              CONST_BITS-1);

    z1 = MULTIPLY(tmp0 - tmp2, FIX(1.328926049)); /* c2 */
    z2 = MULTIPLY(tmp1 - tmp4 - tmp4, FIX(0.707106781)); /* c6 */

    dataptr[2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp2 - tmp3, FIX(1.083350441)) /* c4 */
              + z1 + z2, CONST_BITS-1);

    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp3 - tmp0, FIX(0.245575608)) /* c8 */
              + z1 - z2, CONST_BITS-1);

    /* Odd part */

    dataptr[3] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12 - tmp13, FIX(1.224744871)), /* c3 */
              CONST_BITS-1);

    tmp11 = MULTIPLY(tmp11, FIX(1.224744871));        /* c3 */

    tmp0 = MULTIPLY(tmp10 + tmp12, FIX(0.909038955)); /* c5 */
    tmp1 = MULTIPLY(tmp10 + tmp13, FIX(0.483689525)); /* c7 */

    dataptr[1] = (DCTELEM) DESCALE(tmp11 + tmp0 + tmp1, CONST_BITS-1);

    tmp2 = MULTIPLY(tmp12 - tmp13, FIX(1.392728481)); /* c1 */

    dataptr[5] = (DCTELEM) DESCALE(tmp0 - tmp11 - tmp2, CONST_BITS-1);
    dataptr[7] = (DCTELEM) DESCALE(tmp1 - tmp11 + tmp2, CONST_BITS-1);

    /* Rows 0..7 land in data[]; row 8 is redirected into workspace[]. */
    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 9)
        break;			/* Done. */
      dataptr += DCTSIZE;	/* advance pointer to next row */
    } else
      dataptr = workspace;	/* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/9)**2 = 64/81, which we partially
   * fold into the constant multipliers and final/initial shifting:
   * cK now represents sqrt(2) * cos(K*pi/18) * 128/81.
   */

  dataptr = data;
  wsptr = workspace;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part */

    /* The 9th intermediate row (mirror partner of row 0) comes from wsptr. */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*0];
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*7];
    tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*6];
    tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*5];
    tmp4 = dataptr[DCTSIZE*4];

    tmp10 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*0];
    tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*7];
    tmp12 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*6];
    tmp13 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*5];

    z1 = tmp0 + tmp2 + tmp3;
    z2 = tmp1 + tmp4;

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(z1 + z2, FIX(1.580246914)), /* 128/81 */
              CONST_BITS+2);

    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(MULTIPLY(z1 - z2 - z2, FIX(1.117403309)), /* c6 */
              CONST_BITS+2);

    z1 = MULTIPLY(tmp0 - tmp2, FIX(2.100031287)); /* c2 */
    z2 = MULTIPLY(tmp1 - tmp4 - tmp4, FIX(1.117403309)); /* c6 */

    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp2 - tmp3, FIX(1.711961190)) /* c4 */
              + z1 + z2, CONST_BITS+2);

    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp3 - tmp0, FIX(0.388070096)) /* c8 */
              + z1 - z2, CONST_BITS+2);

    /* Odd part */

    dataptr[DCTSIZE*3] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12 - tmp13, FIX(1.935399303)), /* c3 */
              CONST_BITS+2);

    tmp11 = MULTIPLY(tmp11, FIX(1.935399303));        /* c3 */

    tmp0 = MULTIPLY(tmp10 + tmp12, FIX(1.436506004)); /* c5 */
    tmp1 = MULTIPLY(tmp10 + tmp13, FIX(0.764348879)); /* c7 */

    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(tmp11 + tmp0 + tmp1, CONST_BITS+2);

    tmp2 = MULTIPLY(tmp12 - tmp13, FIX(2.200854883)); /* c1 */

    dataptr[DCTSIZE*5] = (DCTELEM)
      DESCALE(tmp0 - tmp11 - tmp2, CONST_BITS+2);
    dataptr[DCTSIZE*7] = (DCTELEM)
      DESCALE(tmp1 - tmp11 + tmp2, CONST_BITS+2);

    dataptr++;			/* advance pointer to next column */
    wsptr++;			/* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 10x10 sample block.
*/
GLOBAL(void)
jpeg_fdct_10x10 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: output 8x8 coefficient block; sample_data + start_col locate the
   * 10x10 input samples.  Input rows 8-9 exceed the 8-row output array, so
   * pass 1 writes their intermediate results into workspace[] and pass 2
   * reads them back.
   */
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  DCTELEM workspace[8*2];	/* holds pass-1 results of input rows 8-9 */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * we scale the results further by 2 as part of output adaption
   * scaling for different DCT size.
   * cK represents sqrt(2) * cos(K*pi/20).
   */

  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    /* Sums of mirrored sample pairs for the even half ... */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[9]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[8]);
    tmp12 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[7]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[6]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[5]);

    /* ... folded again into a second level of sums and differences. */
    tmp10 = tmp0 + tmp4;
    tmp13 = tmp0 - tmp4;
    tmp11 = tmp1 + tmp3;
    tmp14 = tmp1 - tmp3;

    /* Differences of mirrored pairs feed the odd half. */
    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[9]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[8]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[7]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[6]);
    tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[5]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 + tmp12 - 10 * CENTERJSAMPLE) << 1);
    tmp12 += tmp12;
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.144122806)) - /* c4 */
              MULTIPLY(tmp11 - tmp12, FIX(0.437016024)),  /* c8 */
              CONST_BITS-1);
    tmp10 = MULTIPLY(tmp13 + tmp14, FIX(0.831253876));    /* c6 */
    dataptr[2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp13, FIX(0.513743148)),  /* c2-c6 */
              CONST_BITS-1);
    dataptr[6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp14, FIX(2.176250899)),  /* c2+c6 */
              CONST_BITS-1);

    /* Odd part */

    tmp10 = tmp0 + tmp4;
    tmp11 = tmp1 - tmp3;
    dataptr[5] = (DCTELEM) ((tmp10 - tmp11 - tmp2) << 1);
    tmp2 <<= CONST_BITS;	/* align tmp2's scale with the MULTIPLY terms */
    dataptr[1] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0, FIX(1.396802247)) +          /* c1 */
              MULTIPLY(tmp1, FIX(1.260073511)) + tmp2 +   /* c3 */
              MULTIPLY(tmp3, FIX(0.642039522)) +          /* c7 */
              MULTIPLY(tmp4, FIX(0.221231742)),           /* c9 */
              CONST_BITS-1);
    tmp12 = MULTIPLY(tmp0 - tmp4, FIX(0.951056516)) -     /* (c3+c7)/2 */
            MULTIPLY(tmp1 + tmp3, FIX(0.587785252));      /* (c1-c9)/2 */
    tmp13 = MULTIPLY(tmp10 + tmp11, FIX(0.309016994)) +   /* (c3-c7)/2 */
            (tmp11 << (CONST_BITS - 1)) - tmp2;
    dataptr[3] = (DCTELEM) DESCALE(tmp12 + tmp13, CONST_BITS-1);
    dataptr[7] = (DCTELEM) DESCALE(tmp12 - tmp13, CONST_BITS-1);

    /* Rows 0..7 land in data[]; rows 8-9 are redirected into workspace[]. */
    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 10)
        break;			/* Done. */
      dataptr += DCTSIZE;	/* advance pointer to next row */
    } else
      dataptr = workspace;	/* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/10)**2 = 16/25, which we partially
   * fold into the constant multipliers and final/initial shifting:
   * cK now represents sqrt(2) * cos(K*pi/20) * 32/25.
   */

  dataptr = data;
  wsptr = workspace;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part */

    /* Intermediate rows 8-9 (mirror partners of rows 1 and 0) come
     * from the workspace. */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*1];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*0];
    tmp12 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*7];
    tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*6];
    tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5];

    tmp10 = tmp0 + tmp4;
    tmp13 = tmp0 - tmp4;
    tmp11 = tmp1 + tmp3;
    tmp14 = tmp1 - tmp3;

    tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*1];
    tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*0];
    tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*7];
    tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*6];
    tmp4 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12, FIX(1.28)), /* 32/25 */
              CONST_BITS+2);
    tmp12 += tmp12;
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.464477191)) - /* c4 */
              MULTIPLY(tmp11 - tmp12, FIX(0.559380511)),  /* c8 */
              CONST_BITS+2);
    tmp10 = MULTIPLY(tmp13 + tmp14, FIX(1.064004961));    /* c6 */
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp13, FIX(0.657591230)),  /* c2-c6 */
              CONST_BITS+2);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp14, FIX(2.785601151)),  /* c2+c6 */
              CONST_BITS+2);

    /* Odd part */

    tmp10 = tmp0 + tmp4;
    tmp11 = tmp1 - tmp3;
    dataptr[DCTSIZE*5] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp11 - tmp2, FIX(1.28)),  /* 32/25 */
              CONST_BITS+2);
    tmp2 = MULTIPLY(tmp2, FIX(1.28));                     /* 32/25 */
    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0, FIX(1.787906876)) +          /* c1 */
              MULTIPLY(tmp1, FIX(1.612894094)) + tmp2 +   /* c3 */
              MULTIPLY(tmp3, FIX(0.821810588)) +          /* c7 */
              MULTIPLY(tmp4, FIX(0.283176630)),           /* c9 */
              CONST_BITS+2);
    tmp12 = MULTIPLY(tmp0 - tmp4, FIX(1.217352341)) -     /* (c3+c7)/2 */
            MULTIPLY(tmp1 + tmp3, FIX(0.752365123));      /* (c1-c9)/2 */
    tmp13 = MULTIPLY(tmp10 + tmp11, FIX(0.395541753)) +   /* (c3-c7)/2 */
            MULTIPLY(tmp11, FIX(0.64)) - tmp2;            /* 16/25 */
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp12 + tmp13, CONST_BITS+2);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp12 - tmp13, CONST_BITS+2);

    dataptr++;			/* advance pointer to next column */
    wsptr++;			/* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on an 11x11 sample block.
*/
GLOBAL(void)
jpeg_fdct_11x11 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: output 8x8 coefficient block; sample_data + start_col locate the
   * 11x11 input samples.  Input rows 8-10 exceed the 8-row output array, so
   * pass 1 stores their intermediate results in workspace[] for pass 2.
   */
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 z1, z2, z3;
  DCTELEM workspace[8*3];	/* holds pass-1 results of input rows 8-10 */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * we scale the results further by 2 as part of output adaption
   * scaling for different DCT size.
   * cK represents sqrt(2) * cos(K*pi/22).
   */

  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    /* Sums of mirrored sample pairs plus the center sample (tmp5). */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[10]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[9]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[8]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[7]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[6]);
    tmp5 = GETJSAMPLE(elemptr[5]);

    /* Differences of mirrored pairs feed the odd coefficients. */
    tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[10]);
    tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[9]);
    tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[8]);
    tmp13 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[7]);
    tmp14 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[6]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp0 + tmp1 + tmp2 + tmp3 + tmp4 + tmp5 - 11 * CENTERJSAMPLE) << 1);

    /* Re-center the pair sums about twice the middle sample. */
    tmp5 += tmp5;
    tmp0 -= tmp5;
    tmp1 -= tmp5;
    tmp2 -= tmp5;
    tmp3 -= tmp5;
    tmp4 -= tmp5;

    z1 = MULTIPLY(tmp0 + tmp3, FIX(1.356927976)) +       /* c2 */
         MULTIPLY(tmp2 + tmp4, FIX(0.201263574));        /* c10 */
    z2 = MULTIPLY(tmp1 - tmp3, FIX(0.926112931));        /* c6 */
    z3 = MULTIPLY(tmp0 - tmp1, FIX(1.189712156));        /* c4 */

    dataptr[2] = (DCTELEM)
      DESCALE(z1 + z2 - MULTIPLY(tmp3, FIX(1.018300590)) /* c2+c8-c6 */
              - MULTIPLY(tmp4, FIX(1.390975730)),        /* c4+c10 */
              CONST_BITS-1);
    dataptr[4] = (DCTELEM)
      DESCALE(z2 + z3 + MULTIPLY(tmp1, FIX(0.062335650)) /* c4-c6-c10 */
              - MULTIPLY(tmp2, FIX(1.356927976))         /* c2 */
              + MULTIPLY(tmp4, FIX(0.587485545)),        /* c8 */
              CONST_BITS-1);
    dataptr[6] = (DCTELEM)
      DESCALE(z1 + z3 - MULTIPLY(tmp0, FIX(1.620527200)) /* c2+c4-c6 */
              - MULTIPLY(tmp2, FIX(0.788749120)),        /* c8+c10 */
              CONST_BITS-1);

    /* Odd part */

    tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.286413905));    /* c3 */
    tmp2 = MULTIPLY(tmp10 + tmp12, FIX(1.068791298));    /* c5 */
    tmp3 = MULTIPLY(tmp10 + tmp13, FIX(0.764581576));    /* c7 */
    tmp0 = tmp1 + tmp2 + tmp3 - MULTIPLY(tmp10, FIX(1.719967871)) /* c7+c5+c3-c1 */
           + MULTIPLY(tmp14, FIX(0.398430003));          /* c9 */
    tmp4 = MULTIPLY(tmp11 + tmp12, - FIX(0.764581576));  /* -c7 */
    tmp5 = MULTIPLY(tmp11 + tmp13, - FIX(1.399818907));  /* -c1 */
    tmp1 += tmp4 + tmp5 + MULTIPLY(tmp11, FIX(1.276416582)) /* c9+c7+c1-c3 */
            - MULTIPLY(tmp14, FIX(1.068791298));         /* c5 */
    tmp10 = MULTIPLY(tmp12 + tmp13, FIX(0.398430003));   /* c9 */
    tmp2 += tmp4 + tmp10 - MULTIPLY(tmp12, FIX(1.989053629)) /* c9+c5+c3-c7 */
            + MULTIPLY(tmp14, FIX(1.399818907));         /* c1 */
    tmp3 += tmp5 + tmp10 + MULTIPLY(tmp13, FIX(1.305598626)) /* c1+c5-c9-c7 */
            - MULTIPLY(tmp14, FIX(1.286413905));         /* c3 */

    dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS-1);
    dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS-1);
    dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS-1);
    dataptr[7] = (DCTELEM) DESCALE(tmp3, CONST_BITS-1);

    /* Rows 0..7 land in data[]; rows 8-10 are redirected into workspace[]. */
    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 11)
        break;			/* Done. */
      dataptr += DCTSIZE;	/* advance pointer to next row */
    } else
      dataptr = workspace;	/* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/11)**2 = 64/121, which we partially
   * fold into the constant multipliers and final/initial shifting:
   * cK now represents sqrt(2) * cos(K*pi/22) * 128/121.
   */

  dataptr = data;
  wsptr = workspace;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part */

    /* Intermediate rows 8-10 (mirror partners of rows 2, 1, 0) come
     * from the workspace. */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*2];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*1];
    tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*0];
    tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*7];
    tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*6];
    tmp5 = dataptr[DCTSIZE*5];

    tmp10 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*2];
    tmp11 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*1];
    tmp12 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*0];
    tmp13 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*7];
    tmp14 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*6];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 + tmp1 + tmp2 + tmp3 + tmp4 + tmp5,
                       FIX(1.057851240)),                /* 128/121 */
              CONST_BITS+2);

    tmp5 += tmp5;
    tmp0 -= tmp5;
    tmp1 -= tmp5;
    tmp2 -= tmp5;
    tmp3 -= tmp5;
    tmp4 -= tmp5;

    z1 = MULTIPLY(tmp0 + tmp3, FIX(1.435427942)) +       /* c2 */
         MULTIPLY(tmp2 + tmp4, FIX(0.212906922));        /* c10 */
    z2 = MULTIPLY(tmp1 - tmp3, FIX(0.979689713));        /* c6 */
    z3 = MULTIPLY(tmp0 - tmp1, FIX(1.258538479));        /* c4 */

    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(z1 + z2 - MULTIPLY(tmp3, FIX(1.077210542)) /* c2+c8-c6 */
              - MULTIPLY(tmp4, FIX(1.471445400)),        /* c4+c10 */
              CONST_BITS+2);
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(z2 + z3 + MULTIPLY(tmp1, FIX(0.065941844)) /* c4-c6-c10 */
              - MULTIPLY(tmp2, FIX(1.435427942))         /* c2 */
              + MULTIPLY(tmp4, FIX(0.621472312)),        /* c8 */
              CONST_BITS+2);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(z1 + z3 - MULTIPLY(tmp0, FIX(1.714276708)) /* c2+c4-c6 */
              - MULTIPLY(tmp2, FIX(0.834379234)),        /* c8+c10 */
              CONST_BITS+2);

    /* Odd part */

    tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.360834544));    /* c3 */
    tmp2 = MULTIPLY(tmp10 + tmp12, FIX(1.130622199));    /* c5 */
    tmp3 = MULTIPLY(tmp10 + tmp13, FIX(0.808813568));    /* c7 */
    tmp0 = tmp1 + tmp2 + tmp3 - MULTIPLY(tmp10, FIX(1.819470145)) /* c7+c5+c3-c1 */
           + MULTIPLY(tmp14, FIX(0.421479672));          /* c9 */
    tmp4 = MULTIPLY(tmp11 + tmp12, - FIX(0.808813568));  /* -c7 */
    tmp5 = MULTIPLY(tmp11 + tmp13, - FIX(1.480800167));  /* -c1 */
    tmp1 += tmp4 + tmp5 + MULTIPLY(tmp11, FIX(1.350258864)) /* c9+c7+c1-c3 */
            - MULTIPLY(tmp14, FIX(1.130622199));         /* c5 */
    tmp10 = MULTIPLY(tmp12 + tmp13, FIX(0.421479672));   /* c9 */
    tmp2 += tmp4 + tmp10 - MULTIPLY(tmp12, FIX(2.104122847)) /* c9+c5+c3-c7 */
            + MULTIPLY(tmp14, FIX(1.480800167));         /* c1 */
    tmp3 += tmp5 + tmp10 + MULTIPLY(tmp13, FIX(1.381129125)) /* c1+c5-c9-c7 */
            - MULTIPLY(tmp14, FIX(1.360834544));         /* c3 */

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+2);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+2);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+2);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp3, CONST_BITS+2);

    dataptr++;			/* advance pointer to next column */
    wsptr++;			/* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 12x12 sample block.
*/
GLOBAL(void)
jpeg_fdct_12x12 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: output 8x8 coefficient block; sample_data + start_col locate the
   * 12x12 input samples.  Input rows 8-11 exceed the 8-row output array, so
   * pass 1 stores their intermediate results in workspace[] for pass 2.
   */
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  DCTELEM workspace[8*4];	/* holds pass-1 results of input rows 8-11 */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT.
   * cK represents sqrt(2) * cos(K*pi/24).
   */

  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    /* First level: sums of mirrored sample pairs. */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[11]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[10]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[9]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[8]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[7]);
    tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[6]);

    /* Second level: sums and differences of the first-level sums. */
    tmp10 = tmp0 + tmp5;
    tmp13 = tmp0 - tmp5;
    tmp11 = tmp1 + tmp4;
    tmp14 = tmp1 - tmp4;
    tmp12 = tmp2 + tmp3;
    tmp15 = tmp2 - tmp3;

    /* Differences of mirrored pairs feed the odd coefficients. */
    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[11]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[10]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[9]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[8]);
    tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[7]);
    tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[6]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM) (tmp10 + tmp11 + tmp12 - 12 * CENTERJSAMPLE);
    dataptr[6] = (DCTELEM) (tmp13 - tmp14 - tmp15);
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.224744871)), /* c4 */
              CONST_BITS);
    dataptr[2] = (DCTELEM)
      DESCALE(tmp14 - tmp15 + MULTIPLY(tmp13 + tmp15, FIX(1.366025404)), /* c2 */
              CONST_BITS);

    /* Odd part */

    tmp10 = MULTIPLY(tmp1 + tmp4, FIX_0_541196100);      /* c9 */
    tmp14 = tmp10 + MULTIPLY(tmp1, FIX_0_765366865);     /* c3-c9 */
    tmp15 = tmp10 - MULTIPLY(tmp4, FIX_1_847759065);     /* c3+c9 */
    tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.121971054));     /* c5 */
    tmp13 = MULTIPLY(tmp0 + tmp3, FIX(0.860918669));     /* c7 */
    tmp10 = tmp12 + tmp13 + tmp14 - MULTIPLY(tmp0, FIX(0.580774953)) /* c5+c7-c1 */
            + MULTIPLY(tmp5, FIX(0.184591911));          /* c11 */
    tmp11 = MULTIPLY(tmp2 + tmp3, - FIX(0.184591911));   /* -c11 */
    tmp12 += tmp11 - tmp15 - MULTIPLY(tmp2, FIX(2.339493912)) /* c1+c5-c11 */
             + MULTIPLY(tmp5, FIX(0.860918669));         /* c7 */
    tmp13 += tmp11 - tmp14 + MULTIPLY(tmp3, FIX(0.725788011)) /* c1+c11-c7 */
             - MULTIPLY(tmp5, FIX(1.121971054));         /* c5 */
    tmp11 = tmp15 + MULTIPLY(tmp0 - tmp3, FIX(1.306562965)) /* c3 */
            - MULTIPLY(tmp2 + tmp5, FIX_0_541196100);    /* c9 */

    dataptr[1] = (DCTELEM) DESCALE(tmp10, CONST_BITS);
    dataptr[3] = (DCTELEM) DESCALE(tmp11, CONST_BITS);
    dataptr[5] = (DCTELEM) DESCALE(tmp12, CONST_BITS);
    dataptr[7] = (DCTELEM) DESCALE(tmp13, CONST_BITS);

    /* Rows 0..7 land in data[]; rows 8-11 are redirected into workspace[]. */
    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 12)
        break;			/* Done. */
      dataptr += DCTSIZE;	/* advance pointer to next row */
    } else
      dataptr = workspace;	/* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/12)**2 = 4/9, which we partially
   * fold into the constant multipliers and final shifting:
   * cK now represents sqrt(2) * cos(K*pi/24) * 8/9.
   */

  dataptr = data;
  wsptr = workspace;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part */

    /* Intermediate rows 8-11 (mirror partners of rows 3..0) come
     * from the workspace. */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*3];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*2];
    tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*1];
    tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*0];
    tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*7];
    tmp5 = dataptr[DCTSIZE*5] + dataptr[DCTSIZE*6];

    tmp10 = tmp0 + tmp5;
    tmp13 = tmp0 - tmp5;
    tmp11 = tmp1 + tmp4;
    tmp14 = tmp1 - tmp4;
    tmp12 = tmp2 + tmp3;
    tmp15 = tmp2 - tmp3;

    tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*3];
    tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*2];
    tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*1];
    tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*0];
    tmp4 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*7];
    tmp5 = dataptr[DCTSIZE*5] - dataptr[DCTSIZE*6];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12, FIX(0.888888889)), /* 8/9 */
              CONST_BITS+1);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(MULTIPLY(tmp13 - tmp14 - tmp15, FIX(0.888888889)), /* 8/9 */
              CONST_BITS+1);
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.088662108)),         /* c4 */
              CONST_BITS+1);
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp14 - tmp15, FIX(0.888888889)) +        /* 8/9 */
              MULTIPLY(tmp13 + tmp15, FIX(1.214244803)),         /* c2 */
              CONST_BITS+1);

    /* Odd part */

    tmp10 = MULTIPLY(tmp1 + tmp4, FIX(0.481063200));     /* c9 */
    tmp14 = tmp10 + MULTIPLY(tmp1, FIX(0.680326102));    /* c3-c9 */
    tmp15 = tmp10 - MULTIPLY(tmp4, FIX(1.642452502));    /* c3+c9 */
    tmp12 = MULTIPLY(tmp0 + tmp2, FIX(0.997307603));     /* c5 */
    tmp13 = MULTIPLY(tmp0 + tmp3, FIX(0.765261039));     /* c7 */
    tmp10 = tmp12 + tmp13 + tmp14 - MULTIPLY(tmp0, FIX(0.516244403)) /* c5+c7-c1 */
            + MULTIPLY(tmp5, FIX(0.164081699));          /* c11 */
    tmp11 = MULTIPLY(tmp2 + tmp3, - FIX(0.164081699));   /* -c11 */
    tmp12 += tmp11 - tmp15 - MULTIPLY(tmp2, FIX(2.079550144)) /* c1+c5-c11 */
             + MULTIPLY(tmp5, FIX(0.765261039));         /* c7 */
    tmp13 += tmp11 - tmp14 + MULTIPLY(tmp3, FIX(0.645144899)) /* c1+c11-c7 */
             - MULTIPLY(tmp5, FIX(0.997307603));         /* c5 */
    tmp11 = tmp15 + MULTIPLY(tmp0 - tmp3, FIX(1.161389302)) /* c3 */
            - MULTIPLY(tmp2 + tmp5, FIX(0.481063200));   /* c9 */

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10, CONST_BITS+1);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp11, CONST_BITS+1);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12, CONST_BITS+1);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp13, CONST_BITS+1);

    dataptr++;			/* advance pointer to next column */
    wsptr++;			/* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 13x13 sample block.
*/
GLOBAL(void)
jpeg_fdct_13x13 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: output 8x8 coefficient block; sample_data + start_col locate the
   * 13x13 input samples.  Input rows 8-12 exceed the 8-row output array, so
   * pass 1 stores their intermediate results in workspace[] for pass 2.
   */
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  INT32 z1, z2;
  DCTELEM workspace[8*5];	/* holds pass-1 results of input rows 8-12 */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT.
   * cK represents sqrt(2) * cos(K*pi/26).
   */

  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    /* Sums of mirrored sample pairs plus the center sample (tmp6). */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[12]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[11]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[10]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[9]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[8]);
    tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[7]);
    tmp6 = GETJSAMPLE(elemptr[6]);

    /* Differences of mirrored pairs feed the odd coefficients. */
    tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[12]);
    tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[11]);
    tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[10]);
    tmp13 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[9]);
    tmp14 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[8]);
    tmp15 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[7]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      (tmp0 + tmp1 + tmp2 + tmp3 + tmp4 + tmp5 + tmp6 - 13 * CENTERJSAMPLE);

    /* Re-center the pair sums about twice the middle sample. */
    tmp6 += tmp6;
    tmp0 -= tmp6;
    tmp1 -= tmp6;
    tmp2 -= tmp6;
    tmp3 -= tmp6;
    tmp4 -= tmp6;
    tmp5 -= tmp6;

    dataptr[2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0, FIX(1.373119086)) +   /* c2 */
              MULTIPLY(tmp1, FIX(1.058554052)) +   /* c6 */
              MULTIPLY(tmp2, FIX(0.501487041)) -   /* c10 */
              MULTIPLY(tmp3, FIX(0.170464608)) -   /* c12 */
              MULTIPLY(tmp4, FIX(0.803364869)) -   /* c8 */
              MULTIPLY(tmp5, FIX(1.252223920)),    /* c4 */
              CONST_BITS);
    z1 = MULTIPLY(tmp0 - tmp2, FIX(1.155388986)) - /* (c4+c6)/2 */
         MULTIPLY(tmp3 - tmp4, FIX(0.435816023)) - /* (c2-c10)/2 */
         MULTIPLY(tmp1 - tmp5, FIX(0.316450131));  /* (c8-c12)/2 */
    z2 = MULTIPLY(tmp0 + tmp2, FIX(0.096834934)) - /* (c4-c6)/2 */
         MULTIPLY(tmp3 + tmp4, FIX(0.937303064)) + /* (c2+c10)/2 */
         MULTIPLY(tmp1 + tmp5, FIX(0.486914739));  /* (c8+c12)/2 */

    dataptr[4] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS);
    dataptr[6] = (DCTELEM) DESCALE(z1 - z2, CONST_BITS);

    /* Odd part */

    tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.322312651));   /* c3 */
    tmp2 = MULTIPLY(tmp10 + tmp12, FIX(1.163874945));   /* c5 */
    tmp3 = MULTIPLY(tmp10 + tmp13, FIX(0.937797057)) +  /* c7 */
           MULTIPLY(tmp14 + tmp15, FIX(0.338443458));   /* c11 */
    tmp0 = tmp1 + tmp2 + tmp3 -
           MULTIPLY(tmp10, FIX(2.020082300)) +          /* c3+c5+c7-c1 */
           MULTIPLY(tmp14, FIX(0.318774355));           /* c9-c11 */
    tmp4 = MULTIPLY(tmp14 - tmp15, FIX(0.937797057)) -  /* c7 */
           MULTIPLY(tmp11 + tmp12, FIX(0.338443458));   /* c11 */
    tmp5 = MULTIPLY(tmp11 + tmp13, - FIX(1.163874945)); /* -c5 */
    tmp1 += tmp4 + tmp5 +
            MULTIPLY(tmp11, FIX(0.837223564)) -         /* c5+c9+c11-c3 */
            MULTIPLY(tmp14, FIX(2.341699410));          /* c1+c7 */
    tmp6 = MULTIPLY(tmp12 + tmp13, - FIX(0.657217813)); /* -c9 */
    tmp2 += tmp4 + tmp6 -
            MULTIPLY(tmp12, FIX(1.572116027)) +         /* c1+c5-c9-c11 */
            MULTIPLY(tmp15, FIX(2.260109708));          /* c3+c7 */
    tmp3 += tmp5 + tmp6 +
            MULTIPLY(tmp13, FIX(2.205608352)) -         /* c3+c5+c9-c7 */
            MULTIPLY(tmp15, FIX(1.742345811));          /* c1+c11 */

    dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS);
    dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS);
    dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS);
    dataptr[7] = (DCTELEM) DESCALE(tmp3, CONST_BITS);

    /* Rows 0..7 land in data[]; rows 8-12 are redirected into workspace[]. */
    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 13)
        break;			/* Done. */
      dataptr += DCTSIZE;	/* advance pointer to next row */
    } else
      dataptr = workspace;	/* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/13)**2 = 64/169, which we partially
   * fold into the constant multipliers and final shifting:
   * cK now represents sqrt(2) * cos(K*pi/26) * 128/169.
   */

  dataptr = data;
  wsptr = workspace;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part */

    /* Intermediate rows 8-12 (mirror partners of rows 4..0) come
     * from the workspace. */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*4];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*3];
    tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*2];
    tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*1];
    tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*0];
    tmp5 = dataptr[DCTSIZE*5] + dataptr[DCTSIZE*7];
    tmp6 = dataptr[DCTSIZE*6];

    tmp10 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*4];
    tmp11 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*3];
    tmp12 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*2];
    tmp13 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*1];
    tmp14 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*0];
    tmp15 = dataptr[DCTSIZE*5] - dataptr[DCTSIZE*7];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 + tmp1 + tmp2 + tmp3 + tmp4 + tmp5 + tmp6,
                       FIX(0.757396450)),          /* 128/169 */
              CONST_BITS+1);

    tmp6 += tmp6;
    tmp0 -= tmp6;
    tmp1 -= tmp6;
    tmp2 -= tmp6;
    tmp3 -= tmp6;
    tmp4 -= tmp6;
    tmp5 -= tmp6;

    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0, FIX(1.039995521)) +   /* c2 */
              MULTIPLY(tmp1, FIX(0.801745081)) +   /* c6 */
              MULTIPLY(tmp2, FIX(0.379824504)) -   /* c10 */
              MULTIPLY(tmp3, FIX(0.129109289)) -   /* c12 */
              MULTIPLY(tmp4, FIX(0.608465700)) -   /* c8 */
              MULTIPLY(tmp5, FIX(0.948429952)),    /* c4 */
              CONST_BITS+1);
    z1 = MULTIPLY(tmp0 - tmp2, FIX(0.875087516)) - /* (c4+c6)/2 */
         MULTIPLY(tmp3 - tmp4, FIX(0.330085509)) - /* (c2-c10)/2 */
         MULTIPLY(tmp1 - tmp5, FIX(0.239678205));  /* (c8-c12)/2 */
    z2 = MULTIPLY(tmp0 + tmp2, FIX(0.073342435)) - /* (c4-c6)/2 */
         MULTIPLY(tmp3 + tmp4, FIX(0.709910013)) + /* (c2+c10)/2 */
         MULTIPLY(tmp1 + tmp5, FIX(0.368787494));  /* (c8+c12)/2 */

    dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS+1);
    dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 - z2, CONST_BITS+1);

    /* Odd part */

    tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.001514908));   /* c3 */
    tmp2 = MULTIPLY(tmp10 + tmp12, FIX(0.881514751));   /* c5 */
    tmp3 = MULTIPLY(tmp10 + tmp13, FIX(0.710284161)) +  /* c7 */
           MULTIPLY(tmp14 + tmp15, FIX(0.256335874));   /* c11 */
    tmp0 = tmp1 + tmp2 + tmp3 -
           MULTIPLY(tmp10, FIX(1.530003162)) +          /* c3+c5+c7-c1 */
           MULTIPLY(tmp14, FIX(0.241438564));           /* c9-c11 */
    tmp4 = MULTIPLY(tmp14 - tmp15, FIX(0.710284161)) -  /* c7 */
           MULTIPLY(tmp11 + tmp12, FIX(0.256335874));   /* c11 */
    tmp5 = MULTIPLY(tmp11 + tmp13, - FIX(0.881514751)); /* -c5 */
    tmp1 += tmp4 + tmp5 +
            MULTIPLY(tmp11, FIX(0.634110155)) -         /* c5+c9+c11-c3 */
            MULTIPLY(tmp14, FIX(1.773594819));          /* c1+c7 */
    tmp6 = MULTIPLY(tmp12 + tmp13, - FIX(0.497774438)); /* -c9 */
    tmp2 += tmp4 + tmp6 -
            MULTIPLY(tmp12, FIX(1.190715098)) +         /* c1+c5-c9-c11 */
            MULTIPLY(tmp15, FIX(1.711799069));          /* c3+c7 */
    tmp3 += tmp5 + tmp6 +
            MULTIPLY(tmp13, FIX(1.670519935)) -         /* c3+c5+c9-c7 */
            MULTIPLY(tmp15, FIX(1.319646532));          /* c1+c11 */

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+1);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+1);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+1);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp3, CONST_BITS+1);

    dataptr++;			/* advance pointer to next column */
    wsptr++;			/* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 14x14 sample block.
 *
 * data        => output coefficient block; on return the top-left 8x8 region
 *                holds the scaled DCT coefficients.
 * sample_data => input image rows; 14 rows of 14 samples are read starting
 *                at start_col.
 * start_col   => first sample column to read in each input row.
 *
 * The 14-point transform needs 14 rows of intermediate results, but the
 * output block only has 8 rows, so rows 8..13 of pass 1 are written to a
 * local workspace and recombined in pass 2.
 */
GLOBAL(void)
jpeg_fdct_14x14 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  DCTELEM workspace[8*6];       /* holds pass-1 results for rows 8..13 */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT.
   * cK represents sqrt(2) * cos(K*pi/28).
   */
  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    /* Sums/differences of mirror-symmetric sample pairs; note tmp13 (not
     * tmp3) holds the pair sum for columns 3/10 so the odd part can reuse
     * tmp3 for the corresponding difference below.
     */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[13]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[12]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[11]);
    tmp13 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[10]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[9]);
    tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[8]);
    tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[7]);

    tmp10 = tmp0 + tmp6;
    tmp14 = tmp0 - tmp6;
    tmp11 = tmp1 + tmp5;
    tmp15 = tmp1 - tmp5;
    tmp12 = tmp2 + tmp4;
    tmp16 = tmp2 - tmp4;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[13]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[12]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[11]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[10]);
    tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[9]);
    tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[8]);
    tmp6 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[7]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      (tmp10 + tmp11 + tmp12 + tmp13 - 14 * CENTERJSAMPLE);
    tmp13 += tmp13;
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.274162392)) + /* c4 */
              MULTIPLY(tmp11 - tmp13, FIX(0.314692123)) - /* c12 */
              MULTIPLY(tmp12 - tmp13, FIX(0.881747734)),  /* c8 */
              CONST_BITS);

    tmp10 = MULTIPLY(tmp14 + tmp15, FIX(1.105676686));    /* c6 */

    dataptr[2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp14, FIX(0.273079590))   /* c2-c6 */
              + MULTIPLY(tmp16, FIX(0.613604268)),        /* c10 */
              CONST_BITS);
    dataptr[6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp15, FIX(1.719280954))   /* c6+c10 */
              - MULTIPLY(tmp16, FIX(1.378756276)),        /* c2 */
              CONST_BITS);

    /* Odd part */

    tmp10 = tmp1 + tmp2;
    tmp11 = tmp5 - tmp4;
    dataptr[7] = (DCTELEM) (tmp0 - tmp10 + tmp3 - tmp11 - tmp6);
    tmp3 <<= CONST_BITS;
    tmp10 = MULTIPLY(tmp10, - FIX(0.158341681));          /* -c13 */
    tmp11 = MULTIPLY(tmp11, FIX(1.405321284));            /* c1 */
    tmp10 += tmp11 - tmp3;
    tmp11 = MULTIPLY(tmp0 + tmp2, FIX(1.197448846)) +     /* c5 */
            MULTIPLY(tmp4 + tmp6, FIX(0.752406978));      /* c9 */
    dataptr[5] = (DCTELEM)
      DESCALE(tmp10 + tmp11 - MULTIPLY(tmp2, FIX(2.373959773)) /* c3+c5-c13 */
              + MULTIPLY(tmp4, FIX(1.119999435)),         /* c1+c11-c9 */
              CONST_BITS);
    tmp12 = MULTIPLY(tmp0 + tmp1, FIX(1.334852607)) +     /* c3 */
            MULTIPLY(tmp5 - tmp6, FIX(0.467085129));      /* c11 */
    dataptr[3] = (DCTELEM)
      DESCALE(tmp10 + tmp12 - MULTIPLY(tmp1, FIX(0.424103948)) /* c3-c9-c13 */
              - MULTIPLY(tmp5, FIX(3.069855259)),         /* c1+c5+c11 */
              CONST_BITS);
    dataptr[1] = (DCTELEM)
      DESCALE(tmp11 + tmp12 + tmp3 + tmp6 -
              MULTIPLY(tmp0 + tmp6, FIX(1.126980169)),    /* c3+c5-c1 */
              CONST_BITS);

    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 14)
        break;                  /* Done. */
      dataptr += DCTSIZE;       /* advance pointer to next row */
    } else
      dataptr = workspace;      /* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/14)**2 = 16/49, which we partially
   * fold into the constant multipliers and final shifting:
   * cK now represents sqrt(2) * cos(K*pi/28) * 32/49.
   */
  dataptr = data;
  wsptr = workspace;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {

    /* Even part */
    /* Mirror pairs span the data block (rows 0..7) and the workspace
     * (rows 8..13); rows 6 and 7 both live in the data block.
     */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*5];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*3];
    tmp13 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*2];
    tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*1];
    tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*0];
    tmp6 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7];

    tmp10 = tmp0 + tmp6;
    tmp14 = tmp0 - tmp6;
    tmp11 = tmp1 + tmp5;
    tmp15 = tmp1 - tmp5;
    tmp12 = tmp2 + tmp4;
    tmp16 = tmp2 - tmp4;

    tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*5];
    tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*3];
    tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*2];
    tmp4 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*1];
    tmp5 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*0];
    tmp6 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12 + tmp13,
                       FIX(0.653061224)),                 /* 32/49 */
              CONST_BITS+1);
    tmp13 += tmp13;
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp13, FIX(0.832106052)) + /* c4 */
              MULTIPLY(tmp11 - tmp13, FIX(0.205513223)) - /* c12 */
              MULTIPLY(tmp12 - tmp13, FIX(0.575835255)),  /* c8 */
              CONST_BITS+1);

    tmp10 = MULTIPLY(tmp14 + tmp15, FIX(0.722074570));    /* c6 */

    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp14, FIX(0.178337691))   /* c2-c6 */
              + MULTIPLY(tmp16, FIX(0.400721155)),        /* c10 */
              CONST_BITS+1);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp15, FIX(1.122795725))   /* c6+c10 */
              - MULTIPLY(tmp16, FIX(0.900412262)),        /* c2 */
              CONST_BITS+1);

    /* Odd part */

    tmp10 = tmp1 + tmp2;
    tmp11 = tmp5 - tmp4;
    dataptr[DCTSIZE*7] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 - tmp10 + tmp3 - tmp11 - tmp6,
                       FIX(0.653061224)),                 /* 32/49 */
              CONST_BITS+1);
    tmp3  = MULTIPLY(tmp3 , FIX(0.653061224));            /* 32/49 */
    tmp10 = MULTIPLY(tmp10, - FIX(0.103406812));          /* -c13 */
    tmp11 = MULTIPLY(tmp11, FIX(0.917760839));            /* c1 */
    tmp10 += tmp11 - tmp3;
    tmp11 = MULTIPLY(tmp0 + tmp2, FIX(0.782007410)) +     /* c5 */
            MULTIPLY(tmp4 + tmp6, FIX(0.491367823));      /* c9 */
    dataptr[DCTSIZE*5] = (DCTELEM)
      DESCALE(tmp10 + tmp11 - MULTIPLY(tmp2, FIX(1.550341076)) /* c3+c5-c13 */
              + MULTIPLY(tmp4, FIX(0.731428202)),         /* c1+c11-c9 */
              CONST_BITS+1);
    tmp12 = MULTIPLY(tmp0 + tmp1, FIX(0.871740478)) +     /* c3 */
            MULTIPLY(tmp5 - tmp6, FIX(0.305035186));      /* c11 */
    dataptr[DCTSIZE*3] = (DCTELEM)
      DESCALE(tmp10 + tmp12 - MULTIPLY(tmp1, FIX(0.276965844)) /* c3-c9-c13 */
              - MULTIPLY(tmp5, FIX(2.004803435)),         /* c1+c5+c11 */
              CONST_BITS+1);
    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(tmp11 + tmp12 + tmp3
              - MULTIPLY(tmp0, FIX(0.735987049))          /* c3+c5-c1 */
              - MULTIPLY(tmp6, FIX(0.082925825)),         /* c9-c11-c13 */
              CONST_BITS+1);

    dataptr++;                  /* advance pointer to next column */
    wsptr++;                    /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 15x15 sample block.
 *
 * data        => output coefficient block; on return the top-left 8x8 region
 *                holds the scaled DCT coefficients.
 * sample_data => input image rows; 15 rows of 15 samples are read starting
 *                at start_col.
 * start_col   => first sample column to read in each input row.
 *
 * Rows 8..14 of the pass-1 results do not fit in the 8-row output block,
 * so they are kept in a local workspace and recombined in pass 2.
 */
GLOBAL(void)
jpeg_fdct_15x15 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  INT32 z1, z2, z3;
  DCTELEM workspace[8*7];       /* holds pass-1 results for rows 8..14 */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT.
   * cK represents sqrt(2) * cos(K*pi/30).
   */
  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    /* Sums and differences of samples mirror-symmetric about the center
     * column 7, which itself enters only the even part (tmp7).
     */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[14]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[13]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[12]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[11]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[10]);
    tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[9]);
    tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[8]);
    tmp7 = GETJSAMPLE(elemptr[7]);

    tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[14]);
    tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[13]);
    tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[12]);
    tmp13 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[11]);
    tmp14 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[10]);
    tmp15 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[9]);
    tmp16 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[8]);

    z1 = tmp0 + tmp4 + tmp5;
    z2 = tmp1 + tmp3 + tmp6;
    z3 = tmp2 + tmp7;
    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM) (z1 + z2 + z3 - 15 * CENTERJSAMPLE);
    z3 += z3;
    dataptr[6] = (DCTELEM)
      DESCALE(MULTIPLY(z1 - z3, FIX(1.144122806)) -       /* c6 */
              MULTIPLY(z2 - z3, FIX(0.437016024)),        /* c12 */
              CONST_BITS);
    tmp2 += ((tmp1 + tmp4) >> 1) - tmp7 - tmp7;
    z1 = MULTIPLY(tmp3 - tmp2, FIX(1.531135173)) -        /* c2+c14 */
         MULTIPLY(tmp6 - tmp2, FIX(2.238241955));         /* c4+c8 */
    z2 = MULTIPLY(tmp5 - tmp2, FIX(0.798468008)) -        /* c8-c14 */
         MULTIPLY(tmp0 - tmp2, FIX(0.091361227));         /* c2-c4 */
    z3 = MULTIPLY(tmp0 - tmp3, FIX(1.383309603)) +        /* c2 */
         MULTIPLY(tmp6 - tmp5, FIX(0.946293579)) +        /* c8 */
         MULTIPLY(tmp1 - tmp4, FIX(0.790569415));         /* (c6+c12)/2 */

    dataptr[2] = (DCTELEM) DESCALE(z1 + z3, CONST_BITS);
    dataptr[4] = (DCTELEM) DESCALE(z2 + z3, CONST_BITS);

    /* Odd part */

    tmp2 = MULTIPLY(tmp10 - tmp12 - tmp13 + tmp15 + tmp16,
                    FIX(1.224744871));                    /* c5 */
    tmp1 = MULTIPLY(tmp10 - tmp14 - tmp15, FIX(1.344997024)) + /* c3 */
           MULTIPLY(tmp11 - tmp13 - tmp16, FIX(0.831253876));  /* c9 */
    tmp12 = MULTIPLY(tmp12, FIX(1.224744871));            /* c5 */
    tmp4 = MULTIPLY(tmp10 - tmp16, FIX(1.406466353)) +    /* c1 */
           MULTIPLY(tmp11 + tmp14, FIX(1.344997024)) +    /* c3 */
           MULTIPLY(tmp13 + tmp15, FIX(0.575212477));     /* c11 */
    tmp0 = MULTIPLY(tmp13, FIX(0.475753014)) -            /* c7-c11 */
           MULTIPLY(tmp14, FIX(0.513743148)) +            /* c3-c9 */
           MULTIPLY(tmp16, FIX(1.700497885)) + tmp4 + tmp12; /* c1+c13 */
    tmp3 = MULTIPLY(tmp10, - FIX(0.355500862)) -          /* -(c1-c7) */
           MULTIPLY(tmp11, FIX(2.176250899)) -            /* c3+c9 */
           MULTIPLY(tmp15, FIX(0.869244010)) + tmp4 - tmp12; /* c11+c13 */

    dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS);
    dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS);
    dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS);
    dataptr[7] = (DCTELEM) DESCALE(tmp3, CONST_BITS);

    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 15)
        break;                  /* Done. */
      dataptr += DCTSIZE;       /* advance pointer to next row */
    } else
      dataptr = workspace;      /* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/15)**2 = 64/225, which we partially
   * fold into the constant multipliers and final shifting:
   * cK now represents sqrt(2) * cos(K*pi/30) * 256/225.
   */
  dataptr = data;
  wsptr = workspace;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {

    /* Even part */
    /* Mirror pairs span the data block (rows 0..7) and the workspace
     * (rows 8..14); the center row 7 stays in the data block.
     */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*6];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*5];
    tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*4];
    tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*3];
    tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*2];
    tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*1];
    tmp6 = dataptr[DCTSIZE*6] + wsptr[DCTSIZE*0];
    tmp7 = dataptr[DCTSIZE*7];

    tmp10 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*6];
    tmp11 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*5];
    tmp12 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*4];
    tmp13 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*3];
    tmp14 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*2];
    tmp15 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*1];
    tmp16 = dataptr[DCTSIZE*6] - wsptr[DCTSIZE*0];

    z1 = tmp0 + tmp4 + tmp5;
    z2 = tmp1 + tmp3 + tmp6;
    z3 = tmp2 + tmp7;
    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(z1 + z2 + z3, FIX(1.137777778)),   /* 256/225 */
              CONST_BITS+2);
    z3 += z3;
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(MULTIPLY(z1 - z3, FIX(1.301757503)) -       /* c6 */
              MULTIPLY(z2 - z3, FIX(0.497227121)),        /* c12 */
              CONST_BITS+2);
    tmp2 += ((tmp1 + tmp4) >> 1) - tmp7 - tmp7;
    z1 = MULTIPLY(tmp3 - tmp2, FIX(1.742091575)) -        /* c2+c14 */
         MULTIPLY(tmp6 - tmp2, FIX(2.546621957));         /* c4+c8 */
    z2 = MULTIPLY(tmp5 - tmp2, FIX(0.908479156)) -        /* c8-c14 */
         MULTIPLY(tmp0 - tmp2, FIX(0.103948774));         /* c2-c4 */
    z3 = MULTIPLY(tmp0 - tmp3, FIX(1.573898926)) +        /* c2 */
         MULTIPLY(tmp6 - tmp5, FIX(1.076671805)) +        /* c8 */
         MULTIPLY(tmp1 - tmp4, FIX(0.899492312));         /* (c6+c12)/2 */

    dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + z3, CONST_BITS+2);
    dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(z2 + z3, CONST_BITS+2);

    /* Odd part */

    tmp2 = MULTIPLY(tmp10 - tmp12 - tmp13 + tmp15 + tmp16,
                    FIX(1.393487498));                    /* c5 */
    tmp1 = MULTIPLY(tmp10 - tmp14 - tmp15, FIX(1.530307725)) + /* c3 */
           MULTIPLY(tmp11 - tmp13 - tmp16, FIX(0.945782187));  /* c9 */
    tmp12 = MULTIPLY(tmp12, FIX(1.393487498));            /* c5 */
    tmp4 = MULTIPLY(tmp10 - tmp16, FIX(1.600246161)) +    /* c1 */
           MULTIPLY(tmp11 + tmp14, FIX(1.530307725)) +    /* c3 */
           MULTIPLY(tmp13 + tmp15, FIX(0.654463974));     /* c11 */
    tmp0 = MULTIPLY(tmp13, FIX(0.541301207)) -            /* c7-c11 */
           MULTIPLY(tmp14, FIX(0.584525538)) +            /* c3-c9 */
           MULTIPLY(tmp16, FIX(1.934788705)) + tmp4 + tmp12; /* c1+c13 */
    tmp3 = MULTIPLY(tmp10, - FIX(0.404480980)) -          /* -(c1-c7) */
           MULTIPLY(tmp11, FIX(2.476089912)) -            /* c3+c9 */
           MULTIPLY(tmp15, FIX(0.989006518)) + tmp4 - tmp12; /* c11+c13 */

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+2);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+2);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+2);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp3, CONST_BITS+2);

    dataptr++;                  /* advance pointer to next column */
    wsptr++;                    /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 16x16 sample block.
 *
 * data        => output coefficient block; on return the top-left 8x8 region
 *                holds the scaled DCT coefficients.
 * sample_data => input image rows; 16 rows of 16 samples are read starting
 *                at start_col.
 * start_col   => first sample column to read in each input row.
 *
 * Rows 8..15 of the pass-1 results do not fit in the 8-row output block,
 * so they are kept in a local workspace and recombined in pass 2.
 */
GLOBAL(void)
jpeg_fdct_16x16 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16, tmp17;
  DCTELEM workspace[DCTSIZE2];  /* holds pass-1 results for rows 8..15 */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * cK represents sqrt(2) * cos(K*pi/32).
   */
  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    /* Sums and differences of samples mirror-symmetric about the block
     * center (column pairs 0/15, 1/14, ...).
     */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[15]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[14]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[13]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[12]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[11]);
    tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[10]);
    tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[9]);
    tmp7 = GETJSAMPLE(elemptr[7]) + GETJSAMPLE(elemptr[8]);

    tmp10 = tmp0 + tmp7;
    tmp14 = tmp0 - tmp7;
    tmp11 = tmp1 + tmp6;
    tmp15 = tmp1 - tmp6;
    tmp12 = tmp2 + tmp5;
    tmp16 = tmp2 - tmp5;
    tmp13 = tmp3 + tmp4;
    tmp17 = tmp3 - tmp4;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[15]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[14]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[13]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[12]);
    tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[11]);
    tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[10]);
    tmp6 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[9]);
    tmp7 = GETJSAMPLE(elemptr[7]) - GETJSAMPLE(elemptr[8]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 + tmp12 + tmp13 - 16 * CENTERJSAMPLE) << PASS1_BITS);
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.306562965)) + /* c4[16] = c2[8] */
              MULTIPLY(tmp11 - tmp12, FIX_0_541196100),   /* c12[16] = c6[8] */
              CONST_BITS-PASS1_BITS);

    tmp10 = MULTIPLY(tmp17 - tmp15, FIX(0.275899379)) +   /* c14[16] = c7[8] */
            MULTIPLY(tmp14 - tmp16, FIX(1.387039845));    /* c2[16] = c1[8] */

    dataptr[2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp15, FIX(1.451774982))   /* c6+c14 */
              + MULTIPLY(tmp16, FIX(2.172734804)),        /* c2+c10 */
              CONST_BITS-PASS1_BITS);
    dataptr[6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp14, FIX(0.211164243))   /* c2-c6 */
              - MULTIPLY(tmp17, FIX(1.061594338)),        /* c10+c14 */
              CONST_BITS-PASS1_BITS);

    /* Odd part */

    tmp11 = MULTIPLY(tmp0 + tmp1, FIX(1.353318001)) +     /* c3 */
            MULTIPLY(tmp6 - tmp7, FIX(0.410524528));      /* c13 */
    tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.247225013)) +     /* c5 */
            MULTIPLY(tmp5 + tmp7, FIX(0.666655658));      /* c11 */
    tmp13 = MULTIPLY(tmp0 + tmp3, FIX(1.093201867)) +     /* c7 */
            MULTIPLY(tmp4 - tmp7, FIX(0.897167586));      /* c9 */
    tmp14 = MULTIPLY(tmp1 + tmp2, FIX(0.138617169)) +     /* c15 */
            MULTIPLY(tmp6 - tmp5, FIX(1.407403738));      /* c1 */
    tmp15 = MULTIPLY(tmp1 + tmp3, - FIX(0.666655658)) +   /* -c11 */
            MULTIPLY(tmp4 + tmp6, - FIX(1.247225013));    /* -c5 */
    tmp16 = MULTIPLY(tmp2 + tmp3, - FIX(1.353318001)) +   /* -c3 */
            MULTIPLY(tmp5 - tmp4, FIX(0.410524528));      /* c13 */

    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(tmp0, FIX(2.286341144)) +            /* c7+c5+c3-c1 */
            MULTIPLY(tmp7, FIX(0.779653625));             /* c15+c13-c11+c9 */
    tmp11 += tmp14 + tmp15 + MULTIPLY(tmp1, FIX(0.071888074)) /* c9-c3-c15+c11 */
             - MULTIPLY(tmp6, FIX(1.663905119));          /* c7+c13+c1-c5 */
    tmp12 += tmp14 + tmp16 - MULTIPLY(tmp2, FIX(1.125726048)) /* c7+c5+c15-c3 */
             + MULTIPLY(tmp5, FIX(1.227391138));          /* c9-c11+c1-c13 */
    tmp13 += tmp15 + tmp16 + MULTIPLY(tmp3, FIX(1.065388962)) /* c15+c3+c11-c7 */
             + MULTIPLY(tmp4, FIX(2.167985692));          /* c1+c13+c5-c9 */

    dataptr[1] = (DCTELEM) DESCALE(tmp10, CONST_BITS-PASS1_BITS);
    dataptr[3] = (DCTELEM) DESCALE(tmp11, CONST_BITS-PASS1_BITS);
    dataptr[5] = (DCTELEM) DESCALE(tmp12, CONST_BITS-PASS1_BITS);
    dataptr[7] = (DCTELEM) DESCALE(tmp13, CONST_BITS-PASS1_BITS);

    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == DCTSIZE * 2)
        break;                  /* Done. */
      dataptr += DCTSIZE;       /* advance pointer to next row */
    } else
      dataptr = workspace;      /* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/16)**2 = 1/2**2.
   * cK represents sqrt(2) * cos(K*pi/32).
   */
  dataptr = data;
  wsptr = workspace;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {

    /* Even part */
    /* Mirror pairs combine row k of the data block with row 15-k, which
     * lives at workspace row 7-(15-k-8).
     */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*4];
    tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*3];
    tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*2];
    tmp6 = dataptr[DCTSIZE*6] + wsptr[DCTSIZE*1];
    tmp7 = dataptr[DCTSIZE*7] + wsptr[DCTSIZE*0];

    tmp10 = tmp0 + tmp7;
    tmp14 = tmp0 - tmp7;
    tmp11 = tmp1 + tmp6;
    tmp15 = tmp1 - tmp6;
    tmp12 = tmp2 + tmp5;
    tmp16 = tmp2 - tmp5;
    tmp13 = tmp3 + tmp4;
    tmp17 = tmp3 - tmp4;

    tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*4];
    tmp4 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*3];
    tmp5 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*2];
    tmp6 = dataptr[DCTSIZE*6] - wsptr[DCTSIZE*1];
    tmp7 = dataptr[DCTSIZE*7] - wsptr[DCTSIZE*0];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(tmp10 + tmp11 + tmp12 + tmp13, PASS1_BITS+2);
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.306562965)) + /* c4[16] = c2[8] */
              MULTIPLY(tmp11 - tmp12, FIX_0_541196100),   /* c12[16] = c6[8] */
              CONST_BITS+PASS1_BITS+2);

    tmp10 = MULTIPLY(tmp17 - tmp15, FIX(0.275899379)) +   /* c14[16] = c7[8] */
            MULTIPLY(tmp14 - tmp16, FIX(1.387039845));    /* c2[16] = c1[8] */

    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp15, FIX(1.451774982))   /* c6+c14 */
              + MULTIPLY(tmp16, FIX(2.172734804)),        /* c2+c10 */
              CONST_BITS+PASS1_BITS+2);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp14, FIX(0.211164243))   /* c2-c6 */
              - MULTIPLY(tmp17, FIX(1.061594338)),        /* c10+c14 */
              CONST_BITS+PASS1_BITS+2);

    /* Odd part */

    tmp11 = MULTIPLY(tmp0 + tmp1, FIX(1.353318001)) +     /* c3 */
            MULTIPLY(tmp6 - tmp7, FIX(0.410524528));      /* c13 */
    tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.247225013)) +     /* c5 */
            MULTIPLY(tmp5 + tmp7, FIX(0.666655658));      /* c11 */
    tmp13 = MULTIPLY(tmp0 + tmp3, FIX(1.093201867)) +     /* c7 */
            MULTIPLY(tmp4 - tmp7, FIX(0.897167586));      /* c9 */
    tmp14 = MULTIPLY(tmp1 + tmp2, FIX(0.138617169)) +     /* c15 */
            MULTIPLY(tmp6 - tmp5, FIX(1.407403738));      /* c1 */
    tmp15 = MULTIPLY(tmp1 + tmp3, - FIX(0.666655658)) +   /* -c11 */
            MULTIPLY(tmp4 + tmp6, - FIX(1.247225013));    /* -c5 */
    tmp16 = MULTIPLY(tmp2 + tmp3, - FIX(1.353318001)) +   /* -c3 */
            MULTIPLY(tmp5 - tmp4, FIX(0.410524528));      /* c13 */

    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(tmp0, FIX(2.286341144)) +            /* c7+c5+c3-c1 */
            MULTIPLY(tmp7, FIX(0.779653625));             /* c15+c13-c11+c9 */
    tmp11 += tmp14 + tmp15 + MULTIPLY(tmp1, FIX(0.071888074)) /* c9-c3-c15+c11 */
             - MULTIPLY(tmp6, FIX(1.663905119));          /* c7+c13+c1-c5 */
    tmp12 += tmp14 + tmp16 - MULTIPLY(tmp2, FIX(1.125726048)) /* c7+c5+c15-c3 */
             + MULTIPLY(tmp5, FIX(1.227391138));          /* c9-c11+c1-c13 */
    tmp13 += tmp15 + tmp16 + MULTIPLY(tmp3, FIX(1.065388962)) /* c15+c3+c11-c7 */
             + MULTIPLY(tmp4, FIX(2.167985692));          /* c1+c13+c5-c9 */

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10, CONST_BITS+PASS1_BITS+2);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp11, CONST_BITS+PASS1_BITS+2);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12, CONST_BITS+PASS1_BITS+2);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp13, CONST_BITS+PASS1_BITS+2);

    dataptr++;                  /* advance pointer to next column */
    wsptr++;                    /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 16x8 sample block.
 *
 * 16-point FDCT in pass 1 (rows), 8-point in pass 2 (columns).
 *
 * data        => output coefficient block; on return it holds the scaled
 *                8x8 DCT coefficients.
 * sample_data => input image rows; 8 rows of 16 samples are read starting
 *                at start_col.
 * start_col   => first sample column to read in each input row.
 *
 * Only 8 rows are processed, so no extended workspace is needed (unlike
 * jpeg_fdct_16x16); pass 1 writes each row's 8 coefficients in place.
 */
GLOBAL(void)
jpeg_fdct_16x8 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16, tmp17;
  INT32 z1;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 16-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/32).
   */
  dataptr = data;
  /* (The redundant "ctr = 0;" before this loop was removed; the for
   * statement initializes ctr itself.)
   */
  for (ctr = 0; ctr < DCTSIZE; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    /* Sums and differences of samples mirror-symmetric about the row
     * center (column pairs 0/15, 1/14, ...).
     */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[15]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[14]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[13]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[12]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[11]);
    tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[10]);
    tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[9]);
    tmp7 = GETJSAMPLE(elemptr[7]) + GETJSAMPLE(elemptr[8]);

    tmp10 = tmp0 + tmp7;
    tmp14 = tmp0 - tmp7;
    tmp11 = tmp1 + tmp6;
    tmp15 = tmp1 - tmp6;
    tmp12 = tmp2 + tmp5;
    tmp16 = tmp2 - tmp5;
    tmp13 = tmp3 + tmp4;
    tmp17 = tmp3 - tmp4;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[15]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[14]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[13]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[12]);
    tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[11]);
    tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[10]);
    tmp6 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[9]);
    tmp7 = GETJSAMPLE(elemptr[7]) - GETJSAMPLE(elemptr[8]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 + tmp12 + tmp13 - 16 * CENTERJSAMPLE) << PASS1_BITS);
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.306562965)) + /* c4[16] = c2[8] */
              MULTIPLY(tmp11 - tmp12, FIX_0_541196100),   /* c12[16] = c6[8] */
              CONST_BITS-PASS1_BITS);

    tmp10 = MULTIPLY(tmp17 - tmp15, FIX(0.275899379)) +   /* c14[16] = c7[8] */
            MULTIPLY(tmp14 - tmp16, FIX(1.387039845));    /* c2[16] = c1[8] */

    dataptr[2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp15, FIX(1.451774982))   /* c6+c14 */
              + MULTIPLY(tmp16, FIX(2.172734804)),        /* c2+c10 */
              CONST_BITS-PASS1_BITS);
    dataptr[6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp14, FIX(0.211164243))   /* c2-c6 */
              - MULTIPLY(tmp17, FIX(1.061594338)),        /* c10+c14 */
              CONST_BITS-PASS1_BITS);

    /* Odd part */

    tmp11 = MULTIPLY(tmp0 + tmp1, FIX(1.353318001)) +     /* c3 */
            MULTIPLY(tmp6 - tmp7, FIX(0.410524528));      /* c13 */
    tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.247225013)) +     /* c5 */
            MULTIPLY(tmp5 + tmp7, FIX(0.666655658));      /* c11 */
    tmp13 = MULTIPLY(tmp0 + tmp3, FIX(1.093201867)) +     /* c7 */
            MULTIPLY(tmp4 - tmp7, FIX(0.897167586));      /* c9 */
    tmp14 = MULTIPLY(tmp1 + tmp2, FIX(0.138617169)) +     /* c15 */
            MULTIPLY(tmp6 - tmp5, FIX(1.407403738));      /* c1 */
    tmp15 = MULTIPLY(tmp1 + tmp3, - FIX(0.666655658)) +   /* -c11 */
            MULTIPLY(tmp4 + tmp6, - FIX(1.247225013));    /* -c5 */
    tmp16 = MULTIPLY(tmp2 + tmp3, - FIX(1.353318001)) +   /* -c3 */
            MULTIPLY(tmp5 - tmp4, FIX(0.410524528));      /* c13 */

    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(tmp0, FIX(2.286341144)) +            /* c7+c5+c3-c1 */
            MULTIPLY(tmp7, FIX(0.779653625));             /* c15+c13-c11+c9 */
    tmp11 += tmp14 + tmp15 + MULTIPLY(tmp1, FIX(0.071888074)) /* c9-c3-c15+c11 */
             - MULTIPLY(tmp6, FIX(1.663905119));          /* c7+c13+c1-c5 */
    tmp12 += tmp14 + tmp16 - MULTIPLY(tmp2, FIX(1.125726048)) /* c7+c5+c15-c3 */
             + MULTIPLY(tmp5, FIX(1.227391138));          /* c9-c11+c1-c13 */
    tmp13 += tmp15 + tmp16 + MULTIPLY(tmp3, FIX(1.065388962)) /* c15+c3+c11-c7 */
             + MULTIPLY(tmp4, FIX(2.167985692));          /* c1+c13+c5-c9 */

    dataptr[1] = (DCTELEM) DESCALE(tmp10, CONST_BITS-PASS1_BITS);
    dataptr[3] = (DCTELEM) DESCALE(tmp11, CONST_BITS-PASS1_BITS);
    dataptr[5] = (DCTELEM) DESCALE(tmp12, CONST_BITS-PASS1_BITS);
    dataptr[7] = (DCTELEM) DESCALE(tmp13, CONST_BITS-PASS1_BITS);

    dataptr += DCTSIZE;         /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by 8/16 = 1/2.
   * 8-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
   */
  dataptr = data;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {

    /* Even part per LL&M figure 1 --- note that published figure is faulty;
     * rotator "c1" should be "c6".
     */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4];

    tmp10 = tmp0 + tmp3;
    tmp12 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp13 = tmp1 - tmp2;

    tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4];

    dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS+1);
    dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS+1);

    z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);        /* c6 */
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(z1 + MULTIPLY(tmp12, FIX_0_765366865),      /* c2-c6 */
              CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(z1 - MULTIPLY(tmp13, FIX_1_847759065),      /* c2+c6 */
              CONST_BITS+PASS1_BITS+1);

    /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
     * i0..i3 in the paper are tmp0..tmp3 here.
     */
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp1 + tmp3;

    z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602);        /* c3 */
    tmp12 = MULTIPLY(tmp12, - FIX_0_390180644);           /* -c3+c5 */
    tmp13 = MULTIPLY(tmp13, - FIX_1_961570560);           /* -c3-c5 */
    tmp12 += z1;
    tmp13 += z1;

    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223);        /* -c3+c7 */
    tmp0 = MULTIPLY(tmp0, FIX_1_501321110);               /* c1+c3-c5-c7 */
    tmp3 = MULTIPLY(tmp3, FIX_0_298631336);               /* -c1+c3+c5-c7 */
    tmp0 += z1 + tmp12;
    tmp3 += z1 + tmp13;

    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447);        /* -c1-c3 */
    tmp1 = MULTIPLY(tmp1, FIX_3_072711026);               /* c1+c3+c5-c7 */
    tmp2 = MULTIPLY(tmp2, FIX_2_053119869);               /* c1+c3-c5+c7 */
    tmp1 += z1 + tmp13;
    tmp2 += z1 + tmp12;

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp3, CONST_BITS+PASS1_BITS+1);

    dataptr++;                  /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 14x7 sample block.
 *
 * 14-point FDCT in pass 1 (rows), 7-point in pass 2 (columns).
 *
 * data        => output coefficient block; on return rows 0..6 hold the
 *                scaled DCT coefficients and row 7 is zeroed.
 * sample_data => input image rows; 7 rows of 14 samples are read starting
 *                at start_col.
 * start_col   => first sample column to read in each input row.
 */
GLOBAL(void)
jpeg_fdct_14x7 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  INT32 z1, z2, z3;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Zero bottom row of output coefficient block. */
  /* Only 7 coefficient rows are produced by the 7-point column transform. */
  MEMZERO(&data[DCTSIZE*7], SIZEOF(DCTELEM) * DCTSIZE);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 14-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/28).
   */
  dataptr = data;
  for (ctr = 0; ctr < 7; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */
    /* Note tmp13 (not tmp3) holds the pair sum for columns 3/10 so the odd
     * part can reuse tmp3 for the corresponding difference below.
     */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[13]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[12]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[11]);
    tmp13 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[10]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[9]);
    tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[8]);
    tmp6 = GETJSAMPLE(elemptr[6]) + GETJSAMPLE(elemptr[7]);

    tmp10 = tmp0 + tmp6;
    tmp14 = tmp0 - tmp6;
    tmp11 = tmp1 + tmp5;
    tmp15 = tmp1 - tmp5;
    tmp12 = tmp2 + tmp4;
    tmp16 = tmp2 - tmp4;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[13]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[12]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[11]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[10]);
    tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[9]);
    tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[8]);
    tmp6 = GETJSAMPLE(elemptr[6]) - GETJSAMPLE(elemptr[7]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 + tmp12 + tmp13 - 14 * CENTERJSAMPLE) << PASS1_BITS);
    tmp13 += tmp13;
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.274162392)) + /* c4 */
              MULTIPLY(tmp11 - tmp13, FIX(0.314692123)) - /* c12 */
              MULTIPLY(tmp12 - tmp13, FIX(0.881747734)),  /* c8 */
              CONST_BITS-PASS1_BITS);

    tmp10 = MULTIPLY(tmp14 + tmp15, FIX(1.105676686));    /* c6 */

    dataptr[2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp14, FIX(0.273079590))   /* c2-c6 */
              + MULTIPLY(tmp16, FIX(0.613604268)),        /* c10 */
              CONST_BITS-PASS1_BITS);
    dataptr[6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp15, FIX(1.719280954))   /* c6+c10 */
              - MULTIPLY(tmp16, FIX(1.378756276)),        /* c2 */
              CONST_BITS-PASS1_BITS);

    /* Odd part */

    tmp10 = tmp1 + tmp2;
    tmp11 = tmp5 - tmp4;
    dataptr[7] = (DCTELEM) ((tmp0 - tmp10 + tmp3 - tmp11 - tmp6) << PASS1_BITS);
    tmp3 <<= CONST_BITS;
    tmp10 = MULTIPLY(tmp10, - FIX(0.158341681));          /* -c13 */
    tmp11 = MULTIPLY(tmp11, FIX(1.405321284));            /* c1 */
    tmp10 += tmp11 - tmp3;
    tmp11 = MULTIPLY(tmp0 + tmp2, FIX(1.197448846)) +     /* c5 */
            MULTIPLY(tmp4 + tmp6, FIX(0.752406978));      /* c9 */
    dataptr[5] = (DCTELEM)
      DESCALE(tmp10 + tmp11 - MULTIPLY(tmp2, FIX(2.373959773)) /* c3+c5-c13 */
              + MULTIPLY(tmp4, FIX(1.119999435)),         /* c1+c11-c9 */
              CONST_BITS-PASS1_BITS);
    tmp12 = MULTIPLY(tmp0 + tmp1, FIX(1.334852607)) +     /* c3 */
            MULTIPLY(tmp5 - tmp6, FIX(0.467085129));      /* c11 */
    dataptr[3] = (DCTELEM)
      DESCALE(tmp10 + tmp12 - MULTIPLY(tmp1, FIX(0.424103948)) /* c3-c9-c13 */
              - MULTIPLY(tmp5, FIX(3.069855259)),         /* c1+c5+c11 */
              CONST_BITS-PASS1_BITS);
    dataptr[1] = (DCTELEM)
      DESCALE(tmp11 + tmp12 + tmp3 + tmp6 -
              MULTIPLY(tmp0 + tmp6, FIX(1.126980169)),    /* c3+c5-c1 */
              CONST_BITS-PASS1_BITS);

    dataptr += DCTSIZE;         /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/14)*(8/7) = 32/49, which we
   * partially fold into the constant multipliers and final shifting:
   * 7-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/14) * 64/49.
   */
  dataptr = data;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {

    /* Even part */
    /* Sums/differences of rows mirror-symmetric about center row 3. */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*6];
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*5];
    tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*4];
    tmp3 = dataptr[DCTSIZE*3];

    tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*6];
    tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*5];
    tmp12 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*4];

    z1 = tmp0 + tmp2;
    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(z1 + tmp1 + tmp3, FIX(1.306122449)), /* 64/49 */
              CONST_BITS+PASS1_BITS+1);
    tmp3 += tmp3;
    z1 -= tmp3;
    z1 -= tmp3;                 /* z1 = tmp0 + tmp2 - 4*dataptr[DCTSIZE*3] */
    z1 = MULTIPLY(z1, FIX(0.461784020));                  /* (c2+c6-c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp2, FIX(1.202428084));         /* (c2+c4-c6)/2 */
    z3 = MULTIPLY(tmp1 - tmp2, FIX(0.411026446));         /* c6 */
    dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + z2 + z3, CONST_BITS+PASS1_BITS+1);
    z1 -= z2;
    z2 = MULTIPLY(tmp0 - tmp1, FIX(1.151670509));         /* c4 */
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(z2 + z3 - MULTIPLY(tmp1 - tmp3, FIX(0.923568041)), /* c2+c6-c4 */
              CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS+PASS1_BITS+1);

    /* Odd part */

    tmp1 = MULTIPLY(tmp10 + tmp11, FIX(1.221765677));     /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(tmp10 - tmp11, FIX(0.222383464));     /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(tmp11 + tmp12, - FIX(1.800824523));   /* -c1 */
    tmp1 += tmp2;
    tmp3 = MULTIPLY(tmp10 + tmp12, FIX(0.801442310));     /* c5 */
    tmp0 += tmp3;
    tmp2 += tmp3 + MULTIPLY(tmp12, FIX(2.443531355));     /* c3+c1-c5 */

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp0, CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp1, CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp2, CONST_BITS+PASS1_BITS+1);

    dataptr++;                  /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 12x6 sample block.
 *
 * 12-point FDCT in pass 1 (rows), 6-point in pass 2 (columns).
 * Output is the top-left 8x6 region of an 8x8 coefficient block;
 * the bottom two rows are zeroed because a 6-point column transform
 * yields only six coefficients.
 */

GLOBAL(void)
jpeg_fdct_12x6 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Zero 2 bottom rows of output coefficient block. */
  MEMZERO(&data[DCTSIZE*6], SIZEOF(DCTELEM) * DCTSIZE * 2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 12-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/24).
   */

  dataptr = data;
  for (ctr = 0; ctr < 6; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    /* Sums of mirrored sample pairs feed the even-frequency terms. */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[11]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[10]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[9]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[8]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[7]);
    tmp5 = GETJSAMPLE(elemptr[5]) + GETJSAMPLE(elemptr[6]);

    tmp10 = tmp0 + tmp5;
    tmp13 = tmp0 - tmp5;
    tmp11 = tmp1 + tmp4;
    tmp14 = tmp1 - tmp4;
    tmp12 = tmp2 + tmp3;
    tmp15 = tmp2 - tmp3;

    /* Differences of mirrored pairs feed the odd-frequency terms. */
    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[11]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[10]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[9]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[8]);
    tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[7]);
    tmp5 = GETJSAMPLE(elemptr[5]) - GETJSAMPLE(elemptr[6]);

    /* Apply unsigned->signed conversion. */
    /* The level shift (-CENTERJSAMPLE per sample) is folded into the DC term. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 + tmp12 - 12 * CENTERJSAMPLE) << PASS1_BITS);
    dataptr[6] = (DCTELEM) ((tmp13 - tmp14 - tmp15) << PASS1_BITS);
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.224744871)), /* c4 */
              CONST_BITS-PASS1_BITS);
    dataptr[2] = (DCTELEM)
      DESCALE(tmp14 - tmp15 + MULTIPLY(tmp13 + tmp15, FIX(1.366025404)), /* c2 */
              CONST_BITS-PASS1_BITS);

    /* Odd part */

    tmp10 = MULTIPLY(tmp1 + tmp4, FIX_0_541196100); /* c9 */
    tmp14 = tmp10 + MULTIPLY(tmp1, FIX_0_765366865); /* c3-c9 */
    tmp15 = tmp10 - MULTIPLY(tmp4, FIX_1_847759065); /* c3+c9 */
    tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.121971054)); /* c5 */
    tmp13 = MULTIPLY(tmp0 + tmp3, FIX(0.860918669)); /* c7 */
    tmp10 = tmp12 + tmp13 + tmp14 - MULTIPLY(tmp0, FIX(0.580774953)) /* c5+c7-c1 */
            + MULTIPLY(tmp5, FIX(0.184591911)); /* c11 */
    tmp11 = MULTIPLY(tmp2 + tmp3, - FIX(0.184591911)); /* -c11 */
    tmp12 += tmp11 - tmp15 - MULTIPLY(tmp2, FIX(2.339493912)) /* c1+c5-c11 */
             + MULTIPLY(tmp5, FIX(0.860918669)); /* c7 */
    tmp13 += tmp11 - tmp14 + MULTIPLY(tmp3, FIX(0.725788011)) /* c1+c11-c7 */
             - MULTIPLY(tmp5, FIX(1.121971054)); /* c5 */
    tmp11 = tmp15 + MULTIPLY(tmp0 - tmp3, FIX(1.306562965)) /* c3 */
            - MULTIPLY(tmp2 + tmp5, FIX_0_541196100); /* c9 */

    dataptr[1] = (DCTELEM) DESCALE(tmp10, CONST_BITS-PASS1_BITS);
    dataptr[3] = (DCTELEM) DESCALE(tmp11, CONST_BITS-PASS1_BITS);
    dataptr[5] = (DCTELEM) DESCALE(tmp12, CONST_BITS-PASS1_BITS);
    dataptr[7] = (DCTELEM) DESCALE(tmp13, CONST_BITS-PASS1_BITS);

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/12)*(8/6) = 8/9, which we
   * partially fold into the constant multipliers and final shifting:
   * 6-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/12) * 16/9.
   */

  dataptr = data;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part */

    /* Note: tmp11 (not tmp1) deliberately holds row1+row4, since the c4
     * term below needs tmp10 - 2*tmp11.
     */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*5];
    tmp11 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3];

    tmp10 = tmp0 + tmp2;
    tmp12 = tmp0 - tmp2;

    tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*5];
    tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp11, FIX(1.777777778)), /* 16/9 */
              CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp12, FIX(2.177324216)), /* c2 */
              CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(1.257078722)), /* c4 */
              CONST_BITS+PASS1_BITS+1);

    /* Odd part */

    tmp10 = MULTIPLY(tmp0 + tmp2, FIX(0.650711829)); /* c5 */

    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp0 + tmp1, FIX(1.777777778)), /* 16/9 */
              CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*3] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 - tmp1 - tmp2, FIX(1.777777778)), /* 16/9 */
              CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*5] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp2 - tmp1, FIX(1.777777778)), /* 16/9 */
              CONST_BITS+PASS1_BITS+1);

    dataptr++; /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 10x5 sample block.
 *
 * 10-point FDCT in pass 1 (rows), 5-point in pass 2 (columns).
 * Output occupies the top 8x5 region of the 8x8 coefficient block;
 * the bottom three rows are zeroed.
 */

GLOBAL(void)
jpeg_fdct_10x5 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Zero 3 bottom rows of output coefficient block. */
  MEMZERO(&data[DCTSIZE*5], SIZEOF(DCTELEM) * DCTSIZE * 3);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 10-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/20).
   */

  dataptr = data;
  for (ctr = 0; ctr < 5; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    /* Note: the middle pair's sum goes straight into tmp12 (not tmp2)
     * because it is later doubled and reused in the c4/c8 terms.
     */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[9]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[8]);
    tmp12 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[7]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[6]);
    tmp4 = GETJSAMPLE(elemptr[4]) + GETJSAMPLE(elemptr[5]);

    tmp10 = tmp0 + tmp4;
    tmp13 = tmp0 - tmp4;
    tmp11 = tmp1 + tmp3;
    tmp14 = tmp1 - tmp3;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[9]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[8]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[7]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[6]);
    tmp4 = GETJSAMPLE(elemptr[4]) - GETJSAMPLE(elemptr[5]);

    /* Apply unsigned->signed conversion. */
    /* Level shift (-CENTERJSAMPLE per sample) is folded into the DC term. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 + tmp12 - 10 * CENTERJSAMPLE) << PASS1_BITS);
    tmp12 += tmp12; /* use 2*tmp12 below */
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.144122806)) - /* c4 */
              MULTIPLY(tmp11 - tmp12, FIX(0.437016024)), /* c8 */
              CONST_BITS-PASS1_BITS);
    tmp10 = MULTIPLY(tmp13 + tmp14, FIX(0.831253876)); /* c6 */
    dataptr[2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp13, FIX(0.513743148)), /* c2-c6 */
              CONST_BITS-PASS1_BITS);
    dataptr[6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp14, FIX(2.176250899)), /* c2+c6 */
              CONST_BITS-PASS1_BITS);

    /* Odd part */

    tmp10 = tmp0 + tmp4;
    tmp11 = tmp1 - tmp3;
    dataptr[5] = (DCTELEM) ((tmp10 - tmp11 - tmp2) << PASS1_BITS);
    tmp2 <<= CONST_BITS; /* align tmp2 with CONST_BITS-scaled products */
    dataptr[1] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0, FIX(1.396802247)) + /* c1 */
              MULTIPLY(tmp1, FIX(1.260073511)) + tmp2 + /* c3 */
              MULTIPLY(tmp3, FIX(0.642039522)) + /* c7 */
              MULTIPLY(tmp4, FIX(0.221231742)), /* c9 */
              CONST_BITS-PASS1_BITS);
    tmp12 = MULTIPLY(tmp0 - tmp4, FIX(0.951056516)) - /* (c3+c7)/2 */
            MULTIPLY(tmp1 + tmp3, FIX(0.587785252)); /* (c1-c9)/2 */
    tmp13 = MULTIPLY(tmp10 + tmp11, FIX(0.309016994)) + /* (c3-c7)/2 */
            (tmp11 << (CONST_BITS - 1)) - tmp2;
    dataptr[3] = (DCTELEM) DESCALE(tmp12 + tmp13, CONST_BITS-PASS1_BITS);
    dataptr[7] = (DCTELEM) DESCALE(tmp12 - tmp13, CONST_BITS-PASS1_BITS);

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/10)*(8/5) = 32/25, which we
   * fold into the constant multipliers:
   * 5-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/10) * 32/25.
   */

  dataptr = data;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part */

    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*4];
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*3];
    tmp2 = dataptr[DCTSIZE*2]; /* center row pairs with itself */

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;

    tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*4];
    tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*3];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp2, FIX(1.28)), /* 32/25 */
              CONST_BITS+PASS1_BITS);
    tmp11 = MULTIPLY(tmp11, FIX(1.011928851)); /* (c2+c4)/2 */
    tmp10 -= tmp2 << 2; /* tmp10 - 4*tmp2 */
    tmp10 = MULTIPLY(tmp10, FIX(0.452548340)); /* (c2-c4)/2 */
    dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(tmp11 + tmp10, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp11 - tmp10, CONST_BITS+PASS1_BITS);

    /* Odd part */

    tmp10 = MULTIPLY(tmp0 + tmp1, FIX(1.064004961)); /* c3 */

    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp0, FIX(0.657591230)), /* c1-c3 */
              CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp1, FIX(2.785601151)), /* c1+c3 */
              CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on an 8x4 sample block.
 *
 * 8-point FDCT in pass 1 (rows), 4-point in pass 2 (columns).
 * Output occupies the top 8x4 region of the 8x8 coefficient block;
 * the bottom four rows are zeroed.
 */

GLOBAL(void)
jpeg_fdct_8x4 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3;
  INT32 tmp10, tmp11, tmp12, tmp13;
  INT32 z1;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Zero 4 bottom rows of output coefficient block. */
  MEMZERO(&data[DCTSIZE*4], SIZEOF(DCTELEM) * DCTSIZE * 4);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * We must also scale the output by 8/4 = 2, which we add here.
   * 8-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
   */

  dataptr = data;
  for (ctr = 0; ctr < 4; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part per LL&M figure 1 --- note that published figure is faulty;
     * rotator "c1" should be "c6".
     */

    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[7]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[6]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[5]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[4]);

    tmp10 = tmp0 + tmp3;
    tmp12 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp13 = tmp1 - tmp2;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[7]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[5]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[4]);

    /* Apply unsigned->signed conversion. */
    /* The extra +1 on the shifts supplies the overall factor of 2. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 - 8 * CENTERJSAMPLE) << (PASS1_BITS+1));
    dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << (PASS1_BITS+1));

    z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); /* c6 */
    /* Add fudge factor here for final descale. */
    /* (round-half-up bias so the plain RIGHT_SHIFT below rounds) */
    z1 += ONE << (CONST_BITS-PASS1_BITS-2);
    dataptr[2] = (DCTELEM)
      RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865), /* c2-c6 */
                  CONST_BITS-PASS1_BITS-1);
    dataptr[6] = (DCTELEM)
      RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065), /* c2+c6 */
                  CONST_BITS-PASS1_BITS-1);

    /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
     * i0..i3 in the paper are tmp0..tmp3 here.
     */

    tmp12 = tmp0 + tmp2;
    tmp13 = tmp1 + tmp3;

    z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS-PASS1_BITS-2);

    tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* -c3+c5 */
    tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */
    tmp12 += z1;
    tmp13 += z1;

    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
    tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */
    tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */
    tmp0 += z1 + tmp12;
    tmp3 += z1 + tmp13;

    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
    tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */
    tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */
    tmp1 += z1 + tmp13;
    tmp2 += z1 + tmp12;

    dataptr[1] = (DCTELEM) RIGHT_SHIFT(tmp0, CONST_BITS-PASS1_BITS-1);
    dataptr[3] = (DCTELEM) RIGHT_SHIFT(tmp1, CONST_BITS-PASS1_BITS-1);
    dataptr[5] = (DCTELEM) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS-1);
    dataptr[7] = (DCTELEM) RIGHT_SHIFT(tmp3, CONST_BITS-PASS1_BITS-1);

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * 4-point FDCT kernel,
   * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point FDCT].
   */

  dataptr = data;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*3] + (ONE << (PASS1_BITS-1));
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*2];

    tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*3];
    tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*2];

    dataptr[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp0 + tmp1, PASS1_BITS);
    dataptr[DCTSIZE*2] = (DCTELEM) RIGHT_SHIFT(tmp0 - tmp1, PASS1_BITS);

    /* Odd part */

    tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100); /* c6 */
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS+PASS1_BITS-1);

    dataptr[DCTSIZE*1] = (DCTELEM)
      RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */
                  CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM)
      RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */
                  CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 6x3 sample block.
 *
 * 6-point FDCT in pass 1 (rows), 3-point in pass 2 (columns).
 * Only the top-left 6x3 coefficients of the 8x8 block are produced;
 * the rest of the block is pre-zeroed.
 */

GLOBAL(void)
jpeg_fdct_6x3 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2;
  INT32 tmp10, tmp11, tmp12;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * We scale the results further by 2 as part of output adaption
   * scaling for different DCT size.
   * 6-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
   */

  dataptr = data;
  for (ctr = 0; ctr < 3; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    /* tmp11 (not tmp1) holds the 1+4 pair: the c4 term below needs
     * tmp10 - 2*tmp11.
     */
    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[5]);
    tmp11 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[4]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[3]);

    tmp10 = tmp0 + tmp2;
    tmp12 = tmp0 - tmp2;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[5]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[4]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[3]);

    /* Apply unsigned->signed conversion. */
    /* The extra +1 on the shifts supplies the factor of 2 noted above. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 - 6 * CENTERJSAMPLE) << (PASS1_BITS+1));
    dataptr[2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp12, FIX(1.224744871)), /* c2 */
              CONST_BITS-PASS1_BITS-1);
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(0.707106781)), /* c4 */
              CONST_BITS-PASS1_BITS-1);

    /* Odd part */

    tmp10 = DESCALE(MULTIPLY(tmp0 + tmp2, FIX(0.366025404)), /* c5 */
                    CONST_BITS-PASS1_BITS-1);

    dataptr[1] = (DCTELEM) (tmp10 + ((tmp0 + tmp1) << (PASS1_BITS+1)));
    dataptr[3] = (DCTELEM) ((tmp0 - tmp1 - tmp2) << (PASS1_BITS+1));
    dataptr[5] = (DCTELEM) (tmp10 + ((tmp2 - tmp1) << (PASS1_BITS+1)));

    dataptr += DCTSIZE; /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/6)*(8/3) = 32/9, which we partially
   * fold into the constant multipliers (other part was done in pass 1):
   * 3-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/6) * 16/9.
   */

  dataptr = data;
  for (ctr = 0; ctr < 6; ctr++) {
    /* Even part */

    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*2];
    tmp1 = dataptr[DCTSIZE*1]; /* center row pairs with itself */
    tmp2 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*2];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 + tmp1, FIX(1.777777778)), /* 16/9 */
              CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 - tmp1 - tmp1, FIX(1.257078722)), /* c2 */
              CONST_BITS+PASS1_BITS);

    /* Odd part */

    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(MULTIPLY(tmp2, FIX(2.177324216)), /* c1 */
              CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 4x2 sample block.
 *
 * 4-point FDCT in pass 1 (rows), 2-point in pass 2 (columns).
 * The block is small enough that both rows are processed explicitly
 * (no loops); only the top-left 4x2 coefficients are produced.
 */

GLOBAL(void)
jpeg_fdct_4x2 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* Even-part terms fit in DCTELEM; odd-part terms carry CONST_BITS
   * fixed-point scale and need INT32.
   */
  DCTELEM tmp0, tmp2, tmp10, tmp12, tmp4, tmp5;
  INT32 tmp1, tmp3, tmp11, tmp13;
  INT32 z1, z2, z3;
  JSAMPROW elemptr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT.
   * 4-point FDCT kernel,
   * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point FDCT].
   */

  /* Row 0 */
  elemptr = sample_data[0] + start_col;

  /* Even part */

  tmp4 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[3]);
  tmp5 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[2]);

  tmp0 = tmp4 + tmp5;
  tmp2 = tmp4 - tmp5;

  /* Odd part */

  z2 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[3]);
  z3 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[2]);

  z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  /* Add fudge factor here for final descale. */
  /* Applied once: columns 1 and 3 below output tmp1 +/- tmp11 and
   * tmp3 +/- tmp13, so the bias in tmp1/tmp3 covers both outputs.
   */
  z1 += ONE << (CONST_BITS-3-1);
  tmp1 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
  tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */

  /* Row 1 */
  elemptr = sample_data[1] + start_col;

  /* Even part */

  tmp4 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[3]);
  tmp5 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[2]);

  tmp10 = tmp4 + tmp5;
  tmp12 = tmp4 - tmp5;

  /* Odd part */

  z2 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[3]);
  z3 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[2]);

  z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  tmp11 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
  tmp13 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/4)*(8/2) = 2**3.
   */

  /* Column 0 */
  /* Apply unsigned->signed conversion. */
  data[DCTSIZE*0] = (tmp0 + tmp10 - 8 * CENTERJSAMPLE) << 3;
  data[DCTSIZE*1] = (tmp0 - tmp10) << 3;

  /* Column 1 */
  data[DCTSIZE*0+1] = (DCTELEM) RIGHT_SHIFT(tmp1 + tmp11, CONST_BITS-3);
  data[DCTSIZE*1+1] = (DCTELEM) RIGHT_SHIFT(tmp1 - tmp11, CONST_BITS-3);

  /* Column 2 */
  data[DCTSIZE*0+2] = (tmp2 + tmp12) << 3;
  data[DCTSIZE*1+2] = (tmp2 - tmp12) << 3;

  /* Column 3 */
  data[DCTSIZE*0+3] = (DCTELEM) RIGHT_SHIFT(tmp3 + tmp13, CONST_BITS-3);
  data[DCTSIZE*1+3] = (DCTELEM) RIGHT_SHIFT(tmp3 - tmp13, CONST_BITS-3);
}
/*
 * Perform the forward DCT on a 2x1 sample block.
 *
 * 2-point FDCT in pass 1 (rows), 1-point in pass 2 (columns).
 * A 2-point DCT degenerates to a sum (DC) and a difference (AC);
 * only data[0] and data[1] of the 8x8 block are written, the rest
 * is pre-zeroed.
 */

GLOBAL(void)
jpeg_fdct_2x1 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  DCTELEM left, right;
  JSAMPROW row;

  /* Clear the whole coefficient block up front. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  row = sample_data[0] + start_col;
  left = GETJSAMPLE(row[0]);
  right = GETJSAMPLE(row[1]);

  /* We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/2)*(8/1) = 2**5.
   */

  /* Even part: DC term; the unsigned->signed level shift
   * (-CENTERJSAMPLE per sample) is folded in here.
   */
  data[0] = (left + right - 2 * CENTERJSAMPLE) << 5;

  /* Odd part: the single AC term. */
  data[1] = (left - right) << 5;
}
/*
 * Perform the forward DCT on an 8x16 sample block.
 *
 * 8-point FDCT in pass 1 (rows), 16-point in pass 2 (columns).
 * The 16 row results do not fit in the 8x8 output block, so rows
 * 8..15 of pass 1 go into a local workspace; pass 2 folds each
 * column's 16 values (8 from data, 8 from workspace) back into
 * the 8 output coefficients.
 */

GLOBAL(void)
jpeg_fdct_8x16 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16, tmp17;
  INT32 z1;
  DCTELEM workspace[DCTSIZE2];
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 8-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
   */

  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part per LL&M figure 1 --- note that published figure is faulty;
     * rotator "c1" should be "c6".
     */

    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[7]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[6]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[5]);
    tmp3 = GETJSAMPLE(elemptr[3]) + GETJSAMPLE(elemptr[4]);

    tmp10 = tmp0 + tmp3;
    tmp12 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp13 = tmp1 - tmp2;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[7]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[6]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[5]);
    tmp3 = GETJSAMPLE(elemptr[3]) - GETJSAMPLE(elemptr[4]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM) ((tmp10 + tmp11 - 8 * CENTERJSAMPLE) << PASS1_BITS);
    dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);

    z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); /* c6 */
    dataptr[2] = (DCTELEM)
      DESCALE(z1 + MULTIPLY(tmp12, FIX_0_765366865), /* c2-c6 */
              CONST_BITS-PASS1_BITS);
    dataptr[6] = (DCTELEM)
      DESCALE(z1 - MULTIPLY(tmp13, FIX_1_847759065), /* c2+c6 */
              CONST_BITS-PASS1_BITS);

    /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
     * i0..i3 in the paper are tmp0..tmp3 here.
     */

    tmp12 = tmp0 + tmp2;
    tmp13 = tmp1 + tmp3;

    z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */
    tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* -c3+c5 */
    tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */
    tmp12 += z1;
    tmp13 += z1;

    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
    tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */
    tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */
    tmp0 += z1 + tmp12;
    tmp3 += z1 + tmp13;

    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
    tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */
    tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */
    tmp1 += z1 + tmp13;
    tmp2 += z1 + tmp12;

    dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS-PASS1_BITS);
    dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS-PASS1_BITS);
    dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS-PASS1_BITS);
    dataptr[7] = (DCTELEM) DESCALE(tmp3, CONST_BITS-PASS1_BITS);

    /* First 8 rows fill `data`; rows 8..15 spill into `workspace`. */
    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == DCTSIZE * 2)
        break; /* Done. */
      dataptr += DCTSIZE; /* advance pointer to next row */
    } else
      dataptr = workspace; /* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by 8/16 = 1/2.
   * 16-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/32).
   */

  dataptr = data;
  wsptr = workspace;
  for (ctr = DCTSIZE-1; ctr >= 0; ctr--) {
    /* Even part */

    /* Mirror pairs span data (rows 0..7) and workspace (rows 8..15):
     * data row k pairs with workspace row 7-k.
     */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*4];
    tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*3];
    tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*2];
    tmp6 = dataptr[DCTSIZE*6] + wsptr[DCTSIZE*1];
    tmp7 = dataptr[DCTSIZE*7] + wsptr[DCTSIZE*0];

    tmp10 = tmp0 + tmp7;
    tmp14 = tmp0 - tmp7;
    tmp11 = tmp1 + tmp6;
    tmp15 = tmp1 - tmp6;
    tmp12 = tmp2 + tmp5;
    tmp16 = tmp2 - tmp5;
    tmp13 = tmp3 + tmp4;
    tmp17 = tmp3 - tmp4;

    tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*4];
    tmp4 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*3];
    tmp5 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*2];
    tmp6 = dataptr[DCTSIZE*6] - wsptr[DCTSIZE*1];
    tmp7 = dataptr[DCTSIZE*7] - wsptr[DCTSIZE*0];

    /* The +1 in the descale counts implements the 1/2 output scaling. */
    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(tmp10 + tmp11 + tmp12 + tmp13, PASS1_BITS+1);
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp13, FIX(1.306562965)) + /* c4[16] = c2[8] */
              MULTIPLY(tmp11 - tmp12, FIX_0_541196100), /* c12[16] = c6[8] */
              CONST_BITS+PASS1_BITS+1);

    tmp10 = MULTIPLY(tmp17 - tmp15, FIX(0.275899379)) + /* c14[16] = c7[8] */
            MULTIPLY(tmp14 - tmp16, FIX(1.387039845)); /* c2[16] = c1[8] */

    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp15, FIX(1.451774982)) /* c6+c14 */
              + MULTIPLY(tmp16, FIX(2.172734804)), /* c2+c10 */
              CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp14, FIX(0.211164243)) /* c2-c6 */
              - MULTIPLY(tmp17, FIX(1.061594338)), /* c10+c14 */
              CONST_BITS+PASS1_BITS+1);

    /* Odd part */

    tmp11 = MULTIPLY(tmp0 + tmp1, FIX(1.353318001)) + /* c3 */
            MULTIPLY(tmp6 - tmp7, FIX(0.410524528)); /* c13 */
    tmp12 = MULTIPLY(tmp0 + tmp2, FIX(1.247225013)) + /* c5 */
            MULTIPLY(tmp5 + tmp7, FIX(0.666655658)); /* c11 */
    tmp13 = MULTIPLY(tmp0 + tmp3, FIX(1.093201867)) + /* c7 */
            MULTIPLY(tmp4 - tmp7, FIX(0.897167586)); /* c9 */
    tmp14 = MULTIPLY(tmp1 + tmp2, FIX(0.138617169)) + /* c15 */
            MULTIPLY(tmp6 - tmp5, FIX(1.407403738)); /* c1 */
    tmp15 = MULTIPLY(tmp1 + tmp3, - FIX(0.666655658)) + /* -c11 */
            MULTIPLY(tmp4 + tmp6, - FIX(1.247225013)); /* -c5 */
    tmp16 = MULTIPLY(tmp2 + tmp3, - FIX(1.353318001)) + /* -c3 */
            MULTIPLY(tmp5 - tmp4, FIX(0.410524528)); /* c13 */

    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(tmp0, FIX(2.286341144)) + /* c7+c5+c3-c1 */
            MULTIPLY(tmp7, FIX(0.779653625)); /* c15+c13-c11+c9 */
    tmp11 += tmp14 + tmp15 + MULTIPLY(tmp1, FIX(0.071888074)) /* c9-c3-c15+c11 */
             - MULTIPLY(tmp6, FIX(1.663905119)); /* c7+c13+c1-c5 */
    tmp12 += tmp14 + tmp16 - MULTIPLY(tmp2, FIX(1.125726048)) /* c7+c5+c15-c3 */
             + MULTIPLY(tmp5, FIX(1.227391138)); /* c9-c11+c1-c13 */
    tmp13 += tmp15 + tmp16 + MULTIPLY(tmp3, FIX(1.065388962)) /* c15+c3+c11-c7 */
             + MULTIPLY(tmp4, FIX(2.167985692)); /* c1+c13+c5-c9 */

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10, CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp11, CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12, CONST_BITS+PASS1_BITS+1);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp13, CONST_BITS+PASS1_BITS+1);

    dataptr++; /* advance pointer to next column */
    wsptr++; /* advance pointer to next column */
  }
}
/*
 * Perform the forward DCT on a 7x14 sample block.
 *
 * 7-point FDCT in pass 1 (rows), 14-point in pass 2 (columns).
 * The 14 row results overflow the 8x8 output block, so rows 8..13
 * of pass 1 go into a local 8x6 workspace; pass 2 folds each
 * column's 14 values back into 7 output coefficients.
 */

GLOBAL(void)
jpeg_fdct_7x14 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  INT32 z1, z2, z3;
  DCTELEM workspace[8*6];
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 7-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/14).
   */

  dataptr = data;
  ctr = 0;
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[6]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[5]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[4]);
    tmp3 = GETJSAMPLE(elemptr[3]); /* center sample pairs with itself */

    tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[6]);
    tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[5]);
    tmp12 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[4]);

    z1 = tmp0 + tmp2;
    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((z1 + tmp1 + tmp3 - 7 * CENTERJSAMPLE) << PASS1_BITS);
    tmp3 += tmp3;
    z1 -= tmp3;
    z1 -= tmp3; /* z1 = tmp0 + tmp2 - 4*tmp3 */
    z1 = MULTIPLY(z1, FIX(0.353553391)); /* (c2+c6-c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp2, FIX(0.920609002)); /* (c2+c4-c6)/2 */
    z3 = MULTIPLY(tmp1 - tmp2, FIX(0.314692123)); /* c6 */
    dataptr[2] = (DCTELEM) DESCALE(z1 + z2 + z3, CONST_BITS-PASS1_BITS);
    z1 -= z2;
    z2 = MULTIPLY(tmp0 - tmp1, FIX(0.881747734)); /* c4 */
    dataptr[4] = (DCTELEM)
      DESCALE(z2 + z3 - MULTIPLY(tmp1 - tmp3, FIX(0.707106781)), /* c2+c6-c4 */
              CONST_BITS-PASS1_BITS);
    dataptr[6] = (DCTELEM) DESCALE(z1 + z2, CONST_BITS-PASS1_BITS);

    /* Odd part */

    tmp1 = MULTIPLY(tmp10 + tmp11, FIX(0.935414347)); /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(tmp10 - tmp11, FIX(0.170262339)); /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(tmp11 + tmp12, - FIX(1.378756276)); /* -c1 */
    tmp1 += tmp2;
    tmp3 = MULTIPLY(tmp10 + tmp12, FIX(0.613604268)); /* c5 */
    tmp0 += tmp3;
    tmp2 += tmp3 + MULTIPLY(tmp12, FIX(1.870828693)); /* c3+c1-c5 */

    dataptr[1] = (DCTELEM) DESCALE(tmp0, CONST_BITS-PASS1_BITS);
    dataptr[3] = (DCTELEM) DESCALE(tmp1, CONST_BITS-PASS1_BITS);
    dataptr[5] = (DCTELEM) DESCALE(tmp2, CONST_BITS-PASS1_BITS);

    /* First 8 rows fill `data`; rows 8..13 spill into `workspace`. */
    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 14)
        break; /* Done. */
      dataptr += DCTSIZE; /* advance pointer to next row */
    } else
      dataptr = workspace; /* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/7)*(8/14) = 32/49, which we
   * fold into the constant multipliers:
   * 14-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/28) * 32/49.
   */

  dataptr = data;
  wsptr = workspace;
  for (ctr = 0; ctr < 7; ctr++) {
    /* Even part */

    /* Mirror pairs: data row k pairs with workspace row 5-k for k<6;
     * data rows 6 and 7 pair with each other.
     * tmp13 (not tmp3) holds the 3/ws2 pair since it is doubled and
     * reused in the c4/c12/c8 terms below.
     */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*5];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*3];
    tmp13 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*2];
    tmp4 = dataptr[DCTSIZE*4] + wsptr[DCTSIZE*1];
    tmp5 = dataptr[DCTSIZE*5] + wsptr[DCTSIZE*0];
    tmp6 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7];

    tmp10 = tmp0 + tmp6;
    tmp14 = tmp0 - tmp6;
    tmp11 = tmp1 + tmp5;
    tmp15 = tmp1 - tmp5;
    tmp12 = tmp2 + tmp4;
    tmp16 = tmp2 - tmp4;

    tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*5];
    tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*3];
    tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*2];
    tmp4 = dataptr[DCTSIZE*4] - wsptr[DCTSIZE*1];
    tmp5 = dataptr[DCTSIZE*5] - wsptr[DCTSIZE*0];
    tmp6 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12 + tmp13,
                       FIX(0.653061224)), /* 32/49 */
              CONST_BITS+PASS1_BITS);
    tmp13 += tmp13; /* use 2*tmp13 below */
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp13, FIX(0.832106052)) + /* c4 */
              MULTIPLY(tmp11 - tmp13, FIX(0.205513223)) - /* c12 */
              MULTIPLY(tmp12 - tmp13, FIX(0.575835255)), /* c8 */
              CONST_BITS+PASS1_BITS);

    tmp10 = MULTIPLY(tmp14 + tmp15, FIX(0.722074570)); /* c6 */

    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp14, FIX(0.178337691)) /* c2-c6 */
              + MULTIPLY(tmp16, FIX(0.400721155)), /* c10 */
              CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp15, FIX(1.122795725)) /* c6+c10 */
              - MULTIPLY(tmp16, FIX(0.900412262)), /* c2 */
              CONST_BITS+PASS1_BITS);

    /* Odd part */

    tmp10 = tmp1 + tmp2;
    tmp11 = tmp5 - tmp4;
    dataptr[DCTSIZE*7] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 - tmp10 + tmp3 - tmp11 - tmp6,
                       FIX(0.653061224)), /* 32/49 */
              CONST_BITS+PASS1_BITS);
    tmp3 = MULTIPLY(tmp3 , FIX(0.653061224)); /* 32/49 */
    tmp10 = MULTIPLY(tmp10, - FIX(0.103406812)); /* -c13 */
    tmp11 = MULTIPLY(tmp11, FIX(0.917760839)); /* c1 */
    tmp10 += tmp11 - tmp3;
    tmp11 = MULTIPLY(tmp0 + tmp2, FIX(0.782007410)) + /* c5 */
            MULTIPLY(tmp4 + tmp6, FIX(0.491367823)); /* c9 */
    dataptr[DCTSIZE*5] = (DCTELEM)
      DESCALE(tmp10 + tmp11 - MULTIPLY(tmp2, FIX(1.550341076)) /* c3+c5-c13 */
              + MULTIPLY(tmp4, FIX(0.731428202)), /* c1+c11-c9 */
              CONST_BITS+PASS1_BITS);
    tmp12 = MULTIPLY(tmp0 + tmp1, FIX(0.871740478)) + /* c3 */
            MULTIPLY(tmp5 - tmp6, FIX(0.305035186)); /* c11 */
    dataptr[DCTSIZE*3] = (DCTELEM)
      DESCALE(tmp10 + tmp12 - MULTIPLY(tmp1, FIX(0.276965844)) /* c3-c9-c13 */
              - MULTIPLY(tmp5, FIX(2.004803435)), /* c1+c5+c11 */
              CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(tmp11 + tmp12 + tmp3
              - MULTIPLY(tmp0, FIX(0.735987049)) /* c3+c5-c1 */
              - MULTIPLY(tmp6, FIX(0.082925825)), /* c9-c11-c13 */
              CONST_BITS+PASS1_BITS);

    dataptr++; /* advance pointer to next column */
    wsptr++; /* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 6x12 sample block.
*
* 6-point FDCT in pass 1 (rows), 12-point in pass 2 (columns).
*/
GLOBAL(void)
jpeg_fdct_6x12 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: receives the DCTSIZE2 output coefficients (pre-zeroed below).
   * sample_data + start_col: locate the 6-wide, 12-high input sample block.
   */
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  DCTELEM workspace[8*4];	/* holds input rows 8..11, beyond the DCTSIZE output rows */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 6-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
   */

  dataptr = data;
  ctr = 0;
  /* Rows 0..7 are written into data[]; rows 8..11 go to workspace[].
   * The loop exits after all 12 rows have been transformed.
   */
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[5]);
    tmp11 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[4]);
    tmp2 = GETJSAMPLE(elemptr[2]) + GETJSAMPLE(elemptr[3]);

    tmp10 = tmp0 + tmp2;
    tmp12 = tmp0 - tmp2;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[5]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[4]);
    tmp2 = GETJSAMPLE(elemptr[2]) - GETJSAMPLE(elemptr[3]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp11 - 6 * CENTERJSAMPLE) << PASS1_BITS);
    dataptr[2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp12, FIX(1.224744871)), /* c2 */
	      CONST_BITS-PASS1_BITS);
    dataptr[4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(0.707106781)), /* c4 */
	      CONST_BITS-PASS1_BITS);

    /* Odd part */

    tmp10 = DESCALE(MULTIPLY(tmp0 + tmp2, FIX(0.366025404)), /* c5 */
		    CONST_BITS-PASS1_BITS);

    dataptr[1] = (DCTELEM) (tmp10 + ((tmp0 + tmp1) << PASS1_BITS));
    dataptr[3] = (DCTELEM) ((tmp0 - tmp1 - tmp2) << PASS1_BITS);
    dataptr[5] = (DCTELEM) (tmp10 + ((tmp2 - tmp1) << PASS1_BITS));

    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 12)
	break;			/* Done. */
      dataptr += DCTSIZE;	/* advance pointer to next row */
    } else
      dataptr = workspace;	/* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/6)*(8/12) = 8/9, which we
   * fold into the constant multipliers:
   * 12-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/24) * 8/9.
   */

  dataptr = data;
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++) {
    /* Even part */

    /* Column pairs combine row r with row 11-r; wsptr[DCTSIZE*k]
     * is row 8+k, so e.g. row 0 pairs with wsptr[DCTSIZE*3] (row 11).
     */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*3];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*2];
    tmp2 = dataptr[DCTSIZE*2] + wsptr[DCTSIZE*1];
    tmp3 = dataptr[DCTSIZE*3] + wsptr[DCTSIZE*0];
    tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*7];
    tmp5 = dataptr[DCTSIZE*5] + dataptr[DCTSIZE*6];

    tmp10 = tmp0 + tmp5;
    tmp13 = tmp0 - tmp5;
    tmp11 = tmp1 + tmp4;
    tmp14 = tmp1 - tmp4;
    tmp12 = tmp2 + tmp3;
    tmp15 = tmp2 - tmp3;

    tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*3];
    tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*2];
    tmp2 = dataptr[DCTSIZE*2] - wsptr[DCTSIZE*1];
    tmp3 = dataptr[DCTSIZE*3] - wsptr[DCTSIZE*0];
    tmp4 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*7];
    tmp5 = dataptr[DCTSIZE*5] - dataptr[DCTSIZE*6];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12, FIX(0.888888889)), /* 8/9 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(MULTIPLY(tmp13 - tmp14 - tmp15, FIX(0.888888889)), /* 8/9 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.088662108)), /* c4 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp14 - tmp15, FIX(0.888888889)) + /* 8/9 */
	      MULTIPLY(tmp13 + tmp15, FIX(1.214244803)),  /* c2 */
	      CONST_BITS+PASS1_BITS);

    /* Odd part */

    tmp10 = MULTIPLY(tmp1 + tmp4, FIX(0.481063200));   /* c9 */
    tmp14 = tmp10 + MULTIPLY(tmp1, FIX(0.680326102));  /* c3-c9 */
    tmp15 = tmp10 - MULTIPLY(tmp4, FIX(1.642452502));  /* c3+c9 */
    tmp12 = MULTIPLY(tmp0 + tmp2, FIX(0.997307603));   /* c5 */
    tmp13 = MULTIPLY(tmp0 + tmp3, FIX(0.765261039));   /* c7 */
    tmp10 = tmp12 + tmp13 + tmp14 - MULTIPLY(tmp0, FIX(0.516244403)) /* c5+c7-c1 */
	    + MULTIPLY(tmp5, FIX(0.164081699));        /* c11 */
    tmp11 = MULTIPLY(tmp2 + tmp3, - FIX(0.164081699)); /* -c11 */
    tmp12 += tmp11 - tmp15 - MULTIPLY(tmp2, FIX(2.079550144))  /* c1+c5-c11 */
	    + MULTIPLY(tmp5, FIX(0.765261039));        /* c7 */
    tmp13 += tmp11 - tmp14 + MULTIPLY(tmp3, FIX(0.645144899))  /* c1+c11-c7 */
	    - MULTIPLY(tmp5, FIX(0.997307603));        /* c5 */
    tmp11 = tmp15 + MULTIPLY(tmp0 - tmp3, FIX(1.161389302)) /* c3 */
	    - MULTIPLY(tmp2 + tmp5, FIX(0.481063200)); /* c9 */

    dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp11, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp12, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp13, CONST_BITS+PASS1_BITS);

    dataptr++;			/* advance pointer to next column */
    wsptr++;			/* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 5x10 sample block.
*
* 5-point FDCT in pass 1 (rows), 10-point in pass 2 (columns).
*/
GLOBAL(void)
jpeg_fdct_5x10 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: receives the DCTSIZE2 output coefficients (pre-zeroed below).
   * sample_data + start_col: locate the 5-wide, 10-high input sample block.
   */
  INT32 tmp0, tmp1, tmp2, tmp3, tmp4;
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  DCTELEM workspace[8*2];	/* holds input rows 8..9, beyond the DCTSIZE output rows */
  DCTELEM *dataptr;
  DCTELEM *wsptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * 5-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/10).
   */

  dataptr = data;
  ctr = 0;
  /* Rows 0..7 are written into data[]; rows 8..9 go to workspace[]. */
  for (;;) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[4]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[3]);
    tmp2 = GETJSAMPLE(elemptr[2]);	/* center sample has no mirror partner */

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;

    tmp0 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[4]);
    tmp1 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[3]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM)
      ((tmp10 + tmp2 - 5 * CENTERJSAMPLE) << PASS1_BITS);
    tmp11 = MULTIPLY(tmp11, FIX(0.790569415));	/* (c2+c4)/2 */
    tmp10 -= tmp2 << 2;
    tmp10 = MULTIPLY(tmp10, FIX(0.353553391));	/* (c2-c4)/2 */
    dataptr[2] = (DCTELEM) DESCALE(tmp11 + tmp10, CONST_BITS-PASS1_BITS);
    dataptr[4] = (DCTELEM) DESCALE(tmp11 - tmp10, CONST_BITS-PASS1_BITS);

    /* Odd part */

    tmp10 = MULTIPLY(tmp0 + tmp1, FIX(0.831253876));	/* c3 */

    dataptr[1] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp0, FIX(0.513743148)), /* c1-c3 */
	      CONST_BITS-PASS1_BITS);
    dataptr[3] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp1, FIX(2.176250899)), /* c1+c3 */
	      CONST_BITS-PASS1_BITS);

    ctr++;
    if (ctr != DCTSIZE) {
      if (ctr == 10)
	break;			/* Done. */
      dataptr += DCTSIZE;	/* advance pointer to next row */
    } else
      dataptr = workspace;	/* switch pointer to extended workspace */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/5)*(8/10) = 32/25, which we
   * fold into the constant multipliers:
   * 10-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/20) * 32/25.
   */

  dataptr = data;
  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++) {
    /* Even part */

    /* Column pairs combine row r with row 9-r; wsptr holds rows 8..9. */
    tmp0 = dataptr[DCTSIZE*0] + wsptr[DCTSIZE*1];
    tmp1 = dataptr[DCTSIZE*1] + wsptr[DCTSIZE*0];
    tmp12 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*7];
    tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*6];
    tmp4 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5];

    tmp10 = tmp0 + tmp4;
    tmp13 = tmp0 - tmp4;
    tmp11 = tmp1 + tmp3;
    tmp14 = tmp1 - tmp3;

    tmp0 = dataptr[DCTSIZE*0] - wsptr[DCTSIZE*1];
    tmp1 = dataptr[DCTSIZE*1] - wsptr[DCTSIZE*0];
    tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*7];
    tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*6];
    tmp4 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp11 + tmp12, FIX(1.28)), /* 32/25 */
	      CONST_BITS+PASS1_BITS);
    tmp12 += tmp12;
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp12, FIX(1.464477191)) - /* c4 */
	      MULTIPLY(tmp11 - tmp12, FIX(0.559380511)),  /* c8 */
	      CONST_BITS+PASS1_BITS);
    tmp10 = MULTIPLY(tmp13 + tmp14, FIX(1.064004961));    /* c6 */
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp13, FIX(0.657591230)),  /* c2-c6 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*6] = (DCTELEM)
      DESCALE(tmp10 - MULTIPLY(tmp14, FIX(2.785601151)),  /* c2+c6 */
	      CONST_BITS+PASS1_BITS);

    /* Odd part */

    tmp10 = tmp0 + tmp4;
    tmp11 = tmp1 - tmp3;

    dataptr[DCTSIZE*5] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp11 - tmp2, FIX(1.28)),  /* 32/25 */
	      CONST_BITS+PASS1_BITS);
    tmp2 = MULTIPLY(tmp2, FIX(1.28));                     /* 32/25 */
    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0, FIX(1.787906876)) +          /* c1 */
	      MULTIPLY(tmp1, FIX(1.612894094)) + tmp2 +   /* c3 */
	      MULTIPLY(tmp3, FIX(0.821810588)) +          /* c7 */
	      MULTIPLY(tmp4, FIX(0.283176630)),           /* c9 */
	      CONST_BITS+PASS1_BITS);
    tmp12 = MULTIPLY(tmp0 - tmp4, FIX(1.217352341)) -     /* (c3+c7)/2 */
	    MULTIPLY(tmp1 + tmp3, FIX(0.752365123));      /* (c1-c9)/2 */
    tmp13 = MULTIPLY(tmp10 + tmp11, FIX(0.395541753)) +   /* (c3-c7)/2 */
	    MULTIPLY(tmp11, FIX(0.64)) - tmp2;            /* 16/25 */
    dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp12 + tmp13, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp12 - tmp13, CONST_BITS+PASS1_BITS);

    dataptr++;			/* advance pointer to next column */
    wsptr++;			/* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 4x8 sample block.
*
* 4-point FDCT in pass 1 (rows), 8-point in pass 2 (columns).
*/
GLOBAL(void)
jpeg_fdct_4x8 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: receives the DCTSIZE2 output coefficients (pre-zeroed below).
   * sample_data + start_col: locate the 4-wide, 8-high input sample block.
   * No workspace is needed: 8 rows fit the DCTSIZE output block exactly.
   */
  INT32 tmp0, tmp1, tmp2, tmp3;
  INT32 tmp10, tmp11, tmp12, tmp13;
  INT32 z1;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * We must also scale the output by 8/4 = 2, which we add here.
   * 4-point FDCT kernel,
   * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point FDCT].
   */

  dataptr = data;
  for (ctr = 0; ctr < DCTSIZE; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[3]);
    tmp1 = GETJSAMPLE(elemptr[1]) + GETJSAMPLE(elemptr[2]);

    tmp10 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[3]);
    tmp11 = GETJSAMPLE(elemptr[1]) - GETJSAMPLE(elemptr[2]);

    /* Apply unsigned->signed conversion. */
    /* The extra +1 in the shift counts below implements the factor of 2. */
    dataptr[0] = (DCTELEM)
      ((tmp0 + tmp1 - 4 * CENTERJSAMPLE) << (PASS1_BITS+1));
    dataptr[2] = (DCTELEM) ((tmp0 - tmp1) << (PASS1_BITS+1));

    /* Odd part */

    tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100);       /* c6 */
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-2);

    dataptr[1] = (DCTELEM)
      RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */
		  CONST_BITS-PASS1_BITS-1);
    dataptr[3] = (DCTELEM)
      RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */
		  CONST_BITS-PASS1_BITS-1);

    dataptr += DCTSIZE;		/* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * 8-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
   */

  dataptr = data;
  for (ctr = 0; ctr < 4; ctr++) {
    /* Even part per LL&M figure 1 --- note that published figure is faulty;
     * rotator "c1" should be "c6".
     */

    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4];

    /* Add fudge factor here for final descale. */
    tmp10 = tmp0 + tmp3 + (ONE << (PASS1_BITS-1));
    tmp12 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp13 = tmp1 - tmp2;

    tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7];
    tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6];
    tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];
    tmp3 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4];

    dataptr[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp10 + tmp11, PASS1_BITS);
    dataptr[DCTSIZE*4] = (DCTELEM) RIGHT_SHIFT(tmp10 - tmp11, PASS1_BITS);

    z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);         /* c6 */
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS+PASS1_BITS-1);

    dataptr[DCTSIZE*2] = (DCTELEM)
      RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865),   /* c2-c6 */
		  CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*6] = (DCTELEM)
      RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065),   /* c2+c6 */
		  CONST_BITS+PASS1_BITS);

    /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
     * i0..i3 in the paper are tmp0..tmp3 here.
     */

    tmp12 = tmp0 + tmp2;
    tmp13 = tmp1 + tmp3;

    z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602);         /* c3 */
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS+PASS1_BITS-1);

    tmp12 = MULTIPLY(tmp12, - FIX_0_390180644);            /* -c3+c5 */
    tmp13 = MULTIPLY(tmp13, - FIX_1_961570560);            /* -c3-c5 */
    tmp12 += z1;
    tmp13 += z1;

    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223);         /* -c3+c7 */
    tmp0 = MULTIPLY(tmp0, FIX_1_501321110);                /* c1+c3-c5-c7 */
    tmp3 = MULTIPLY(tmp3, FIX_0_298631336);                /* -c1+c3+c5-c7 */
    tmp0 += z1 + tmp12;
    tmp3 += z1 + tmp13;

    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447);         /* -c1-c3 */
    tmp1 = MULTIPLY(tmp1, FIX_3_072711026);                /* c1+c3+c5-c7 */
    tmp2 = MULTIPLY(tmp2, FIX_2_053119869);                /* c1+c3-c5+c7 */
    tmp1 += z1 + tmp13;
    tmp2 += z1 + tmp12;

    dataptr[DCTSIZE*1] = (DCTELEM) RIGHT_SHIFT(tmp0, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM) RIGHT_SHIFT(tmp1, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*5] = (DCTELEM) RIGHT_SHIFT(tmp2, CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*7] = (DCTELEM) RIGHT_SHIFT(tmp3, CONST_BITS+PASS1_BITS);

    dataptr++;			/* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 3x6 sample block.
*
* 3-point FDCT in pass 1 (rows), 6-point in pass 2 (columns).
*/
GLOBAL(void)
jpeg_fdct_3x6 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: receives the DCTSIZE2 output coefficients (pre-zeroed below).
   * sample_data + start_col: locate the 3-wide, 6-high input sample block.
   */
  INT32 tmp0, tmp1, tmp2;
  INT32 tmp10, tmp11, tmp12;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT;
   * furthermore, we scale the results by 2**PASS1_BITS.
   * We scale the results further by 2 as part of output adaption
   * scaling for different DCT size.
   * 3-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/6).
   */

  dataptr = data;
  for (ctr = 0; ctr < 6; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    tmp0 = GETJSAMPLE(elemptr[0]) + GETJSAMPLE(elemptr[2]);
    tmp1 = GETJSAMPLE(elemptr[1]);	/* center sample has no mirror partner */

    tmp2 = GETJSAMPLE(elemptr[0]) - GETJSAMPLE(elemptr[2]);

    /* Apply unsigned->signed conversion. */
    /* The extra +1 / -1 in the shift counts implements the factor of 2. */
    dataptr[0] = (DCTELEM)
      ((tmp0 + tmp1 - 3 * CENTERJSAMPLE) << (PASS1_BITS+1));
    dataptr[2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 - tmp1 - tmp1, FIX(0.707106781)), /* c2 */
	      CONST_BITS-PASS1_BITS-1);

    /* Odd part */

    dataptr[1] = (DCTELEM)
      DESCALE(MULTIPLY(tmp2, FIX(1.224744871)),               /* c1 */
	      CONST_BITS-PASS1_BITS-1);

    dataptr += DCTSIZE;		/* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   * We must also scale the output by (8/6)*(8/3) = 32/9, which we partially
   * fold into the constant multipliers (other part was done in pass 1):
   * 6-point FDCT kernel, cK represents sqrt(2) * cos(K*pi/12) * 16/9.
   */

  dataptr = data;
  for (ctr = 0; ctr < 3; ctr++) {
    /* Even part */

    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*5];
    tmp11 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3];

    tmp10 = tmp0 + tmp2;
    tmp12 = tmp0 - tmp2;

    tmp0 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*5];
    tmp1 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*4];
    tmp2 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3];

    dataptr[DCTSIZE*0] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 + tmp11, FIX(1.777777778)),          /* 16/9 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*2] = (DCTELEM)
      DESCALE(MULTIPLY(tmp12, FIX(2.177324216)),                  /* c2 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*4] = (DCTELEM)
      DESCALE(MULTIPLY(tmp10 - tmp11 - tmp11, FIX(1.257078722)),  /* c4 */
	      CONST_BITS+PASS1_BITS);

    /* Odd part */

    tmp10 = MULTIPLY(tmp0 + tmp2, FIX(0.650711829));              /* c5 */

    dataptr[DCTSIZE*1] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp0 + tmp1, FIX(1.777777778)),    /* 16/9 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*3] = (DCTELEM)
      DESCALE(MULTIPLY(tmp0 - tmp1 - tmp2, FIX(1.777777778)),     /* 16/9 */
	      CONST_BITS+PASS1_BITS);
    dataptr[DCTSIZE*5] = (DCTELEM)
      DESCALE(tmp10 + MULTIPLY(tmp2 - tmp1, FIX(1.777777778)),    /* 16/9 */
	      CONST_BITS+PASS1_BITS);

    dataptr++;			/* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 2x4 sample block.
*
* 2-point FDCT in pass 1 (rows), 4-point in pass 2 (columns).
*/
GLOBAL(void)
jpeg_fdct_2x4 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* data: receives the DCTSIZE2 output coefficients (pre-zeroed below).
   * sample_data + start_col: locate the 2-wide, 4-high input sample block.
   */
  INT32 tmp0, tmp1;
  INT32 tmp10, tmp11;
  DCTELEM *dataptr;
  JSAMPROW elemptr;
  int ctr;
  SHIFT_TEMPS

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: process rows.
   * Note results are scaled up by sqrt(8) compared to a true DCT.
   * A 2-point transform is just a sum and a difference; no multiplies.
   */

  dataptr = data;
  for (ctr = 0; ctr < 4; ctr++) {
    elemptr = sample_data[ctr] + start_col;

    /* Even part */

    tmp0 = GETJSAMPLE(elemptr[0]);
    tmp1 = GETJSAMPLE(elemptr[1]);

    /* Apply unsigned->signed conversion. */
    dataptr[0] = (DCTELEM) (tmp0 + tmp1 - 2 * CENTERJSAMPLE);

    /* Odd part */

    dataptr[1] = (DCTELEM) (tmp0 - tmp1);

    dataptr += DCTSIZE;		/* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/2)*(8/4) = 2**3.
   * 4-point FDCT kernel,
   * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point FDCT].
   */

  dataptr = data;
  for (ctr = 0; ctr < 2; ctr++) {
    /* Even part */

    tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*3];
    tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*2];

    tmp10 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*3];
    tmp11 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*2];

    /* The << 3 applies the 2**3 output scaling noted above. */
    dataptr[DCTSIZE*0] = (DCTELEM) ((tmp0 + tmp1) << 3);
    dataptr[DCTSIZE*2] = (DCTELEM) ((tmp0 - tmp1) << 3);

    /* Odd part */

    tmp0 = MULTIPLY(tmp10 + tmp11, FIX_0_541196100);       /* c6 */
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-3-1);

    dataptr[DCTSIZE*1] = (DCTELEM)
      RIGHT_SHIFT(tmp0 + MULTIPLY(tmp10, FIX_0_765366865), /* c2-c6 */
		  CONST_BITS-3);
    dataptr[DCTSIZE*3] = (DCTELEM)
      RIGHT_SHIFT(tmp0 - MULTIPLY(tmp11, FIX_1_847759065), /* c2+c6 */
		  CONST_BITS-3);

    dataptr++;			/* advance pointer to next column */
  }
}
/*
* Perform the forward DCT on a 1x2 sample block.
*
* 1-point FDCT in pass 1 (rows), 2-point in pass 2 (columns).
*/
GLOBAL(void)
jpeg_fdct_1x2 (DCTELEM * data, JSAMPARRAY sample_data, JDIMENSION start_col)
{
  /* The 1x2 block degenerates to a single sum/difference pair. */
  DCTELEM top, bottom;

  /* Pre-zero output coefficient block. */
  MEMZERO(data, SIZEOF(DCTELEM) * DCTSIZE2);

  /* Pass 1: empty. */

  /* Pass 2: process columns.
   * We leave the results scaled up by an overall factor of 8.
   * We must also scale the output by (8/1)*(8/2) = 2**5.
   */

  /* Even part */

  top    = GETJSAMPLE(sample_data[0][start_col]);
  bottom = GETJSAMPLE(sample_data[1][start_col]);

  /* Apply unsigned->signed conversion. */
  data[DCTSIZE*0] = (top + bottom - 2 * CENTERJSAMPLE) << 5;

  /* Odd part */

  data[DCTSIZE*1] = (top - bottom) << 5;
}
#endif /* DCT_SCALING_SUPPORTED */
#endif /* DCT_ISLOW_SUPPORTED */ | c | github | https://github.com/opencv/opencv | 3rdparty/libjpeg/jfdctint.c |
# coding=utf-8
import os
import unittest
from collections import namedtuple, Counter
import six
from requests.exceptions import HTTPError
from conans.client.rest.file_uploader import FileUploader
from conans.errors import AuthenticationException, ForbiddenException
from conans.test.utils.test_files import temp_folder
from conans.test.utils.mocks import TestBufferConanOutput
from conans.util.files import save
class _ResponseMock:
    """Minimal stand-in for a requests response: carries a status code and a
    body, and can re-raise the corresponding HTTP error."""

    def __init__(self, status_code, content):
        self.status_code = status_code
        self.content = content

    def raise_for_status(self):
        """Raises stored :class:`HTTPError`, if one occurred."""
        message = ''
        # 4xx -> client error, 5xx -> server error; anything else is a no-op.
        if 400 <= self.status_code < 500:
            message = u'%s Client Error: %s' % (self.status_code, self.content)
        elif 500 <= self.status_code < 600:
            message = u'%s Server Error: %s' % (self.status_code, self.content)
        if message:
            raise HTTPError(message, response=self)
class _RequesterMock:
    """Requester double whose put() always returns the same canned response."""

    def __init__(self, status_code, content):
        # Retry settings read by the uploader; zero disables waiting.
        self.retry = 0
        self.retry_wait = 0
        self.response = _ResponseMock(status_code, content)

    def put(self, *args, **kwargs):
        return self.response
class _ConfigMock:
def __init__(self):
self.retry = 0
self.retry_wait = 0
class RetryDownloadTests(unittest.TestCase):
    # NOTE(review): the class name says "Download" but every test exercises
    # FileUploader.upload(); kept for backward compatibility with test runners.
    """Check that FileUploader raises auth/permission errors immediately
    (without retrying) and retries generic and server errors the requested
    number of times."""

    def setUp(self):
        self.filename = os.path.join(temp_folder(), "anyfile")
        save(self.filename, "anything")

    def _assert_retry_output(self, output, error_line, expected_count):
        """Assert that both the given error line and the retry-wait line were
        printed exactly 'expected_count' times (0 means no retry happened)."""
        counter = Counter(str(output).splitlines())
        self.assertEqual(counter[error_line], expected_count)
        self.assertEqual(counter["Waiting 0 seconds to retry..."], expected_count)

    def test_error_401(self):
        # 401: authentication failure -> raised immediately, no retries.
        output = TestBufferConanOutput()
        uploader = FileUploader(requester=_RequesterMock(401, "content"), output=output,
                                verify=False, config=_ConfigMock())
        with six.assertRaisesRegex(self, AuthenticationException, "content"):
            uploader.upload(url="fake", abs_path=self.filename, retry=2)
        self._assert_retry_output(output, "ERROR: content", 0)

    def test_error_403_forbidden(self):
        # 403 with a token present -> permission problem, no retries.
        output = TestBufferConanOutput()
        uploader = FileUploader(requester=_RequesterMock(403, "content"), output=output,
                                verify=False, config=_ConfigMock())
        with six.assertRaisesRegex(self, ForbiddenException, "content"):
            auth = namedtuple("auth", "token")
            uploader.upload(url="fake", abs_path=self.filename, retry=2, auth=auth("token"))
        self._assert_retry_output(output, "ERROR: content", 0)

    def test_error_403_authentication(self):
        # 403 with an empty token -> treated as authentication failure, no retries.
        output = TestBufferConanOutput()
        uploader = FileUploader(requester=_RequesterMock(403, "content"), output=output,
                                verify=False, config=_ConfigMock())
        with six.assertRaisesRegex(self, AuthenticationException, "content"):
            auth = namedtuple("auth", "token")
            uploader.upload(url="fake", abs_path=self.filename, retry=2, auth=auth(None))
        self._assert_retry_output(output, "ERROR: content", 0)

    def test_error_requests(self):
        # An arbitrary exception from the requester is retried 'retry' times.
        class _RequesterMock:
            def put(self, *args, **kwargs):
                raise Exception("any exception")

        output = TestBufferConanOutput()
        uploader = FileUploader(requester=_RequesterMock(), output=output,
                                verify=False, config=_ConfigMock())
        with six.assertRaisesRegex(self, Exception, "any exception"):
            uploader.upload(url="fake", abs_path=self.filename, retry=2)
        self._assert_retry_output(output, "ERROR: any exception", 2)

    def test_error_500(self):
        # Server errors (5xx) are retried as well.
        output = TestBufferConanOutput()
        uploader = FileUploader(requester=_RequesterMock(500, "content"), output=output,
                                verify=False, config=_ConfigMock())
        with six.assertRaisesRegex(self, Exception, "500 Server Error: content"):
            uploader.upload(url="fake", abs_path=self.filename, retry=2)
        self._assert_retry_output(output, "ERROR: 500 Server Error: content", 2)
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from django.db import models
from django_extensions.db.fields import AutoSlugField
from django.utils.translation import ugettext_lazy as _
class News(models.Model):
    """
    Django Model to hold News that will display on the homepage.

    :title: = Title of the News (unique)
    :slug: = URL slug, auto-generated from :title:
    :content: = A short description of the news
    :frontpage: = True if it should be displayed on the homepage
    :image: = Large background image for the homepage (optional)
    :pub_date: = Date created (set automatically on first save)
    """
    title = models.CharField(_('title'), max_length=191, unique=True)
    # Derived from 'title' by AutoSlugField; unique so it can be used in URLs.
    slug = AutoSlugField(_('slug'), populate_from='title', unique=True)
    content = models.TextField(_('content'), blank=True)
    frontpage = models.BooleanField(_('frontpage'), default=False,
                                    help_text="determines if the story appears on the front page")
    # Optional; files are stored under MEDIA_ROOT/uploads/news/.
    image = models.ImageField(_('image'), upload_to='uploads/news/', blank=True)
    # auto_now_add: stamped once at creation, never updated afterwards.
    pub_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Ascending by creation time, i.e. oldest news first.
        ordering = ('pub_date',)

    def __unicode__(self):
        # Python 2 string representation (file predates __str__ usage).
        return '%s' % self.title
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Long: login-options
Arg: <options>
Protocols: IMAP LDAP POP3 SMTP
Help: Server login options
Added: 7.34.0
Category: imap pop3 smtp auth ldap
Multi: single
See-also:
- user
Example:
- --login-options 'AUTH=*' imap://example.com
---
# `--login-options`
Specify the login options to use during server authentication.
You can use login options to specify protocol-specific options that may be
used during authentication. At present only IMAP, POP3 and SMTP support login
options. For more information about login options please see RFC 2384,
RFC 5092 and the IETF draft
https://datatracker.ietf.org/doc/html/draft-earhart-url-smtp-00
Since 8.2.0, IMAP supports the login option `AUTH=+LOGIN`. With this option,
curl uses the plain (not SASL) `LOGIN IMAP` command even if the server
advertises SASL authentication. Care should be taken in using this option, as
it sends your password over the network in plain text. This does not work if
the IMAP server disables the plain `LOGIN` (e.g. to prevent password
snooping). | unknown | github | https://github.com/curl/curl | docs/cmdline-opts/login-options.md |
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
import sys
from command import InteractiveCommand
from editor import Editor
from error import UploadError
UNUSUAL_COMMIT_THRESHOLD = 5
def _ConfirmManyUploads(multiple_branches=False):
if multiple_branches:
print "ATTENTION: One or more branches has an unusually high number of commits."
else:
print "ATTENTION: You are uploading an unusually high number of commits."
print "YOU PROBABLY DO NOT MEAN TO DO THIS. (Did you rebase across branches?)"
answer = raw_input("If you are sure you intend to do this, type 'yes': ").strip()
return answer == "yes"
def _die(fmt, *args):
  """Report a formatted error message on stderr and exit with status 1."""
  print >>sys.stderr, 'error: %s' % (fmt % args)
  sys.exit(1)
def _SplitEmails(values):
result = []
for str in values:
result.extend([s.strip() for s in str.split(',')])
return result
class Upload(InteractiveCommand):
common = True
helpSummary = "Upload changes for code review"
helpUsage="""
%prog [--re --cc] [<project>]...
"""
helpDescription = """
The '%prog' command is used to send changes to the Gerrit Code
Review system. It searches for topic branches in local projects
that have not yet been published for review. If multiple topic
branches are found, '%prog' opens an editor to allow the user to
select which branches to upload.
'%prog' searches for uploadable changes in all projects listed at
the command line. Projects can be specified either by name, or by
a relative or absolute path to the project's local directory. If no
projects are specified, '%prog' will search for uploadable changes
in all projects listed in the manifest.
If the --reviewers or --cc options are passed, those emails are
added to the respective list of users, and emails are sent to any
new users. Users passed as --reviewers must already be registered
with the code review system, or the upload will fail.
Configuration
-------------
review.URL.autoupload:
To disable the "Upload ... (y/n)?" prompt, you can set a per-project
or global Git configuration option. If review.URL.autoupload is set
to "true" then repo will assume you always answer "y" at the prompt,
and will not prompt you further. If it is set to "false" then repo
will assume you always answer "n", and will abort.
review.URL.autocopy:
To automatically copy a user or mailing list to all uploaded reviews,
you can set a per-project or global Git option to do so. Specifically,
review.URL.autocopy can be set to a comma separated list of reviewers
who you always want copied on all uploads with a non-empty --re
argument.
review.URL.username:
Override the username used to connect to Gerrit Code Review.
By default the local part of the email address is used.
The URL must match the review URL listed in the manifest XML file,
or in the .git/config within the project. For example:
[remote "origin"]
url = git://git.example.com/project.git
review = http://review.example.com/
[review "http://review.example.com/"]
autoupload = true
autocopy = johndoe@company.com,my-team-alias@company.com
References
----------
Gerrit Code Review: http://code.google.com/p/gerrit/
"""
def _Options(self, p):
p.add_option('-t',
dest='auto_topic', action='store_true',
help='Send local branch name to Gerrit Code Review')
p.add_option('--re', '--reviewers',
type='string', action='append', dest='reviewers',
help='Request reviews from these people.')
p.add_option('--cc',
type='string', action='append', dest='cc',
help='Also send email to these email addresses.')
  def _SingleBranch(self, opt, branch, people):
    """Upload one pending branch, prompting the user unless the
    review.URL.autoupload git config setting decides for them.

    Args:
      opt: parsed command-line options (opt.auto_topic is forwarded).
      branch: the reviewable branch to upload.
      people: (reviewers, cc) tuple of email address lists.
    """
    project = branch.project
    name = branch.name
    remote = project.GetBranch(name).remote

    # review.URL.autoupload: True = upload without asking, False = refuse,
    # unset (None) = ask interactively below.
    key = 'review.%s.autoupload' % remote.review
    answer = project.config.GetBoolean(key)

    if answer is False:
      _die("upload blocked by %s = false" % key)

    if answer is None:
      date = branch.date
      list = branch.commits  # NOTE(review): shadows the builtin 'list'

      print 'Upload project %s/:' % project.relpath
      print ' branch %s (%2d commit%s, %s):' % (
                    name,
                    len(list),
                    len(list) != 1 and 's' or '',
                    date)
      for commit in list:
        print ' %s' % commit

      sys.stdout.write('to %s (y/n)? ' % remote.review)
      answer = sys.stdin.readline().strip()
      answer = answer in ('y', 'Y', 'yes', '1', 'true', 't')

    if answer:
      # Large uploads are usually accidental (e.g. a rebase across
      # branches); require an extra confirmation.
      if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
        answer = _ConfirmManyUploads()

      if answer:
        self._UploadAndReport(opt, [branch], people)
      else:
        _die("upload aborted by user")
  def _MultipleBranches(self, opt, pending, people):
    """Let the user pick which pending branches to upload via an editor.

    A fully commented script listing every uploadable branch is opened in
    the user's editor; the lines the user uncomments select the branches
    to upload.

    Args:
      opt: parsed command-line options.
      pending: list of (project, [reviewable branches]) tuples.
      people: (reviewers, cc) tuple of email address lists.
    """
    projects = {}
    branches = {}

    # Build the commented selection script shown to the user.
    script = []
    script.append('# Uncomment the branches to upload:')
    for project, avail in pending:
      script.append('#')
      script.append('# project %s/:' % project.relpath)

      b = {}
      for branch in avail:
        name = branch.name
        date = branch.date
        list = branch.commits  # NOTE(review): shadows the builtin 'list'

        if b:
          script.append('#')
        script.append('# branch %s (%2d commit%s, %s):' % (
                      name,
                      len(list),
                      len(list) != 1 and 's' or '',
                      date))
        for commit in list:
          script.append('# %s' % commit)
        b[name] = branch

      projects[project.relpath] = project
      branches[project.name] = b
    script.append('')

    script = Editor.EditString("\n".join(script)).split("\n")

    # project lines match whether commented or not ('#?'), so an
    # uncommented branch stays associated with its (still commented)
    # project header; branch lines match only when uncommented.
    project_re = re.compile(r'^#?\s*project\s*([^\s]+)/:$')
    branch_re = re.compile(r'^\s*branch\s*([^\s(]+)\s*\(.*')

    project = None
    todo = []

    for line in script:
      m = project_re.match(line)
      if m:
        name = m.group(1)
        project = projects.get(name)
        if not project:
          _die('project %s not available for upload', name)
        continue
      m = branch_re.match(line)
      if m:
        name = m.group(1)
        if not project:
          _die('project for branch %s not in script', name)
        branch = branches[project.name].get(name)
        if not branch:
          _die('branch %s not in %s', name, project.relpath)
        todo.append(branch)
    if not todo:
      _die("nothing uncommented for upload")

    # Require an extra confirmation if any selected branch is unusually big.
    many_commits = False
    for branch in todo:
      if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
        many_commits = True
        break
    if many_commits:
      if not _ConfirmManyUploads(multiple_branches=True):
        _die("upload aborted by user")

    self._UploadAndReport(opt, todo, people)
def _AppendAutoCcList(self, branch, people):
"""
Appends the list of users in the CC list in the git project's config if a
non-empty reviewer list was found.
"""
name = branch.name
project = branch.project
key = 'review.%s.autocopy' % project.GetBranch(name).remote.review
raw_list = project.config.GetString(key)
if not raw_list is None and len(people[0]) > 0:
people[1].extend([entry.strip() for entry in raw_list.split(',')])
def _FindGerritChange(self, branch):
last_pub = branch.project.WasPublished(branch.name)
if last_pub is None:
return ""
refs = branch.GetPublishedRefs()
try:
# refs/changes/XYZ/N --> XYZ
return refs.get(last_pub).split('/')[-2]
except:
return ""
  def _UploadAndReport(self, opt, todo, original_people):
    """Upload each branch in `todo` for review and print a summary.

    For every branch: appends the project's auto-CC list, optionally asks
    the user to confirm when uncommitted local changes exist, then uploads.
    Per-branch success/failure is recorded on branch.uploaded/branch.error
    and reported to stderr; exits with status 1 if any upload failed.
    """
    have_errors = False
    for branch in todo:
      try:
        # Deep-copy so per-branch auto-CC additions do not leak into the
        # people list used by the other branches.
        people = copy.deepcopy(original_people)
        self._AppendAutoCcList(branch, people)
        # Check if there are local changes that may have been forgotten
        if branch.project.HasChanges():
            key = 'review.%s.autoupload' % branch.project.remote.review
            answer = branch.project.config.GetBoolean(key)
            # if they want to auto upload, let's not ask because it could be automated
            if answer is None:
                sys.stdout.write('Uncommitted changes in ' + branch.project.name + ' (did you forget to amend?). Continue uploading? (y/n) ')
                a = sys.stdin.readline().strip().lower()
                if a not in ('y', 'yes', 't', 'true', 'on'):
                    print >>sys.stderr, "skipping upload"
                    branch.uploaded = False
                    branch.error = 'User aborted'
                    continue
        branch.UploadForReview(people, auto_topic=opt.auto_topic)
        branch.uploaded = True
      except UploadError, e:
        branch.error = e
        branch.uploaded = False
        have_errors = True
    # Summary report: failed branches first (with their errors), then the
    # successful ones.
    print >>sys.stderr, ''
    print >>sys.stderr, '--------------------------------------------'
    if have_errors:
      for branch in todo:
        if not branch.uploaded:
          print >>sys.stderr, '[FAILED] %-15s %-15s  (%s)' % (
                 branch.project.relpath + '/', \
                 branch.name, \
                 branch.error)
      print >>sys.stderr, ''
    for branch in todo:
      if branch.uploaded:
        print >>sys.stderr, '[OK    ] %-15s %s' % (
               branch.project.relpath + '/',
               branch.name)
    if have_errors:
      sys.exit(1)
  def Execute(self, opt, args):
    """Entry point: collect uploadable branches and dispatch the upload.

    Splits --re/--cc option values into (reviewers, cc), gathers every
    project with at least one uploadable branch, then uses the single-branch
    flow when exactly one branch is pending and the interactive
    multi-branch editor otherwise.
    """
    project_list = self.GetProjects(args)
    pending = []
    reviewers = []
    cc = []
    if opt.reviewers:
      reviewers = _SplitEmails(opt.reviewers)
    if opt.cc:
      cc = _SplitEmails(opt.cc)
    people = (reviewers,cc)
    for project in project_list:
      avail = project.GetUploadableBranches()
      if avail:
        pending.append((project, avail))
    if not pending:
      print >>sys.stdout, "no branches ready for upload"
    elif len(pending) == 1 and len(pending[0][1]) == 1:
      self._SingleBranch(opt, pending[0][1][0], people)
    else:
      self._MultipleBranches(opt, pending, people)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Transform that parses serialized tensorflow.Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.ops import parsing_ops
class ExampleParser(transform.TensorFlowTransform):
  """A Transform that parses serialized `tensorflow.Example` protos."""

  def __init__(self, features):
    """Initialize `ExampleParser`.

    The `features` argument must be an object that can be converted to an
    `OrderedDict`. The keys should be strings and will be used to name the
    output. Values should be either `VarLenFeature` or `FixedLenFeature`. If
    `features` is a dict, it will be sorted by key.

    Args:
      features: An object that can be converted to an `OrderedDict` mapping
        column names to feature definitions.
    """
    super(ExampleParser, self).__init__()
    if isinstance(features, dict):
      # A plain dict carries no reliable ordering; sort by column name so
      # the output column order is deterministic.
      items = sorted(features.items(), key=lambda kv: kv[0])
    else:
      items = features
    self._ordered_features = collections.OrderedDict(items)

  @property
  def name(self):
    return "ExampleParser"

  @property
  def input_valency(self):
    return 1

  @property
  def _output_names(self):
    # One output column per feature, in the (sorted) feature-dict order.
    return list(self._ordered_features.keys())

  @transform._parameter  # pylint: disable=protected-access
  def feature_definitions(self):
    return self._ordered_features

  def _apply_transform(self, input_tensors, **kwargs):
    parsed = parsing_ops.parse_example(input_tensors[0],
                                       features=self._ordered_features)
    # pylint: disable=not-callable
    return self.return_type(**parsed)
# coding=utf-8
##Copyright (C) [2005] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import time
from datetime import datetime
import random
import xmlrpclib
from twisted.web import xmlrpc
import types
from basics import basics
import Database
class Order(xmlrpc.XMLRPC, basics):
    # XML-RPC facade for order / invoice / delivery / pickup bookkeeping.
    # NOTE(review): xmlrpc.XMLRPC.__init__ is never invoked here -- only
    # basics.__init__ is called.  Verify that is intentional.
    def __init__(self):
        basics.__init__(self)
        # Shared database gateway used by every xmlrpc_* method below.
        self.oDatabase = Database.Database()
    def xmlrpc_getSupply_GetNumber(self, orderNumber, dicUser):
        # Returns the pair (goods-receipt number, delivery-note number) for an
        # order, each computed by a stored function on the server side.
        iGetNumber = 0
        iSupplyNumber = 0
        sSql = "select gets_number from fct_getGet_number(" + `orderNumber` + ") as gets_number(integer) "
        iGetNumber = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )[0]['gets_number']
        sSql = "select supply_number from fct_getSupply_number(" + `orderNumber` + ") as supply_number(integer) "
        # NOTE(review): both [0] lookups raise if the query returns no row /
        # an error marker -- the caller apparently relies on that not happening.
        iSupplyNumber = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )[0]['supply_number']
        return iGetNumber, iSupplyNumber
    def xmlrpc_getDeliveryNumber(self, orderNumber, dicUser ):
        # Returns the delivery number of an order; when the lookup fails
        # (no row / error marker) a new row is inserted with the next value
        # of the per-client delivery sequence, then the lookup is retried.
        nr = 0
        sc = '_client_' + `dicUser['client']`
        sSql = 'select delivery_number from list_of_deliveries where order_number = ' + `orderNumber`
        sSql = sSql + self.getWhere("",dicUser,2)
        dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        if dicResult in self.liSQL_ERRORS:
            # No delivery row yet -- create one.
            liFields, liValues = self.getNormalSqlData(dicUser)
            sSql1 = 'insert into list_of_deliveries ( id, delivery_number, order_number '
            for sFields in liFields:
                sSql1 += sFields
            sSql1 += " values (nextval(\'list_of_deliveries_id\'),nextval(\'numerical_misc_standard_delivery"+sc +"\'), "
            sSql1 += `orderNumber`
            for sValues in liValues:
                sSql1 += sValues
            self.oDatabase.xmlrpc_executeNormalQuery(sSql1, dicUser )
            dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        if dicResult not in ['NONE','ERROR']:
            nr = dicResult[0]['delivery_number']
        return nr
    def xmlrpc_getInvoiceAddress(self, dicOrder, dicUser):
        # Fetches the order header joined with its billing address and adds
        # the convenience keys 'first_last' / 'last_first' to the first row.
        sSql = "select orderbook.number as order_number, orderbook.designation as order_designation , orderbook.customers_ordernumber as customers_ordernumber, "
        sSql += " to_char(orderbook.orderedat, \'" + dicUser['SQLDateFormat'] + "\') as order_orderedat ,"
        sSql += " to_char(orderbook.deliveredat, \'" + dicUser['SQLDateFormat'] + "\') as order_deliverdat, "
        sSql += " address.address as address , address.firstname as firstname, "
        sSql += " address.lastname as lastname, address.lastname2 as lastname2, "
        sSql += " address.street as street, (address.zip || ' ' || address.city) as city , "
        sSql += " (address.country || '-' || address.zip || ' ' || address.city) as city_country , "
        sSql += "address.zip as zip, address.country as country, address.city as city_alone "
        sSql += " from orderbook, address where orderbook.id = " + `dicOrder['orderid']`
        sSql += " and address.id = orderbook.addressnumber "
        liResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        try:
            if liResult :
                dicResult = liResult[0]
                # Build the combined name fields; with no firstname both
                # variants collapse to just the lastname.
                if dicResult['firstname'] == None:
                    dicResult['first_last'] = dicResult['lastname']
                    dicResult['last_first'] = dicResult['lastname']
                else:
                    dicResult['first_last'] = dicResult['firstname'] + ' ' + dicResult['lastname']
                    dicResult['last_first'] = dicResult['lastname'] + ', ' + dicResult['firstname']
                liResult[0] = dicResult
        except Exception, params:
            # Best effort: a malformed row leaves liResult unaugmented.
            print Exception, params
        return liResult
def xmlrpc_getOrderPositions(self, dicOrder, dicUser):
sSql = 'select * from orderposition where orderid = ' + `dicOrder['orderid']`
dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
return dicResult
    def xmlrpc_getInvoiceNumber(self, orderNumber, dicUser):
        # Read-only lookup of an order's invoice number; returns 0 when the
        # order id is unusable or no invoice row exists.
        nr = 0
        try:
            orderNumber = int(orderNumber)
        except:
            orderNumber = 0
        # 'sc' is unused here; kept for symmetry with the sibling methods.
        sc = '_client_' + `dicUser['client']`
        sSql = 'select invoice_number from list_of_invoices where order_number = ' + `orderNumber`
        sSql += self.getWhere(None, dicUser,2)
        dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        if dicResult not in ['NONE','ERROR']:
            nr = dicResult[0]['invoice_number']
        else:
            nr = 0
        return nr
    def xmlrpc_changeProposal2Order(self, ProposalID, dicUser):
        # Promotes a proposal to a real order via a stored function.
        # NOTE(review): always returns True -- the query result is ignored,
        # so callers cannot detect a failed conversion.
        ok = True
        print "proposalID = ", ProposalID
        #sSql = "update orderbook set process_status = 500 where id = " + `ProposalID`
        sSql = "select * from fct_changeProposal2Order(" + `ProposalID` + " )"
        dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        return ok
def xmlrpc_getProposalNumber(self, orderNumber, dicUser):
nr = 0
try:
orderNumber = int(orderNumber)
except:
orderNumber = 0
sc = '_client_' + `dicUser['client']`
sSql = 'select proposal_number from orderbook where id = ' + `orderNumber`
sSql += self.getWhere(None, dicUser,2)
dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
if dicResult and dicResult not in ['NONE','ERROR']:
nr = dicResult[0]['proposal_number']
else:
sSql = 'select max(proposal_number) from orderbook '
sSql += self.getWhere(None, dicUser,1)
dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
if dicResult and dicResult not in ['NONE','ERROR']:
nr = dicResult[0]['proposal_number']
nr = nr + 1
return nr
    def xmlrpc_getInvoiceDate(self, orderNumber, dicUser):
        # Returns the invoice date of an order formatted with the caller's
        # SQLDateFormat, or ' ' when the order id is unusable / no row exists.
        date = ' '
        try:
            orderNumber = int(orderNumber)
        except:
            orderNumber = 0
        # 'sc' is unused here; kept for symmetry with the sibling methods.
        sc = '_client_' + `dicUser['client']`
        sSql = "select to_char(date_of_invoice, \'" + dicUser['SQLDateFormat'] + "\') as date_of_invoice from list_of_invoices where order_number = " + `orderNumber`
        sSql += self.getWhere(None, dicUser,2)
        dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        if dicResult not in ['NONE','ERROR']:
            date = dicResult[0]['date_of_invoice']
        else:
            date = ' '
        return date
    def xmlrpc_getSupplyDate(self, orderNumber, dicUser):
        # Returns the delivery date of an order, or ' ' when unavailable.
        # FIXME(review): the query reads table 'list_of_delivery', but the
        # insert path in xmlrpc_getDeliveryNumber uses 'list_of_deliveries'
        # -- one of the two names is probably wrong; verify the schema.
        date = ' '
        try:
            orderNumber = int(orderNumber)
        except:
            orderNumber = 0
        # 'sc' is unused here; kept for symmetry with the sibling methods.
        sc = '_client_' + `dicUser['client']`
        sSql = "select to_char(date_of_delivery, \'" + dicUser['SQLDateFormat'] + "\') as date_of_delivery from list_of_delivery where order_number = " + `orderNumber`
        sSql += self.getWhere(None, dicUser,2)
        dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        if dicResult not in ['NONE','ERROR']:
            date = dicResult[0]['date_of_delivery']
        else:
            date = ' '
        return date
    def xmlrpc_getOrderValues(self, orderid, dicUser):
        # Collects the order header plus (optionally) the assigned staff
        # member, the customer's contact partner, the term of payment and the
        # goods-receipt/delivery numbers -- all merged into the first row of
        # the returned result list.
        liResultStaff = None
        liResultPartner = None
        print '############ get order values ####################'
        sSql = "select discount, misc_cost, postage_cost, packing_cost, orderbook.customers_ordernumber as customers_ordernumber, "
        sSql += " orderbook.designation as order_designation , orderbook.number as order_number, staff_id as order_staff_id, "
        sSql += " orderbook.customers_partner_id as order_customers_partner_id , "
        sSql += " to_char(orderbook.orderedat, \'" + dicUser['SQLDateFormat'] + "\') as order_orderedat ,"
        sSql += " to_char(orderbook.deliveredat, \'" + dicUser['SQLDateFormat'] + "\') as order_deliverdat "
        sSql += " from orderbook where id = " + `orderid`
        sSql += self.getWhere(None, dicUser,2)
        liResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        for row in liResult:
            # Normalise a missing discount to 0.0.
            try:
                if row['discount'] == None or row['discount'] in self.liSQL_ERRORS:
                    row['discount'] = 0.0
            except:
                pass
            # Load staff details when a staff member is assigned.
            try:
                if row['order_staff_id'] > 0:
                    sSql = "select *, lastname || ', ' || firstname as last_first, firstname || ' ' || lastname as first_last from staff where id = " + `row['order_staff_id']`
                    liResultStaff = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
            except:
                pass
            # Load partner details when a contact partner is assigned.
            try:
                if row['order_customers_partner_id'] > 0:
                    sSql = "select *, lastname || ', ' || firstname as last_first, firstname || ' ' || lastname as first_last from partner where id = " + `row['order_customers_partner_id']`
                    liResultPartner = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
            except:
                pass
        #print 'liResultStaff = ', liResultStaff
        # Copy staff columns into the first result row, 'staff_'-prefixed.
        if liResultStaff:
            try:
                row = liResultStaff[0]
                print 'row_keys = ' , row.keys()
                for key in row.keys():
                    print 'key = ', key
                    liResult[0]['staff_' + key] = row[key]
            except Exception, params:
                print Exception, params
        # Copy partner columns into the first result row, 'partner_'-prefixed.
        if liResultPartner:
            try:
                row = liResultPartner[0]
                print 'row_keys = ' , row.keys()
                for key in row.keys():
                    print 'key = ', key
                    liResult[0]['partner_' + key] = row[key]
            except Exception, params:
                print Exception, params
        print 'liResult = ', liResult
        top_id = self .getToPID({'orderid':orderid}, dicUser)
        # Superseded by getToPID() above:
        # sSql2 = 'select order_top as top_id from orderinvoice where orderid = ' + `orderid`
        # sSql2 += self.getWhere(None, dicUser,2)
        # liResultTop = self.oDatabase.xmlrpc_executeNormalQuery(sSql2, dicUser )
        # if not liResultTop or liResultTop in ['NONE','ERROR']:
        #     '''No term of payment found, try default from customer '''
        #     sSql2 = 'select addresses_misc.top_id as top_id from addresses_misc, orderbook '
        #     sSql2 += ' where addresses_misc.address_id = orderbook.addressnumber '
        #     sSql2 += self.getWhere(None, dicUser,2,'orderbook.')
        #     liResultTop = self.oDatabase.xmlrpc_executeNormalQuery(sSql2, dicUser )
        # if liResultTop and liResultTop not in ['NONE','ERROR']:
        #     top_id = liResultTop[0]['top_id']
        #
        if liResult not in self.liSQL_ERRORS:
            # Resolve the term-of-payment text and the receipt numbers.
            sSql3 = ' select term_of_payment from terms_of_payment where id = ' + `top_id`
            liResultTop2 = self.oDatabase.xmlrpc_executeNormalQuery(sSql3, dicUser )
            if liResultTop2 and liResultTop2 not in self.liSQL_ERRORS:
                liResult[0]['term_of_payment'] = liResultTop2[0]['term_of_payment']
            else :
                liResult[0]['term_of_payment'] = ''
            liResult[0]['gets_number'], liResult[0]['supply_number'] = self.xmlrpc_getSupply_GetNumber(orderid, dicUser)
        return liResult
    def xmlrpc_setInvoiceNumber(self, orderNumber, dicUser):
        # Creates (or refreshes) the invoice row of an order and returns its
        # invoice number: inserts a new row with the next per-client invoice
        # sequence value when none exists, otherwise updates total_amount.
        nr = 0
        sc = '_client_' + `dicUser['client']`
        sSql = 'select invoice_number from list_of_invoices where order_number = ' + `orderNumber`
        sSql += self.getWhere(None, dicUser,2)
        dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        print 'InvoiceNumber dicResult = ', dicResult
        if dicResult in ['NONE','ERROR'] or dicResult[0]['invoice_number'] == 0:
            # No usable invoice row yet -- insert one dated 'today'.
            liFields, liValues = self.getNormalSqlData(dicUser)
            sSql1 = 'insert into list_of_invoices ( id, invoice_number, order_number, date_of_invoice, total_amount'
            for sFields in liFields:
                sSql1 += sFields
            sSql1 += " values (nextval('list_of_invoices_id'),nextval('numerical_misc_standard_invoice" + sc + "'), "
            sSql1 += `orderNumber` + ",'today', " + `self.getTotalSum(orderNumber, dicUser)`
            for sValues in liValues:
                sSql1 += sValues
            print sSql1
            self.oDatabase.xmlrpc_executeNormalQuery(sSql1, dicUser )
            dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql1, dicUser ) if False else self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        else:
            # Invoice exists -- refresh its total and bookkeeping columns.
            liFields, liValues = self.getNormalSqlData(dicUser, False, False)
            sSql1 = "update list_of_invoices set total_amount = " + `self.getTotalSum(orderNumber, dicUser)`
            mFields = len(liFields)
            for counter in range(mFields):
                sSql1 += ", " + liFields[counter] + " = " + liValues[counter]
            sSql1 += " where order_number = " + `orderNumber`
            sSql1 += self.getWhere(None,dicUser,2)
            print sSql1
            self.oDatabase.xmlrpc_executeNormalQuery(sSql1, dicUser )
        if dicResult not in ['NONE','ERROR']:
            nr = dicResult[0]['invoice_number']
        else:
            nr = 0
        return nr
    def xmlrpc_getPickupAddress(self, dicOrder, dicUser):
        # Fetches order header + address fields for a pickup slip, looked up
        # by the order *number* string (not the id).
        # NOTE(review): dicOrder['orderNumber'] is spliced into the SQL
        # unescaped -- injection risk if it can come from untrusted input.
        sSql = "select orderbook.number as order_number, orderbook.designation as order_designation , "
        sSql = sSql + " to_char(orderbook.orderedat, \'" + dicUser['SQLDateFormat'] + "\') as o_orderedat ,"
        sSql = sSql + " to_char(orderbook.deliveredat, \'" + dicUser['SQLDateFormat'] + "\') as order_deliverdat, "
        sSql = sSql + " address.lastname as lastname, address.lastname2 as lastname2, "
        sSql = sSql + " address.street as street, (address.zip || ' ' || address.city) as city "
        sSql = sSql + " from orderbook, address where orderbook.number = \'" + dicOrder['orderNumber'] +"\' "
        sSql = sSql + "and address.id = orderbook.addressnumber"
        return self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
def xmlrpc_getPickupData(self, dicOrder, dicUser):
sSql = "select orderbook.number as order_number, orderbook.designation as order_designation "
sSql = sSql + " from orderbook, orderget where orderbook.number = \'" + dicOrder['orderNumber'] +"\' "
sSql = sSql + "and orderget.ordernumber = orderbook.id "
sSql = sSql + " order by orderbook.number "
return self.executeNormalQuery(sSql, dicUser )
def xmlrpc_getPickupNumber(self, orderNumber, dicUser):
nr = 0
sSql = 'select pickup_number from list_of_pickups where order_number = ' + `orderNumber`
dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
if dicResult not in ['NONE','ERROR']:
liFields, liValues = self.getNormalSqlData(dicUser)
sSql1 = 'insert into list_of_pickups ( id, pickup_number, order_number '
for sFields in liFields:
sSql1 += sFields
sSql1 += ' values (nextval(\'list_of_pickups_id\'),nextval(\'numerical_misc_standard_pickup\'), '
sSql1 += `orderNumber`
for sValues in liValues:
sSql1 += sValues
self.oDatabase.xmlrpc_executeNormalQuery(sSql1, dicUser )
dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
if dicResult not in ['NONE','ERROR']:
nr = dicResult[0]['pickup_number']
return nr
    def xmlrpc_getStandardInvoice(self, dicOrder , dicUser):
        # Builds the position rows of a standard invoice: order header,
        # per-position article data, net/gross end prices, and the resolved
        # VAT (MWST_*) fields.  The VAT id is picked by precedence:
        # order-wide override > per-position override > per-article id >
        # article's material group.
        result2=[]
        try:
            print dicOrder
            sSql = "select orderbook.number as order_number, orderbook.designation as order_designation , "
            sSql += " to_char(orderbook.orderedat, \'" + dicUser['SQLDateFormat'] + "\') as order_orderedat ,"
            sSql += " to_char(orderbook.deliveredat, \'" + dicUser['SQLDateFormat'] + "\') as order_deliverdat, "
            sSql += " orderbook.discount as total_discount, "
            sSql += "(select tax_vat_for_all_positions from orderinvoice where orderinvoice.orderid = " + `dicOrder['orderid']`
            sSql += " ) as tax_vat_for_all_positions, "
            sSql += " orderposition.tax_vat as order_tax_vat_order_position_id, "
            sSql += " (select tax_vat.vat_value from tax_vat,material_group,articles where "
            sSql += " articles.material_group = material_group.id and material_group.tax_vat = tax_vat.id and articles.id = orderposition.articleid) as tax_vat, "
            sSql += " (select tax_vat.vat_value from tax_vat,material_group,articles where "
            sSql += " articles.material_group = material_group.id and material_group.tax_vat = tax_vat.id and articles.id = orderposition.articleid) as tax_vat_material_group, "
            sSql += " (select material_group.tax_vat from material_group,articles where "
            sSql += " articles.material_group = material_group.id and articles.id = orderposition.articleid) as tax_vat_material_group_id, "
            sSql += "(select material_group.price_type_net from material_group, articles where articles.material_group = material_group.id and articles.id = orderposition.articleid) as material_group_price_type_net, "
            sSql += " articles.number as article_id, articles.designation as article_designation, articles.tax_vat_id as tax_vat_article_id, articles.articles_notes as articles_notes, "
            sSql += "articles.wrapping as article_wrapping, articles.quantumperwrap as article_quantumperwrap, articles.unit as article_unit, "
            sSql += " orderposition.designation as designation, orderposition.amount as amount, "
            sSql += " orderposition.position as position, orderposition.price as price, "
            sSql += " orderposition.discount as discount, "
            sSql += " case ( select material_group.price_type_net from material_group, articles where articles.material_group = material_group.id and articles.id = orderposition.articleid) when true then price when false then price / (100 + (select tax_vat.vat_value from tax_vat,material_group,articles where articles.material_group = material_group.id and material_group.tax_vat = tax_vat.id and articles.id = orderposition.articleid)) * 100 when NULL then 0.00 end as end_price_netto, case ( select material_group.price_type_net from material_group, articles where articles.material_group = material_group.id and articles.id = orderposition.articleid) when true then price /100 * (100 + (select tax_vat.vat_value from tax_vat,material_group,articles where articles.material_group = material_group.id and material_group.tax_vat = tax_vat.id and articles.id = orderposition.articleid)) when false then price when NULL then 0.00 end as end_price_gross , "
            sSql += " case articles.associated_with when 1 then (select botany.description from botany, articles where botany.article_id = articles.id and articles.id = orderposition.articleid and orderbook.id = " + `dicOrder['orderid']` + ") when 0 then articles.designation end as pos_designation "
            sSql += " from orderposition, articles, orderbook where orderbook.id = " + `dicOrder['orderid']`
            sSql += " and orderposition.orderid = orderbook.id and articles.id = orderposition.articleid "
            sSql += " order by orderposition.position "
            dicUser['noWhereClient'] = 'Yes'
            result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
            result2 = []
            for oneResult in result:
                try:
                    print 'oneResult = ', oneResult
                    oneResult['MWST_ID'] = 0
                    oneResult['MWST_VALUE'] = 0
                    oneResult['MWST_NAME'] = ''
                    if oneResult not in self.liSQL_ERRORS :
                        # Precedence 1: order-wide VAT override.
                        if oneResult['tax_vat_for_all_positions'] not in self.liSQL_ERRORS:
                            if oneResult['tax_vat_for_all_positions'] > 0:
                                oneResult['MWST_ID'] = oneResult['tax_vat_for_all_positions']
                                self.writeLog( 'TAXVATNEW1 '+ `oneResult['MWST_ID']`)
                        # Precedence 2: per-position VAT override.
                        if oneResult['MWST_ID'] == 0:
                            if oneResult['order_tax_vat_order_position_id'] not in self.liSQL_ERRORS:
                                if oneResult['order_tax_vat_order_position_id'] > 0:
                                    oneResult['MWST_ID'] = oneResult['order_tax_vat_order_position_id']
                                    self.writeLog( 'TAXVATNEW2 '+ `oneResult['MWST_ID']`)
                            # Precedence 3: article-level VAT id.
                            # NOTE(review): this check is NOT guarded by
                            # MWST_ID == 0, so it overrides precedence 2 --
                            # verify whether that is intended.
                            if oneResult['tax_vat_article_id'] not in self.liSQL_ERRORS:
                                if oneResult['tax_vat_article_id'] > 0:
                                    oneResult['MWST_ID'] = oneResult['tax_vat_article_id']
                                    self.writeLog( 'TAXVATNEW3 '+ `oneResult['MWST_ID']`)
                            # Precedence 4: material group's VAT id.
                            if oneResult['MWST_ID'] == 0:
                                if oneResult['tax_vat_material_group_id'] not in self.liSQL_ERRORS:
                                    if oneResult['tax_vat_material_group_id'] > 0:
                                        oneResult['MWST_ID'] = oneResult['tax_vat_material_group_id']
                                        self.writeLog( 'TAXVATNEW4 '+ `oneResult['MWST_ID']`)
                    # Resolve the chosen VAT id to value/name/designation.
                    if oneResult['MWST_ID'] > 0:
                        sSql = "select vat_value, vat_name, vat_designation from tax_vat where tax_vat.id = " + `oneResult['MWST_ID']`
                        mwstResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
                        try:
                            oneResult['MWST_VALUE'] = mwstResult[0]['vat_value']
                            oneResult['MWST_NAME'] = mwstResult[0]['vat_name']
                            oneResult['MWST_DESIGNATION'] = mwstResult[0]['vat_designation']
                        except:
                            pass
                        self.writeLog( 'TAXVATNEWValue '+ `oneResult['MWST_VALUE']`)
                    result2.append(oneResult)
                except:
                    # A broken row is replaced by an empty stub (and dropped,
                    # since append is skipped by the exception).
                    oneResult = {}
                    #print 'oneResult = ', oneResult
                    oneResult['MWST_ID'] = 0
                    oneResult['MWST_VALUE'] = 0
                    oneResult['MWST_NAME'] = ''
            self.writeLog( 'TAXVATRESULT ' + `result2`)
            # FIXME(review): 'result[2]' indexes a single row (probably meant
            # result2), and 'liSQL_ERRORS' is unqualified (NameError; probably
            # meant self.liSQL_ERRORS) -- as written this always raises and is
            # swallowed by the except below, so articles_notes is never
            # normalised.
            for i in result[2]:
                if i['articles_notes'] in liSQL_ERRORS:
                    i['articles_notes'] = ''
        except Exception, params:
            self.writeLog('Error at getStandardInvoice = ' + `Exception` + ', ' + `params` )
        return result2
    def xmlrpc_checkExistModulOrder(self, dicUser, dicOrder):
        # Returns the orderbook rows already imported for this external
        # module order (module id + module-side order number).
        # NOTE(review): identical to xmlrpc_checkExistModulProposal below --
        # candidates for consolidation.
        print 'check Exist Modul Order '
        sSql = 'select * from orderbook where modul_order_number = ' + `dicOrder['ModulOrderNumber']` + ' and modul_number = ' + `dicOrder['ModulNumber']`
        sSql += self.getWhere(None,dicUser,2)
        dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        self.writeLog( 'Order99 = ' + `dicResult`)
        return dicResult
    def xmlrpc_checkExistModulProposal(self, dicUser, dicOrder):
        # Same lookup as xmlrpc_checkExistModulOrder, kept as a separate
        # entry point for proposal imports.
        print 'check Exist Modul Proposal '
        sSql = 'select * from orderbook where modul_order_number = ' + `dicOrder['ModulOrderNumber']` + ' and modul_number = ' + `dicOrder['ModulNumber']`
        sSql += self.getWhere(None,dicUser,2)
        dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
        self.writeLog( 'Order99 = ' + `dicResult`)
        return dicResult
    def xmlrpc_createNewOrder(self,dicUser,dicOrder):
        # Creates a new orderbook record (plus its positions) from a dict of
        # typed values and returns the new record id.  After saving, default
        # number/designation templates from clients.ini are applied.
        print 'create new Order'
        print dicOrder
        dicValues = {}
        if dicOrder.has_key('ModulOrderNumber'):
            dicValues['modul_order_number'] = [dicOrder['ModulOrderNumber'],'int']
        if dicOrder.has_key('ModulNumber'):
            dicValues['modul_number'] = [dicOrder['ModulNumber'],'int']
        if dicOrder.has_key( 'Number'):
            dicValues['number'] = [dicOrder['Number'],'string']
        dicValues['addressnumber'] = [dicOrder['addressnumber'],'int']
        print 'Locales:', dicUser['Locales']
        print 'Dateformatstring', dicUser['DateformatString']
        print dicOrder
        if dicOrder.has_key('orderedat'):
            try:
                dO = time.strptime(dicOrder['orderedat'], dicUser['DateformatString'])
                # FIXME(review): builds 'YYYY/MM/DD' (tm_year/tm_mon/tm_mday)
                # while the default branch below produces '%m/%d/%Y' -- the
                # two formats disagree; confirm what the DB layer expects.
                dicValues['orderedat'] = [`dO[0]`+'/'+ `dO[1]` + '/'+ `dO[2]`,'date']
                print 'Orderedat = ', dicValues['orderedat']
            except Exception, params:
                print Exception, params
        else:
            dicValues['orderedat'] = [time.strftime('%m/%d/%Y', time.localtime()),'date']
        if dicOrder.has_key('deliveredat'):
            try:
                dD = time.strptime(dicOrder['deliveredat'], dicUser['DateformatString'])
                dicValues['deliveredat'] = [`dD[0]`+'/'+ `dD[1]` + '/'+ `dD[2]`,'date']
                self.writeLog('Deliveredat = ' + `dicValues['deliveredat']`)
            except Exception, params:
                print Exception, params
        self.writeLog(dicValues)
        if dicOrder.has_key('process_status'):
            dicValues['process_status'] = [dicOrder['process_status'], 'int']
        # -1 record id = insert; returns the new orderbook id.
        newID = self.oDatabase.xmlrpc_saveRecord('orderbook', -1, dicValues, dicUser, 'NO')
        if dicOrder.has_key('Positions') and newID > 0:
            for position in dicOrder['Positions']:
                position['orderid'] = [newID,'int']
                print '-----------------------------------------------'
                print 'Position = ', position
                print ':::::::::::::::::::::::::::::::::::::::::::::::'
                dicResult2 = self.oDatabase.xmlrpc_saveRecord('orderposition', -1, position, dicUser, 'NO')
        try:
            # Apply the client's default order number/designation templates.
            if newID > 0:
                dicValues, sSave = self.checkDefaultOrder(dicUser, newID)
                if sSave:
                    dR4 = self.oDatabase.xmlrpc_saveRecord('orderbook', newID, dicValues, dicUser, 'NO')
        except:
            pass
        return newID
    def checkDefaultOrder(self, dicUser, id) :
        # Expands the per-client 'orderbook_number' / 'orderbook_designation'
        # templates from clients.ini for a freshly created order.  Template
        # tokens: !id, !year, !month, !day, !seq (order number) and
        # !<address-column> (designation); anything else is copied literally.
        # Returns (dicValues, sSave) -- sSave is True when a template existed.
        print 101
        sSave = False
        print 102
        dicValues = {}
        sc = '_client_' + `dicUser['client']`
        try:
            cpServer, f = self.getParser(self.CUON_FS + '/clients.ini')
            defaultOrderNumber = self.getConfigOption('CLIENT_' + `dicUser['client']`,'orderbook_number', cpServer)
            defaultOrderDesignation = self.getConfigOption('CLIENT_' + `dicUser['client']`,'orderbook_designation', cpServer)
            print defaultOrderDesignation, defaultOrderNumber
            print 0
            t1 = time.localtime()
            print 1
            if defaultOrderNumber:
                sSave = True
                liValues = defaultOrderNumber.split(',')
                sON = ''
                for i in liValues:
                    print 2, i
                    if i == '!id':
                        sON += self.convertTo(id, 'String')
                    elif i=='!year':
                        sON += `t1.tm_year`
                    elif i=='!month':
                        sON += `t1.tm_mon`
                    elif i=='!day':
                        sON += `t1.tm_mday`
                    elif i=='!seq':
                        # Per-client order-number sequence.
                        sSql = "select nextval('numerical_orderbook_ordernumber" +sc +"' )"
                        print sSql
                        newSeq = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)[0]['nextval']
                        sON += `newSeq`
                    else:
                        sON += i
                print 'sON', sON
                dicValues['number'] = [sON, 'string']
            if defaultOrderDesignation:
                print 3
                sSave = True
                liValues = defaultOrderDesignation.split(',')
                sOD = ''
                # The designation template pulls fields from the order's
                # billing address record.
                sSql = ' select * from address where id = ( select addressnumber from orderbook where id = ' + self.convertTo(id, 'String') + ')'
                #print sSql
                dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
                #print dicResult
                for i in liValues:
                    #print 4, i
                    #print '4-1',i[1:]
                    if i[0] == '!':
                        try:
                            if isinstance(dicResult[0][i[1:]], types.StringType):
                                sOD += dicResult[0][i[1:]].decode('utf-8')
                            else:
                                sOD += `dicResult[0][i[1:]]`
                        except Exception, params:
                            print Exception,params
                    else:
                        sOD += i
                #print 'sOD', sOD
                dicValues['designation'] = [sOD, 'string']
            #print 5
            #print dicValues, sSave
        except Exception, params:
            print Exception, params
        return dicValues, sSave
def getTotalSum(self,OrderID, dicUser):
total_sum = 0
# sSql = 'select sum(amount * price) as total_sum from orderposition where orderid = '
# sSql += `OrderID`
# sSql += self.getWhere(None,dicUser,2)
# dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
# if dicResult and dicResult not in ['NONE','ERROR']:
# total_sum = dicResult[0]['total_sum']
sSql = "select total_sum from fct_getOrderTotalSum(" + `OrderID` +") as total_sum(float) "
dicResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser )
#print 'Total-sum = ', dicResult
if dicResult and dicResult not in ['NONE','ERROR']:
total_sum = dicResult[0]['total_sum']
#print 'Total-sum 2= ', total_sum
return total_sum
    def xmlrpc_getTotalSumString(self, OrderID, dicUser):
        # Formats the order's total amount with CURRENCY_ROUND decimals and
        # the configured currency sign; returns '0' if formatting fails.
        retValue = '0'
        total_sum = self.getTotalSum(OrderID,dicUser)
        try:
            #"%.2f"%y
            total_sum = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_sum,self.CURRENCY_ROUND)
            retValue = total_sum + ' ' + self.CURRENCY_SIGN
        except:
            # Swallows any formatting error (e.g. total_sum is None).
            pass
        return retValue
    def xmlrpc_getPaidAt(self,OrderID, dicUser):
        # Returns the in-payment date of an order formatted with the caller's
        # SQLDateFormat, or ' ' when no payment row exists.
        paidAt = ' '
        sSql = "select to_char(date_of_paid, \'" + dicUser['SQLDateFormat'] + "\') as paid_at from in_payment where order_id = " + `OrderID`
        sSql += self.getWhere(None,dicUser,2)
        liResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
        if liResult and liResult not in ['NONE','ERROR']:
            try:
                paidAt = liResult[0]['paid_at']
            except:
                pass
        return paidAt
    def xmlrpc_getNextPosition(self, orderid, dicUser):
        # Returns max(position)+1 for the order's positions (1 for an empty
        # order).  NOTE(review): unlike the sibling queries this one adds no
        # getWhere() client filter -- verify whether that matters here.
        pos = 0
        sSql = " select max(position) as max_position from orderposition where orderid = " + `orderid`
        liResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
        if liResult and liResult not in ['NONE','ERROR']:
            try:
                pos = liResult[0]['max_position']
                pos = int(pos)
            except:
                # max() is NULL for an order without positions.
                pos = 0
        pos += 1
        return pos
def xmlrpc_getUserInfoOrder(self, dicOrder, dicUser):
return None
    def xmlrpc_getUserInfoInvoice(self, dicOrder, dicUser):
        # Returns the staff record of the user who created the given invoice
        # (join over list_of_invoices.user_id == staff.cuon_username).
        sSql = 'select * from staff, list_of_invoices as lii where cuon_username = lii.user_id and lii.invoice_number = ' + `dicOrder['invoiceNumber'] `
        sSql += self.getWhere(None,dicUser,2,'list_of_invoices.')
        return self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
    def getListOfInvoices( self, dicOrder, dicUser ):
        # Lists all invoices (joined with order + address data) whose invoice
        # date lies in [dBegin, dEnd]; the bounds arrive as Unix timestamps.
        dBegin = datetime.fromtimestamp(dicOrder['dBegin'])
        dEnd = datetime.fromtimestamp(dicOrder['dEnd'])
        print dBegin, dEnd
        sSql = ' select list_of_invoices.order_number as order_number, list_of_invoices.invoice_number as invoice_number, '
        sSql += ' list_of_invoices.date_of_invoice as date_of_invoice, list_of_invoices.total_amount as total_amount, '
        sSql += ' list_of_invoices.maturity as maturity, '
        sSql += 'address.lastname as lastname, address.city as city, address.id as addressid , address.zip as zip '
        sSql += ' from list_of_invoices, orderbook,address where orderbook.id = list_of_invoices.order_number and address.id = orderbook.addressnumber '
        sSql += " and list_of_invoices.date_of_invoice between '" + dBegin.strftime('%Y-%m-%d') + "' and '" + dEnd.strftime('%Y-%m-%d') +"' "
        sSql += self.getWhere(None,dicUser,2,'list_of_invoices.')
        sSql += ' order by list_of_invoices.invoice_number '
        return self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
    def getListOfInpayment( self, dicOrder, dicUser ):
        # Lists all incoming payments (joined with order/address/invoice
        # data) whose payment date lies in [dBegin, dEnd]; refreshes the
        # maturity bookkeeping first.
        self.checkMaturityDay(dicUser)
        dBegin = datetime.fromtimestamp(dicOrder['dBegin'])
        dEnd = datetime.fromtimestamp(dicOrder['dEnd'])
        print dBegin, dEnd
        sSql = ' select in_payment.invoice_number as invoice_number, in_payment.inpayment as inpayment, '
        sSql += 'in_payment.date_of_paid as date_of_paid, in_payment.order_id as order_id, '
        sSql += "list_of_invoices.date_of_invoice, "
        sSql += 'address.lastname as lastname, address.city as city, address.id as addressid '
        sSql += ' from in_payment, orderbook, address, list_of_invoices where orderbook.id = in_payment.order_id and address.id = orderbook.addressnumber '
        sSql += " and in_payment.date_of_paid between '" + dBegin.strftime('%Y-%m-%d') + "' and '" + dEnd.strftime('%Y-%m-%d') +"' "
        # in_payment stores the invoice number as text -- convert for join.
        sSql += " and list_of_invoices.invoice_number = to_number(in_payment.invoice_number,'999999999') "
        sSql += self.getWhere(None,dicUser,2,'in_payment.')
        sSql += ' order by in_payment.date_of_paid '
        return self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
    def xmlrpc_getOrderForAddress(self, address_id, dicUser, iBegin = 500, iEnd = 799):
        """
        List orders belonging to one address (customer), newest first.

        @param address_id: address (customer) primary key
        @param iBegin, iEnd: inclusive process_status window (defaults 500-799)
        @return: rows of (id, number, designation, orderedat) or 'NONE'/'ERROR'
        """
        sSql = ' select id, number,designation, orderedat from orderbook '
        # Backtick repr is Python-2-only syntax (equivalent to repr()).
        sSql += " where addressnumber = " + `address_id` + " "
        sSql += " and process_status between " + `iBegin` + " and " + `iEnd` + " "
        sSql += self.getWhere(None,dicUser,2)
        sSql += " order by id desc "
        return self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
    def xmlrpc_getOrderForProject(self, project_id, dicUser):
        """
        List orders attached to one project, newest first.

        @param project_id: project primary key
        @return: rows of (id, number, designation, orderedat) or 'NONE'/'ERROR'
        """
        sSql = ' select id, number,designation, orderedat from orderbook '
        sSql += " where project_id= " + `project_id` + " "
        sSql += self.getWhere(None,dicUser,2)
        sSql += " order by id desc "
        return self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
    def xmlrpc_getInvoicesForAddress(self, address_id, dicUser):
        """
        List all invoices for one address, joined via the order book,
        newest invoice row first.

        @param address_id: address (customer) primary key
        @return: rows of (id, designation, number, date) or 'NONE'/'ERROR'
        """
        sSql = ' select li.id as id , orderbook.designation, li.invoice_number as number,li.date_of_invoice as date from orderbook, list_of_invoices as li '
        sSql += " where orderbook.addressnumber = " + `address_id` + " "
        sSql += ' and li.order_number = orderbook.id '
        sSql += self.getWhere(None,dicUser,2,'li.')
        sSql += " order by li.id desc "
        return self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
    def getToPID(self, dicOrder, dicUser):
        """
        Resolve the terms-of-payment (ToP) id for an order.

        Lookup order: 1) stored-procedure fct_getTopIDForOrder for the given
        order; 2) fall back to the per-client default configured in
        clients.ini (modul_order_default_top). Returns 0 when nothing is
        configured or any lookup fails.

        @param dicOrder: dict with 'orderid'
        @return: int ToP id, 0 meaning "none found"
        """
        topID = 0
        sSql = "select * from fct_getTopIDForOrder(" + `dicOrder['orderid']` + " ) as topid "
        #print 'Before ', sSql
        #print dicUser['Name']
        result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
        print result
        if result not in ['NONE','ERROR']:
            try:
                topID = int(result[0]['topid'])
            except:
                # Missing/non-numeric topid → fall through to the INI default.
                topID = 0
        #print 'topID = ', topID
        if not topID or topID == 0 :
            #print 'read from INI'
            try:
                cpServer, f = self.getParser(self.CUON_FS + '/clients.ini')
                #print cpServer
                #print cpServer.sections()
                topID = self.getConfigOption('CLIENT_' + `dicUser['client']`,'modul_order_default_top', cpServer)
                #print 'topID from ini = ', topID
                topID = int(topID.strip())
                #print 'topID_zahl'
            except Exception,params:
                #print Exception,params
                topID = 0
        return topID
    def xmlrpc_getToP(self, dicOrder, dicUser):
        """
        Fetch the full terms_of_payment row for an order's ToP id.

        @param dicOrder: dict with 'orderid'
        @return: query result rows, or the string 'NONE' when no ToP id
                 could be resolved
        """
        topID = self.getToPID(dicOrder, dicUser)
        if topID > 0:
            sSql = "select * from terms_of_payment where id = " + `topID`
            result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
        else:
            result = 'NONE'
        print 'result by getTop: ', result
        return result
    def xmlrpc_getAllOrderWithoutInvoice(self, dicUser):
        """
        Collect ids of orders that are ready for invoicing (process_status
        500, ready_for_invoice flag set) but have no invoice yet.

        @return: list of order ids, or the string 'NONE' when empty
        """
        liOrder = []
        sSql = " select id from orderbook where process_status = 500 and ready_for_invoice = true "
        sSql += self.getWhere(None,dicUser,2)
        sSql += ' order by id '
        result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
        if result and result not in ['NONE','ERROR']:
            for row in result:
                order_id = row['id']
                # Does this order already have an invoice number?
                sSql = " select max(invoice_number) as max_invoice_number from list_of_invoices where order_number = " + `order_id`
                sSql += self.getWhere(None,dicUser,2)
                result2 = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
                #print 'result2 act.1', result2
                if result2 and result2 not in ['NONE','ERROR'] and result2[0]['max_invoice_number'] not in ['NONE','ERROR'] :
                    # max < 1 means "no real invoice yet" (also covers NULL
                    # under Python 2's None < 1 ordering).
                    if result2[0]['max_invoice_number'] < 1 :
                        liOrder.append(order_id)
                        #print 'append1 = ', order_id
                else:
                    # Query failed or returned the sentinel → treat as
                    # "no invoice" and include the order.
                    liOrder.append(order_id)
                    #print 'append2 = ', order_id
        if not liOrder:
            liOrder = 'NONE'
        #print liOrder
        self.writeLog('liOrder all invoices')
        self.writeLog(liOrder)
        return liOrder
    def checkMaturityDay(self, dicUser):
        """
        Backfill the maturity (due date) column for invoices that lack one:
        maturity = date_of_invoice + payment-term days from the order's ToP.
        Invoices whose ToP lookup yields no 'days' fall back to 0 days.
        Side effect: UPDATEs list_of_invoices rows; returns nothing.
        """
        sSql = 'select id, order_number from list_of_invoices where maturity is null '
        sSql += self.getWhere('',dicUser,2)
        result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
        if result and result not in ['NONE','ERROR']:
            for row in result:
                dicOrder = {}
                dicOrder['orderid'] = row['order_number']
                topID = self.getToPID(dicOrder, dicUser)
                sSql = 'select days from terms_of_payment where id = ' + `topID`
                result2 = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
                days = 0
                if result2 and result2 not in ['NONE','ERROR']:
                    days = result2[0]['days']
                # date + integer adds days in PostgreSQL.
                sSql = ' update list_of_invoices set maturity = date_of_invoice + ' + `days`
                sSql += ' where id = ' + `row['id']`
                result3 = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
# def getResidue(self, dicUser):
# self.checkMaturityDay(dicUser)
# sNameOfView = "v_" + dicUser['Name'] + "_" + `dicUser['client']`+ "_residue"
#
# sDeleteResidue = "drop view " + sNameOfView
# result = self.oDatabase.xmlrpc_executeNormalQuery(sDeleteResidue,dicUser)
# sResidue = "create view " + sNameOfView + " as "
# sResidue += " select list_of_invoices.total_amount - (case when (select sum(in_payment.inpayment) from in_payment where to_number(in_payment.invoice_number,'999999999') = list_of_invoices.invoice_number and status != 'delete' and client = " + `dicUser['client']` + ") != 0 then (select sum(in_payment.inpayment) + sum(in_payment.cash_discount) from in_payment where to_number(in_payment.invoice_number,'999999999') = list_of_invoices.invoice_number and status != 'delete' and client = " + `dicUser['client']` + ") else 0 end) as residue, list_of_invoices.total_amount as total_amount, "
# sResidue += " list_of_invoices.maturity as maturity, list_of_invoices.order_number as order_number, list_of_invoices.id as id , list_of_invoices.invoice_number as invoice_number, list_of_invoices.date_of_invoice as date_of_invoice from list_of_invoices "
# sResidue += self.getWhere('',dicUser,'1','list_of_invoices.')
#
# result = self.oDatabase.xmlrpc_executeNormalQuery(sResidue,dicUser)
# print "result at create residue view", result
# sSql = 'select distinct '
# sSql += 'v_residue.total_amount as total_amount, '
# sSql += 'address.lastname as lastname, address.city as city, '
# sSql += 'orderbook.id as order_id, v_residue.maturity as maturity, '
# sSql += " v_residue.residue as residue, "
# sSql += ' v_residue.order_number as order_number, v_residue.id, v_residue.invoice_number as invoice_number, v_residue.date_of_invoice as date_of_invoice '
# sSql += " from list_of_invoices , orderbook, address, " + sNameOfView + " as v_residue "
# sSql += " where v_residue.residue -(case when orderbook.discount is not Null then orderbook.discount else 0.00 end) > 0.01 and v_residue.order_number = orderbook.id"
# sSql += " and orderbook.id = list_of_invoices.order_number and address.id = orderbook.addressnumber"
# sSql += " order by lastname, v_residue.date_of_invoice "
# result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
# print "result at list residue from view", result
# return result
    def getResidue(self, dicUser):
        """
        Return open invoice amounts (residue) per invoice via the server-side
        function fct_getResidue(); replaces the older view-based variant kept
        commented out above.
        """
        sSql = "select total_amount,lastname, city, order_id, maturity, residue , order_number, invoice_number,date_of_invoice, this_date from fct_getResidue() as (total_amount float, lastname varchar(150), city varchar(150), order_id integer, maturity date,residue float, order_number integer, invoice_number integer, date_of_invoice date, this_date date) "
        result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
        return result
# def getReminder(self, dicUser):
# self.checkMaturityDay(dicUser)
# iReminder = 10
# sResidue = "list_of_invoices.total_amount - (case when (select sum(in_payment.inpayment) from in_payment where to_number(in_payment.invoice_number,'999999999') = list_of_invoices.invoice_number and status != 'delete' and client = " + `dicUser['client']` + ") != 0 then (select sum(in_payment.inpayment) + sum(in_payment.cash_discount) from in_payment where to_number(in_payment.invoice_number,'999999999') = list_of_invoices.invoice_number and status != 'delete' and client = " + `dicUser['client']` + ") else 0 end) "
#
#
# sSql = 'select distinct '
# sSql += 'list_of_invoices.total_amount as total_amount, '
# sSql += 'address.lastname as lastname, address.city as city, '
# sSql += "orderbook.id as order_id, to_char(list_of_invoices.maturity, \'" + dicUser['SQLDateFormat'] + "\') as maturity, "
# sSql += sResidue + " as residue, "
# sSql += " current_date - list_of_invoices.maturity as remind_days, "
# sSql += " list_of_invoices.order_number as order_number, list_of_invoices.id, list_of_invoices.invoice_number as invoice_number, to_char(list_of_invoices.date_of_invoice, \'" + dicUser['SQLDateFormat'] + "\') as date_of_invoice "
# sSql += " from list_of_invoices ,in_payment, orderbook, address "
# sSql += self.getWhere('',dicUser,'1','list_of_invoices.')
# sSql += "and " + sResidue + " > 0.01"
# sSql += " and (current_date - list_of_invoices.maturity > " + `iReminder` + ") "
# sSql += " and orderbook.id = list_of_invoices.order_number and address.id = orderbook.addressnumber"
#
# result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
# return result
    def getReminder(self, dicUser):
        """
        Return invoices overdue by more than iReminder days, via the
        server-side function fct_getReminder(); remind_days is computed as
        this_date - maturity.
        """
        # Hard-coded dunning threshold in days.
        iReminder = 10
        sSql = "select total_amount,lastname, city, order_id, maturity, residue , order_number, invoice_number,date_of_invoice , this_date , this_date - maturity as remind_days from fct_getReminder(" + `iReminder` + ") as (total_amount float, lastname varchar(150), city varchar(150), order_id integer, maturity date,residue float, order_number integer, invoice_number integer, date_of_invoice date, this_date date ) "
        result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
        return result
def getListOfInvoicesByTop(self, dicExtraData, dicUser ):
self.checkMaturityDay(dicUser)
#print dicExtraData
iReminder = 10
sResidue = "list_of_invoices.total_amount - (case when (select sum(in_payment.inpayment) from in_payment where to_number(in_payment.invoice_number,'999999999') = list_of_invoices.invoice_number and status != 'delete' and client = " + `dicUser['client']` + ") != 0 then (select sum(in_payment.inpayment) from in_payment where to_number(in_payment.invoice_number,'999999999') = list_of_invoices.invoice_number and status != 'delete' and client = " + `dicUser['client']` + ") else 0 end) "
sSql = 'select distinct '
sSql += 'list_of_invoices.total_amount as total_amount, '
sSql += 'address.lastname as lastname, address.city as city, '
sSql += "orderbook.id as order_id, to_char(list_of_invoices.maturity, \'" + dicUser['SQLDateFormat'] + "\') as maturity, "
sSql += sResidue + " as residue, "
sSql += " current_date - list_of_invoices.maturity as remind_days, "
sSql += " list_of_invoices.order_number as order_number, list_of_invoices.id, list_of_invoices.invoice_number as invoice_number, to_char(list_of_invoices.date_of_invoice, \'" + dicUser['SQLDateFormat'] + "\') as date_of_invoice "
sSql += " from list_of_invoices ,in_payment, orderbook, address "
sSql += self.getWhere('',dicUser,'1','list_of_invoices.')
sSql += "and " + sResidue + " > 0.01"
sTops = dicExtraData['Tops']
tops = None
try:
cpServer, f = self.getParser(self.CUON_FS + '/clients.ini')
if sTops == 'directDebit':
tops = self.getConfigOption('CLIENT_' + `dicUser['client']`,'list_of_invoices_directDebit', cpServer)
print tops
except:
tops = None
if tops:
liTops = tops.split(',')
if liTops:
sSql += ' and ('
for sTop in liTops:
sSql += 'case when (select max(orderinvoice.order_top) = ' + sTop + ' as top from orderinvoice where orderbook.id = orderinvoice.orderid) = true then true else case when (select max(orderinvoice.order_top) isnull as top from orderinvoice where orderbook.id = orderinvoice.orderid) = true then (select top_id = ' + sTop + ' from addresses_misc where addresses_misc.address_id = address.id ) else false end end or '
#sSql += 'case orderinvoice.order_top = ' + sTop + ' or'
sSql = sSql[:len(sSql)-3]
sSql += ' )'
#sSql += " and (current_date - list_of_invoices.maturity > " + `iReminder` + ") "
sSql += " and orderbook.id = list_of_invoices.order_number and address.id = orderbook.addressnumber"
#sSql += ' and orderbook.id = orderinvoice.orderid '
#print sSql
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
result2 = []
return result
    def xmlrpc_getStatsMisc(self, dicUser):
        # Placeholder: misc statistics are not implemented; callers receive
        # the 'NONE' sentinel wrapped in a list.
        return ['NONE']
    def xmlrpc_getStatsGlobal(self, dicUser):
        """
        Build one large SELECT of global revenue statistics: for each period
        granularity (day/week/month/quarter/year/decade/century) and each
        offset z1 back from "now", one sub-select for ordered revenue
        (sum of position amount*price) and one for incoming payments.

        The per-granularity i* counters above cap how many past periods are
        queried for that granularity; z1 beyond the cap is skipped.

        @return: dict (single result row) keyed like
                 'order_global_<granularity>_count_<z1>' and
                 'order_global_incoming_<granularity>_count_<z1>'
        """
        result = {}
        # Maximum look-back per granularity (number of past periods).
        iCentury = 2
        iDecade = 5
        iYear = 3
        iQuarter = 6
        iMonth = 14
        iWeek = 5
        # 'sql' is the date_part() field name, 'logic' the comparison operator.
        liSql = []
        liSql.append({'id':'day','sql':'doy','logic':'='})
        liSql.append({'id':'week','sql':'week','logic':'='})
        liSql.append({'id':'month','sql':'month','logic':'='})
        liSql.append({'id':'quarter','sql':'quarter','logic':'='})
        liSql.append({'id':'year','sql':'year','logic':'='})
        liSql.append({'id':'decade','sql':'decade','logic':'='})
        liSql.append({'id':'century','sql':'century','logic':'='})
        sSql = "select now(), "
        for vSql in liSql:
            for z1 in xrange(0,30):
                if vSql['id'] == 'decade' and z1 > iDecade:
                    pass
                elif vSql['id'] == 'century' and z1 > iCentury:
                    pass
                elif vSql['id'] == 'year' and z1 > iYear:
                    pass
                elif vSql['id'] == 'quarter' and z1 > iQuarter:
                    pass
                elif vSql['id'] == 'month' and z1 > iMonth:
                    pass
                elif vSql['id'] == 'week' and z1 > iWeek:
                    pass
                else:
                    #print "z1 = ", z1
                    # Ordered revenue for this period/offset.
                    sSql += " (select sum(po.amount * po.price) from list_of_invoices as li, orderposition as po, orderbook as ob "
                    sSql += " where date_part('" + vSql['sql'] +"', li.date_of_invoice) " + vSql['logic'] + " " + self.getNow(vSql, z1)[0]
                    sSql += " and date_part('year', li.date_of_invoice) " + vSql['logic'] + " " + self.getNow(vSql, z1)[1]
                    sSql += " and li.order_number = ob.id and po.orderid = li.order_number "
                    sSql += self.getWhere('', dicUser, 2,'li.')
                    sSql += " ) as " + 'order_global_' + vSql['id'] + '_count_' + `z1` +", "
                    # Incoming payments for the same period/offset.
                    sSql += "( select sum(inpayment) from in_payment "
                    sSql += " where date_part('" + vSql['sql'] +"', date_of_paid) " + vSql['logic'] + " " + self.getNow(vSql, z1)[0]
                    sSql += " and date_part('year', date_of_paid) " + vSql['logic'] + " " + self.getNow(vSql, z1)[1]
                    sSql += self.getWhere('', dicUser, 2)
                    sSql += " ) as " + 'order_global_incoming_' + vSql['id'] + '_count_' + `z1` +", "
        # Drop the trailing ", " left by the last appended column.
        sSql = sSql[0:len(sSql)-2]
        self.writeLog(sSql)
        tmpResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
        if tmpResult and tmpResult not in ['NONE','ERROR']:
            # oneResult = tmpResult[0]
            # for key in oneResult.keys():
            # result[key] = oneResult[key]
            result = tmpResult[0]
        return result
def xmlrpc_getStatsCaller(self, dicUser):
result = {}
CALLER_ID = None
WITHOUT_ID = None
MIN_SCHEDUL_YEAR = '2005'
SCHEDUL_PROCESS_STATUS = None
liCaller = None
liSchedulProcessStatus = None
iCentury = 2
iDecade = 5
iYear = 3
iQuarter = 6
iMonth = 14
iWeek = 5
try:
cpServer, f = self.getParser(self.CUON_FS + '/user.cfg')
#print cpServer
#print cpServer.sections()
CALLER_ID = self.getConfigOption('STATS','CALLER_ID', cpServer)
WITHOUT_ID = self.getConfigOption('STATS','WITHOUT_ID', cpServer)
except:
pass
try:
cpServer, f = self.getParser(self.CUON_FS + '/clients.ini')
#print cpServer
#print cpServer.sections()
SCHEDUL_PROCESS_STATUS = self.getConfigOption('CLIENT_' + `dicUser['client']`,'SchedulProcessStatus', cpServer)
iValue = self.getConfigOption('CLIENT_' + `dicUser['client']`,'StatsCallerCentury', cpServer)
if iValue:
iCentury = int(iValue)
iValue = self.getConfigOption('CLIENT_' + `dicUser['client']`,'StatsCallerDecade', cpServer)
if iValue:
iDecade = int(iValue)
iValue = self.getConfigOption('CLIENT_' + `dicUser['client']`,'StatsCallerYear', cpServer)
if iValue:
iYear = int(iValue)
iValue = self.getConfigOption('CLIENT_' + `dicUser['client']`,'StatsCallerQuarter', cpServer)
if iValue:
iQuarter = int(iValue)
iValue = self.getConfigOption('CLIENT_' + `dicUser['client']`,'StatsCallerMonth', cpServer)
if iValue:
iMonth = int(iValue)
iValue = self.getConfigOption('CLIENT_' + `dicUser['client']`,'StatsCallerWeek', cpServer)
if iValue:
iWeek = int(iValue)
except:
pass
print "SCHEDUL_PROCESS_STATUS", SCHEDUL_PROCESS_STATUS
if SCHEDUL_PROCESS_STATUS:
liSPS = SCHEDUL_PROCESS_STATUS.split(',')
print "liSPS", liSPS
liSchedulProcessStatus = []
for st in liSPS:
print st
liSchedulProcessStatus.append(int(st.strip()))
if CALLER_ID:
liCaller = CALLER_ID.split(',')
print 'liCaller = ', liCaller
liSql = []
liSql.append({'id':'day','sql':'doy','logic':'='})
liSql.append({'id':'week','sql':'week','logic':'='})
liSql.append({'id':'month','sql':'month','logic':'='})
liSql.append({'id':'quarter','sql':'quarter','logic':'='})
liSql.append({'id':'year','sql':'year','logic':'='})
liSql.append({'id':'decade','sql':'decade','logic':'='})
liSql.append({'id':'century','sql':'century','logic':'='})
for caller in liCaller:
caller_name = None
sSql = 'select cuon_username from staff where staff.id = ' + caller
res1 = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
print 'dicUser' , dicUser
if res1 and res1 not in ['NONE','ERROR']:
caller_name = res1[0]['cuon_username']
if caller_name:
sSql = "select '" + caller_name + "' as caller_name_" + caller + " ,"
print sSql
for vSql in liSql:
for z1 in xrange(0,30):
if vSql['id'] == 'decade' and z1 > iDecade:
pass
elif vSql['id'] == 'century' and z1 > iCentury:
pass
elif vSql['id'] == 'year' and z1 > iYear:
pass
elif vSql['id'] == 'quarter' and z1 > iQuarter:
pass
elif vSql['id'] == 'month' and z1 > iMonth:
pass
elif vSql['id'] == 'week' and z1 > iWeek:
pass
else:
sSql += " (select sum(po.amount * po.price) from list_of_invoices as li, orderposition as po, orderbook as ob, address as ad "
sSql += " where date_part('" + vSql['sql'] +"', li.date_of_invoice) " + vSql['logic'] + " " + self.getNow(vSql, z1)[0]
sSql += " and date_part('year', li.date_of_invoice) " + vSql['logic'] + " " + self.getNow(vSql, z1)[1]
sSql += " and li.order_number = ob.id and po.orderid = li.order_number "
sSql += " and ad.id = ob.addressnumber and ad.caller_id = " + `caller` + " "
sSql += self.getWhere('', dicUser, 2,'li.')
sSql += " ) as " + 'order_caller_'+caller +'_' + vSql['id'] + '_count_' + `z1` +", "
sSql += "( select sum(inpayment) from in_payment , orderbook as ob, address as ad "
sSql += " where date_part('" + vSql['sql'] +"', date_of_paid) " + vSql['logic'] + " " + self.getNow(vSql, z1) [0]
sSql += " and date_part('year' , date_of_paid) " + vSql['logic'] + " " + self.getNow(vSql, z1)[1]
sSql += " and in_payment.order_id = ob.id "
sSql += " and ad.id = ob.addressnumber and ad.caller_id = " + `caller` + " "
sSql += self.getWhere('', dicUser, 2, 'in_payment.')
sSql += " ) as " + 'order_incoming_caller_' + caller +'_' + vSql['id'] + '_count_' + `z1` +", "
print "sSql 2 = ", sSql
sSql = sSql[0:len(sSql)-2]
print "Caller = ", caller
if caller == 4:
print sSql
#print "len sql = ", len(sSql)
self.writeLog(sSql)
tmpResult = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
if tmpResult and tmpResult not in ['NONE','ERROR']:
oneResult = tmpResult[0]
for key in oneResult.keys():
if oneResult[key] and oneResult[key] not in ['NONE','ERROR']:
result[key] = oneResult[key]
else:
result[key] =0
#result[key] = oneResult[key]
return result
    def xmlrpc_getStatsReps(self, dicUser):
        # Placeholder: representative statistics not implemented yet.
        return ['NONE']
    def xmlrpc_getStatsSalesman(self, dicUser):
        # Placeholder: salesman statistics not implemented yet.
        return ['NONE']
    def xmlrpc_getStatTaxVat1(self, dicUser):
        """
        Monthly VAT statistics from the server-side function
        fct_getStatTaxVat(), flattened into a dict keyed by
        'TaxVat_month_tax_<field>_taxvatID_<id>_taxvatMonth_<z1>'.

        @return: dict of VAT sums, values, names and net sums per month
        """
        self.writeLog('start tax vat stats')
        sSql = " select * from fct_getStatTaxVat() as (id int, vat_value float , vat_name varchar(20), vat_designation varchar(60), tax_vatSum numeric, sum_price_netto numeric, z1 int) "
        res1 = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
        #print 'Data from sql = ' , res1
        result = {}
        # NOTE(review): res1 may be the 'NONE'/'ERROR' sentinel string; the
        # loop would then iterate characters — confirm upstream guarantees.
        for row in res1:
            #print row['id'], row['z1'], row['tax_vatsum'], row['vat_value'] , row['vat_name']
            result['TaxVat_month_tax_vatSum_taxvatID_' + `row['id']` + '_taxvatMonth_' + `row['z1']`] = float(row['tax_vatsum'] )
            result['TaxVat_month_tax_vatValue_taxvatID_' + `row['id']` + '_taxvatMonth_' + `row['z1']`] = row['vat_value']
            result['TaxVat_month_tax_vatName_taxvatID_' + `row['id']` + '_taxvatMonth_' + `row['z1']`] = row['vat_name']
            result['TaxVat_month_tax_NetSum_taxvatID_' + `row['id']` + '_taxvatMonth_' + `row['z1']`] = float(row['sum_price_netto'] )
        #self.writeLog('STATS-RESULT = ' + `result`)
        return result
    def xmlrpc_dup_order(self, iOrderID, dicUser, iType = 0):
        """
        Duplicate an order via the server-side function fct_duplicateOrder.

        @param iOrderID: id of the order to copy
        @param iType: duplication mode passed through to the SQL function
        @return: query result of the function call
        """
        sSql = "select * from fct_duplicateOrder(" + `iOrderID` + ", " + `iType` + ")"
        result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
        return result
    def xmlrpc_getArticleParts(self, dicOrder, dicUser):
        """
        Return the article ids making up the parts list of an order, via the
        server-side function fct_getArticlePartsListForOrder.

        @param dicOrder: dict with 'orderid'
        @return: rows of (article_id) or 'NONE'/'ERROR'
        """
        sSql = "select article_id from fct_getArticlePartsListForOrder(" + `dicOrder['orderid']` + ") as (article_id int) "
        result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
        return result
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import sys
from datetime import datetime
from functools import partial
import time
_socket = __import__("socket")
# workaround on osx, disable kqueue
if sys.platform == "darwin":
os.environ['EVENT_NOKQUEUE'] = "1"
try:
import gevent
except ImportError:
raise RuntimeError("You need gevent installed to use this worker.")
from gevent.pool import Pool
from gevent.server import StreamServer
from gevent.socket import wait_write, socket
from gevent import pywsgi
import gunicorn
from gunicorn.http.wsgi import base_environ
from gunicorn.workers.async import AsyncWorker
from gunicorn.http.wsgi import sendfile as o_sendfile
VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__)
def _gevent_sendfile(fdout, fdin, offset, nbytes):
    # Cooperative wrapper around the platform sendfile: on EAGAIN (socket
    # send buffer full) yield to the gevent hub until fdout is writable
    # again instead of blocking the whole worker; any other OSError is
    # propagated to the caller.
    while True:
        try:
            return o_sendfile(fdout, fdin, offset, nbytes)
        except OSError as e:
            if e.args[0] == errno.EAGAIN:
                wait_write(fdout)
            else:
                raise
def patch_sendfile():
    # Swap gunicorn's blocking sendfile for the gevent-aware retry wrapper
    # above; skipped entirely when the platform has no sendfile support
    # (o_sendfile imported as None).
    from gunicorn.http import wsgi

    if o_sendfile is not None:
        setattr(wsgi, "sendfile", _gevent_sendfile)
class GeventWorker(AsyncWorker):
    """Async gunicorn worker driving each listener with a gevent server.

    Subclasses may set server_class/wsgi_handler to use a full WSGI server
    (see GeventPyWSGIWorker); otherwise a plain StreamServer dispatches to
    gunicorn's own HTTP handling.
    """

    server_class = None
    wsgi_handler = None

    def patch(self):
        """Monkey-patch the stdlib for cooperative I/O and wrap the
        inherited listener sockets in gevent sockets."""
        from gevent import monkey
        monkey.noisy = False

        # if the new version is used make sure to patch subprocess
        if gevent.version_info[0] == 0:
            monkey.patch_all()
        else:
            monkey.patch_all(subprocess=True)

        # monkey patch sendfile to make it none blocking
        patch_sendfile()

        # patch sockets
        sockets = []
        for s in self.sockets:
            sockets.append(socket(s.FAMILY, _socket.SOCK_STREAM,
                _sock=s))
        self.sockets = sockets

    def notify(self):
        # Heartbeat to the arbiter; also exit if we have been orphaned
        # (parent pid changed, e.g. master died).
        super(GeventWorker, self).notify()
        if self.ppid != os.getppid():
            self.log.info("Parent changed, shutting down: %s", self)
            sys.exit(0)

    def timeout_ctx(self):
        # Keep-alive timeout context; False → expire silently, no exception.
        return gevent.Timeout(self.cfg.keepalive, False)

    def run(self):
        """Main worker loop: start one gevent server per listener socket,
        heartbeat while alive, then shut down gracefully."""
        servers = []
        ssl_args = {}

        if self.cfg.is_ssl:
            ssl_args = dict(server_side=True, **self.cfg.ssl_options)

        for s in self.sockets:
            s.setblocking(1)
            # Bounded greenlet pool caps concurrent connections per listener.
            pool = Pool(self.worker_connections)
            if self.server_class is not None:
                environ = base_environ(self.cfg)
                environ.update({
                    "wsgi.multithread": True,
                    "SERVER_SOFTWARE": VERSION,
                })
                server = self.server_class(
                    s, application=self.wsgi, spawn=pool, log=self.log,
                    handler_class=self.wsgi_handler, environ=environ,
                    **ssl_args)
            else:
                hfun = partial(self.handle, s)
                server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)

            server.start()
            servers.append(server)

        try:
            while self.alive:
                self.notify()
                gevent.sleep(1.0)
        except KeyboardInterrupt:
            pass
        except:
            # Unexpected error: best-effort stop of every server, then
            # re-raise so the arbiter sees the failure.
            for server in servers:
                try:
                    server.stop()
                except:
                    pass
            raise

        try:
            # Stop accepting requests
            for server in servers:
                if hasattr(server, 'close'): # gevent 1.0
                    server.close()
                if hasattr(server, 'kill'): # gevent < 1.0
                    server.kill()

            # Handle current requests until graceful_timeout
            ts = time.time()
            while time.time() - ts <= self.cfg.graceful_timeout:
                accepting = 0
                for server in servers:
                    if server.pool.free_count() != server.pool.size:
                        accepting += 1

                # if no server is accepting a connection, we can exit
                if not accepting:
                    return

                self.notify()
                gevent.sleep(1.0)

            # Force kill all active the handlers
            self.log.warning("Worker graceful timeout (pid:%s)" % self.pid)
            [server.stop(timeout=1) for server in servers]
        except:
            pass

    def handle_request(self, *args):
        # GreenletExit/SystemExit are normal shutdown signals here, not
        # request failures — swallow them.
        try:
            super(GeventWorker, self).handle_request(*args)
        except gevent.GreenletExit:
            pass
        except SystemExit:
            pass

    # init_process is defined differently depending on the installed gevent
    # major version (evaluated once at class-definition time).
    if gevent.version_info[0] == 0:

        def init_process(self):
            # monkey patch here
            self.patch()

            # reinit the hub
            import gevent.core
            gevent.core.reinit()

            #gevent 0.13 and older doesn't reinitialize dns for us after forking
            #here's the workaround
            gevent.core.dns_shutdown(fail_requests=1)
            gevent.core.dns_init()
            super(GeventWorker, self).init_process()

    else:

        def init_process(self):
            # monkey patch here
            self.patch()

            # reinit the hub
            from gevent import hub
            hub.reinit()

            # then initialize the process
            super(GeventWorker, self).init_process()
class GeventResponse(object):
    """Tiny response facade handed to gunicorn's access logger.

    Carries just the pieces the logger needs: status line, response
    headers, and the number of bytes sent (``sent``).
    """

    # Class-level defaults mirror the original attribute shape.
    status = None
    headers = None
    sent = None

    def __init__(self, status, headers, clength):
        # clength (bytes written) is exposed under the name `sent`.
        self.status, self.headers, self.sent = status, headers, clength
class PyWSGIHandler(pywsgi.WSGIHandler):
    """gevent pywsgi request handler wired into gunicorn's access log."""

    def log_request(self):
        # Wall-clock duration of the request as a timedelta.
        began = datetime.fromtimestamp(self.time_start)
        ended = datetime.fromtimestamp(self.time_finish)
        elapsed = ended - began
        response = GeventResponse(self.status,
                                  getattr(self, 'response_headers', {}),
                                  self.response_length)
        # Request headers may be absent on malformed/aborted requests.
        if hasattr(self, 'headers'):
            request_headers = [line.split(":", 1) for line in self.headers.headers]
        else:
            request_headers = []
        self.server.log.access(response, request_headers, self.environ, elapsed)

    def get_environ(self):
        # Extend the base environ with gunicorn-specific keys.
        environ = super(PyWSGIHandler, self).get_environ()
        environ['gunicorn.sock'] = self.socket
        environ['RAW_URI'] = self.path
        return environ
class PyWSGIServer(pywsgi.WSGIServer):
    # Plain subclass: kept as a distinct type name / extension point for
    # the pywsgi-based worker below.
    pass
class GeventPyWSGIWorker(GeventWorker):
    "The Gevent StreamServer based workers."
    # Use gevent's full pywsgi server/handler instead of gunicorn's own
    # HTTP parsing (selected via GeventWorker.run()).
    server_class = PyWSGIServer
    wsgi_handler = PyWSGIHandler
import re, os, logging, commands
from autotest.client.shared import error
from virttest import remote, libvirt_vm, virsh, libvirt_xml
from xml.dom.minidom import parse
def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.
    The conmand can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Save the original domain XML so it can be restored at the end.
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number.
        """
        # Concatenates the 'current' attribute of every <vcpu> element in a
        # fresh dumpxml, then converts to int (normally a single element).
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
        vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set

    # Start from a known baseline: redefine the guest with 2 vcpus.
    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()
    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    # Bring the guest into the requested pre-test state.
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()
    if domain == "remote_name":
        # Remote variant: run setvcpus over ssh against this host's libvirt.
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") == None:
            status_error = "yes"
        session = remote.remote_login("ssh", remote_ssh_addr, "22", "root", remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        # Local variant: choose how the domain is addressed (name/id/uuid,
        # possibly deliberately invalid for negative tests).
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") != None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") != None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") != None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        # If the installed virsh doesn't support a requested option, the
        # command is expected to fail.
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) == None:
                status_error = "yes"
                break
        status = virsh.setvcpus(dom_option, count_option, options, ignore_status=True).exit_status
    if pre_vm_state == "paused":
        virsh.resume(vm_name, ignore_status=True)
    # Read back the resulting vcpu count for positive test cases.
    if status_error == "no":
        if status == 0:
            if pre_vm_state == "shut off":
                if options == "--config":
                    vcpus_set = len(vm.vcpuinfo())
                elif options == "--current":
                    vcpus_set = get_current_vcpus()
                elif options == "--maximum --config":
                    vcpus_set = ""
                    dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                    vcpus_set = dom.getElementsByTagName("vcpu")[0].firstChild.data
                    vcpus_set = int(vcpus_set)
                    dom.unlink()
            else:
                vcpus_set = len(vm.vcpuinfo())
            if domain == "id":
                cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                output1 = commands.getoutput(cmd_chk)
                logging.info("guest-info:\n%s" % output1)
    # Restore the original domain definition and clean up temp files.
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)
    #check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
# -*- coding: utf-8 -*-
"""
***************************************************************************
ReliefColorsWidget.py
---------------------
Date : December 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'December 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import codecs
from qgis.PyQt import uic
from qgis.PyQt.QtCore import pyqtSlot, QDir
from qgis.PyQt.QtGui import QColor, QBrush
from qgis.PyQt.QtWidgets import (QTreeWidgetItem,
QFileDialog,
QMessageBox,
QInputDialog,
QColorDialog
)
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import QgsApplication, QgsMapLayer
from qgis.analysis import QgsRelief
from processing.gui.wrappers import WidgetWrapper
from processing.tools import system
pluginPath = os.path.dirname(__file__)

# Load the Qt Designer .ui form once at import time; WIDGET is the generated
# form class, BASE the Qt widget base class it extends.
WIDGET, BASE = uic.loadUiType(os.path.join(pluginPath, 'reliefcolorswidgetbase.ui'))
class ReliefColorsWidget(BASE, WIDGET):
    def __init__(self):
        """Build the widget from the Designer form and assign toolbar icons."""
        super(ReliefColorsWidget, self).__init__(None)
        self.setupUi(self)

        # Standard QGIS theme icons for the row/file manipulation buttons.
        self.btnAdd.setIcon(QgsApplication.getThemeIcon('/symbologyAdd.svg'))
        self.btnRemove.setIcon(QgsApplication.getThemeIcon('/symbologyRemove.svg'))
        self.btnUp.setIcon(QgsApplication.getThemeIcon('/mActionArrowUp.svg'))
        self.btnDown.setIcon(QgsApplication.getThemeIcon('/mActionArrowDown.svg'))
        self.btnLoad.setIcon(QgsApplication.getThemeIcon('/mActionFileOpen.svg'))
        self.btnSave.setIcon(QgsApplication.getThemeIcon('/mActionFileSave.svg'))
        self.btnAuto.setIcon(QgsApplication.getThemeIcon('/mActionReload.svg'))

        # Raster layer the relief classes apply to; set later by the wrapper.
        self.layer = None
@pyqtSlot()
def on_btnAdd_clicked(self):
item = QTreeWidgetItem()
item.setText(0, '0.00')
item.setText(1, '0.00')
item.setBackground(2, QBrush(QColor(127, 127, 127)))
self.reliefClassTree.addTopLevelItem(item)
@pyqtSlot()
def on_btnRemove_clicked(self):
selectedItems = self.reliefClassTree.selectedItems()
for item in selectedItems:
self.reliefClassTree.invisibleRootItem().removeChild(item)
item = None
@pyqtSlot()
def on_btnDown_clicked(self):
selectedItems = self.reliefClassTree.selectedItems()
for item in selectedItems:
currentIndex = self.reliefClassTree.indexOfTopLevelItem(item)
if currentIndex < self.reliefClassTree.topLevelItemCount() - 1:
self.reliefClassTree.takeTopLevelItem(currentIndex)
self.reliefClassTree.insertTopLevelItem(currentIndex + 1, item)
self.reliefClassTree.setCurrentItem(item)
@pyqtSlot()
def on_btnUp_clicked(self):
selectedItems = self.reliefClassTree.selectedItems()
for item in selectedItems:
currentIndex = self.reliefClassTree.indexOfTopLevelItem(item)
if currentIndex > 0:
self.reliefClassTree.takeTopLevelItem(currentIndex)
self.reliefClassTree.insertTopLevelItem(currentIndex - 1, item)
self.reliefClassTree.setCurrentItem(item)
@pyqtSlot()
def on_btnLoad_clicked(self):
fileName, _ = QFileDialog.getOpenFileName(None,
self.tr('Import Colors and elevations from XML'),
QDir.homePath(),
self.tr('XML files (*.xml *.XML)'))
if fileName == '':
return
doc = QDomDocument()
with codecs.open(fileName, 'r', encoding='utf-8') as f:
content = f.read()
if not doc.setContent(content):
QMessageBox.critical(None,
self.tr('Error parsing XML'),
self.tr('The XML file could not be loaded'))
return
self.reliefClassTree.clear()
reliefColorList = doc.elementsByTagName('ReliefColor')
for i in range(reliefColorList.length()):
elem = reliefColorList.at(i).toElement()
item = QTreeWidgetItem()
item.setText(0, elem.attribute('MinElevation'))
item.setText(1, elem.attribute('MaxElevation'))
item.setBackground(2, QBrush(QColor(int(elem.attribute('red')),
int(elem.attribute('green')),
int(elem.attribute('blue')))))
self.reliefClassTree.addTopLevelItem(item)
@pyqtSlot()
def on_btnSave_clicked(self):
fileName, _ = QFileDialog.getSaveFileName(None,
self.tr('Export Colors and elevations as XML'),
QDir.homePath(),
self.tr('XML files (*.xml *.XML)'))
if fileName == '':
return
if not fileName.lower().endswith('.xml'):
fileName += '.xml'
doc = QDomDocument()
colorsElem = doc.createElement('ReliefColors')
doc.appendChild(colorsElem)
colors = self.reliefColors()
for c in colors:
elem = doc.createElement('ReliefColor')
elem.setAttribute('MinElevation', str(c.minElevation))
elem.setAttribute('MaxElevation', str(c.maxElevation))
elem.setAttribute('red', str(c.color.red()))
elem.setAttribute('green', str(c.color.green()))
elem.setAttribute('blue', str(c.color.blue()))
colorsElem.appendChild(elem)
with codecs.open(fileName, 'w', encoding='utf-8') as f:
f.write(doc.toString(2))
@pyqtSlot()
def on_btnAuto_clicked(self):
if self.layer is None:
return
relief = QgsRelief(self.layer, system.getTempFilename(), 'GTiff')
colors = relief.calculateOptimizedReliefClasses()
self.populateColors(colors)
@pyqtSlot(QTreeWidgetItem, int)
def on_reliefClassTree_itemDoubleClicked(self, item, column):
if not item:
return
if column == 0:
d, ok = QInputDialog.getDouble(None,
self.tr('Enter lower elevation class bound'),
self.tr('Elevation'),
float(item.text(0)),
decimals=2)
if ok:
item.setText(0, str(d))
elif column == 1:
d, ok = QInputDialog.getDouble(None,
self.tr('Enter upper elevation class bound'),
self.tr('Elevation'),
float(item.text(1)),
decimals=2)
if ok:
item.setText(1, str(d))
elif column == 2:
c = QColorDialog.getColor(item.background(2).color(),
None,
self.tr('Select color for relief class'))
if c.isValid():
item.setBackground(2, QBrush(c))
def reliefColors(self):
colors = []
for i in range(self.reliefClassTree.topLevelItemCount()):
item = self.reliefClassTree.topLevelItem(i)
if item:
c = QgsRelief.ReliefColor(item.background(2).color(),
float(item.text(0)),
float(item.text(1)))
colors.append(c)
return colors
def populateColors(self, colors):
self.reliefClassTree.clear()
for c in colors:
item = QTreeWidgetItem()
item.setText(0, str(c.minElevation))
item.setText(1, str(c.maxElevation))
item.setBackground(2, QBrush(c.color))
self.reliefClassTree.addTopLevelItem(item)
def setLayer(self, layer):
self.layer = layer
def setValue(self, value):
self.reliefClassTree.clear()
rows = value.split(';')
for r in rows:
v = r.split(',')
item = QTreeWidgetItem()
item.setText(0, v[0])
item.setText(1, v[1])
color = QColor(int(v[2]), int(v[3]), int(v[4]))
item.setBackground(2, QBrush(color))
self.reliefClassTree.addTopLevelItem(item)
def value(self):
rColors = self.reliefColors()
colors = ''
for c in rColors:
colors += '{:f}, {:f}, {:d}, {:d}, {:d};'.format(c.minElevation,
c.maxElevation,
c.color.red(),
c.color.green(),
c.color.blue())
return colors[:-1]
class ReliefColorsWidgetWrapper(WidgetWrapper):
    """Processing WidgetWrapper that exposes ReliefColorsWidget as a
    parameter editor and keeps it in sync with its parent layer
    parameter."""

    def createWidget(self):
        """Instantiate the relief-colors editor widget."""
        return ReliefColorsWidget()

    def postInitialize(self, wrappers):
        """Find the wrapper of the parent layer parameter and subscribe
        to its value changes, seeding the widget with its current value."""
        parent = next((w for w in wrappers
                       if w.param.name == self.param.parent), None)
        if parent is not None:
            self.setLayer(parent.value())
            parent.widgetValueHasChanged.connect(self.parentValueChanged)

    def parentValueChanged(self, wrapper):
        """Refresh the layer whenever the parent parameter changes."""
        self.setLayer(wrapper.value())

    def setLayer(self, layer):
        """Forward the layer to the widget, unwrapping map layers to
        their source path first."""
        source = layer.source() if isinstance(layer, QgsMapLayer) else layer
        self.widget.setLayer(source)

    def setValue(self, value):
        """Delegate the serialized class string to the widget."""
        self.widget.setValue(value)

    def value(self):
        """Return the widget's serialized class string."""
        return self.widget.value()
// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * Internal interface of the jitter entropy source: backend hooks
 * (memory, time, hashing) plus the collector entry points.
 */

/* Zeroing allocators and their matching free helpers; the *kv* pair
 * presumably may fall back to vmalloc for large sizes — confirm in the
 * backend implementation. */
extern void *jent_kvzalloc(unsigned int len);
extern void jent_kvzfree(void *ptr, unsigned int len);
extern void *jent_zalloc(unsigned int len);
extern void jent_zfree(void *ptr);

/* Store a high-resolution time value into *out. */
extern void jent_get_nstime(__u64 *out);

/* Mix a time value (plus optional additional data) into hash_state;
 * hash_loop_cnt and the stuck flag steer the conditioning. */
extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
			  unsigned int addtl_len, __u64 hash_loop_cnt,
			  unsigned int stuck);

/* Read dst_len bytes of conditioned random data out of hash_state. */
int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len);

/* Opaque per-collector state, defined in jitterentropy.c. */
struct rand_data;

/* Initialization/health test of the entropy source. */
extern int jent_entropy_init(unsigned int osr, unsigned int flags,
			     void *hash_state, struct rand_data *p_ec);

/* Fill data with len bytes of entropy from an allocated collector. */
extern int jent_read_entropy(struct rand_data *ec, unsigned char *data,
			     unsigned int len);

/* Collector lifetime management. */
extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
						      unsigned int flags,
						      void *hash_state);
extern void jent_entropy_collector_free(struct rand_data *entropy_collector);

#ifdef CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE
/* Test interface for capturing raw high-resolution time samples. */
int jent_raw_hires_entropy_store(__u64 value);
void jent_testing_init(void);
void jent_testing_exit(void);
#else /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
/* No-op stubs when the test interface is compiled out. */
static inline int jent_raw_hires_entropy_store(__u64 value) { return 0; }
static inline void jent_testing_init(void) { }
static inline void jent_testing_exit(void) { }
#endif /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: io/prometheus/client/metrics.proto
package io_prometheus_client
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
types "github.com/gogo/protobuf/types"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type MetricType int32
const (
// COUNTER must use the Metric field "counter".
MetricType_COUNTER MetricType = 0
// GAUGE must use the Metric field "gauge".
MetricType_GAUGE MetricType = 1
// SUMMARY must use the Metric field "summary".
MetricType_SUMMARY MetricType = 2
// UNTYPED must use the Metric field "untyped".
MetricType_UNTYPED MetricType = 3
// HISTOGRAM must use the Metric field "histogram".
MetricType_HISTOGRAM MetricType = 4
// GAUGE_HISTOGRAM must use the Metric field "histogram".
MetricType_GAUGE_HISTOGRAM MetricType = 5
)
var MetricType_name = map[int32]string{
0: "COUNTER",
1: "GAUGE",
2: "SUMMARY",
3: "UNTYPED",
4: "HISTOGRAM",
5: "GAUGE_HISTOGRAM",
}
var MetricType_value = map[string]int32{
"COUNTER": 0,
"GAUGE": 1,
"SUMMARY": 2,
"UNTYPED": 3,
"HISTOGRAM": 4,
"GAUGE_HISTOGRAM": 5,
}
func (x MetricType) String() string {
return proto.EnumName(MetricType_name, int32(x))
}
func (MetricType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{0}
}
type LabelPair struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{0}
}
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *LabelPair) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelPair.Merge(m, src)
}
func (m *LabelPair) XXX_Size() int {
return m.Size()
}
func (m *LabelPair) XXX_DiscardUnknown() {
xxx_messageInfo_LabelPair.DiscardUnknown(m)
}
var xxx_messageInfo_LabelPair proto.InternalMessageInfo
func (m *LabelPair) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *LabelPair) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type Gauge struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
func (*Gauge) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{1}
}
func (m *Gauge) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Gauge) XXX_Merge(src proto.Message) {
xxx_messageInfo_Gauge.Merge(m, src)
}
func (m *Gauge) XXX_Size() int {
return m.Size()
}
func (m *Gauge) XXX_DiscardUnknown() {
xxx_messageInfo_Gauge.DiscardUnknown(m)
}
var xxx_messageInfo_Gauge proto.InternalMessageInfo
func (m *Gauge) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
type Counter struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
CreatedTimestamp *types.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Counter) Reset() { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage() {}
func (*Counter) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{2}
}
func (m *Counter) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Counter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Counter.Merge(m, src)
}
func (m *Counter) XXX_Size() int {
return m.Size()
}
func (m *Counter) XXX_DiscardUnknown() {
xxx_messageInfo_Counter.DiscardUnknown(m)
}
var xxx_messageInfo_Counter proto.InternalMessageInfo
func (m *Counter) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Counter) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
func (m *Counter) GetCreatedTimestamp() *types.Timestamp {
if m != nil {
return m.CreatedTimestamp
}
return nil
}
type Quantile struct {
Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Quantile) Reset() { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage() {}
func (*Quantile) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{3}
}
func (m *Quantile) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Quantile) XXX_Merge(src proto.Message) {
xxx_messageInfo_Quantile.Merge(m, src)
}
func (m *Quantile) XXX_Size() int {
return m.Size()
}
func (m *Quantile) XXX_DiscardUnknown() {
xxx_messageInfo_Quantile.DiscardUnknown(m)
}
var xxx_messageInfo_Quantile proto.InternalMessageInfo
func (m *Quantile) GetQuantile() float64 {
if m != nil {
return m.Quantile
}
return 0
}
func (m *Quantile) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
type Summary struct {
SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"`
CreatedTimestamp *types.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
func (*Summary) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{4}
}
func (m *Summary) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Summary) XXX_Merge(src proto.Message) {
xxx_messageInfo_Summary.Merge(m, src)
}
func (m *Summary) XXX_Size() int {
return m.Size()
}
func (m *Summary) XXX_DiscardUnknown() {
xxx_messageInfo_Summary.DiscardUnknown(m)
}
var xxx_messageInfo_Summary proto.InternalMessageInfo
func (m *Summary) GetSampleCount() uint64 {
if m != nil {
return m.SampleCount
}
return 0
}
func (m *Summary) GetSampleSum() float64 {
if m != nil {
return m.SampleSum
}
return 0
}
func (m *Summary) GetQuantile() []Quantile {
if m != nil {
return m.Quantile
}
return nil
}
func (m *Summary) GetCreatedTimestamp() *types.Timestamp {
if m != nil {
return m.CreatedTimestamp
}
return nil
}
type Untyped struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Untyped) Reset() { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage() {}
func (*Untyped) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{5}
}
func (m *Untyped) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Untyped) XXX_Merge(src proto.Message) {
xxx_messageInfo_Untyped.Merge(m, src)
}
func (m *Untyped) XXX_Size() int {
return m.Size()
}
func (m *Untyped) XXX_DiscardUnknown() {
xxx_messageInfo_Untyped.DiscardUnknown(m)
}
var xxx_messageInfo_Untyped proto.InternalMessageInfo
func (m *Untyped) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
type Histogram struct {
SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"`
SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
// Buckets for the classic histogram.
Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"`
CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets.
// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
// In the future, more bucket schemas may be added using numbers < -4 or > 8.
Schema int32 `protobuf:"zigzag32,5,opt,name=schema,proto3" json:"schema,omitempty"`
ZeroThreshold float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
ZeroCount uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
ZeroCountFloat float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat,proto3" json:"zero_count_float,omitempty"`
// Negative buckets for the native histogram.
NegativeSpan []BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span"`
// Use either "negative_delta" or "negative_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"`
NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"`
// Positive buckets for the native histogram.
// Use a no-op span (offset 0, length 0) for a native histogram without any
// observations yet and with a zero_threshold of 0. Otherwise, it would be
// indistinguishable from a classic histogram.
PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"`
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
PositiveDelta []int64 `protobuf:"zigzag64,13,rep,packed,name=positive_delta,json=positiveDelta,proto3" json:"positive_delta,omitempty"`
PositiveCount []float64 `protobuf:"fixed64,14,rep,packed,name=positive_count,json=positiveCount,proto3" json:"positive_count,omitempty"`
// Only used for native histograms. These exemplars MUST have a timestamp.
Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{6}
}
func (m *Histogram) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(m, src)
}
func (m *Histogram) XXX_Size() int {
return m.Size()
}
func (m *Histogram) XXX_DiscardUnknown() {
xxx_messageInfo_Histogram.DiscardUnknown(m)
}
var xxx_messageInfo_Histogram proto.InternalMessageInfo
func (m *Histogram) GetSampleCount() uint64 {
if m != nil {
return m.SampleCount
}
return 0
}
func (m *Histogram) GetSampleCountFloat() float64 {
if m != nil {
return m.SampleCountFloat
}
return 0
}
func (m *Histogram) GetSampleSum() float64 {
if m != nil {
return m.SampleSum
}
return 0
}
func (m *Histogram) GetBucket() []Bucket {
if m != nil {
return m.Bucket
}
return nil
}
func (m *Histogram) GetCreatedTimestamp() *types.Timestamp {
if m != nil {
return m.CreatedTimestamp
}
return nil
}
func (m *Histogram) GetSchema() int32 {
if m != nil {
return m.Schema
}
return 0
}
func (m *Histogram) GetZeroThreshold() float64 {
if m != nil {
return m.ZeroThreshold
}
return 0
}
func (m *Histogram) GetZeroCount() uint64 {
if m != nil {
return m.ZeroCount
}
return 0
}
func (m *Histogram) GetZeroCountFloat() float64 {
if m != nil {
return m.ZeroCountFloat
}
return 0
}
func (m *Histogram) GetNegativeSpan() []BucketSpan {
if m != nil {
return m.NegativeSpan
}
return nil
}
func (m *Histogram) GetNegativeDelta() []int64 {
if m != nil {
return m.NegativeDelta
}
return nil
}
func (m *Histogram) GetNegativeCount() []float64 {
if m != nil {
return m.NegativeCount
}
return nil
}
func (m *Histogram) GetPositiveSpan() []BucketSpan {
if m != nil {
return m.PositiveSpan
}
return nil
}
func (m *Histogram) GetPositiveDelta() []int64 {
if m != nil {
return m.PositiveDelta
}
return nil
}
func (m *Histogram) GetPositiveCount() []float64 {
if m != nil {
return m.PositiveCount
}
return nil
}
func (m *Histogram) GetExemplars() []*Exemplar {
if m != nil {
return m.Exemplars
}
return nil
}
type Bucket struct {
CumulativeCount uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount,proto3" json:"cumulative_count,omitempty"`
CumulativeCountFloat float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat,proto3" json:"cumulative_count_float,omitempty"`
UpperBound float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound,proto3" json:"upper_bound,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{7}
}
func (m *Bucket) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(m, src)
}
func (m *Bucket) XXX_Size() int {
return m.Size()
}
func (m *Bucket) XXX_DiscardUnknown() {
xxx_messageInfo_Bucket.DiscardUnknown(m)
}
var xxx_messageInfo_Bucket proto.InternalMessageInfo
func (m *Bucket) GetCumulativeCount() uint64 {
if m != nil {
return m.CumulativeCount
}
return 0
}
func (m *Bucket) GetCumulativeCountFloat() float64 {
if m != nil {
return m.CumulativeCountFloat
}
return 0
}
func (m *Bucket) GetUpperBound() float64 {
if m != nil {
return m.UpperBound
}
return 0
}
func (m *Bucket) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
// A BucketSpan defines a number of consecutive buckets in a native
// histogram with their offset. Logically, it would be more
// straightforward to include the bucket counts in the Span. However,
// the protobuf representation is more compact in the way the data is
// structured here (with all the buckets in a single array separate
// from the Spans).
type BucketSpan struct {
Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BucketSpan) Reset() { *m = BucketSpan{} }
func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
func (*BucketSpan) ProtoMessage() {}
func (*BucketSpan) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{8}
}
func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *BucketSpan) XXX_Merge(src proto.Message) {
xxx_messageInfo_BucketSpan.Merge(m, src)
}
func (m *BucketSpan) XXX_Size() int {
return m.Size()
}
func (m *BucketSpan) XXX_DiscardUnknown() {
xxx_messageInfo_BucketSpan.DiscardUnknown(m)
}
var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
func (m *BucketSpan) GetOffset() int32 {
if m != nil {
return m.Offset
}
return 0
}
func (m *BucketSpan) GetLength() uint32 {
if m != nil {
return m.Length
}
return 0
}
type Exemplar struct {
Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"`
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
Timestamp *types.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{9}
}
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Exemplar) XXX_Merge(src proto.Message) {
xxx_messageInfo_Exemplar.Merge(m, src)
}
func (m *Exemplar) XXX_Size() int {
return m.Size()
}
func (m *Exemplar) XXX_DiscardUnknown() {
xxx_messageInfo_Exemplar.DiscardUnknown(m)
}
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
func (m *Exemplar) GetLabel() []LabelPair {
if m != nil {
return m.Label
}
return nil
}
func (m *Exemplar) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Exemplar) GetTimestamp() *types.Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
type Metric struct {
Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"`
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"`
Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"`
Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"`
Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped,proto3" json:"untyped,omitempty"`
Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"`
TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{10}
}
func (m *Metric) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(m, src)
}
func (m *Metric) XXX_Size() int {
return m.Size()
}
func (m *Metric) XXX_DiscardUnknown() {
xxx_messageInfo_Metric.DiscardUnknown(m)
}
var xxx_messageInfo_Metric proto.InternalMessageInfo
func (m *Metric) GetLabel() []LabelPair {
if m != nil {
return m.Label
}
return nil
}
func (m *Metric) GetGauge() *Gauge {
if m != nil {
return m.Gauge
}
return nil
}
func (m *Metric) GetCounter() *Counter {
if m != nil {
return m.Counter
}
return nil
}
func (m *Metric) GetSummary() *Summary {
if m != nil {
return m.Summary
}
return nil
}
func (m *Metric) GetUntyped() *Untyped {
if m != nil {
return m.Untyped
}
return nil
}
func (m *Metric) GetHistogram() *Histogram {
if m != nil {
return m.Histogram
}
return nil
}
func (m *Metric) GetTimestampMs() int64 {
if m != nil {
return m.TimestampMs
}
return 0
}
type MetricFamily struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"`
Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"`
Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MetricFamily) Reset() { *m = MetricFamily{} }
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
func (*MetricFamily) ProtoMessage() {}
func (*MetricFamily) Descriptor() ([]byte, []int) {
return fileDescriptor_d1e5ddb18987a258, []int{11}
}
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *MetricFamily) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricFamily.Merge(m, src)
}
func (m *MetricFamily) XXX_Size() int {
return m.Size()
}
func (m *MetricFamily) XXX_DiscardUnknown() {
xxx_messageInfo_MetricFamily.DiscardUnknown(m)
}
var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
// GetName returns the family name, or "" when the receiver is nil.
func (m *MetricFamily) GetName() string {
	if m == nil {
		return ""
	}
	return m.Name
}

// GetHelp returns the help text, or "" when the receiver is nil.
func (m *MetricFamily) GetHelp() string {
	if m == nil {
		return ""
	}
	return m.Help
}

// GetType returns the declared metric type, defaulting to
// MetricType_COUNTER when the receiver is nil.
func (m *MetricFamily) GetType() MetricType {
	if m == nil {
		return MetricType_COUNTER
	}
	return m.Type
}

// GetMetric returns the contained samples, or nil when the receiver is nil.
func (m *MetricFamily) GetMetric() []Metric {
	if m == nil {
		return nil
	}
	return m.Metric
}

// GetUnit returns the metric unit, or "" when the receiver is nil.
func (m *MetricFamily) GetUnit() string {
	if m == nil {
		return ""
	}
	return m.Unit
}
// init registers the MetricType enum and every message type with the protobuf
// runtime under their fully-qualified proto names.
func init() {
	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
	proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
	proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
	proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
	proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
	proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
	proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
	proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
	proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
	proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
	proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
	proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
	proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
}

// init registers the gzipped file descriptor for metrics.proto.
func init() {
	proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
}
var fileDescriptor_d1e5ddb18987a258 = []byte{
// 982 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x8f, 0xdb, 0x44,
0x18, 0xae, 0x9b, 0x4f, 0xbf, 0xd9, 0x6c, 0xbd, 0x43, 0x54, 0x59, 0x0b, 0xbb, 0x09, 0x96, 0x90,
0x16, 0x84, 0x12, 0x01, 0x45, 0xa0, 0xb2, 0x48, 0xec, 0xb6, 0xdb, 0x14, 0x95, 0xb4, 0x65, 0x92,
0x1c, 0xca, 0xc5, 0x9a, 0x24, 0xb3, 0x8e, 0x85, 0xbf, 0xb0, 0xc7, 0x15, 0xcb, 0x9d, 0xdf, 0xc0,
0x1f, 0xe0, 0x67, 0x70, 0x46, 0x3d, 0x72, 0xe2, 0x88, 0xd0, 0xfe, 0x0e, 0x0e, 0x68, 0xbe, 0xec,
0x6c, 0xe5, 0x2c, 0x2c, 0xdc, 0x3c, 0x8f, 0x9f, 0x67, 0xe6, 0x79, 0x1f, 0xdb, 0xef, 0x6b, 0x70,
0xfc, 0x78, 0x94, 0xa4, 0x71, 0x48, 0xd9, 0x9a, 0xe6, 0xd9, 0x68, 0x19, 0xf8, 0x34, 0x62, 0xa3,
0x90, 0xb2, 0xd4, 0x5f, 0x66, 0xc3, 0x24, 0x8d, 0x59, 0x8c, 0x7a, 0x7e, 0x3c, 0x2c, 0x39, 0x43,
0xc9, 0xd9, 0xef, 0x79, 0xb1, 0x17, 0x0b, 0xc2, 0x88, 0x5f, 0x49, 0xee, 0x7e, 0xdf, 0x8b, 0x63,
0x2f, 0xa0, 0x23, 0xb1, 0x5a, 0xe4, 0xe7, 0x23, 0xe6, 0x87, 0x34, 0x63, 0x24, 0x4c, 0x24, 0xc1,
0xf9, 0x18, 0xcc, 0xaf, 0xc8, 0x82, 0x06, 0xcf, 0x89, 0x9f, 0x22, 0x04, 0xf5, 0x88, 0x84, 0xd4,
0x36, 0x06, 0xc6, 0x91, 0x89, 0xc5, 0x35, 0xea, 0x41, 0xe3, 0x25, 0x09, 0x72, 0x6a, 0xdf, 0x16,
0xa0, 0x5c, 0x38, 0x07, 0xd0, 0x18, 0x93, 0xdc, 0xdb, 0xb8, 0xcd, 0x35, 0x86, 0xbe, 0xfd, 0xb3,
0x01, 0xad, 0x07, 0x71, 0x1e, 0x31, 0x9a, 0x56, 0x33, 0xd0, 0x7d, 0x68, 0xd3, 0xef, 0x69, 0x98,
0x04, 0x24, 0x15, 0x3b, 0x77, 0x3e, 0x3c, 0x1c, 0x56, 0xd5, 0x35, 0x3c, 0x53, 0x2c, 0x5c, 0xf0,
0xd1, 0x18, 0xf6, 0x96, 0x29, 0x25, 0x8c, 0xae, 0xdc, 0xa2, 0x1c, 0xbb, 0x26, 0x36, 0xd9, 0x1f,
0xca, 0x82, 0x87, 0xba, 0xe0, 0xe1, 0x4c, 0x33, 0xb0, 0xa5, 0x44, 0x05, 0xe2, 0x1c, 0x43, 0xfb,
0xeb, 0x9c, 0x44, 0xcc, 0x0f, 0x28, 0xda, 0x87, 0xf6, 0x77, 0xea, 0x5a, 0x39, 0x2d, 0xd6, 0x57,
0x33, 0x28, 0x8a, 0xfc, 0xdd, 0x80, 0xd6, 0x34, 0x0f, 0x43, 0x92, 0x5e, 0xa0, 0xb7, 0x61, 0x27,
0x23, 0x61, 0x12, 0x50, 0x77, 0xc9, 0xcb, 0x16, 0x3b, 0xd4, 0x71, 0x47, 0x62, 0x22, 0x09, 0x74,
0x00, 0xa0, 0x28, 0x59, 0x1e, 0xaa, 0x9d, 0x4c, 0x89, 0x4c, 0xf3, 0x10, 0x7d, 0xb1, 0x71, 0x7e,
0x6d, 0x50, 0xdb, 0x1e, 0x88, 0x76, 0x7c, 0x5a, 0x7f, 0xf5, 0x47, 0xff, 0xd6, 0x86, 0xcb, 0xca,
0x58, 0xea, 0xff, 0x21, 0x96, 0x3e, 0xb4, 0xe6, 0x11, 0xbb, 0x48, 0xe8, 0x6a, 0xcb, 0xe3, 0xfd,
0xab, 0x01, 0xe6, 0x63, 0x3f, 0x63, 0xb1, 0x97, 0x92, 0xf0, 0xdf, 0xd4, 0xfe, 0x3e, 0xa0, 0x4d,
0x8a, 0x7b, 0x1e, 0xc4, 0x84, 0x09, 0x6f, 0x06, 0xb6, 0x36, 0x88, 0x8f, 0x38, 0xfe, 0x4f, 0x49,
0xdd, 0x87, 0xe6, 0x22, 0x5f, 0x7e, 0x4b, 0x99, 0xca, 0xe9, 0xad, 0xea, 0x9c, 0x4e, 0x05, 0x47,
0xa5, 0xa4, 0x14, 0xd5, 0x19, 0xdd, 0xb9, 0x79, 0x46, 0xe8, 0x2e, 0x34, 0xb3, 0xe5, 0x9a, 0x86,
0xc4, 0x6e, 0x0c, 0x8c, 0xa3, 0x3d, 0xac, 0x56, 0xe8, 0x1d, 0xd8, 0xfd, 0x81, 0xa6, 0xb1, 0xcb,
0xd6, 0x29, 0xcd, 0xd6, 0x71, 0xb0, 0xb2, 0x9b, 0xc2, 0x7f, 0x97, 0xa3, 0x33, 0x0d, 0xf2, 0x12,
0x05, 0x4d, 0x26, 0xd6, 0x12, 0x89, 0x99, 0x1c, 0x91, 0x79, 0x1d, 0x81, 0x55, 0xde, 0x56, 0x69,
0xb5, 0xc5, 0x3e, 0xbb, 0x05, 0x49, 0x66, 0xf5, 0x04, 0xba, 0x11, 0xf5, 0x08, 0xf3, 0x5f, 0x52,
0x37, 0x4b, 0x48, 0x64, 0x9b, 0x22, 0x93, 0xc1, 0x75, 0x99, 0x4c, 0x13, 0x12, 0xa9, 0x5c, 0x76,
0xb4, 0x98, 0x63, 0xdc, 0x7c, 0xb1, 0xd9, 0x8a, 0x06, 0x8c, 0xd8, 0x30, 0xa8, 0x1d, 0x21, 0x5c,
0x1c, 0xf1, 0x90, 0x83, 0x57, 0x68, 0xb2, 0x80, 0xce, 0xa0, 0xc6, 0x6b, 0xd4, 0xa8, 0x2c, 0xe2,
0x09, 0x74, 0x93, 0x38, 0xf3, 0x4b, 0x6b, 0x3b, 0x37, 0xb3, 0xa6, 0xc5, 0xda, 0x5a, 0xb1, 0x99,
0xb4, 0xd6, 0x95, 0xd6, 0x34, 0x5a, 0x58, 0x2b, 0x68, 0xd2, 0xda, 0xae, 0xb4, 0xa6, 0x51, 0x69,
0xed, 0x18, 0x4c, 0xdd, 0x4d, 0x32, 0xdb, 0xba, 0xee, 0x6b, 0x2b, 0xda, 0x4f, 0x29, 0x70, 0x7e,
0x35, 0xa0, 0x29, 0xed, 0xa2, 0x77, 0xc1, 0x5a, 0xe6, 0x61, 0x1e, 0x6c, 0x86, 0x21, 0xdf, 0xff,
0x3b, 0x25, 0x2e, 0xcf, 0xbc, 0x07, 0x77, 0x5f, 0xa7, 0x5e, 0xf9, 0x0e, 0x7a, 0xaf, 0x09, 0xe4,
0xf3, 0xed, 0x43, 0x27, 0x4f, 0x12, 0x9a, 0xba, 0x8b, 0x38, 0x8f, 0x56, 0xea, 0x63, 0x00, 0x01,
0x9d, 0x72, 0xe4, 0x4a, 0x23, 0xad, 0xdd, 0xac, 0x91, 0x3a, 0xc7, 0x00, 0x65, 0xec, 0xfc, 0x95,
0x8e, 0xcf, 0xcf, 0x33, 0x2a, 0x2b, 0xd8, 0xc3, 0x6a, 0xc5, 0xf1, 0x80, 0x46, 0x1e, 0x5b, 0x8b,
0xd3, 0xbb, 0x58, 0xad, 0x9c, 0x9f, 0x0c, 0x68, 0xeb, 0x4d, 0xd1, 0x67, 0xd0, 0x08, 0xf8, 0x1c,
0xb1, 0x0d, 0x91, 0x66, 0xbf, 0xda, 0x43, 0x31, 0x6a, 0xd4, 0x33, 0x96, 0x9a, 0xea, 0xfe, 0x8a,
0x3e, 0x05, 0xf3, 0x26, 0xed, 0xbd, 0x24, 0x3b, 0x3f, 0xd6, 0xa0, 0x39, 0x11, 0x33, 0xf3, 0xff,
0xf9, 0xfa, 0x00, 0x1a, 0x1e, 0x9f, 0x72, 0x6a, 0x42, 0xbd, 0x59, 0x2d, 0x16, 0x83, 0x10, 0x4b,
0x26, 0xfa, 0x04, 0x5a, 0x4b, 0x39, 0xf8, 0x94, 0xe5, 0x83, 0x6a, 0x91, 0x9a, 0x8e, 0x58, 0xb3,
0xb9, 0x30, 0x93, 0xc3, 0x44, 0xf5, 0xec, 0x2d, 0x42, 0x35, 0x71, 0xb0, 0x66, 0x73, 0x61, 0x2e,
0xbb, 0xb5, 0x68, 0x45, 0x5b, 0x85, 0xaa, 0xa5, 0x63, 0xcd, 0x46, 0x9f, 0x83, 0xb9, 0xd6, 0x4d,
0x5c, 0xb4, 0xa0, 0xad, 0xf1, 0x14, 0xbd, 0x1e, 0x97, 0x0a, 0xde, 0xf6, 0x8b, 0xc4, 0xdd, 0x30,
0x13, 0x7d, 0xae, 0x86, 0x3b, 0x05, 0x36, 0xc9, 0x9c, 0x5f, 0x0c, 0xd8, 0x91, 0xcf, 0xe1, 0x11,
0x09, 0xfd, 0xe0, 0xa2, 0xf2, 0x07, 0x03, 0x41, 0x7d, 0x4d, 0x83, 0x44, 0xfd, 0x5f, 0x88, 0x6b,
0x74, 0x0f, 0xea, 0xdc, 0xa3, 0x88, 0x70, 0x77, 0x5b, 0xc7, 0x90, 0x3b, 0xcf, 0x2e, 0x12, 0x8a,
0x05, 0x9b, 0x0f, 0x06, 0xf9, 0xa7, 0x64, 0xd7, 0xaf, 0x1b, 0x0c, 0x52, 0xa7, 0x07, 0x83, 0x54,
0x70, 0x17, 0x79, 0xe4, 0x33, 0x11, 0xa1, 0x89, 0xc5, 0xf5, 0x7b, 0x0b, 0x80, 0xf2, 0x0c, 0xd4,
0x81, 0xd6, 0x83, 0x67, 0xf3, 0xa7, 0xb3, 0x33, 0x6c, 0xdd, 0x42, 0x26, 0x34, 0xc6, 0x27, 0xf3,
0xf1, 0x99, 0x65, 0x70, 0x7c, 0x3a, 0x9f, 0x4c, 0x4e, 0xf0, 0x0b, 0xeb, 0x36, 0x5f, 0xcc, 0x9f,
0xce, 0x5e, 0x3c, 0x3f, 0x7b, 0x68, 0xd5, 0x50, 0x17, 0xcc, 0xc7, 0x5f, 0x4e, 0x67, 0xcf, 0xc6,
0xf8, 0x64, 0x62, 0xd5, 0xd1, 0x1b, 0x70, 0x47, 0x68, 0xdc, 0x12, 0x6c, 0x9c, 0x3a, 0xaf, 0x2e,
0x0f, 0x8d, 0xdf, 0x2e, 0x0f, 0x8d, 0x3f, 0x2f, 0x0f, 0x8d, 0x6f, 0x7a, 0x7e, 0xec, 0x96, 0x86,
0x5d, 0x69, 0x78, 0xd1, 0x14, 0x6f, 0xfb, 0x47, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xe1,
0xcf, 0xb8, 0x1d, 0x0a, 0x00, 0x00,
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *LabelPair) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA — fields in
// descending field-number order — and returns the byte count written.
func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Value) > 0 {
		i -= len(m.Value)
		copy(dAtA[i:], m.Value)
		i = encodeVarintMetrics(dAtA, i, uint64(len(m.Value)))
		i--
		dAtA[i] = 0x12 // tag: field 2, wire type 2 (length-delimited)
	}
	if len(m.Name) > 0 {
		i -= len(m.Name)
		copy(dAtA[i:], m.Name)
		i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
		i--
		dAtA[i] = 0xa // tag: field 1, wire type 2 (length-delimited)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *Gauge) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *Gauge) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA and returns
// the byte count written.
func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.Value != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
		i--
		dAtA[i] = 0x9 // tag: field 1, wire type 1 (fixed64)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *Counter) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *Counter) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA — fields in
// descending field-number order — and returns the byte count written.
func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.CreatedTimestamp != nil {
		{
			size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x1a // tag: field 3, wire type 2
	}
	if m.Exemplar != nil {
		{
			size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x12 // tag: field 2, wire type 2
	}
	if m.Value != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
		i--
		dAtA[i] = 0x9 // tag: field 1, wire type 1 (fixed64)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *Quantile) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *Quantile) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA and returns
// the byte count written.
func (m *Quantile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.Value != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
		i--
		dAtA[i] = 0x11 // tag: field 2, wire type 1 (fixed64)
	}
	if m.Quantile != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile))))
		i--
		dAtA[i] = 0x9 // tag: field 1, wire type 1 (fixed64)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *Summary) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *Summary) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA — fields in
// descending field-number order, repeated fields iterated in reverse so they
// appear in order on the wire — and returns the byte count written.
func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.CreatedTimestamp != nil {
		{
			size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x22 // tag: field 4, wire type 2
	}
	if len(m.Quantile) > 0 {
		for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Quantile[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintMetrics(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1a // tag: field 3, wire type 2
		}
	}
	if m.SampleSum != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum))))
		i--
		dAtA[i] = 0x11 // tag: field 2, wire type 1 (fixed64)
	}
	if m.SampleCount != 0 {
		i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount))
		i--
		dAtA[i] = 0x8 // tag: field 1, wire type 0 (varint)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *Untyped) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *Untyped) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA and returns
// the byte count written.
func (m *Untyped) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.Value != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
		i--
		dAtA[i] = 0x9 // tag: field 1, wire type 1 (fixed64)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *Histogram) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA — fields in
// descending field-number order, repeated fields iterated in reverse, packed
// numeric fields emitted as a single length-delimited run — and returns the
// byte count written.
func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Exemplars) > 0 {
		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintMetrics(dAtA, i, uint64(size))
			}
			// Field 16 needs a two-byte tag (0x82 0x01): field numbers >= 16
			// no longer fit in a single tag byte.
			i--
			dAtA[i] = 0x1
			i--
			dAtA[i] = 0x82
		}
	}
	if m.CreatedTimestamp != nil {
		{
			size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x7a // tag: field 15, wire type 2
	}
	if len(m.PositiveCount) > 0 {
		// Packed fixed64 doubles: 8 bytes each, written back to front.
		for iNdEx := len(m.PositiveCount) - 1; iNdEx >= 0; iNdEx-- {
			f5 := math.Float64bits(float64(m.PositiveCount[iNdEx]))
			i -= 8
			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5))
		}
		i = encodeVarintMetrics(dAtA, i, uint64(len(m.PositiveCount)*8))
		i--
		dAtA[i] = 0x72 // tag: field 14, wire type 2 (packed)
	}
	if len(m.PositiveDelta) > 0 {
		// Packed zigzag varints, staged in a scratch buffer (10 bytes max per
		// value) and then copied into place.
		var j6 int
		dAtA8 := make([]byte, len(m.PositiveDelta)*10)
		for _, num := range m.PositiveDelta {
			x7 := (uint64(num) << 1) ^ uint64((num >> 63))
			for x7 >= 1<<7 {
				dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80)
				j6++
				x7 >>= 7
			}
			dAtA8[j6] = uint8(x7)
			j6++
		}
		i -= j6
		copy(dAtA[i:], dAtA8[:j6])
		i = encodeVarintMetrics(dAtA, i, uint64(j6))
		i--
		dAtA[i] = 0x6a // tag: field 13, wire type 2 (packed)
	}
	if len(m.PositiveSpan) > 0 {
		for iNdEx := len(m.PositiveSpan) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.PositiveSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintMetrics(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x62 // tag: field 12, wire type 2
		}
	}
	if len(m.NegativeCount) > 0 {
		// Packed fixed64 doubles, mirroring PositiveCount above.
		for iNdEx := len(m.NegativeCount) - 1; iNdEx >= 0; iNdEx-- {
			f9 := math.Float64bits(float64(m.NegativeCount[iNdEx]))
			i -= 8
			encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f9))
		}
		i = encodeVarintMetrics(dAtA, i, uint64(len(m.NegativeCount)*8))
		i--
		dAtA[i] = 0x5a // tag: field 11, wire type 2 (packed)
	}
	if len(m.NegativeDelta) > 0 {
		// Packed zigzag varints, mirroring PositiveDelta above.
		var j10 int
		dAtA12 := make([]byte, len(m.NegativeDelta)*10)
		for _, num := range m.NegativeDelta {
			x11 := (uint64(num) << 1) ^ uint64((num >> 63))
			for x11 >= 1<<7 {
				dAtA12[j10] = uint8(uint64(x11)&0x7f | 0x80)
				j10++
				x11 >>= 7
			}
			dAtA12[j10] = uint8(x11)
			j10++
		}
		i -= j10
		copy(dAtA[i:], dAtA12[:j10])
		i = encodeVarintMetrics(dAtA, i, uint64(j10))
		i--
		dAtA[i] = 0x52 // tag: field 10, wire type 2 (packed)
	}
	if len(m.NegativeSpan) > 0 {
		for iNdEx := len(m.NegativeSpan) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.NegativeSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintMetrics(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x4a // tag: field 9, wire type 2
		}
	}
	if m.ZeroCountFloat != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat))))
		i--
		dAtA[i] = 0x41 // tag: field 8, wire type 1 (fixed64)
	}
	if m.ZeroCount != 0 {
		i = encodeVarintMetrics(dAtA, i, uint64(m.ZeroCount))
		i--
		dAtA[i] = 0x38 // tag: field 7, wire type 0 (varint)
	}
	if m.ZeroThreshold != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold))))
		i--
		dAtA[i] = 0x31 // tag: field 6, wire type 1 (fixed64)
	}
	if m.Schema != 0 {
		// sint32: zigzag-encode before writing the varint.
		i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31))))
		i--
		dAtA[i] = 0x28 // tag: field 5, wire type 0 (varint)
	}
	if m.SampleCountFloat != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleCountFloat))))
		i--
		dAtA[i] = 0x21 // tag: field 4, wire type 1 (fixed64)
	}
	if len(m.Bucket) > 0 {
		for iNdEx := len(m.Bucket) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Bucket[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintMetrics(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1a // tag: field 3, wire type 2
		}
	}
	if m.SampleSum != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum))))
		i--
		dAtA[i] = 0x11 // tag: field 2, wire type 1 (fixed64)
	}
	if m.SampleCount != 0 {
		i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount))
		i--
		dAtA[i] = 0x8 // tag: field 1, wire type 0 (varint)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *Bucket) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *Bucket) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA — fields in
// descending field-number order — and returns the byte count written.
func (m *Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.CumulativeCountFloat != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CumulativeCountFloat))))
		i--
		dAtA[i] = 0x21 // tag: field 4, wire type 1 (fixed64)
	}
	if m.Exemplar != nil {
		{
			size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x1a // tag: field 3, wire type 2
	}
	if m.UpperBound != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.UpperBound))))
		i--
		dAtA[i] = 0x11 // tag: field 2, wire type 1 (fixed64)
	}
	if m.CumulativeCount != 0 {
		i = encodeVarintMetrics(dAtA, i, uint64(m.CumulativeCount))
		i--
		dAtA[i] = 0x8 // tag: field 1, wire type 0 (varint)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *BucketSpan) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA and returns
// the byte count written.
func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.Length != 0 {
		i = encodeVarintMetrics(dAtA, i, uint64(m.Length))
		i--
		dAtA[i] = 0x10 // tag: field 2, wire type 0 (varint)
	}
	if m.Offset != 0 {
		// sint32: zigzag-encode before writing the varint.
		i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
		i--
		dAtA[i] = 0x8 // tag: field 1, wire type 0 (varint)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *Exemplar) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA — fields in
// descending field-number order — and returns the byte count written.
func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.Timestamp != nil {
		{
			size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x1a // tag: field 3, wire type 2
	}
	if m.Value != 0 {
		i -= 8
		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
		i--
		dAtA[i] = 0x11 // tag: field 2, wire type 1 (fixed64)
	}
	if len(m.Label) > 0 {
		for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintMetrics(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // tag: field 1, wire type 2
		}
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *Metric) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA — fields in
// descending field-number order — and returns the byte count written.
func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.Histogram != nil {
		{
			size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x3a // tag: field 7, wire type 2
	}
	if m.TimestampMs != 0 {
		i = encodeVarintMetrics(dAtA, i, uint64(m.TimestampMs))
		i--
		dAtA[i] = 0x30 // tag: field 6, wire type 0 (varint)
	}
	if m.Untyped != nil {
		{
			size, err := m.Untyped.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x2a // tag: field 5, wire type 2
	}
	if m.Summary != nil {
		{
			size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x22 // tag: field 4, wire type 2
	}
	if m.Counter != nil {
		{
			size, err := m.Counter.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x1a // tag: field 3, wire type 2
	}
	if m.Gauge != nil {
		{
			size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintMetrics(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x12 // tag: field 2, wire type 2
	}
	if len(m.Label) > 0 {
		for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintMetrics(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // tag: field 1, wire type 2
		}
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated buffer sized by Size.
func (m *MetricFamily) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *MetricFamily) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes m backwards into the tail of dAtA — fields in
// descending field-number order — and returns the byte count written.
func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Unit) > 0 {
		i -= len(m.Unit)
		copy(dAtA[i:], m.Unit)
		i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit)))
		i--
		dAtA[i] = 0x2a // tag: field 5, wire type 2
	}
	if len(m.Metric) > 0 {
		for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintMetrics(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x22 // tag: field 4, wire type 2
		}
	}
	if m.Type != 0 {
		i = encodeVarintMetrics(dAtA, i, uint64(m.Type))
		i--
		dAtA[i] = 0x18 // tag: field 3, wire type 0 (varint)
	}
	if len(m.Help) > 0 {
		i -= len(m.Help)
		copy(dAtA[i:], m.Help)
		i = encodeVarintMetrics(dAtA, i, uint64(len(m.Help)))
		i--
		dAtA[i] = 0x12 // tag: field 2, wire type 2
	}
	if len(m.Name) > 0 {
		i -= len(m.Name)
		copy(dAtA[i:], m.Name)
		i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
		i--
		dAtA[i] = 0xa // tag: field 1, wire type 2
	}
	return len(dAtA) - i, nil
}
// encodeVarintMetrics writes v as a base-128 varint ending just before
// offset (the backwards-marshaling convention) and returns the new offset,
// i.e. the position of the varint's first byte.
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
	offset -= sovMetrics(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *LabelPair) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Name)
	if l > 0 {
		n += 1 + l + sovMetrics(uint64(l)) // tag + payload + length prefix
	}
	l = len(m.Value)
	if l > 0 {
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *Gauge) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Value != 0 {
		n += 9 // 1 tag byte + fixed64
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *Counter) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Value != 0 {
		n += 9 // 1 tag byte + fixed64
	}
	if m.Exemplar != nil {
		l = m.Exemplar.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.CreatedTimestamp != nil {
		l = m.CreatedTimestamp.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *Quantile) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Quantile != 0 {
		n += 9 // 1 tag byte + fixed64
	}
	if m.Value != 0 {
		n += 9
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *Summary) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.SampleCount != 0 {
		n += 1 + sovMetrics(uint64(m.SampleCount))
	}
	if m.SampleSum != 0 {
		n += 9 // 1 tag byte + fixed64
	}
	if len(m.Quantile) > 0 {
		for _, e := range m.Quantile {
			l = e.Size()
			n += 1 + l + sovMetrics(uint64(l))
		}
	}
	if m.CreatedTimestamp != nil {
		l = m.CreatedTimestamp.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *Untyped) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Value != 0 {
		n += 9 // 1 tag byte + fixed64
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
// The per-field arithmetic mirrors MarshalToSizedBuffer exactly: the two must
// agree or Marshal over/under-allocates.
func (m *Histogram) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.SampleCount != 0 {
		n += 1 + sovMetrics(uint64(m.SampleCount))
	}
	if m.SampleSum != 0 {
		n += 9 // 1 tag byte + fixed64
	}
	if len(m.Bucket) > 0 {
		for _, e := range m.Bucket {
			l = e.Size()
			n += 1 + l + sovMetrics(uint64(l))
		}
	}
	if m.SampleCountFloat != 0 {
		n += 9
	}
	if m.Schema != 0 {
		n += 1 + sozMetrics(uint64(m.Schema)) // sint32: zigzag size
	}
	if m.ZeroThreshold != 0 {
		n += 9
	}
	if m.ZeroCount != 0 {
		n += 1 + sovMetrics(uint64(m.ZeroCount))
	}
	if m.ZeroCountFloat != 0 {
		n += 9
	}
	if len(m.NegativeSpan) > 0 {
		for _, e := range m.NegativeSpan {
			l = e.Size()
			n += 1 + l + sovMetrics(uint64(l))
		}
	}
	if len(m.NegativeDelta) > 0 {
		// Packed sint64s: one tag + length prefix for the whole run.
		l = 0
		for _, e := range m.NegativeDelta {
			l += sozMetrics(uint64(e))
		}
		n += 1 + sovMetrics(uint64(l)) + l
	}
	if len(m.NegativeCount) > 0 {
		// Packed fixed64 doubles: 8 bytes each.
		n += 1 + sovMetrics(uint64(len(m.NegativeCount)*8)) + len(m.NegativeCount)*8
	}
	if len(m.PositiveSpan) > 0 {
		for _, e := range m.PositiveSpan {
			l = e.Size()
			n += 1 + l + sovMetrics(uint64(l))
		}
	}
	if len(m.PositiveDelta) > 0 {
		l = 0
		for _, e := range m.PositiveDelta {
			l += sozMetrics(uint64(e))
		}
		n += 1 + sovMetrics(uint64(l)) + l
	}
	if len(m.PositiveCount) > 0 {
		n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8
	}
	if m.CreatedTimestamp != nil {
		l = m.CreatedTimestamp.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if len(m.Exemplars) > 0 {
		for _, e := range m.Exemplars {
			l = e.Size()
			n += 2 + l + sovMetrics(uint64(l)) // field 16 takes a 2-byte tag
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *Bucket) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.CumulativeCount != 0 {
		n += 1 + sovMetrics(uint64(m.CumulativeCount))
	}
	if m.UpperBound != 0 {
		n += 9 // 1 tag byte + fixed64
	}
	if m.Exemplar != nil {
		l = m.Exemplar.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.CumulativeCountFloat != 0 {
		n += 9
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *BucketSpan) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Offset != 0 {
		n += 1 + sozMetrics(uint64(m.Offset)) // sint32: zigzag size
	}
	if m.Length != 0 {
		n += 1 + sovMetrics(uint64(m.Length))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *Exemplar) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Label) > 0 {
		for _, e := range m.Label {
			l = e.Size()
			n += 1 + l + sovMetrics(uint64(l))
		}
	}
	if m.Value != 0 {
		n += 9 // 1 tag byte + fixed64
	}
	if m.Timestamp != nil {
		l = m.Timestamp.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *Metric) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Label) > 0 {
		for _, e := range m.Label {
			l = e.Size()
			n += 1 + l + sovMetrics(uint64(l))
		}
	}
	if m.Gauge != nil {
		l = m.Gauge.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.Counter != nil {
		l = m.Counter.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.Summary != nil {
		l = m.Summary.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.Untyped != nil {
		l = m.Untyped.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.TimestampMs != 0 {
		n += 1 + sovMetrics(uint64(m.TimestampMs))
	}
	if m.Histogram != nil {
		l = m.Histogram.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded wire size of m in bytes (0 for a nil receiver).
func (m *MetricFamily) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Name)
	if l > 0 {
		n += 1 + l + sovMetrics(uint64(l)) // tag + payload + length prefix
	}
	l = len(m.Help)
	if l > 0 {
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.Type != 0 {
		n += 1 + sovMetrics(uint64(m.Type))
	}
	if len(m.Metric) > 0 {
		for _, e := range m.Metric {
			l = e.Size()
			n += 1 + l + sovMetrics(uint64(l))
		}
	}
	l = len(m.Unit)
	if l > 0 {
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// sovMetrics returns the number of bytes needed to encode x as a base-128
// varint. Oring in 1 makes x == 0 occupy one byte like any other value whose
// top bit group is the only one set.
func sovMetrics(x uint64) (n int) {
	significantBits := math_bits.Len64(x | 1)
	return (significantBits + 6) / 7
}

// sozMetrics returns the varint size of x after zigzag encoding, i.e. the
// size of x interpreted as a signed (sint32/sint64) value.
func sozMetrics(x uint64) (n int) {
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovMetrics(zigzag)
}
// Unmarshal parses the wire-format bytes in dAtA into m. Known fields are
// decoded; unknown fields are appended verbatim to XXX_unrecognized so they
// round-trip through a re-marshal.
func (m *LabelPair) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 is end-group; invalid outside a group.
			return fmt.Errorf("proto: LabelPair: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Name: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				// Overflowed int: reject rather than index negatively.
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Value: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal parses the wire-format bytes in dAtA into m. Known fields are
// decoded; unknown fields are appended verbatim to XXX_unrecognized.
func (m *Gauge) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			// Wire type 4 is end-group; invalid outside a group.
			return fmt.Errorf("proto: Gauge: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Value: fixed64-encoded double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.Value = float64(math.Float64frombits(v))
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
func (m *Counter) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Counter: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Value — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.Value = float64(math.Float64frombits(v))
		case 2:
			// Field 2: Exemplar — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Lazily allocate the submessage, then decode into it.
			if m.Exemplar == nil {
				m.Exemplar = &Exemplar{}
			}
			if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: CreatedTimestamp — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.CreatedTimestamp == nil {
				m.CreatedTimestamp = &types.Timestamp{}
			}
			if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, preserving the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
func (m *Quantile) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Quantile: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Quantile: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Quantile — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.Quantile = float64(math.Float64frombits(v))
		case 2:
			// Field 2: Value — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.Value = float64(math.Float64frombits(v))
		default:
			// Unknown field: skip it, preserving the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
func (m *Summary) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Summary: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: SampleCount — uint64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType)
			}
			m.SampleCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SampleCount |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: SampleSum — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.SampleSum = float64(math.Float64frombits(v))
		case 3:
			// Field 3: Quantile — repeated embedded message; append then decode in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Quantile = append(m.Quantile, Quantile{})
			if err := m.Quantile[len(m.Quantile)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: CreatedTimestamp — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.CreatedTimestamp == nil {
				m.CreatedTimestamp = &types.Timestamp{}
			}
			if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, preserving the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
func (m *Untyped) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Untyped: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Untyped: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Value — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.Value = float64(math.Float64frombits(v))
		default:
			// Unknown field: skip it, preserving the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Handles both classic histogram fields (SampleCount/SampleSum/Bucket)
// and native-histogram fields (Schema, spans, deltas, counts).
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
func (m *Histogram) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Histogram: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: SampleCount — uint64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType)
			}
			m.SampleCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.SampleCount |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: SampleSum — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.SampleSum = float64(math.Float64frombits(v))
		case 3:
			// Field 3: Bucket — repeated embedded message; append then decode in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Bucket = append(m.Bucket, Bucket{})
			if err := m.Bucket[len(m.Bucket)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: SampleCountFloat — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field SampleCountFloat", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.SampleCountFloat = float64(math.Float64frombits(v))
		case 5:
			// Field 5: Schema — sint32 (zigzag-encoded varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// Zigzag decode: maps 0,1,2,3,... back to 0,-1,1,-2,...
			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
			m.Schema = v
		case 6:
			// Field 6: ZeroThreshold — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.ZeroThreshold = float64(math.Float64frombits(v))
		case 7:
			// Field 7: ZeroCount — uint64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType)
			}
			m.ZeroCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ZeroCount |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 8:
			// Field 8: ZeroCountFloat — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.ZeroCountFloat = float64(math.Float64frombits(v))
		case 9:
			// Field 9: NegativeSpan — repeated embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpan", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.NegativeSpan = append(m.NegativeSpan, BucketSpan{})
			if err := m.NegativeSpan[len(m.NegativeSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 10:
			// Field 10: NegativeDelta — repeated sint64; accepts both unpacked
			// (wire type 0, one varint per element) and packed (wire type 2) forms.
			if wireType == 0 {
				var v uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowMetrics
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= uint64(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				// Zigzag decode to signed.
				v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
				m.NegativeDelta = append(m.NegativeDelta, int64(v))
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowMetrics
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= int(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthMetrics
				}
				postIndex := iNdEx + packedLen
				if postIndex < 0 {
					return ErrInvalidLengthMetrics
				}
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				// Pre-size the slice: each varint ends with a byte < 0x80,
				// so counting terminator bytes counts elements.
				var elementCount int
				var count int
				for _, integer := range dAtA[iNdEx:postIndex] {
					if integer < 128 {
						count++
					}
				}
				elementCount = count
				if elementCount != 0 && len(m.NegativeDelta) == 0 {
					m.NegativeDelta = make([]int64, 0, elementCount)
				}
				for iNdEx < postIndex {
					var v uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowMetrics
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
					m.NegativeDelta = append(m.NegativeDelta, int64(v))
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field NegativeDelta", wireType)
			}
		case 11:
			// Field 11: NegativeCount — repeated double; accepts both unpacked
			// (wire type 1, one fixed64 per element) and packed (wire type 2) forms.
			if wireType == 1 {
				var v uint64
				if (iNdEx + 8) > l {
					return io.ErrUnexpectedEOF
				}
				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
				iNdEx += 8
				v2 := float64(math.Float64frombits(v))
				m.NegativeCount = append(m.NegativeCount, v2)
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowMetrics
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= int(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthMetrics
				}
				postIndex := iNdEx + packedLen
				if postIndex < 0 {
					return ErrInvalidLengthMetrics
				}
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				// Fixed-width elements: count is simply bytes / 8.
				var elementCount int
				elementCount = packedLen / 8
				if elementCount != 0 && len(m.NegativeCount) == 0 {
					m.NegativeCount = make([]float64, 0, elementCount)
				}
				for iNdEx < postIndex {
					var v uint64
					if (iNdEx + 8) > l {
						return io.ErrUnexpectedEOF
					}
					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
					iNdEx += 8
					v2 := float64(math.Float64frombits(v))
					m.NegativeCount = append(m.NegativeCount, v2)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field NegativeCount", wireType)
			}
		case 12:
			// Field 12: PositiveSpan — repeated embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpan", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PositiveSpan = append(m.PositiveSpan, BucketSpan{})
			if err := m.PositiveSpan[len(m.PositiveSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 13:
			// Field 13: PositiveDelta — repeated sint64; mirrors field 10.
			if wireType == 0 {
				var v uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowMetrics
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= uint64(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
				m.PositiveDelta = append(m.PositiveDelta, int64(v))
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowMetrics
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= int(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthMetrics
				}
				postIndex := iNdEx + packedLen
				if postIndex < 0 {
					return ErrInvalidLengthMetrics
				}
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				var elementCount int
				var count int
				for _, integer := range dAtA[iNdEx:postIndex] {
					if integer < 128 {
						count++
					}
				}
				elementCount = count
				if elementCount != 0 && len(m.PositiveDelta) == 0 {
					m.PositiveDelta = make([]int64, 0, elementCount)
				}
				for iNdEx < postIndex {
					var v uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowMetrics
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
					m.PositiveDelta = append(m.PositiveDelta, int64(v))
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field PositiveDelta", wireType)
			}
		case 14:
			// Field 14: PositiveCount — repeated double; mirrors field 11.
			if wireType == 1 {
				var v uint64
				if (iNdEx + 8) > l {
					return io.ErrUnexpectedEOF
				}
				v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
				iNdEx += 8
				v2 := float64(math.Float64frombits(v))
				m.PositiveCount = append(m.PositiveCount, v2)
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowMetrics
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= int(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthMetrics
				}
				postIndex := iNdEx + packedLen
				if postIndex < 0 {
					return ErrInvalidLengthMetrics
				}
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				var elementCount int
				elementCount = packedLen / 8
				if elementCount != 0 && len(m.PositiveCount) == 0 {
					m.PositiveCount = make([]float64, 0, elementCount)
				}
				for iNdEx < postIndex {
					var v uint64
					if (iNdEx + 8) > l {
						return io.ErrUnexpectedEOF
					}
					v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
					iNdEx += 8
					v2 := float64(math.Float64frombits(v))
					m.PositiveCount = append(m.PositiveCount, v2)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field PositiveCount", wireType)
			}
		case 15:
			// Field 15: CreatedTimestamp — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.CreatedTimestamp == nil {
				m.CreatedTimestamp = &types.Timestamp{}
			}
			if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 16:
			// Field 16: Exemplars — repeated message pointers.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Exemplars = append(m.Exemplars, &Exemplar{})
			if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, preserving the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
func (m *Bucket) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Bucket: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Bucket: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: CumulativeCount — uint64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCount", wireType)
			}
			m.CumulativeCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.CumulativeCount |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: UpperBound — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field UpperBound", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.UpperBound = float64(math.Float64frombits(v))
		case 3:
			// Field 3: Exemplar — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Exemplar == nil {
				m.Exemplar = &Exemplar{}
			}
			if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: CumulativeCountFloat — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCountFloat", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.CumulativeCountFloat = float64(math.Float64frombits(v))
		default:
			// Unknown field: skip it, preserving the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
func (m *BucketSpan) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Offset — sint32 (zigzag-encoded varint).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			// Zigzag decode: maps 0,1,2,3,... back to 0,-1,1,-2,...
			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
			m.Offset = v
		case 2:
			// Field 2: Length — uint32 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType)
			}
			m.Length = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Length |= uint32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// Unknown field: skip it, preserving the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
func (m *Exemplar) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Label — repeated embedded message; append then decode in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Label = append(m.Label, LabelPair{})
			if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Value — fixed64 little-endian double.
			if wireType != 1 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var v uint64
			if (iNdEx + 8) > l {
				return io.ErrUnexpectedEOF
			}
			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
			iNdEx += 8
			m.Value = float64(math.Float64frombits(v))
		case 3:
			// Field 3: Timestamp — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Timestamp == nil {
				m.Timestamp = &types.Timestamp{}
			}
			if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, preserving the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// Exactly one of the Gauge/Counter/Summary/Untyped/Histogram submessages
// is expected per metric, but the decoder accepts whatever fields appear.
// Generated by protoc-gen-gogo; hand edits are lost on regeneration.
func (m *Metric) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Metric: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Label — repeated embedded message; append then decode in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Label = append(m.Label, LabelPair{})
			if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: Gauge — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Gauge == nil {
				m.Gauge = &Gauge{}
			}
			if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Counter — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Counter == nil {
				m.Counter = &Counter{}
			}
			if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Summary — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Summary == nil {
				m.Summary = &Summary{}
			}
			if err := m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: Untyped — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Untyped", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Untyped == nil {
				m.Untyped = &Untyped{}
			}
			if err := m.Untyped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			// Field 6: TimestampMs — int64 varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType)
			}
			m.TimestampMs = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.TimestampMs |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 7:
			// Field 7: Histogram — length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Histogram == nil {
				m.Histogram = &Histogram{}
			}
			if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it, preserving the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
//
// NOTE(review): this follows the gogo/protobuf code-generator pattern
// (dAtA/iNdEx naming, XXX_unrecognized capture), so this file appears to
// be generated from a .proto definition — prefer regenerating over
// hand-editing; confirm against the build setup.
func (m *MetricFamily) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the field key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MetricFamily: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Name (length-delimited string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				// int overflow of iNdEx + length.
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Help (length-delimited string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Help = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: Type (varint enum MetricType).
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			m.Type = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Type |= MetricType(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			// Field 4: Metric (repeated embedded message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Append a zero value, then unmarshal in place.
			m.Metric = append(m.Metric, Metric{})
			if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: Unit (length-delimited string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Unit = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes so they
			// round-trip through XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipMetrics returns the number of bytes occupied by the next field
// (key included) at the start of dAtA, so callers can skip unknown
// fields. depth tracks deprecated start/end-group nesting; the function
// only returns once depth is back to zero.
func skipMetrics(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		// Read the field key varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint payload: consume bytes until the continuation bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed 64-bit payload.
			iNdEx += 8
		case 2:
			// Length-delimited payload: read the length varint, then skip it.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthMetrics
			}
			iNdEx += length
		case 3:
			// Start group: nest deeper.
			depth++
		case 4:
			// End group: must match a prior start group.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupMetrics
			}
			depth--
		case 5:
			// Fixed 32-bit payload.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			// Overflowed while advancing.
			return 0, ErrInvalidLengthMetrics
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
// Sentinel errors returned by the generated unmarshal/skip helpers above.
var (
	ErrInvalidLengthMetrics        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowMetrics          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# $Id$
#
# Copyright 2009 Glencoe Software, Inc. All rights reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# Version comparison functionality
import re
import logging
# Regex copied from ome.api.IConfig.VERSION_REGEX
REGEX = re.compile("^.*?[-]?(\\d+[.]\\d+([.]\\d+)?)[-]?.*?$")
LOG = logging.getLogger("omero.version")


def needs_upgrade(client_version, server_version, verbose=False):
    """
    Tests whether the client version is behind the server version.

    Each version string is reduced to its numeric "x.y" or "x.y.z" core
    via REGEX (so e.g. "Beta-4.1.0-Dev" compares as "4.1.0") and the
    components are compared numerically, left to right.

    For example::

        import omero
        from omero_version import omero_version as client_version

        client = omero.client()
        session = client.createSession()
        config = session.getConfigService()
        server_version = config.getVersion()
        upgrade = needs_upgrade(client_version, server_version)
        if upgrade:
            # Inform client

    Alternatively, from the command-line::

        ./versions.py --quiet 4.1.0 4.2.0-DEV || echo upgrade

    Returns True when the client is older than the server; on any
    parse failure the safe default True ("upgrade needed") is returned.
    """
    try:
        client_cleaned = REGEX.match(client_version).group(1)
        client_split = client_cleaned.split(".")
        server_cleaned = REGEX.match(server_version).group(1)
        server_split = server_cleaned.split(".")
        # BUGFIX: compare the components as integers. Comparing the raw
        # string lists orders them lexicographically, so e.g. client
        # "4.2.0" vs server "4.10.0" compared '2' < '10' -> False and
        # wrongly reported "no upgrade needed".
        rv = ([int(part) for part in client_split] <
              [int(part) for part in server_split])
        if verbose:
            LOG.info("Client=%20s (%-5s) v. Server=%20s (%-5s) Upgrade? %s",
                     client_version, ".".join(client_split),
                     server_version, ".".join(server_split), rv)
        return rv
    except Exception:
        # Catch Exception (not a bare "except:") so SystemExit and
        # KeyboardInterrupt still propagate; warning() replaces the
        # deprecated warn() alias.
        LOG.warning("Bad versions: client=%s server=%s", client_version,
                    server_version, exc_info=1)
        return True
# Command-line driver (Python 2 syntax):
#   versions.py [--quiet] client_version server_version  -> exit 1 if upgrade
#   versions.py [--quiet] --test                         -> run the demo table
if __name__ == "__main__":
    import sys
    args = list(sys.argv[1:])
    if "--quiet" in args:
        # Quiet mode: suppress the per-comparison INFO lines.
        args.remove("--quiet")
        logging.basicConfig(level=logging.WARN)
    else:
        logging.basicConfig(level=logging.DEBUG)
    if "--test" in args:
        # Demo mode: log a table of representative comparisons.
        print "="*10, "Test", "="*72
        needs_upgrade("4.0", "4.1.1", True)
        needs_upgrade("4.1", "4.1.1", True)
        needs_upgrade("4.1.0", "4.1.1", True)
        needs_upgrade("4.1.0", "4.1.1-Dev", True)
        needs_upgrade("4.1.0-Dev", "4.1.1", True)
        needs_upgrade("4.1.1", "4.1.1", True)
        needs_upgrade("Beta-4.1", "4.1.1", True)
        needs_upgrade("Beta-4.1.0", "4.1.1", True)
        needs_upgrade("Beta-4.1.1", "4.1.1", True)
        needs_upgrade("4.1.1", "Beta-4.1.1", True)
        needs_upgrade("Beta-4.1.0", "Beta-4.1.1", True)
        needs_upgrade("4.1.1-Foo", "4.1.1", True)
        needs_upgrade("4.1.1-Foo", "4.1.1-Dev", True)
        needs_upgrade("4.1.1-Foo", "4.1.2-Dev", True)
        needs_upgrade("4.1.1-Foo", "4.2.0-Dev", True)
        needs_upgrade("4.1.1-Foo", "4.2", True)
        needs_upgrade("4.1.1-Foo", "5.0", True)
        needs_upgrade("v.4.1.1-Foo", "5.0", True)
        # Additions post git-describe
        needs_upgrade("v.4.1.1-Foo", "5.0", True)
        needs_upgrade("v4.1.1-Foo", "5.0", True)
        needs_upgrade("Beta-v4.1.1-Foo", "5.0", True)
        needs_upgrade("A1-4.1.1-Foo", "5.0", True)
        needs_upgrade("A1-v4.1.1-Foo", "5.0", True)
    else:
        try:
            # Exit status: 1 = upgrade needed, 0 = up to date.
            rv = int(needs_upgrade(args[0], args[1], True))
        except:
            # Missing/bad arguments: print usage and exit 2.
            rv = 2
            print """ %s [--quiet] client_version server_version
    or: %s [--quiet] --test """ % (sys.argv[0], sys.argv[0])
        sys.exit(rv)
from unittest import TestCase
from DownloaderForReddit.utils.importers import text_importer
class TestTextImporter(TestCase):
    """Unit tests for the text_importer helper functions."""
    def test_remove_forbidden_chars(self):
        # All whitespace (spaces and newlines) must be stripped.
        raw = ' this \n is a\nname-for-import '
        self.assertEqual('thisisaname-for-import',
                         text_importer.remove_forbidden_chars(raw))
    def test_split_names(self):
        result = text_importer.split_names(
            'name_one, name_two, name_three, name_four')
        self.assertEqual(['name_one', 'name_two', 'name_three', 'name_four'],
                         result)
    def test_split_names_with_extra_commas(self):
        # Leading/trailing commas must not produce empty entries.
        result = text_importer.split_names(
            ', name_one, name_two, name_three, name_four, ')
        self.assertEqual(['name_one', 'name_two', 'name_three', 'name_four'],
                         result)
    def test_filter_import_list(self):
        # Duplicates (case-insensitive) and empty strings are dropped,
        # first-seen order is preserved.
        raw_names = ['one', 'two', 'one', 'three', 'One', 'ONE', 'oNe',
                     'four', 'one', '', 'five', 'one', 'ONE', 'six']
        expected = ['one', 'two', 'three', 'four', 'five', 'six']
        self.assertEqual(expected, text_importer.filter_import_list(raw_names))
---
# Minimal playbook: pause for one second on testhost without gathering
# facts. Used to exercise module availability under plugin filtering.
- hosts: testhost
  gather_facts: False
  tasks:
    - pause:
        seconds: 1
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, ne_, expect_warnings
import sys
from sqlalchemy import event
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy import create_engine, MetaData, INT, VARCHAR, Sequence, \
select, Integer, String, func, text, exc
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy import testing
from sqlalchemy.testing import fixtures
users, metadata = None, None
class TransactionTest(fixtures.TestBase):
    """Engine-level transaction tests against a shared ``query_users``
    table: begin/commit/rollback, nesting, branched connections,
    SAVEPOINTs and two-phase commit."""
    __backend__ = True
    @classmethod
    def setup_class(cls):
        # Create the shared table once for the whole class.
        global users, metadata
        metadata = MetaData()
        users = Table('query_users', metadata,
                      Column('user_id', INT, primary_key=True),
                      Column('user_name', VARCHAR(20)),
                      test_needs_acid=True,
                      )
        users.create(testing.db)
    def teardown(self):
        # Wipe rows between tests; the table itself persists.
        testing.db.execute(users.delete()).close()
    @classmethod
    def teardown_class(cls):
        users.drop(testing.db)
    def test_commits(self):
        connection = testing.db.connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        transaction.commit()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=2, user_name='user2')
        connection.execute(users.insert(), user_id=3, user_name='user3')
        transaction.commit()
        transaction = connection.begin()
        result = connection.execute("select * from query_users")
        assert len(result.fetchall()) == 3
        transaction.commit()
        connection.close()
    def test_rollback(self):
        """test a basic rollback"""
        connection = testing.db.connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        connection.execute(users.insert(), user_id=2, user_name='user2')
        connection.execute(users.insert(), user_id=3, user_name='user3')
        transaction.rollback()
        result = connection.execute("select * from query_users")
        assert len(result.fetchall()) == 0
        connection.close()
    def test_raise(self):
        # A duplicate primary key (user_id=1 twice) forces a DB error;
        # the rollback must then discard all three inserts.
        connection = testing.db.connect()
        transaction = connection.begin()
        try:
            connection.execute(users.insert(), user_id=1, user_name='user1')
            connection.execute(users.insert(), user_id=2, user_name='user2')
            connection.execute(users.insert(), user_id=1, user_name='user3')
            transaction.commit()
            assert False
        except Exception as e:
            print("Exception: ", e)
            transaction.rollback()
        result = connection.execute("select * from query_users")
        assert len(result.fetchall()) == 0
        connection.close()
    def test_transaction_container(self):
        def go(conn, table, data):
            for d in data:
                conn.execute(table.insert(), d)
        testing.db.transaction(go, users, [dict(user_id=1,
                               user_name='user1')])
        eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1'
            )])
        # The second batch re-uses user_id=1, so the whole container
        # transaction must roll back, leaving only the first row.
        assert_raises(exc.DBAPIError, testing.db.transaction, go,
                      users, [{'user_id': 2, 'user_name': 'user2'},
                              {'user_id': 1, 'user_name': 'user3'}])
        eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1'
            )])
    def test_nested_rollback(self):
        connection = testing.db.connect()
        try:
            transaction = connection.begin()
            try:
                connection.execute(users.insert(), user_id=1,
                                   user_name='user1')
                connection.execute(users.insert(), user_id=2,
                                   user_name='user2')
                connection.execute(users.insert(), user_id=3,
                                   user_name='user3')
                trans2 = connection.begin()
                try:
                    connection.execute(users.insert(), user_id=4,
                                       user_name='user4')
                    connection.execute(users.insert(), user_id=5,
                                       user_name='user5')
                    raise Exception('uh oh')
                    # NOTE(review): unreachable -- the raise above always
                    # fires, so this commit never runs.
                    trans2.commit()
                except:
                    trans2.rollback()
                    raise
                transaction.rollback()
            except Exception as e:
                transaction.rollback()
                raise
        except Exception as e:
            try:
                # The original 'uh oh' must propagate, not a secondary
                # "transaction inactive" error from the rollbacks.
                assert str(e) == 'uh oh' # and not "This transaction is
                # inactive"
            finally:
                connection.close()
    def test_branch_nested_rollback(self):
        connection = testing.db.connect()
        try:
            connection.begin()
            branched = connection.connect()
            assert branched.in_transaction()
            branched.execute(users.insert(), user_id=1, user_name='user1')
            nested = branched.begin()
            branched.execute(users.insert(), user_id=2, user_name='user2')
            nested.rollback()
            # Rolling back the branch's transaction takes down the
            # parent connection's transaction too.
            assert not connection.in_transaction()
            eq_(connection.scalar("select count(*) from query_users"), 0)
        finally:
            connection.close()
    def test_branch_autorollback(self):
        connection = testing.db.connect()
        try:
            branched = connection.connect()
            branched.execute(users.insert(), user_id=1, user_name='user1')
            try:
                branched.execute(users.insert(), user_id=1, user_name='user1')
            except exc.DBAPIError:
                pass
        finally:
            connection.close()
    def test_branch_orig_rollback(self):
        connection = testing.db.connect()
        try:
            branched = connection.connect()
            branched.execute(users.insert(), user_id=1, user_name='user1')
            nested = branched.begin()
            assert branched.in_transaction()
            branched.execute(users.insert(), user_id=2, user_name='user2')
            nested.rollback()
            # Only the explicit transaction's insert is rolled back;
            # the earlier autocommitted row survives.
            eq_(connection.scalar("select count(*) from query_users"), 1)
        finally:
            connection.close()
    def test_branch_autocommit(self):
        connection = testing.db.connect()
        try:
            branched = connection.connect()
            branched.execute(users.insert(), user_id=1, user_name='user1')
        finally:
            connection.close()
        eq_(testing.db.scalar("select count(*) from query_users"), 1)
    @testing.requires.savepoints
    def test_branch_savepoint_rollback(self):
        connection = testing.db.connect()
        try:
            trans = connection.begin()
            branched = connection.connect()
            assert branched.in_transaction()
            branched.execute(users.insert(), user_id=1, user_name='user1')
            nested = branched.begin_nested()
            branched.execute(users.insert(), user_id=2, user_name='user2')
            nested.rollback()
            # SAVEPOINT rollback leaves the outer transaction active.
            assert connection.in_transaction()
            trans.commit()
            eq_(connection.scalar("select count(*) from query_users"), 1)
        finally:
            connection.close()
    @testing.requires.two_phase_transactions
    def test_branch_twophase_rollback(self):
        connection = testing.db.connect()
        try:
            branched = connection.connect()
            assert not branched.in_transaction()
            branched.execute(users.insert(), user_id=1, user_name='user1')
            nested = branched.begin_twophase()
            branched.execute(users.insert(), user_id=2, user_name='user2')
            nested.rollback()
            assert not connection.in_transaction()
            eq_(connection.scalar("select count(*) from query_users"), 1)
        finally:
            connection.close()
    def test_retains_through_options(self):
        # execution_options() returns the same connection; its work is
        # part of the same transaction and rolls back with it.
        connection = testing.db.connect()
        try:
            transaction = connection.begin()
            connection.execute(users.insert(), user_id=1, user_name='user1')
            conn2 = connection.execution_options(dummy=True)
            conn2.execute(users.insert(), user_id=2, user_name='user2')
            transaction.rollback()
            eq_(connection.scalar("select count(*) from query_users"), 0)
        finally:
            connection.close()
    def test_nesting(self):
        # Committing an inner (sub)transaction does not survive a
        # rollback of the enclosing transaction.
        connection = testing.db.connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        connection.execute(users.insert(), user_id=2, user_name='user2')
        connection.execute(users.insert(), user_id=3, user_name='user3')
        trans2 = connection.begin()
        connection.execute(users.insert(), user_id=4, user_name='user4')
        connection.execute(users.insert(), user_id=5, user_name='user5')
        trans2.commit()
        transaction.rollback()
        self.assert_(connection.scalar('select count(*) from '
                     'query_users') == 0)
        result = connection.execute('select * from query_users')
        assert len(result.fetchall()) == 0
        connection.close()
    def test_with_interface(self):
        # Exercise Transaction.__exit__ directly: with exc_info it
        # rolls back, with (None, None, None) it commits.
        connection = testing.db.connect()
        trans = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        connection.execute(users.insert(), user_id=2, user_name='user2')
        try:
            connection.execute(users.insert(), user_id=2, user_name='user2.5')
        except Exception as e:
            trans.__exit__(*sys.exc_info())
        assert not trans.is_active
        self.assert_(connection.scalar('select count(*) from '
                     'query_users') == 0)
        trans = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        trans.__exit__(None, None, None)
        assert not trans.is_active
        self.assert_(connection.scalar('select count(*) from '
                     'query_users') == 1)
        connection.close()
    def test_close(self):
        # close() on the inner transaction is a no-op for the outer;
        # the outer commit then persists everything.
        connection = testing.db.connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        connection.execute(users.insert(), user_id=2, user_name='user2')
        connection.execute(users.insert(), user_id=3, user_name='user3')
        trans2 = connection.begin()
        connection.execute(users.insert(), user_id=4, user_name='user4')
        connection.execute(users.insert(), user_id=5, user_name='user5')
        assert connection.in_transaction()
        trans2.close()
        assert connection.in_transaction()
        transaction.commit()
        assert not connection.in_transaction()
        self.assert_(connection.scalar('select count(*) from '
                     'query_users') == 5)
        result = connection.execute('select * from query_users')
        assert len(result.fetchall()) == 5
        connection.close()
    def test_close2(self):
        # close() on the outermost transaction discards all work.
        connection = testing.db.connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        connection.execute(users.insert(), user_id=2, user_name='user2')
        connection.execute(users.insert(), user_id=3, user_name='user3')
        trans2 = connection.begin()
        connection.execute(users.insert(), user_id=4, user_name='user4')
        connection.execute(users.insert(), user_id=5, user_name='user5')
        assert connection.in_transaction()
        trans2.close()
        assert connection.in_transaction()
        transaction.close()
        assert not connection.in_transaction()
        self.assert_(connection.scalar('select count(*) from '
                     'query_users') == 0)
        result = connection.execute('select * from query_users')
        assert len(result.fetchall()) == 0
        connection.close()
    @testing.requires.savepoints
    def test_nested_subtransaction_rollback(self):
        connection = testing.db.connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        trans2 = connection.begin_nested()
        connection.execute(users.insert(), user_id=2, user_name='user2')
        trans2.rollback()
        connection.execute(users.insert(), user_id=3, user_name='user3')
        transaction.commit()
        eq_(connection.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, ), (3, )])
        connection.close()
    @testing.requires.savepoints
    @testing.crashes('oracle+zxjdbc',
                     'Errors out and causes subsequent tests to '
                     'deadlock')
    def test_nested_subtransaction_commit(self):
        connection = testing.db.connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        trans2 = connection.begin_nested()
        connection.execute(users.insert(), user_id=2, user_name='user2')
        trans2.commit()
        connection.execute(users.insert(), user_id=3, user_name='user3')
        transaction.commit()
        eq_(connection.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, ), (2, ), (3, )])
        connection.close()
    @testing.requires.savepoints
    def test_rollback_to_subtransaction(self):
        connection = testing.db.connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        trans2 = connection.begin_nested()
        connection.execute(users.insert(), user_id=2, user_name='user2')
        trans3 = connection.begin()
        connection.execute(users.insert(), user_id=3, user_name='user3')
        # Rolling back the plain subtransaction rolls back to the
        # SAVEPOINT, discarding rows 2 and 3.
        trans3.rollback()
        connection.execute(users.insert(), user_id=4, user_name='user4')
        transaction.commit()
        eq_(connection.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, ), (4, )])
        connection.close()
    @testing.requires.two_phase_transactions
    def test_two_phase_transaction(self):
        connection = testing.db.connect()
        transaction = connection.begin_twophase()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        transaction.prepare()
        transaction.commit()
        transaction = connection.begin_twophase()
        connection.execute(users.insert(), user_id=2, user_name='user2')
        transaction.commit()
        transaction.close()
        transaction = connection.begin_twophase()
        connection.execute(users.insert(), user_id=3, user_name='user3')
        transaction.rollback()
        transaction = connection.begin_twophase()
        connection.execute(users.insert(), user_id=4, user_name='user4')
        transaction.prepare()
        transaction.rollback()
        transaction.close()
        eq_(connection.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, ), (2, )])
        connection.close()
        # PG emergency shutdown:
        # select * from pg_prepared_xacts
        # ROLLBACK PREPARED '<xid>'
    @testing.crashes('mysql', 'Crashing on 5.5, not worth it')
    @testing.requires.skip_mysql_on_windows
    @testing.requires.two_phase_transactions
    @testing.requires.savepoints
    def test_mixed_two_phase_transaction(self):
        connection = testing.db.connect()
        transaction = connection.begin_twophase()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        transaction2 = connection.begin()
        connection.execute(users.insert(), user_id=2, user_name='user2')
        transaction3 = connection.begin_nested()
        connection.execute(users.insert(), user_id=3, user_name='user3')
        transaction4 = connection.begin()
        connection.execute(users.insert(), user_id=4, user_name='user4')
        transaction4.commit()
        # Rolling back the SAVEPOINT discards rows 3 and 4.
        transaction3.rollback()
        connection.execute(users.insert(), user_id=5, user_name='user5')
        transaction2.commit()
        transaction.prepare()
        transaction.commit()
        eq_(connection.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, ), (2, ), (5, )])
        connection.close()
    @testing.requires.two_phase_transactions
    @testing.crashes('mysql+oursql',
                     'Times out in full test runs only, causing '
                     'subsequent tests to fail')
    @testing.crashes('mysql+zxjdbc',
                     'Deadlocks, causing subsequent tests to fail')
    @testing.fails_on('mysql', 'FIXME: unknown')
    def test_two_phase_recover(self):
        # MySQL recovery doesn't currently seem to work correctly
        # Prepared transactions disappear when connections are closed
        # and even when they aren't it doesn't seem possible to use the
        # recovery id.
        connection = testing.db.connect()
        transaction = connection.begin_twophase()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        transaction.prepare()
        # Invalidate so the prepared transaction must be recovered
        # from a fresh connection.
        connection.invalidate()
        connection2 = testing.db.connect()
        eq_(
            connection2.execution_options(autocommit=True).
            execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(), [])
        recoverables = connection2.recover_twophase()
        assert transaction.xid in recoverables
        connection2.commit_prepared(transaction.xid, recover=True)
        eq_(connection2.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, )])
        connection2.close()
    @testing.requires.two_phase_transactions
    def test_multiple_two_phase(self):
        connection = testing.db.connect()
        xa = conn.begin_twophase() if False else None  # placeholder removed
    # (see note)
class ResetAgentTest(fixtures.TestBase):
    """Tests that Connection tracks its outermost transaction as the
    pooled connection's ``_reset_agent``, and clears it when that
    transaction ends."""
    __backend__ = True
    def test_begin_close(self):
        with testing.db.connect() as connection:
            trans = connection.begin()
            assert connection.connection._reset_agent is trans
        # Closing the connection (with-exit) deactivates the transaction.
        assert not trans.is_active
    def test_begin_rollback(self):
        with testing.db.connect() as connection:
            trans = connection.begin()
            assert connection.connection._reset_agent is trans
            trans.rollback()
            assert connection.connection._reset_agent is None
    def test_begin_commit(self):
        with testing.db.connect() as connection:
            trans = connection.begin()
            assert connection.connection._reset_agent is trans
            trans.commit()
            assert connection.connection._reset_agent is None
    @testing.requires.savepoints
    def test_begin_nested_close(self):
        with testing.db.connect() as connection:
            trans = connection.begin_nested()
            # A lone SAVEPOINT transaction is itself the reset agent.
            assert connection.connection._reset_agent is trans
        assert not trans.is_active
    @testing.requires.savepoints
    def test_begin_begin_nested_close(self):
        with testing.db.connect() as connection:
            trans = connection.begin()
            trans2 = connection.begin_nested()
            # The outermost transaction stays the reset agent.
            assert connection.connection._reset_agent is trans
        assert trans2.is_active  # was never closed
        assert not trans.is_active
    @testing.requires.savepoints
    def test_begin_begin_nested_rollback_commit(self):
        with testing.db.connect() as connection:
            trans = connection.begin()
            trans2 = connection.begin_nested()
            assert connection.connection._reset_agent is trans
            trans2.rollback()
            assert connection.connection._reset_agent is trans
            trans.commit()
            assert connection.connection._reset_agent is None
    @testing.requires.savepoints
    def test_begin_begin_nested_rollback_rollback(self):
        with testing.db.connect() as connection:
            trans = connection.begin()
            trans2 = connection.begin_nested()
            assert connection.connection._reset_agent is trans
            trans2.rollback()
            assert connection.connection._reset_agent is trans
            trans.rollback()
            assert connection.connection._reset_agent is None
    def test_begin_begin_rollback_rollback(self):
        with testing.db.connect() as connection:
            trans = connection.begin()
            trans2 = connection.begin()
            assert connection.connection._reset_agent is trans
            # Rolling back the subtransaction cancels the whole stack.
            trans2.rollback()
            assert connection.connection._reset_agent is None
            trans.rollback()
            assert connection.connection._reset_agent is None
    def test_begin_begin_commit_commit(self):
        with testing.db.connect() as connection:
            trans = connection.begin()
            trans2 = connection.begin()
            assert connection.connection._reset_agent is trans
            trans2.commit()
            assert connection.connection._reset_agent is trans
            trans.commit()
            assert connection.connection._reset_agent is None
    @testing.requires.two_phase_transactions
    def test_reset_via_agent_begin_twophase(self):
        with testing.db.connect() as connection:
            trans = connection.begin_twophase()
            assert connection.connection._reset_agent is trans
    @testing.requires.two_phase_transactions
    def test_reset_via_agent_begin_twophase_commit(self):
        with testing.db.connect() as connection:
            trans = connection.begin_twophase()
            assert connection.connection._reset_agent is trans
            trans.commit()
            assert connection.connection._reset_agent is None
    @testing.requires.two_phase_transactions
    def test_reset_via_agent_begin_twophase_rollback(self):
        with testing.db.connect() as connection:
            trans = connection.begin_twophase()
            assert connection.connection._reset_agent is trans
            trans.rollback()
            assert connection.connection._reset_agent is None
class AutoRollbackTest(fixtures.TestBase):
    """Tests that the connection pool rolls back on connection return,
    releasing any locks the returned connection still held."""
    __backend__ = True
    @classmethod
    def setup_class(cls):
        global metadata
        metadata = MetaData()
    @classmethod
    def teardown_class(cls):
        metadata.drop_all(testing.db)
    def test_rollback_deadlock(self):
        """test that returning connections to the pool clears any object
        locks."""
        conn1 = testing.db.connect()
        conn2 = testing.db.connect()
        users = Table('deadlock_users', metadata, Column('user_id',
                      INT, primary_key=True), Column('user_name',
                      VARCHAR(20)), test_needs_acid=True)
        users.create(conn1)
        conn1.execute('select * from deadlock_users')
        conn1.close()
        # without auto-rollback in the connection pool's return() logic,
        # this deadlocks in PostgreSQL, because conn1 is returned to the
        # pool but still has a lock on "deadlock_users". comment out the
        # rollback in pool/ConnectionFairy._close() to see !
        users.drop(conn2)
        conn2.close()
class ExplicitAutoCommitTest(fixtures.TestBase):

    """test the 'autocommit' flag on select() and text() objects.

    Requires PostgreSQL so that we may define a custom function which
    modifies the database. """

    __only_on__ = 'postgresql'

    @classmethod
    def setup_class(cls):
        global metadata, foo
        metadata = MetaData(testing.db)
        foo = Table('foo', metadata, Column('id', Integer,
                    primary_key=True), Column('data', String(100)))
        metadata.create_all()
        # insert_foo() gives a plain SELECT a visible side effect, so a
        # second connection can observe whether the first committed.
        testing.db.execute("create function insert_foo(varchar) "
                           "returns integer as 'insert into foo(data) "
                           "values ($1);select 1;' language sql")

    def teardown(self):
        # wipe rows between tests; table itself persists for the class
        foo.delete().execute().close()

    @classmethod
    def teardown_class(cls):
        testing.db.execute('drop function insert_foo(varchar)')
        metadata.drop_all()

    def test_control(self):

        # test that not using autocommit does not commit

        conn1 = testing.db.connect()
        conn2 = testing.db.connect()
        conn1.execute(select([func.insert_foo('data1')]))
        assert conn2.execute(select([foo.c.data])).fetchall() == []
        conn1.execute(text("select insert_foo('moredata')"))
        assert conn2.execute(select([foo.c.data])).fetchall() == []
        # only an explicit transaction commit makes the rows visible
        trans = conn1.begin()
        trans.commit()
        assert conn2.execute(select([foo.c.data])).fetchall() \
            == [('data1', ), ('moredata', )]
        conn1.close()
        conn2.close()

    def test_explicit_compiled(self):
        conn1 = testing.db.connect()
        conn2 = testing.db.connect()
        conn1.execute(select([func.insert_foo('data1'
                      )]).execution_options(autocommit=True))
        assert conn2.execute(select([foo.c.data])).fetchall() \
            == [('data1', )]
        conn1.close()
        conn2.close()

    def test_explicit_connection(self):
        conn1 = testing.db.connect()
        conn2 = testing.db.connect()
        conn1.execution_options(autocommit=True).\
            execute(select([func.insert_foo('data1'
                    )]))
        eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
            )])

        # connection supersedes statement

        conn1.execution_options(autocommit=False).\
            execute(select([func.insert_foo('data2'
                    )]).execution_options(autocommit=True))
        eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
            )])

        # ditto

        conn1.execution_options(autocommit=True).\
            execute(select([func.insert_foo('data3'
                    )]).execution_options(autocommit=False))
        eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
            ), ('data2', ), ('data3', )])
        conn1.close()
        conn2.close()

    def test_explicit_text(self):
        conn1 = testing.db.connect()
        conn2 = testing.db.connect()
        conn1.execute(text("select insert_foo('moredata')"
                      ).execution_options(autocommit=True))
        assert conn2.execute(select([foo.c.data])).fetchall() \
            == [('moredata', )]
        conn1.close()
        conn2.close()

    @testing.uses_deprecated(r'autocommit on select\(\) is deprecated',
                             r'``autocommit\(\)`` is deprecated')
    def test_explicit_compiled_deprecated(self):
        # legacy spellings: select(..., autocommit=True) and .autocommit()
        conn1 = testing.db.connect()
        conn2 = testing.db.connect()
        conn1.execute(select([func.insert_foo('data1')],
                      autocommit=True))
        assert conn2.execute(select([foo.c.data])).fetchall() \
            == [('data1', )]
        conn1.execute(select([func.insert_foo('data2')]).autocommit())
        assert conn2.execute(select([foo.c.data])).fetchall() \
            == [('data1', ), ('data2', )]
        conn1.close()
        conn2.close()

    @testing.uses_deprecated(r'autocommit on text\(\) is deprecated')
    def test_explicit_text_deprecated(self):
        conn1 = testing.db.connect()
        conn2 = testing.db.connect()
        conn1.execute(text("select insert_foo('moredata')",
                      autocommit=True))
        assert conn2.execute(select([foo.c.data])).fetchall() \
            == [('moredata', )]
        conn1.close()
        conn2.close()

    def test_implicit_text(self):
        # a plain INSERT via text() autocommits implicitly
        conn1 = testing.db.connect()
        conn2 = testing.db.connect()
        conn1.execute(text("insert into foo (data) values "
                      "('implicitdata')"))
        assert conn2.execute(select([foo.c.data])).fetchall() \
            == [('implicitdata', )]
        conn1.close()
        conn2.close()
# Module-level handle for the 'threadlocal' engine; populated by
# TLTransactionTest.setup_class() below, None until that runs.
tlengine = None
class TLTransactionTest(fixtures.TestBase):

    """Tests for transactional behavior of the 'threadlocal' engine
    strategy, where the engine itself carries begin()/commit()/rollback()
    and contextual_connect() shares one connection per thread."""

    __requires__ = ('ad_hoc_engines', )
    __backend__ = True

    @classmethod
    def setup_class(cls):
        global users, metadata, tlengine
        tlengine = testing_engine(options=dict(strategy='threadlocal'))
        metadata = MetaData()
        users = Table('query_users', metadata, Column('user_id', INT,
                      Sequence('query_users_id_seq', optional=True),
                      primary_key=True), Column('user_name',
                      VARCHAR(20)), test_needs_acid=True)
        metadata.create_all(tlengine)

    def teardown(self):
        tlengine.execute(users.delete()).close()

    @classmethod
    def teardown_class(cls):
        tlengine.close()
        metadata.drop_all(tlengine)
        tlengine.dispose()

    def setup(self):

        # ensure tests start with engine closed

        tlengine.close()

    @testing.crashes('oracle', 'TNS error of unknown origin occurs on the buildbot.')
    def test_rollback_no_trans(self):
        tlengine = testing_engine(options=dict(strategy="threadlocal"))

        # shouldn't fail
        tlengine.rollback()

        tlengine.begin()
        tlengine.rollback()

        # shouldn't fail
        tlengine.rollback()

    def test_commit_no_trans(self):
        tlengine = testing_engine(options=dict(strategy="threadlocal"))

        # shouldn't fail
        tlengine.commit()

        tlengine.begin()
        tlengine.rollback()

        # shouldn't fail
        tlengine.commit()

    def test_prepare_no_trans(self):
        tlengine = testing_engine(options=dict(strategy="threadlocal"))

        # shouldn't fail
        tlengine.prepare()

        tlengine.begin()
        tlengine.rollback()

        # shouldn't fail
        tlengine.prepare()

    def test_connection_close(self):
        """test that when connections are closed for real, transactions
        are rolled back and disposed."""

        c = tlengine.contextual_connect()
        c.begin()
        assert c.in_transaction()
        c.close()
        assert not c.in_transaction()

    def test_transaction_close(self):
        # closing the inner (nested) transaction leaves the outer one
        # open; closing the outer transaction rolls everything back
        c = tlengine.contextual_connect()
        t = c.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        t2 = c.begin()
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.execute(users.insert(), user_id=4, user_name='user4')
        t2.close()
        result = c.execute('select * from query_users')
        assert len(result.fetchall()) == 4
        t.close()
        external_connection = tlengine.connect()
        result = external_connection.execute('select * from query_users'
                                             )
        try:
            assert len(result.fetchall()) == 0
        finally:
            c.close()
            external_connection.close()

    def test_rollback(self):
        """test a basic rollback"""

        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.rollback()
        external_connection = tlengine.connect()
        result = external_connection.execute('select * from query_users'
                                             )
        try:
            assert len(result.fetchall()) == 0
        finally:
            external_connection.close()

    def test_commit(self):
        """test a basic commit"""

        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.commit()
        external_connection = tlengine.connect()
        result = external_connection.execute('select * from query_users'
                                             )
        try:
            assert len(result.fetchall()) == 3
        finally:
            external_connection.close()

    def test_with_interface(self):
        # exercise the context-manager protocol on the transaction
        # object directly: __exit__ with an exception rolls back,
        # __exit__ with (None, None, None) commits
        trans = tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        trans.commit()

        trans = tlengine.begin()
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        trans.__exit__(Exception, "fake", None)
        trans = tlengine.begin()
        tlengine.execute(users.insert(), user_id=4, user_name='user4')
        trans.__exit__(None, None, None)
        eq_(
            tlengine.execute(users.select().order_by(users.c.user_id)).fetchall(),
            [
                (1, 'user1'),
                (2, 'user2'),
                (4, 'user4'),
            ]
        )

    def test_commits(self):
        connection = tlengine.connect()
        assert connection.execute('select count(*) from query_users'
                                  ).scalar() == 0
        connection.close()
        connection = tlengine.contextual_connect()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        transaction.commit()
        transaction = connection.begin()
        connection.execute(users.insert(), user_id=2, user_name='user2')
        connection.execute(users.insert(), user_id=3, user_name='user3')
        transaction.commit()
        transaction = connection.begin()
        result = connection.execute('select * from query_users')
        l = result.fetchall()
        assert len(l) == 3, 'expected 3 got %d' % len(l)
        transaction.commit()
        connection.close()

    def test_rollback_off_conn(self):

        # test that a TLTransaction opened off a TLConnection allows
        # that TLConnection to be aware of the transactional context

        conn = tlengine.contextual_connect()
        trans = conn.begin()
        conn.execute(users.insert(), user_id=1, user_name='user1')
        conn.execute(users.insert(), user_id=2, user_name='user2')
        conn.execute(users.insert(), user_id=3, user_name='user3')
        trans.rollback()
        external_connection = tlengine.connect()
        result = external_connection.execute('select * from query_users'
                                             )
        try:
            assert len(result.fetchall()) == 0
        finally:
            conn.close()
            external_connection.close()

    def test_morerollback_off_conn(self):

        # test that an existing TLConnection automatically takes place
        # in a TLTransaction opened on a second TLConnection

        conn = tlengine.contextual_connect()
        conn2 = tlengine.contextual_connect()
        trans = conn2.begin()
        conn.execute(users.insert(), user_id=1, user_name='user1')
        conn.execute(users.insert(), user_id=2, user_name='user2')
        conn.execute(users.insert(), user_id=3, user_name='user3')
        trans.rollback()
        external_connection = tlengine.connect()
        result = external_connection.execute('select * from query_users'
                                             )
        try:
            assert len(result.fetchall()) == 0
        finally:
            conn.close()
            conn2.close()
            external_connection.close()

    def test_commit_off_connection(self):
        conn = tlengine.contextual_connect()
        trans = conn.begin()
        conn.execute(users.insert(), user_id=1, user_name='user1')
        conn.execute(users.insert(), user_id=2, user_name='user2')
        conn.execute(users.insert(), user_id=3, user_name='user3')
        trans.commit()
        external_connection = tlengine.connect()
        result = external_connection.execute('select * from query_users'
                                             )
        try:
            assert len(result.fetchall()) == 3
        finally:
            conn.close()
            external_connection.close()

    def test_nesting_rollback(self):
        """tests nesting of transactions, rollback at the end"""

        external_connection = tlengine.connect()
        self.assert_(external_connection.connection
                     is not tlengine.contextual_connect().connection)
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=4, user_name='user4')
        tlengine.execute(users.insert(), user_id=5, user_name='user5')
        tlengine.commit()
        # outermost rollback discards the inner commit as well
        tlengine.rollback()
        try:
            self.assert_(external_connection.scalar(
                'select count(*) from query_users'
            ) == 0)
        finally:
            external_connection.close()

    def test_nesting_commit(self):
        """tests nesting of transactions, commit at the end."""

        external_connection = tlengine.connect()
        self.assert_(external_connection.connection
                     is not tlengine.contextual_connect().connection)
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=4, user_name='user4')
        tlengine.execute(users.insert(), user_id=5, user_name='user5')
        tlengine.commit()
        tlengine.commit()
        try:
            self.assert_(external_connection.scalar(
                'select count(*) from query_users'
            ) == 5)
        finally:
            external_connection.close()

    def test_mixed_nesting(self):
        """tests nesting of transactions off the TLEngine directly
        inside of transactions off the connection from the TLEngine"""

        external_connection = tlengine.connect()
        self.assert_(external_connection.connection
                     is not tlengine.contextual_connect().connection)
        conn = tlengine.contextual_connect()
        trans = conn.begin()
        trans2 = conn.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=4, user_name='user4')
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=5, user_name='user5')
        tlengine.execute(users.insert(), user_id=6, user_name='user6')
        tlengine.execute(users.insert(), user_id=7, user_name='user7')
        tlengine.commit()
        tlengine.execute(users.insert(), user_id=8, user_name='user8')
        tlengine.commit()
        trans2.commit()
        # rollback of the outermost transaction wins
        trans.rollback()
        conn.close()
        try:
            self.assert_(external_connection.scalar(
                'select count(*) from query_users'
            ) == 0)
        finally:
            external_connection.close()

    def test_more_mixed_nesting(self):
        """tests nesting of transactions off the connection from the
        TLEngine inside of transactions off the TLEngine directly."""

        external_connection = tlengine.connect()
        self.assert_(external_connection.connection
                     is not tlengine.contextual_connect().connection)
        tlengine.begin()
        connection = tlengine.contextual_connect()
        connection.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.begin()
        connection.execute(users.insert(), user_id=2, user_name='user2')
        connection.execute(users.insert(), user_id=3, user_name='user3')
        trans = connection.begin()
        connection.execute(users.insert(), user_id=4, user_name='user4')
        connection.execute(users.insert(), user_id=5, user_name='user5')
        trans.commit()
        tlengine.commit()
        tlengine.rollback()
        connection.close()
        try:
            self.assert_(external_connection.scalar(
                'select count(*) from query_users'
            ) == 0)
        finally:
            external_connection.close()

    @testing.requires.savepoints
    def test_nested_subtransaction_rollback(self):
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.begin_nested()
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        # rolls back only to the savepoint; user1 survives
        tlengine.rollback()
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.commit()
        tlengine.close()
        eq_(tlengine.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, ), (3, )])
        tlengine.close()

    @testing.requires.savepoints
    @testing.crashes('oracle+zxjdbc',
                     'Errors out and causes subsequent tests to '
                     'deadlock')
    def test_nested_subtransaction_commit(self):
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.begin_nested()
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        tlengine.commit()
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.commit()
        tlengine.close()
        eq_(tlengine.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, ), (2, ), (3, )])
        tlengine.close()

    @testing.requires.savepoints
    def test_rollback_to_subtransaction(self):
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.begin_nested()
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        tlengine.begin()
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.rollback()
        tlengine.rollback()
        tlengine.execute(users.insert(), user_id=4, user_name='user4')
        tlengine.commit()
        tlengine.close()
        eq_(tlengine.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, ), (4, )])
        tlengine.close()

    def test_connections(self):
        """tests that contextual_connect is threadlocal"""

        c1 = tlengine.contextual_connect()
        c2 = tlengine.contextual_connect()
        assert c1.connection is c2.connection
        c2.close()
        assert not c1.closed
        assert not tlengine.closed

    @testing.requires.independent_cursors
    def test_result_closing(self):
        """tests that two results obtained from the threadlocal engine
        share the same underlying connection, and that the engine only
        reports closed once every result is closed."""

        r1 = tlengine.execute(select([1]))
        r2 = tlengine.execute(select([1]))
        row1 = r1.fetchone()
        row2 = r2.fetchone()
        r1.close()
        assert r2.connection is r1.connection
        assert not r2.connection.closed
        assert not tlengine.closed

        # close again, nothing happens since resultproxy calls close()
        # only once

        r1.close()
        assert r2.connection is r1.connection
        assert not r2.connection.closed
        assert not tlengine.closed
        r2.close()
        assert r2.connection.closed
        assert tlengine.closed

    @testing.crashes('oracle+cx_oracle', 'intermittent failures on the buildbot')
    def test_dispose(self):
        # the engine remains usable after dispose()
        eng = testing_engine(options=dict(strategy='threadlocal'))
        result = eng.execute(select([1]))
        eng.dispose()
        eng.execute(select([1]))

    @testing.requires.two_phase_transactions
    def test_two_phase_transaction(self):
        tlengine.begin_twophase()
        tlengine.execute(users.insert(), user_id=1, user_name='user1')
        tlengine.prepare()
        tlengine.commit()
        tlengine.begin_twophase()
        tlengine.execute(users.insert(), user_id=2, user_name='user2')
        tlengine.commit()
        tlengine.begin_twophase()
        tlengine.execute(users.insert(), user_id=3, user_name='user3')
        tlengine.rollback()
        tlengine.begin_twophase()
        tlengine.execute(users.insert(), user_id=4, user_name='user4')
        tlengine.prepare()
        tlengine.rollback()
        eq_(tlengine.execute(select([users.c.user_id]).
            order_by(users.c.user_id)).fetchall(),
            [(1, ), (2, )])
class IsolationLevelTest(fixtures.TestBase):

    """Tests for getting, setting and resetting the transaction
    isolation level at the engine, connection and execution-option
    levels."""

    __requires__ = ('isolation_level', 'ad_hoc_engines')
    __backend__ = True

    def _default_isolation_level(self):
        # per-backend expected default isolation level
        if testing.against('sqlite'):
            return 'SERIALIZABLE'
        elif testing.against('postgresql'):
            return 'READ COMMITTED'
        elif testing.against('mysql'):
            return "REPEATABLE READ"
        else:
            assert False, "default isolation level not known"

    def _non_default_isolation_level(self):
        # a level guaranteed to differ from the backend default above
        if testing.against('sqlite'):
            return 'READ UNCOMMITTED'
        elif testing.against('postgresql'):
            return 'SERIALIZABLE'
        elif testing.against('mysql'):
            return "SERIALIZABLE"
        else:
            assert False, "non default isolation level not known"

    def test_engine_param_stays(self):

        eng = testing_engine()
        isolation_level = eng.dialect.get_isolation_level(
            eng.connect().connection)
        level = self._non_default_isolation_level()

        ne_(isolation_level, level)

        eng = testing_engine(options=dict(isolation_level=level))
        eq_(
            eng.dialect.get_isolation_level(
                eng.connect().connection),
            level
        )

        # check that it stays
        conn = eng.connect()
        eq_(
            eng.dialect.get_isolation_level(conn.connection),
            level
        )
        conn.close()

        conn = eng.connect()
        eq_(
            eng.dialect.get_isolation_level(conn.connection),
            level
        )
        conn.close()

    def test_default_level(self):
        eng = testing_engine(options=dict())
        isolation_level = eng.dialect.get_isolation_level(
            eng.connect().connection)
        eq_(isolation_level, self._default_isolation_level())

    def test_reset_level(self):
        eng = testing_engine(options=dict())
        conn = eng.connect()
        eq_(
            eng.dialect.get_isolation_level(conn.connection),
            self._default_isolation_level()
        )

        eng.dialect.set_isolation_level(
            conn.connection, self._non_default_isolation_level()
        )
        eq_(
            eng.dialect.get_isolation_level(conn.connection),
            self._non_default_isolation_level()
        )

        # reset_isolation_level() restores the backend default
        eng.dialect.reset_isolation_level(conn.connection)
        eq_(
            eng.dialect.get_isolation_level(conn.connection),
            self._default_isolation_level()
        )

        conn.close()

    def test_reset_level_with_setting(self):
        # when the engine was created with an explicit isolation_level,
        # reset restores THAT level, not the backend default
        eng = testing_engine(
            options=dict(
                isolation_level=self._non_default_isolation_level()))
        conn = eng.connect()
        eq_(eng.dialect.get_isolation_level(conn.connection),
            self._non_default_isolation_level())
        eng.dialect.set_isolation_level(
            conn.connection,
            self._default_isolation_level())
        eq_(eng.dialect.get_isolation_level(conn.connection),
            self._default_isolation_level())
        eng.dialect.reset_isolation_level(conn.connection)
        eq_(eng.dialect.get_isolation_level(conn.connection),
            self._non_default_isolation_level())
        conn.close()

    def test_invalid_level(self):
        eng = testing_engine(options=dict(isolation_level='FOO'))
        assert_raises_message(
            exc.ArgumentError,
            "Invalid value '%s' for isolation_level. "
            "Valid isolation levels for %s are %s" %
            ("FOO",
             eng.dialect.name, ", ".join(eng.dialect._isolation_lookup)),
            eng.connect
        )

    def test_connection_invalidated(self):
        eng = testing_engine()
        conn = eng.connect()
        c2 = conn.execution_options(
            isolation_level=self._non_default_isolation_level())
        c2.invalidate()
        c2.connection

        # TODO: do we want to rebuild the previous isolation?
        # for now, this is current behavior so we will leave it.
        eq_(c2.get_isolation_level(), self._default_isolation_level())

    def test_per_connection(self):
        from sqlalchemy.pool import QueuePool
        eng = testing_engine(
            options=dict(
                poolclass=QueuePool,
                pool_size=2, max_overflow=0))

        c1 = eng.connect()
        c1 = c1.execution_options(
            isolation_level=self._non_default_isolation_level()
        )
        c2 = eng.connect()
        eq_(
            eng.dialect.get_isolation_level(c1.connection),
            self._non_default_isolation_level()
        )
        eq_(
            eng.dialect.get_isolation_level(c2.connection),
            self._default_isolation_level()
        )
        c1.close()
        c2.close()
        # after returning to the pool, re-checked-out connections are
        # back at the default level
        c3 = eng.connect()
        eq_(
            eng.dialect.get_isolation_level(c3.connection),
            self._default_isolation_level()
        )
        c4 = eng.connect()
        eq_(
            eng.dialect.get_isolation_level(c4.connection),
            self._default_isolation_level()
        )

        c3.close()
        c4.close()

    def test_warning_in_transaction(self):
        eng = testing_engine()
        c1 = eng.connect()
        with expect_warnings(
            "Connection is already established with a Transaction; "
            "setting isolation_level may implicitly rollback or commit "
            "the existing transaction, or have no effect until next "
            "transaction"
        ):
            with c1.begin():
                c1 = c1.execution_options(
                    isolation_level=self._non_default_isolation_level()
                )

                eq_(
                    eng.dialect.get_isolation_level(c1.connection),
                    self._non_default_isolation_level()
                )
        # stays outside of transaction
        eq_(
            eng.dialect.get_isolation_level(c1.connection),
            self._non_default_isolation_level()
        )

    def test_per_statement_bzzt(self):
        # isolation_level is not accepted on statement-level options
        assert_raises_message(
            exc.ArgumentError,
            r"'isolation_level' execution option may only be specified "
            r"on Connection.execution_options\(\), or "
            r"per-engine using the isolation_level "
            r"argument to create_engine\(\).",
            select([1]).execution_options,
            isolation_level=self._non_default_isolation_level()
        )

    def test_per_engine(self):
        # new in 0.9
        eng = create_engine(
            testing.db.url,
            execution_options={
                'isolation_level':
                self._non_default_isolation_level()}
        )
        conn = eng.connect()
        eq_(
            eng.dialect.get_isolation_level(conn.connection),
            self._non_default_isolation_level()
        )

    def test_isolation_level_accessors_connection_default(self):
        eng = create_engine(
            testing.db.url
        )
        with eng.connect() as conn:
            eq_(conn.default_isolation_level, self._default_isolation_level())
        with eng.connect() as conn:
            eq_(conn.get_isolation_level(), self._default_isolation_level())

    def test_isolation_level_accessors_connection_option_modified(self):
        eng = create_engine(
            testing.db.url
        )
        with eng.connect() as conn:
            c2 = conn.execution_options(
                isolation_level=self._non_default_isolation_level())
            # default_isolation_level reports the engine-wide default
            # even after the per-connection option was applied
            eq_(conn.default_isolation_level, self._default_isolation_level())
            eq_(conn.get_isolation_level(),
                self._non_default_isolation_level())
            eq_(c2.get_isolation_level(), self._non_default_isolation_level())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.TF_transform import TF_transformation
from flexbe_states.log_key_state import LogKeyState
from sara_flexbe_states.set_gripper_state import SetGripperState
from sara_flexbe_states.moveit_moveCartesian import MoveitMoveCartesian
from flexbe_states.log_state import LogState
from sara_flexbe_states.torque_reader import ReadTorque
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.gen_gripper_pose import GenGripperPose
from sara_flexbe_states.moveit_move import MoveitMove
from sara_flexbe_states.pose_gen_euler import GenPoseEuler
from sara_flexbe_states.sara_move_base import SaraMoveBase
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_states.run_trajectory import RunTrajectory
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sat May 12 2018
@author: Raphael Duchaine
'''
class Action_place_2SM(Behavior):
    '''
    Place an object at a position.
    '''
    # NOTE: this class is FlexBE-generated (see the file banner); edits
    # outside the [MANUAL_*] tags are lost when the behavior is
    # regenerated from the editor.

    def __init__(self):
        super(Action_place_2SM, self).__init__()
        self.name = 'Action_place_2'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

        # O 52 47
        # TF Transform |nFrame1 Frame2|n

        # O 36 230
        # Gen Grip pose|n|nA

        # O 33 308
        # MoveIt move|nmove = false|n|nPos

        # O 6 135
        # PreGrip Pose #pre grip

        # O 27 264
        # #approach_pos|nGen Grip pose|ndistance = 0.25

        # O 0 491
        # MoveIt move|nmove =True|n|nA

        # O 279 628
        # open grip

        # O 36 446
        # MoveIt move|nmove =True|n|nB

        # O 472 612
        # MoveIt move|n|nB

        # O 460 492
        # #preGrip|nMoveIt move

    def create(self):
        # Top-level state machine: takes a target position 'pos' and
        # reports 'finished' or 'failed'.
        # x:682 y:306, x:452 y:252
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['pos'])
        _state_machine.userdata.pos = {"x":0.8, "y":-0.2, "z":1}

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        # Concurrent torque readers on the right elbow; either reader
        # crossing its threshold ends the container.
        # x:30 y:458, x:130 y:458, x:230 y:458, x:330 y:458, x:430 y:458, x:530 y:458, x:630 y:458, x:59 y:533, x:830 y:458
        _sm_group_0 = ConcurrencyContainer(outcomes=['threshold', 'watchdog', 'fail'], conditions=[
                                           ('threshold', [('read', 'threshold')]),
                                           ('watchdog', [('read', 'watchdog')]),
                                           ('fail', [('read', 'fail')]),
                                           ('threshold', [('read yaw', 'threshold')]),
                                           ('fail', [('read yaw', 'fail')]),
                                           ('watchdog', [('read yaw', 'watchdog')])
                                           ])

        with _sm_group_0:
            # x:86 y:125
            OperatableStateMachine.add('read',
                                       ReadTorque(watchdog=1, Joint="right_elbow_pitch_joint", Threshold=0.7, min_time=0.4),
                                       transitions={'threshold': 'threshold', 'watchdog': 'watchdog', 'fail': 'fail'},
                                       autonomy={'threshold': Autonomy.Off, 'watchdog': Autonomy.Off, 'fail': Autonomy.Off},
                                       remapping={'torque': 'torque'})

            # x:252 y:135
            OperatableStateMachine.add('read yaw',
                                       ReadTorque(watchdog=1, Joint="right_elbow_pitch_joint", Threshold=0.5, min_time=0.4),
                                       transitions={'threshold': 'threshold', 'watchdog': 'watchdog', 'fail': 'fail'},
                                       autonomy={'threshold': Autonomy.Off, 'watchdog': Autonomy.Off, 'fail': Autonomy.Off},
                                       remapping={'torque': 'torque'})

        # Loop that keeps reading torque until a threshold is hit.
        # x:30 y:458
        _sm_read_torque_1 = OperatableStateMachine(outcomes=['done'])

        with _sm_read_torque_1:
            # x:142 y:61
            OperatableStateMachine.add('log',
                                       LogState(text="going down", severity=Logger.REPORT_HINT),
                                       transitions={'done': 'Group'},
                                       autonomy={'done': Autonomy.Off})

            # x:131 y:164
            OperatableStateMachine.add('Group',
                                       _sm_group_0,
                                       transitions={'threshold': 'done', 'watchdog': 'log', 'fail': 'done'},
                                       autonomy={'threshold': Autonomy.Inherit, 'watchdog': Autonomy.Inherit, 'fail': Autonomy.Inherit})

        # Cartesian move of the right arm toward the place pose.
        # x:30 y:458
        _sm_go_down_2 = OperatableStateMachine(outcomes=['done'], input_keys=['GripPose'])

        with _sm_go_down_2:
            # x:92 y:127
            OperatableStateMachine.add('place down',
                                       MoveitMoveCartesian(move=True, waitForExecution=True, group="RightArm", watchdog=15),
                                       transitions={'done': 'done', 'failed': 'done'},
                                       autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'targetPose': 'GripPose'})

        # Announce contact, then open the gripper to release the object.
        # x:30 y:324, x:130 y:324
        _sm_releasing_3 = OperatableStateMachine(outcomes=['object', 'no_object'])

        with _sm_releasing_3:
            # x:30 y:40
            OperatableStateMachine.add('say touchdown',
                                       SaraSay(sentence="Touchdown!", input_keys=[], emotion=1, block=False),
                                       transitions={'done': 'open gripper'},
                                       autonomy={'done': Autonomy.Off})

            # x:139 y:176
            OperatableStateMachine.add('open gripper',
                                       SetGripperState(width=0.14, effort=1),
                                       transitions={'object': 'object', 'no_object': 'no_object'},
                                       autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
                                       remapping={'object_size': 'object_size'})

        # Back the base away from the placement location.
        # x:30 y:324, x:130 y:324
        _sm_moveback_4 = OperatableStateMachine(outcomes=['arrived', 'failed'])

        with _sm_moveback_4:
            # x:30 y:40
            OperatableStateMachine.add('genpose',
                                       GenPoseEuler(x=-0.3, y=-0.3, z=0, roll=0, pitch=0, yaw=0),
                                       transitions={'done': 'move back'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'pose': 'backPose'})

            # x:40 y:163
            OperatableStateMachine.add('move back',
                                       SaraMoveBase(reference="base_link"),
                                       transitions={'arrived': 'arrived', 'failed': 'failed'},
                                       autonomy={'arrived': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'pose': 'backPose'})

        # Generate the place pose and an approach pose above it, and
        # check reachability with a planning-only MoveIt call.
        # x:536 y:72, x:231 y:292
        _sm_prepare_grip_5 = OperatableStateMachine(outcomes=['failed', 'done'], input_keys=['pos'], output_keys=['approach_pose', 'grip_pose'])

        with _sm_prepare_grip_5:
            # x:50 y:40
            OperatableStateMachine.add('Gen place_pos',
                                       GenGripperPose(l=0, z=-0.05, planar=True),
                                       transitions={'done': 'Gen approach_pos', 'fail': 'failed'},
                                       autonomy={'done': Autonomy.Off, 'fail': Autonomy.Off},
                                       remapping={'pose_in': 'pos', 'pose_out': 'grip_pose'})

            # x:30 y:176
            OperatableStateMachine.add('MoveIt_isReachable',
                                       MoveitMove(move=False, waitForExecution=True, group="RightArm", watchdog=15),
                                       transitions={'done': 'log app', 'failed': 'failed'},
                                       autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'target': 'grip_pose'})

            # x:37 y:108
            OperatableStateMachine.add('Gen approach_pos',
                                       GenGripperPose(l=0.0, z=0.20, planar=True),
                                       transitions={'done': 'log place pos', 'fail': 'failed'},
                                       autonomy={'done': Autonomy.Off, 'fail': Autonomy.Off},
                                       remapping={'pose_in': 'pos', 'pose_out': 'approach_pose'})

            # x:41 y:269
            OperatableStateMachine.add('log app',
                                       LogKeyState(text="{}", severity=Logger.REPORT_HINT),
                                       transitions={'done': 'done'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'data': 'approach_pose'})

            # x:360 y:180
            OperatableStateMachine.add('log place pos',
                                       LogKeyState(text="place pose is {}", severity=Logger.REPORT_HINT),
                                       transitions={'done': 'MoveIt_isReachable'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'data': 'grip_pose'})

        # Move down while monitoring torque; whichever finishes first
        # ends the container.
        # x:30 y:458, x:130 y:458, x:230 y:458
        _sm_get_down_6 = ConcurrencyContainer(outcomes=['done'], input_keys=['GripPose'], conditions=[
                                              ('done', [('Go down', 'done')]),
                                              ('done', [('read torque', 'done')])
                                              ])

        with _sm_get_down_6:
            # x:178 y:127
            OperatableStateMachine.add('Go down',
                                       _sm_go_down_2,
                                       transitions={'done': 'done'},
                                       autonomy={'done': Autonomy.Inherit},
                                       remapping={'GripPose': 'GripPose'})

            # x:405 y:150
            OperatableStateMachine.add('read torque',
                                       _sm_read_torque_1,
                                       transitions={'done': 'done'},
                                       autonomy={'done': Autonomy.Inherit})

        # Pre-processing: transform the target pose from map frame to
        # base_link frame and log it.
        # x:30 y:324, x:130 y:324
        _sm_pretraitement_7 = OperatableStateMachine(outcomes=['fail', 'done'], input_keys=['pos'], output_keys=['pos'])

        with _sm_pretraitement_7:
            # x:30 y:40
            OperatableStateMachine.add('TF_transformation',
                                       TF_transformation(in_ref="map", out_ref="base_link"),
                                       transitions={'done': 'LOG POSE', 'fail': 'fail'},
                                       autonomy={'done': Autonomy.Off, 'fail': Autonomy.Off},
                                       remapping={'in_pos': 'pos', 'out_pos': 'pos'})

            # x:33 y:107
            OperatableStateMachine.add('LOG POSE',
                                       LogKeyState(text="{}", severity=Logger.REPORT_HINT),
                                       transitions={'done': 'done'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'data': 'pos'})

        with _state_machine:
            # x:148 y:34
            OperatableStateMachine.add('Pretraitement',
                                       _sm_pretraitement_7,
                                       transitions={'fail': 'failed', 'done': 'Pregrip'},
                                       autonomy={'fail': Autonomy.Inherit, 'done': Autonomy.Inherit},
                                       remapping={'pos': 'pos'})

            # x:634 y:410
            OperatableStateMachine.add('close gripper',
                                       SetGripperState(width=0, effort=1),
                                       transitions={'object': 'finished', 'no_object': 'finished'},
                                       autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
                                       remapping={'object_size': 'object_size'})

            # x:141 y:522
            OperatableStateMachine.add('Get_down',
                                       _sm_get_down_6,
                                       transitions={'done': 'releasing'},
                                       autonomy={'done': Autonomy.Inherit},
                                       remapping={'GripPose': 'grip_pose'})

            # x:159 y:352
            OperatableStateMachine.add('look down',
                                       SaraSetHeadAngle(pitch=0.6, yaw=-0.3),
                                       transitions={'done': 'Move_approach'},
                                       autonomy={'done': Autonomy.Off})

            # x:156 y:238
            OperatableStateMachine.add('Prepare grip',
                                       _sm_prepare_grip_5,
                                       transitions={'failed': 'failed', 'done': 'look down'},
                                       autonomy={'failed': Autonomy.Inherit, 'done': Autonomy.Inherit},
                                       remapping={'pos': 'pos', 'approach_pose': 'approach_pose', 'grip_pose': 'grip_pose'})

            # x:139 y:444
            OperatableStateMachine.add('Move_approach',
                                       MoveitMove(move=True, waitForExecution=True, group="RightArm", watchdog=15),
                                       transitions={'done': 'Get_down', 'failed': 'failed'},
                                       autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'target': 'approach_pose'})

            # x:623 y:525
            OperatableStateMachine.add('Moveback',
                                       _sm_moveback_4,
                                       transitions={'arrived': 'close gripper', 'failed': 'failed'},
                                       autonomy={'arrived': Autonomy.Inherit, 'failed': Autonomy.Inherit})

            # x:298 y:520
            OperatableStateMachine.add('releasing',
                                       _sm_releasing_3,
                                       transitions={'object': 'Pregrip_2', 'no_object': 'Pregrip_2'},
                                       autonomy={'object': Autonomy.Inherit, 'no_object': Autonomy.Inherit})

            # x:159 y:139
            OperatableStateMachine.add('Pregrip',
                                       RunTrajectory(file="pre_grip_pose", duration=6),
                                       transitions={'done': 'Prepare grip'},
                                       autonomy={'done': Autonomy.Off})

            # x:440 y:537
            OperatableStateMachine.add('Pregrip_2',
                                       RunTrajectory(file="pre_grip_pose", duration=0),
                                       transitions={'done': 'Moveback'},
                                       autonomy={'done': Autonomy.Off})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
# coding=utf-8
# Author: Daniel Heimans
# URL: http://code.google.com/p/sickbeard
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import time
import socket
import math
import jsonrpclib
from datetime import datetime
import sickbeard
from sickbeard import logger
from sickbeard import classes
from sickbeard import tvcache
from sickbeard import scene_exceptions
from sickbeard.providers import generic
from sickbeard.helpers import sanitizeSceneName
from sickbeard.common import cpu_presets
from sickrage.helper.exceptions import AuthException, ex
class BTNProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "BTN")
self.supportsBacklog = True
self.supportsAbsoluteNumbering = True
self.api_key = None
self.ratio = None
self.cache = BTNCache(self)
self.urls = {'base_url': u'http://api.btnapps.net',
'website': u'http://broadcasthe.net/',}
self.url = self.urls['website']
def _checkAuth(self):
if not self.api_key:
logger.log(u"Invalid api key. Check your settings", logger.WARNING)
return True
def _checkAuthFromData(self, parsedJSON):
if parsedJSON is None:
return self._checkAuth()
if 'api-error' in parsedJSON:
logger.log(u"Incorrect authentication credentials: % s" % parsedJSON['api-error'], logger.DEBUG)
raise AuthException(
"Your authentication credentials for " + self.name + " are incorrect, check your config.")
return True
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
self._checkAuth()
results = []
params = {}
apikey = self.api_key
# age in seconds
if age:
params['age'] = "<=" + str(int(age))
if search_params:
params.update(search_params)
logger.log(u"Search string: %s" % search_params, logger.DEBUG)
parsedJSON = self._api_call(apikey, params)
if not parsedJSON:
logger.log(u"No data returned from provider", logger.DEBUG)
return results
if self._checkAuthFromData(parsedJSON):
if 'torrents' in parsedJSON:
found_torrents = parsedJSON['torrents']
else:
found_torrents = {}
# We got something, we know the API sends max 1000 results at a time.
# See if there are more than 1000 results for our query, if not we
# keep requesting until we've got everything.
# max 150 requests per hour so limit at that. Scan every 15 minutes. 60 / 15 = 4.
max_pages = 150
results_per_page = 1000
if 'results' in parsedJSON and int(parsedJSON['results']) >= results_per_page:
pages_needed = int(math.ceil(int(parsedJSON['results']) / results_per_page))
if pages_needed > max_pages:
pages_needed = max_pages
# +1 because range(1,4) = 1, 2, 3
for page in range(1, pages_needed + 1):
parsedJSON = self._api_call(apikey, params, results_per_page, page * results_per_page)
# Note that this these are individual requests and might time out individually. This would result in 'gaps'
# in the results. There is no way to fix this though.
if 'torrents' in parsedJSON:
found_torrents.update(parsedJSON['torrents'])
for torrentid, torrent_info in found_torrents.iteritems():
(title, url) = self._get_title_and_url(torrent_info)
if title and url:
logger.log(u"Found result: %s " % title, logger.DEBUG)
results.append(torrent_info)
# FIXME SORT RESULTS
return results
def _api_call(self, apikey, params={}, results_per_page=1000, offset=0):
server = jsonrpclib.Server(self.urls['base_url'])
parsedJSON = {}
try:
parsedJSON = server.getTorrents(apikey, params, int(results_per_page), int(offset))
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
except jsonrpclib.jsonrpc.ProtocolError, error:
if error.message == 'Call Limit Exceeded':
logger.log(u"You have exceeded the limit of 150 calls per hour, per API key which is unique to your user account", logger.WARNING)
else:
logger.log(u"JSON-RPC protocol error while accessing provicer. Error: %s " % repr(error), logger.ERROR)
parsedJSON = {'api-error': ex(error)}
return parsedJSON
except socket.timeout:
logger.log(u"Timeout while accessing provider", logger.WARNING)
except socket.error, error:
# Note that sometimes timeouts are thrown as socket errors
logger.log(u"Socket error while accessing provider. Error: %s " % error[1], logger.WARNING)
except Exception, error:
errorstring = str(error)
if errorstring.startswith('<') and errorstring.endswith('>'):
errorstring = errorstring[1:-1]
logger.log(u"Unknown error while accessing provider. Error: %s " % errorstring, logger.WARNING)
return parsedJSON
def _get_title_and_url(self, parsedJSON):
# The BTN API gives a lot of information in response,
# however SickRage is built mostly around Scene or
# release names, which is why we are using them here.
if 'ReleaseName' in parsedJSON and parsedJSON['ReleaseName']:
title = parsedJSON['ReleaseName']
else:
# If we don't have a release name we need to get creative
title = u''
if 'Series' in parsedJSON:
title += parsedJSON['Series']
if 'GroupName' in parsedJSON:
title += '.' + parsedJSON['GroupName'] if title else parsedJSON['GroupName']
if 'Resolution' in parsedJSON:
title += '.' + parsedJSON['Resolution'] if title else parsedJSON['Resolution']
if 'Source' in parsedJSON:
title += '.' + parsedJSON['Source'] if title else parsedJSON['Source']
if 'Codec' in parsedJSON:
title += '.' + parsedJSON['Codec'] if title else parsedJSON['Codec']
if title:
title = title.replace(' ', '.')
url = None
if 'DownloadURL' in parsedJSON:
url = parsedJSON['DownloadURL']
if url:
# unescaped / is valid in JSON, but it can be escaped
url = url.replace("\\/", "/")
return (title, url)
def _get_season_search_strings(self, ep_obj):
search_params = []
current_params = {'category': 'Season'}
# Search for entire seasons: no need to do special things for air by date or sports shows
if ep_obj.show.air_by_date or ep_obj.show.sports:
# Search for the year of the air by date show
current_params['name'] = str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.is_anime:
current_params['name'] = "%d" % ep_obj.scene_absolute_number
else:
current_params['name'] = 'Season ' + str(ep_obj.scene_season)
# search
if ep_obj.show.indexer == 1:
current_params['tvdb'] = ep_obj.show.indexerid
search_params.append(current_params)
else:
name_exceptions = list(
set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
for name in name_exceptions:
# Search by name if we don't have tvdb id
current_params['series'] = sanitizeSceneName(name)
search_params.append(current_params)
return search_params
def _get_episode_search_strings(self, ep_obj, add_string=''):
if not ep_obj:
return [{}]
to_return = []
search_params = {'category': 'Episode'}
# episode
if ep_obj.show.air_by_date or ep_obj.show.sports:
date_str = str(ep_obj.airdate)
# BTN uses dots in dates, we just search for the date since that
# combined with the series identifier should result in just one episode
search_params['name'] = date_str.replace('-', '.')
elif ep_obj.show.anime:
search_params['name'] = "%i" % int(ep_obj.scene_absolute_number)
else:
# Do a general name search for the episode, formatted like SXXEYY
search_params['name'] = "S%02dE%02d" % (ep_obj.scene_season, ep_obj.scene_episode)
# search
if ep_obj.show.indexer == 1:
search_params['tvdb'] = ep_obj.show.indexerid
to_return.append(search_params)
else:
# add new query string for every exception
name_exceptions = list(
set(scene_exceptions.get_scene_exceptions(ep_obj.show.indexerid) + [ep_obj.show.name]))
for cur_exception in name_exceptions:
search_params['series'] = sanitizeSceneName(cur_exception)
to_return.append(search_params)
return to_return
def _doGeneralSearch(self, search_string):
# 'search' looks as broad is it can find. Can contain episode overview and title for example,
# use with caution!
return self._doSearch({'search': search_string})
def findPropers(self, search_date=None):
results = []
search_terms = ['%.proper.%', '%.repack.%']
for term in search_terms:
for item in self._doSearch({'release': term}, age=4 * 24 * 60 * 60):
if item['Time']:
try:
result_date = datetime.fromtimestamp(float(item['Time']))
except TypeError:
result_date = None
if result_date:
if not search_date or result_date > search_date:
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, result_date, self.show))
return results
def seedRatio(self):
return self.ratio
class BTNCache(tvcache.TVCache):
    """RSS-style cache for BTN, fed by time-windowed API searches."""

    def __init__(self, provider_obj):
        tvcache.TVCache.__init__(self, provider_obj)
        # Query the API no more often than once every 15 minutes.
        self.minTime = 15

    def _getRSSData(self):
        """Fetch torrents uploaded since the last successful update."""
        last_update = time.mktime(self._getLastUpdate().timetuple())
        elapsed = math.ceil(time.time() - last_update)

        # Never ask for a window smaller than the minimum poll interval.
        floor_seconds = self.minTime * 60
        if elapsed < floor_seconds:
            elapsed = floor_seconds

        # Cap the window at 24 hours (24 * 60 * 60 = 86400 seconds);
        # anything older must come from the backlog search instead.
        if elapsed > 86400:
            logger.log(
                u"The last known successful update was more than 24 hours ago, only trying to fetch the last 24 hours!",
                logger.DEBUG)
            elapsed = 86400

        return {'entries': self.provider._doSearch(search_params=None, age=elapsed)}
# Module-level instance that SickRage's provider loader discovers on import.
provider = BTNProvider()
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The metrics filter interface. The MetricsFilter objects can be used either to
* filter the metrics from {@link MetricsSource}s or to filter metrics per
* {@link MetricsSink}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class MetricsFilter implements MetricsPlugin {
/**
* Whether to accept the name
* @param name to filter on
* @return true to accept; false otherwise.
*/
public abstract boolean accepts(String name);
/**
* Whether to accept the tag
* @param tag to filter on
* @return true to accept; false otherwise
*/
public abstract boolean accepts(MetricsTag tag);
/**
* Whether to accept the tags
* @param tags to filter on
* @return true to accept; false otherwise
*/
public abstract boolean accepts(Iterable<MetricsTag> tags);
/**
* Whether to accept the record
* @param record to filter on
* @return true to accept; false otherwise.
*/
public boolean accepts(MetricsRecord record) {
return accepts(record.name()) && accepts(record.tags());
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsFilter.java |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package funcs
import (
"fmt"
"math/big"
"github.com/apparentlymart/go-cidr/cidr"
"github.com/hashicorp/terraform/internal/ipaddr"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
"github.com/zclconf/go-cty/cty/gocty"
)
// CidrHostFunc constructs a function that calculates a full host IP address
// within a given IP network address prefix.
var CidrHostFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "prefix",
			Type: cty.String,
		},
		{
			Name: "hostnum",
			Type: cty.Number,
		},
	},
	Type:         function.StaticReturnType(cty.String),
	RefineResult: refineNotNull,
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		// The host number can exceed int64 range for IPv6 prefixes, so
		// decode it into a big.Int.
		var hostIndex *big.Int
		if convErr := gocty.FromCtyValue(args[1], &hostIndex); convErr != nil {
			return cty.UnknownVal(cty.String), convErr
		}

		_, network, parseErr := ipaddr.ParseCIDR(args[0].AsString())
		if parseErr != nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", parseErr)
		}

		hostIP, hostErr := cidr.HostBig(network, hostIndex)
		if hostErr != nil {
			return cty.UnknownVal(cty.String), hostErr
		}

		return cty.StringVal(hostIP.String()), nil
	},
})
// CidrNetmaskFunc constructs a function that converts an IPv4 address prefix
// given in CIDR notation into a subnet mask address.
var CidrNetmaskFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "prefix",
			Type: cty.String,
		},
	},
	Type:         function.StaticReturnType(cty.String),
	RefineResult: refineNotNull,
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		prefixStr := args[0].AsString()

		_, network, parseErr := ipaddr.ParseCIDR(prefixStr)
		if parseErr != nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", parseErr)
		}

		// Dotted-quad netmasks only make sense for IPv4; IPv6 networks are
		// described by prefix length alone.
		if network.IP.To4() == nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("IPv6 addresses cannot have a netmask: %s", prefixStr)
		}

		return cty.StringVal(ipaddr.IP(network.Mask).String()), nil
	},
})
// CidrSubnetFunc constructs a function that calculates a subnet address within
// a given IP network address prefix.
var CidrSubnetFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "prefix",
			Type: cty.String,
		},
		{
			Name: "newbits",
			Type: cty.Number,
		},
		{
			Name: "netnum",
			Type: cty.Number,
		},
	},
	Type:         function.StaticReturnType(cty.String),
	RefineResult: refineNotNull,
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		var addedBits int
		if convErr := gocty.FromCtyValue(args[1], &addedBits); convErr != nil {
			return cty.UnknownVal(cty.String), convErr
		}

		// The subnet number may exceed int64 range for IPv6, so decode it
		// into a big.Int.
		var subnetIndex *big.Int
		if convErr := gocty.FromCtyValue(args[2], &subnetIndex); convErr != nil {
			return cty.UnknownVal(cty.String), convErr
		}

		_, network, parseErr := ipaddr.ParseCIDR(args[0].AsString())
		if parseErr != nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", parseErr)
		}

		subnet, subErr := cidr.SubnetBig(network, addedBits, subnetIndex)
		if subErr != nil {
			return cty.UnknownVal(cty.String), subErr
		}

		return cty.StringVal(subnet.String()), nil
	},
})
// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive
// subnet addresses at once, rather than just a single subnet extension.
var CidrSubnetsFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "prefix",
			Type: cty.String,
		},
	},
	VarParam: &function.Parameter{
		Name: "newbits",
		Type: cty.Number,
	},
	Type:         function.StaticReturnType(cty.List(cty.String)),
	RefineResult: refineNotNull,
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		_, network, err := ipaddr.ParseCIDR(args[0].AsString())
		if err != nil {
			return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err)
		}
		startPrefixLen, _ := network.Mask.Size()

		prefixLengthArgs := args[1:]
		if len(prefixLengthArgs) == 0 {
			return cty.ListValEmpty(cty.String), nil
		}

		var firstLength int
		if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil {
			return cty.UnknownVal(cty.String), function.NewArgError(1, err)
		}
		firstLength += startPrefixLen

		retVals := make([]cty.Value, len(prefixLengthArgs))

		// Start one subnet *before* the first requested one so that the loop
		// below can uniformly call NextSubnet for every element, including
		// the first.
		current, _ := cidr.PreviousSubnet(network, firstLength)
		for i, lengthArg := range prefixLengthArgs {
			var length int
			if err := gocty.FromCtyValue(lengthArg, &length); err != nil {
				return cty.UnknownVal(cty.String), function.NewArgError(i+1, err)
			}

			if length < 1 {
				return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit")
			}
			// For portability with 32-bit systems where the subnet number
			// will be a 32-bit int, we only allow extension of 32 bits in
			// one call even if we're running on a 64-bit machine.
			// (Of course, this is significant only for IPv6.)
			if length > 32 {
				return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits")
			}

			// Each length argument is relative to the base prefix length.
			length += startPrefixLen
			if length > (len(network.IP) * 8) {
				protocol := "IP"
				switch len(network.IP) * 8 {
				case 32:
					protocol = "IPv4"
				case 128:
					protocol = "IPv6"
				}
				return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol)
			}

			next, rollover := cidr.NextSubnet(current, length)
			if rollover || !network.Contains(next.IP) {
				// If we run out of suffix bits in the base CIDR prefix then
				// NextSubnet will start incrementing the prefix bits, which
				// we don't allow because it would then allocate addresses
				// outside of the caller's given prefix.
				return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String())
			}

			current = next
			retVals[i] = cty.StringVal(current.String())
		}

		return cty.ListVal(retVals), nil
	},
})
// CidrHost calculates a full host IP address within a given IP network address prefix.
func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) {
	args := []cty.Value{prefix, hostnum}
	return CidrHostFunc.Call(args)
}
// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address.
func CidrNetmask(prefix cty.Value) (cty.Value, error) {
	args := []cty.Value{prefix}
	return CidrNetmaskFunc.Call(args)
}
// CidrSubnet calculates a subnet address within a given IP network address prefix.
func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) {
	args := []cty.Value{prefix, newbits, netnum}
	return CidrSubnetFunc.Call(args)
}
// CidrSubnets calculates a sequence of consecutive subnet prefixes that may
// be of different prefix lengths under a common base prefix.
func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) {
args := make([]cty.Value, len(newbits)+1)
args[0] = prefix
copy(args[1:], newbits)
return CidrSubnetsFunc.Call(args)
} | go | github | https://github.com/hashicorp/terraform | internal/lang/funcs/cidr.go |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors instrument the training process.
@@get_default_monitors
@@BaseMonitor
@@CaptureVariable
@@CheckpointSaver
@@EveryN
@@ExportMonitor
@@GraphDump
@@LoggingTrainable
@@NanLoss
@@PrintTensor
@@StepCounter
@@StopAtStep
@@SummarySaver
@@ValidationMonitor
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import time
import numpy as np
import six
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
# TODO(ptucker): Split each monitor class into a separate file.
# TODO(ptucker): Fail if epoch or step does not monotonically increase?
class BaseMonitor(object):
  """Base class for Monitors.

  Defines basic interfaces of Monitors.
  Monitors can either be run on all workers or, more commonly, restricted
  to run exclusively on the elected chief worker.
  """

  @deprecation.deprecated(
      "2016-12-05",
      "Monitors are deprecated. Please use tf.train.SessionRunHook.")
  def __init__(self):
    self._begun = False
    self._current_epoch = None
    self._current_step = None
    self._max_steps = None
    self._estimator = None

  @property
  def run_on_all_workers(self):
    return False

  def set_estimator(self, estimator):
    """A setter called automatically by the target estimator.

    If the estimator is locked, this method does nothing.

    Args:
      estimator: the estimator that this monitor monitors.

    Raises:
      ValueError: if the estimator is None.
    """
    if estimator is None:
      raise ValueError("Missing estimator.")
    # TODO(mdan): This should fail if called twice with the same estimator.
    self._estimator = estimator

  def begin(self, max_steps=None):
    """Called at the beginning of training.

    When called, the default graph is the one we are executing.

    Args:
      max_steps: `int`, the maximum global step this training will run until.

    Raises:
      ValueError: if we've already begun a run.
    """
    if self._begun:
      raise ValueError("begin called twice without end.")
    self._max_steps = max_steps
    self._begun = True

  def end(self, session=None):
    """Callback at the end of training/evaluation.

    Args:
      session: A `tf.Session` object that can be used to run ops.

    Raises:
      ValueError: if we've not begun a run.
    """
    _ = session
    if not self._begun:
      raise ValueError("end called without begin.")
    self._max_steps = None
    self._begun = False

  def epoch_begin(self, epoch):
    """Begin epoch.

    Args:
      epoch: `int`, the epoch number.

    Raises:
      ValueError: if we've already begun an epoch, or `epoch` < 0.
    """
    if self._current_epoch is not None:
      raise ValueError("epoch_begin called twice without epoch_end.")
    if epoch < 0:
      raise ValueError("Invalid epoch %s." % epoch)
    self._current_epoch = epoch

  def epoch_end(self, epoch):
    """End epoch.

    Args:
      epoch: `int`, the epoch number.

    Raises:
      ValueError: if we've not begun an epoch, or `epoch` number does not match.
    """
    if self._current_epoch != epoch:
      # Interpolate eagerly: ValueError does not format logging-style
      # (fmt, args) arguments, so the old form never filled in the %s.
      raise ValueError(
          "epoch_end expected %s but got %s." % (self._current_epoch, epoch))
    self._current_epoch = None

  def step_begin(self, step):
    """Callback before training step begins.

    You may use this callback to request evaluation of additional tensors
    in the graph.

    Args:
      step: `int`, the current value of the global step.

    Returns:
      List of `Tensor` objects or string tensor names to be run.

    Raises:
      ValueError: if we've already begun a step, or `step` < 0, or
          `step` > `max_steps`.
    """
    if (step < 0) or (
        (self._max_steps is not None) and (step > self._max_steps)):
      raise ValueError("Invalid step %s." % step)
    self._current_step = step
    return []

  def step_end(self, step, output):  # pylint: disable=unused-argument
    """Callback after training step finished.

    This callback provides access to the tensors/ops evaluated at this step,
    including the additional tensors for which evaluation was requested in
    `step_begin`.

    In addition, the callback has the opportunity to stop training by returning
    `True`. This is useful for early stopping, for example.

    Note that this method is not called if the call to `Session.run()` that
    followed the last call to `step_begin()` failed.

    Args:
      step: `int`, the current value of the global step.
      output: `dict` mapping `string` values representing tensor names to
        the value resulted from running these tensors. Values may be either
        scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.

    Returns:
      `bool`. True if training should stop.

    Raises:
      ValueError: if we've not begun a step, or `step` number does not match.
    """
    if self._current_step != step:
      # Interpolate eagerly: ValueError does not format logging-style
      # (fmt, args) arguments, so the old form never filled in the %s.
      raise ValueError(
          "step_end expected %s but got %s." % (self._current_step, step))
    self._current_step = None
    return False

  def post_step(self, step, session):  # pylint: disable=unused-argument
    """Callback after the step is finished.

    Called after step_end and receives session to perform extra session.run
    calls. If failure occurred in the process, will be called as well.

    Args:
      step: `int`, global step of the model.
      session: `Session` object.
    """
    _ = step, session
def _extract_output(outputs, request):
if request in outputs:
return outputs[request]
return outputs[request.name]
class EveryN(BaseMonitor):
  """Base class for monitors that execute callbacks every N steps.

  This class adds three new callbacks:
    - every_n_step_begin
    - every_n_step_end
    - every_n_post_step

  The callbacks are executed every n steps, or optionally every step for the
  first m steps, where m and n can both be user-specified.

  When extending this class, note that if you wish to use any of the
  `BaseMonitor` callbacks, you must call their respective super implementation:

    def step_begin(self, step):
      super(ExampleMonitor, self).step_begin(step)
      return []

  Failing to call the super implementation will cause unpredictable behavior.

  The `every_n_post_step()` callback is also called after the last step if it
  was not already called through the regular conditions.  Note that
  `every_n_step_begin()` and `every_n_step_end()` do not receive that special
  treatment.
  """

  # TODO(ipolosukhin): Add also every n seconds.

  def __init__(self, every_n_steps=100, first_n_steps=1):
    """Initializes an `EveryN` monitor.

    Args:
      every_n_steps: `int`, the number of steps to allow between callbacks.
      first_n_steps: `int`, specifying the number of initial steps during
        which the callbacks will always be executed, regardless of the value
        of `every_n_steps`. Note that this value is relative to the global
        step.
    """
    super(EveryN, self).__init__()
    self._every_n_steps = every_n_steps
    self._first_n_steps = first_n_steps
    # Last step in the model.
    self._last_successful_step = None
    # Last step at which we called one of the every_n methods
    self._last_active_step = 0
    # True iff the most recent step_begin() decided this is an "active"
    # step, i.e. the every_n_* callbacks fire for it.
    self._every_n_step_begin_called = False

  def every_n_step_begin(self, step):  # pylint: disable=unused-argument
    """Callback before every n'th step begins.

    Args:
      step: `int`, the current value of the global step.

    Returns:
      A `list` of tensors that will be evaluated at this step.
    """
    return []

  def every_n_step_end(self, step, outputs):  # pylint: disable=unused-argument
    """Callback after every n'th step finished.

    This callback provides access to the tensors/ops evaluated at this step,
    including the additional tensors for which evaluation was requested in
    `step_begin`.

    In addition, the callback has the opportunity to stop training by returning
    `True`. This is useful for early stopping, for example.

    Args:
      step: `int`, the current value of the global step.
      outputs: `dict` mapping `string` values representing tensor names to
        the value resulted from running these tensors. Values may be either
        scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.

    Returns:
      `bool`. True if training should stop.
    """
    return False

  def every_n_post_step(self, step, session):
    """Callback after a step is finished or `end()` is called.

    Args:
      step: `int`, the current value of the global step.
      session: `Session` object.
    """
    pass

  def step_begin(self, step):
    """Overrides `BaseMonitor.step_begin`.

    When overriding this method, you must call the super implementation.

    Args:
      step: `int`, the current value of the global step.
    Returns:
      A `list`, the result of every_n_step_begin, if that was called this step,
      or an empty list otherwise.

    Raises:
      ValueError: if called more than once during a step.
    """
    super(EveryN, self).step_begin(step)
    # Active on the first N steps, once every_n_steps have elapsed since
    # the last active step, and on the final step of training.
    if (step <= self._first_n_steps or
        step >= (self._every_n_steps + self._last_active_step) or
        step == self._max_steps):  # Note: max_steps can be None here.
      self._every_n_step_begin_called = True
      return self.every_n_step_begin(step)
    self._every_n_step_begin_called = False
    return []

  def step_end(self, step, output):
    """Overrides `BaseMonitor.step_end`.

    When overriding this method, you must call the super implementation.

    Args:
      step: `int`, the current value of the global step.
      output: `dict` mapping `string` values representing tensor names to
        the value resulted from running these tensors. Values may be either
        scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
    Returns:
      `bool`, the result of every_n_step_end, if that was called this step,
      or `False` otherwise.
    """
    super(EveryN, self).step_end(step, output)
    if self._every_n_step_begin_called:
      return self.every_n_step_end(step, output)
    return False

  def post_step(self, step, session):
    super(EveryN, self).post_step(step, session)
    if self._every_n_step_begin_called:
      self.every_n_post_step(step, session)
      # Only an "active" step advances the last-active marker.
      self._last_active_step = step
    self._last_successful_step = step

  def end(self, session=None):
    super(EveryN, self).end(session=session)
    # Guarantee a final every_n_post_step call for the last completed step
    # if it wasn't already an active step.
    if self._last_successful_step != self._last_active_step:
      self.every_n_post_step(self._last_successful_step, session)
class StopAtStep(BaseMonitor):
  """Monitor that requests training to stop at a specified global step."""

  def __init__(self, num_steps=None, last_step=None):
    """Creates a StopAtStep monitor.

    Exactly one of the two arguments must be supplied.  `num_steps` counts
    steps executed after `begin()` is called; `last_step` names the final
    global step as seen by `step_begin()`.

    Args:
      num_steps: Number of steps to execute.
      last_step: Step after which to stop.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    super(StopAtStep, self).__init__()
    self._num_steps = num_steps
    self._last_step = last_step
    if num_steps is None and last_step is None:
      raise ValueError("One of num_steps or last_step must be specified.")
    if num_steps is not None and last_step is not None:
      raise ValueError("Only one of num_steps or last_step can be specified.")

  @property
  def run_on_all_workers(self):
    # Stopping must be coordinated across every worker, not just the chief.
    return True

  def step_begin(self, step):
    super(StopAtStep, self).step_begin(step)
    # Lazily resolve a relative num_steps into an absolute last step on the
    # first step we observe.
    if self._last_step is None:
      self._last_step = step + self._num_steps - 1
    return []

  def step_end(self, step, output):
    super(StopAtStep, self).step_end(step, output)
    return step >= self._last_step
# TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout.
class PrintTensor(EveryN):
  """Logs the values of the given tensors every N steps.

  An `EveryN` monitor with the usual semantics for `every_n` and `first_n`.
  The tensor values are written to the log with `INFO` severity.
  """

  def __init__(self, tensor_names, every_n=100, first_n=1):
    """Initializes a PrintTensor monitor.

    Args:
      tensor_names: `dict` of tag to tensor names or
          `iterable` of tensor names (strings).
      every_n: `int`, print every N steps. See `PrintN.`
      first_n: `int`, also print the first N steps. See `PrintN.`
    """
    super(PrintTensor, self).__init__(every_n, first_n)
    if not isinstance(tensor_names, dict):
      # With no explicit tags, each name serves as its own tag.
      tensor_names = {name: name for name in tensor_names}
    self._tensor_names = tensor_names

  def every_n_step_begin(self, step):
    super(PrintTensor, self).every_n_step_begin(step)
    return list(self._tensor_names.values())

  def every_n_step_end(self, step, outputs):
    super(PrintTensor, self).every_n_step_end(step, outputs)
    stats = ["%s = %s" % (tag, str(_extract_output(outputs, name)))
             for tag, name in six.iteritems(self._tensor_names)
             if name in outputs]
    logging.info("Step %d: %s", step, ", ".join(stats))
class LoggingTrainable(EveryN):
  """Logs the values of trainable variables every N steps.

  Writes the tensors held by trainable variables (optionally restricted to
  a scope) into the log `every_n` steps, starting with the `first_n`th step.
  """

  def __init__(self, scope=None, every_n=100, first_n=1):
    """Initializes LoggingTrainable monitor.

    Args:
      scope: An optional string to match variable names using re.match.
      every_n: Print every N steps.
      first_n: Print first N steps.
    """
    super(LoggingTrainable, self).__init__(every_n, first_n)
    self._scope = scope

  def every_n_step_begin(self, step):
    super(LoggingTrainable, self).every_n_step_begin(step)
    # The trainable-variable collection is only complete once the graph has
    # been built, so look it up here rather than in __init__.
    trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
                                    scope=self._scope)
    self._names = {var.name: var.value().name for var in trainables}
    return list(self._names.values())

  def every_n_step_end(self, step, outputs):
    super(LoggingTrainable, self).every_n_step_end(step, outputs)
    stats = ["%s = %s" % (tag, str(_extract_output(outputs, name)))
             for tag, name in six.iteritems(self._names)
             if name in outputs]
    logging.info("Logging Trainable: Step %d: %s", step, ", ".join(stats))
class SummarySaver(EveryN):
  """Saves summaries every N steps."""
  def __init__(self,
               summary_op,
               save_steps=100,
               output_dir=None,
               summary_writer=None,
               scaffold=None):
    """Initializes a `SummarySaver` monitor.
    Args:
      summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
          buffer, as output by TF summary methods like `summary.scalar` or
          `summary.merge_all`.
      save_steps: `int`, save summaries every N steps. See `EveryN`.
      output_dir: `string`, the directory to save the summaries to. Only used
          if no `summary_writer` is supplied.
      summary_writer: `SummaryWriter`. If `None` and an `output_dir` was
          passed, one will be created accordingly.
      scaffold: `Scaffold` to get summary_op if it's not provided.
    """
    # TODO(ipolosukhin): Implement every N seconds.
    super(SummarySaver, self).__init__(every_n_steps=save_steps)
    self._summary_op = summary_op
    if summary_writer is None and output_dir:
      self._summary_writer = core_summary.FileWriter(output_dir)
    else:
      self._summary_writer = summary_writer
    self._scaffold = scaffold
    # TODO(mdan): Throw an error if output_dir and summary_writer are None.
  def set_estimator(self, estimator):
    """Falls back to the estimator's model dir when no writer was supplied."""
    super(SummarySaver, self).set_estimator(estimator)
    # TODO(mdan): This line looks redundant.
    if self._summary_writer is None:
      self._summary_writer = core_summary.FileWriter(estimator.model_dir)
  def every_n_step_begin(self, step):
    """Requests the summary op, resolving it from the scaffold if needed."""
    super(SummarySaver, self).every_n_step_begin(step)
    if self._summary_op is None and self._scaffold is not None:
      self._summary_op = self._scaffold.summary_op
    return [] if self._summary_op is None else [self._summary_op]
  def every_n_step_end(self, step, outputs):
    """Writes the fetched serialized summary; never requests a stop."""
    super(SummarySaver, self).every_n_step_end(step, outputs)
    if self._summary_op is not None:
      serialized = _extract_output(outputs, self._summary_op)
      if self._summary_writer:
        self._summary_writer.add_summary(serialized, step)
    return False
  def end(self, session=None):
    """Flushes any buffered summaries at the end of training."""
    super(SummarySaver, self).end(session=session)
    if self._summary_writer:
      self._summary_writer.flush()
class ValidationMonitor(EveryN):
  """Runs evaluation of a given estimator, at most every N steps.
  Note that the evaluation is done based on the saved checkpoint, which will
  usually be older than the current step.
  Can do early stopping on validation metrics if `early_stopping_rounds` is
  provided.
  """
  def __init__(self, x=None, y=None, input_fn=None, batch_size=None,
               eval_steps=None,
               every_n_steps=100, metrics=None, hooks=None,
               early_stopping_rounds=None,
               early_stopping_metric="loss",
               early_stopping_metric_minimize=True, name=None):
    """Initializes a ValidationMonitor.
    Args:
      x: See `BaseEstimator.evaluate`.
      y: See `BaseEstimator.evaluate`.
      input_fn: See `BaseEstimator.evaluate`.
      batch_size: See `BaseEstimator.evaluate`.
      eval_steps: See `BaseEstimator.evaluate`.
      every_n_steps: Check for new checkpoints to evaluate every N steps. If a
          new checkpoint is found, it is evaluated. See `EveryN`.
      metrics: See `BaseEstimator.evaluate`.
      hooks: A list of `SessionRunHook` hooks to pass to the
        `Estimator`'s `evaluate` function.
      early_stopping_rounds: `int`. If the metric indicated by
          `early_stopping_metric` does not change according to
          `early_stopping_metric_minimize` for this many steps, then training
          will be stopped.
      early_stopping_metric: `string`, name of the metric to check for early
          stopping.
      early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is
          expected to decrease (thus early stopping occurs when this metric
          stops decreasing), False if `early_stopping_metric` is expected to
          increase. Typically, `early_stopping_metric_minimize` is True for
          loss metrics like mean squared error, and False for performance
          metrics like accuracy.
      name: See `BaseEstimator.evaluate`.
    Raises:
      ValueError: If both x and input_fn are provided.
    """
    # first_n_steps=-1 disables the "also run on the first N steps" behavior,
    # so evaluation happens only on the every-N schedule.
    super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps,
                                            first_n_steps=-1)
    # TODO(mdan): Checks like this are already done by evaluate.
    if x is None and input_fn is None:
      raise ValueError("Either x or input_fn should be provided.")
    self.x = x
    self.y = y
    self.input_fn = input_fn
    self.batch_size = batch_size
    self.eval_steps = eval_steps
    self.metrics = metrics
    self.hooks = hooks
    self.early_stopping_rounds = early_stopping_rounds
    self.early_stopping_metric = early_stopping_metric
    self.early_stopping_metric_minimize = early_stopping_metric_minimize
    self.name = name
    # Early-stopping bookkeeping: best metric value seen, the step and full
    # metric dict it came from, and whether we already triggered a stop.
    self._best_value_step = None
    self._best_value = None
    self._best_metrics = None
    self._early_stopped = False
    # Last checkpoint path evaluated (and at which step), used to avoid
    # re-evaluating the same checkpoint.
    self._latest_path = None
    self._latest_path_step = None
  @property
  def early_stopped(self):
    """Returns True if this monitor caused an early stop."""
    return self._early_stopped
  @property
  def best_step(self):
    """Returns the step at which the best early stopping metric was found."""
    return self._best_value_step
  @property
  def best_value(self):
    """Returns the best early stopping metric value found so far."""
    return self._best_value
  @property
  def best_metrics(self):
    """Returns all eval metrics computed with the best early stopping metric.
    For instance, if the metrics computed in two successive evals are
    1. {'loss':40, 'auc':0.5}
    2. {'loss':50, 'auc':0.6}
    this function would return the first dict {'loss':40, 'auc':0.5} after both
    first and second eval (if `early_stopping_metric` is 'loss' and
    `early_stopping_metric_minimize` is True).
    Returns:
      The output dict of estimator.evaluate which contains the best value of
      the early stopping metric seen so far.
    """
    return self._best_metrics
  def _evaluate_estimator(self):
    # core (tf.estimator) Estimators only accept input_fn/steps/hooks/name;
    # the legacy contrib Estimator also takes x/y/batch_size/metrics.
    if isinstance(self._estimator, core_estimator.Estimator):
      if any((x is not None for x in
              [self.x, self.y, self.batch_size, self.metrics])):
        raise ValueError(
            "tf.estimator.Estimator does not support following "
            "arguments: x, y, batch_size, metrics. Should set as `None` "
            "in ValidationMonitor")
      return self._estimator.evaluate(
          input_fn=self.input_fn, steps=self.eval_steps, hooks=self.hooks,
          name=self.name)
    else:
      return self._estimator.evaluate(
          x=self.x, y=self.y, input_fn=self.input_fn,
          batch_size=self.batch_size, steps=self.eval_steps,
          metrics=self.metrics, hooks=self.hooks, name=self.name)
  def every_n_step_end(self, step, outputs):
    """Evaluates the latest checkpoint; returns True to request a stop."""
    super(ValidationMonitor, self).every_n_step_end(step, outputs)
    # TODO(mdan): The use of step below is probably misleading.
    # The code should probably use the step from the checkpoint, because
    # that's what is being evaluated.
    if self._estimator is None:
      raise ValueError("Missing call to set_estimator.")
    # Check that we are not running evaluation on the same checkpoint.
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.debug("Skipping evaluation since model has not been saved yet "
                    "at step %d.", step)
      return False
    if latest_path is not None and latest_path == self._latest_path:
      logging.debug("Skipping evaluation due to same checkpoint %s for step %d "
                    "as for step %d.", latest_path, step,
                    self._latest_path_step)
      return False
    self._latest_path = latest_path
    self._latest_path_step = step
    # Run evaluation and log it.
    validation_outputs = self._evaluate_estimator()
    stats = []
    for name in validation_outputs:
      stats.append("%s = %s" % (name, str(validation_outputs[name])))
    logging.info("Validation (step %d): %s", step, ", ".join(stats))
    # Early stopping logic.
    if self.early_stopping_rounds is not None:
      if self.early_stopping_metric not in validation_outputs:
        raise ValueError("Metric %s missing from outputs %s." % (
            self.early_stopping_metric, set(validation_outputs.keys())))
      current_value = validation_outputs[self.early_stopping_metric]
      # Record a new best when the metric improves in the requested direction.
      if (self._best_value is None or (self.early_stopping_metric_minimize and
                                       (current_value < self._best_value)) or
          (not self.early_stopping_metric_minimize and
           (current_value > self._best_value))):
        self._best_value = current_value
        self._best_metrics = copy.deepcopy(validation_outputs)
        self._best_value_step = step
      # Stop when no improvement has been seen for early_stopping_rounds steps.
      stop_now = (step - self._best_value_step >= self.early_stopping_rounds)
      if stop_now:
        logging.info("Stopping. Best step: {} with {} = {}."
                     .format(self._best_value_step,
                             self.early_stopping_metric, self._best_value))
        self._early_stopped = True
        return True
    return False
# TODO(ptucker): This really reads any tensor, not just vars, and requires the
# ':0' suffix on var_name.
class CaptureVariable(EveryN):
  """Captures a variable's values into a collection.
  This monitor is useful for unit testing. You should exercise caution when
  using this monitor in production, since it never discards values.
  This is an `EveryN` monitor and has consistent semantic for `every_n`
  and `first_n`.
  """
  def __init__(self, var_name, every_n=100, first_n=1):
    """Initializes a CaptureVariable monitor.
    Args:
      var_name: `string`. The variable name, including suffix (typically ":0").
      every_n: `int`, capture every N steps. See `PrintN.`
      first_n: `int`, also capture the first N steps. See `PrintN.`
    """
    super(CaptureVariable, self).__init__(every_n, first_n)
    self._var_name = var_name
    # Maps step number -> captured value; grows without bound.
    self._var_values = {}
  @property
  def values(self):
    """Returns the values captured so far.
    Returns:
      `dict` mapping `int` step numbers to the value of the variable at the
      respective step.
    """
    return self._var_values
  def every_n_step_begin(self, step):
    """Requests the watched variable as the single fetch for this step."""
    super(CaptureVariable, self).every_n_step_begin(step)
    return [self._var_name]
  def every_n_step_end(self, step, outputs):
    """Records the fetched variable value under the current step."""
    super(CaptureVariable, self).every_n_step_end(step, outputs)
    self._var_values[step] = _extract_output(outputs, self._var_name)
def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100,
                         output_dir=None, summary_writer=None):
  """Returns a default set of typically-used monitors.
  Args:
    loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`
        at the default interval.
    summary_op: See `SummarySaver`.
    save_summary_steps: See `SummarySaver`.
    output_dir: See `SummarySaver`.
    summary_writer: See `SummarySaver`.
  Returns:
    `list` of monitors.
  """
  default_monitors = []
  # Print the loss under the tag "loss" when a loss tensor was supplied.
  if loss_op is not None:
    default_monitors.append(
        PrintTensor(tensor_names={"loss": loss_op.name}))
  # Persist summaries when a summary op was supplied.
  if summary_op is not None:
    default_monitors.append(
        SummarySaver(summary_op, save_steps=save_summary_steps,
                     output_dir=output_dir, summary_writer=summary_writer))
  return default_monitors
class GraphDump(BaseMonitor):
  """Dumps almost all tensors in the graph at every step.
  Note, this is very expensive, prefer `PrintTensor` in production.
  """
  IGNORE_OPS = ["Const", "Assign", "Identity", "Placeholder",
                "RandomUniform", "Cast", "RestoreSlice"]
  def __init__(self, ignore_ops=None):
    """Initializes GraphDump monitor.
    Args:
      ignore_ops: `list` of `string`. Names of ops to ignore.
          If None, `GraphDump.IGNORE_OPS` is used.
    """
    super(GraphDump, self).__init__()
    self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS
    # Maps step number -> dict of all fetched tensor values at that step.
    self._data = {}
  def begin(self, max_steps=None):
    """Collects every dumpable tensor (first output of each non-ignored op)."""
    super(GraphDump, self).begin(max_steps=max_steps)
    self._tensors = []
    graph = ops.get_default_graph()
    graph_def = graph.as_graph_def()
    for node in graph_def.node:
      if node.op in self._ignore_ops:
        continue
      logging.info("op=%s name=%s.", node.op, node.name)
      try:
        self._tensors.append(graph.get_tensor_by_name(node.name + ":0"))
      except KeyError:
        # Ops with no outputs (or no ":0" tensor) are simply skipped.
        pass
  def step_begin(self, step):
    super(GraphDump, self).step_begin(step)
    return self._tensors
  def step_end(self, step, output):
    super(GraphDump, self).step_end(step, output)
    self._data[step] = output
  @property
  def data(self):
    return self._data
  # TODO(ptucker): Handle keys that are in one but not the other.
  def compare(self, other_dump, step, atol=1e-06):
    """Compares two `GraphDump` monitors and returns differences.
    Args:
      other_dump: Another `GraphDump` monitor.
      step: `int`, step to compare on.
      atol: `float`, absolute tolerance in comparison of floating arrays.
    Returns:
      Returns tuple:
        matched: `list` of keys that matched.
        non_matched: `dict` of keys to tuple of 2 mismatched values.
    Raises:
      ValueError: if a key in `data` is missing from `other_dump` at `step`.
    """
    non_matched = {}
    matched = []
    this_output = self.data[step] if step in self.data else {}
    other_output = other_dump.data[step] if step in other_dump.data else {}
    for key in this_output:
      # Only string keys name tensors. six.string_types covers str/unicode on
      # Python 2 and str on Python 3 (the previous bare `unicode` reference
      # raised NameError under Python 3).
      if not isinstance(key, six.string_types):
        continue
      if key not in other_output:
        # Fixed: the message must be %-formatted; previously the tuple was
        # passed as a second ValueError argument, leaving "%s" unexpanded.
        raise ValueError("%s missing at step %s." % (key, step))
      value1 = _extract_output(this_output, key)
      value2 = _extract_output(other_output, key)
      if isinstance(value1, str):
        # Serialized/string outputs are not comparable numerically; skip.
        continue
      if isinstance(value1, np.ndarray):
        if not np.allclose(value1, value2, atol=atol):
          non_matched[key] = value1 - value2
        else:
          matched.append(key)
      else:
        if value1 != value2:
          non_matched[key] = (value1, value2)
        else:
          matched.append(key)
    return matched, non_matched
class ExportMonitor(EveryN):
  """Monitor that exports Estimator every N steps."""
  @deprecation.deprecated("2017-03-25",
                          "ExportMonitor is deprecated. Please pass an "
                          "ExportStrategy to Experiment instead.")
  def __init__(self,
               every_n_steps,
               export_dir,
               input_fn=None,
               input_feature_key=None,
               exports_to_keep=5,
               signature_fn=None,
               default_batch_size=1):
    """Initializes ExportMonitor.
    Args:
      every_n_steps: Run monitor every N steps.
      export_dir: str, folder to export.
      input_fn: A function that takes no argument and returns a tuple of
        (features, labels), where features is a dict of string key to `Tensor`
        and labels is a `Tensor` that's currently not used (and so can be
        `None`).
      input_feature_key: String key into the features dict returned by
        `input_fn` that corresponds to the raw `Example` strings `Tensor` that
        the exported model will take as input. Should be `None` if and only if
        you're passing in a `signature_fn` that does not use the first arg
        (`Tensor` of `Example` strings).
      exports_to_keep: int, number of exports to keep.
      signature_fn: Function that returns a default signature and a named
        signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
        for features and `dict` of `Tensor`s for predictions.
      default_batch_size: Default batch size of the `Example` placeholder.
    Raises:
      ValueError: If `input_fn` and `input_feature_key` are not both defined or
        are not both `None`.
    """
    super(ExportMonitor, self).__init__(every_n_steps=every_n_steps)
    self._export_dir = export_dir
    self._input_fn = input_fn
    self._input_feature_key = input_feature_key
    # When no input_fn is given, fall back to the legacy export code path.
    self._use_deprecated_input_fn = input_fn is None
    self._exports_to_keep = exports_to_keep
    self._signature_fn = signature_fn
    self._default_batch_size = default_batch_size
    self._last_export_dir = None
  @property
  def export_dir(self):
    return self._export_dir
  @property
  def exports_to_keep(self):
    return self._exports_to_keep
  @property
  def signature_fn(self):
    return self._signature_fn
  @property
  def last_export_dir(self):
    """Returns the directory containing the last completed export.
    Returns:
      The string path to the exported directory. NB: this functionality was
      added on 2016/09/25; clients that depend on the return value may need
      to handle the case where this function returns None because the
      estimator being fitted does not yet return a value during export.
    """
    return self._last_export_dir
  def every_n_step_end(self, step, outputs):
    """Exports the estimator; only legacy (contrib) estimators are supported."""
    super(ExportMonitor, self).every_n_step_end(step, outputs)
    try:
      if isinstance(self._estimator, core_estimator.Estimator):
        raise ValueError(
            "ExportMonitor does not support `tf.estimator.Estimator. `. "
            "Please pass an ExportStrategy to Experiment instead.")
      self._last_export_dir = self._estimator.export(
          self.export_dir,
          exports_to_keep=self.exports_to_keep,
          signature_fn=self.signature_fn,
          input_fn=self._input_fn,
          default_batch_size=self._default_batch_size,
          input_feature_key=self._input_feature_key,
          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
      # Currently we are not syncronized with saving checkpoints, which leads to
      # runtime errors when we are calling export on the same global step.
      # Exports depend on saved checkpoints for constructing the graph and
      # getting the global step from the graph instance saved in the checkpoint.
      # If the checkpoint is stale with respect to current step, the global step
      # is taken to be the last saved checkpoint's global step and exporter
      # doesn't export the same checkpoint again with the following error.
      logging.info("Skipping exporting because the existing checkpoint has "
                   "already been exported. "
                   "Consider exporting less frequently.")
  def end(self, session=None):
    """Performs a final export at the end of training, if a checkpoint exists."""
    super(ExportMonitor, self).end(session=session)
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.info("Skipping export at the end since model has not been saved "
                   "yet.")
      return
    if isinstance(self._estimator, core_estimator.Estimator):
      raise ValueError(
          "ExportMonitor does not support `tf.estimator.Estimator. `. "
          "Please pass an ExportStrategy to Experiment instead.")
    try:
      self._last_export_dir = self._estimator.export(
          self.export_dir,
          exports_to_keep=self.exports_to_keep,
          signature_fn=self.signature_fn,
          input_fn=self._input_fn,
          default_batch_size=self._default_batch_size,
          input_feature_key=self._input_feature_key,
          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
      # Same stale-checkpoint race as in every_n_step_end; best-effort skip.
      logging.info("Skipping exporting for the same step.")
class CheckpointSaver(BaseMonitor):
  """Saves checkpoints every N steps or N seconds."""
  def __init__(self,
               checkpoint_dir,
               save_secs=None,
               save_steps=None,
               saver=None,
               checkpoint_basename="model.ckpt",
               scaffold=None):
    """Initialize CheckpointSaver monitor.
    Args:
      checkpoint_dir: `str`, base directory for the checkpoint files.
      save_secs: `int`, save every N secs.
      save_steps: `int`, save every N steps.
      saver: `Saver` object, used for saving.
      checkpoint_basename: `str`, base name for the checkpoint files.
      scaffold: `Scaffold`, use to get saver object.
    Raises:
      ValueError: If both `save_steps` and `save_secs` are not `None`.
      ValueError: If both `save_steps` and `save_secs` are `None`.
    """
    logging.info("Create CheckpointSaver.")
    super(CheckpointSaver, self).__init__()
    self._saver = saver
    self._summary_writer = core_summary.FileWriterCache.get(checkpoint_dir)
    self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
    self._scaffold = scaffold
    self._save_secs = save_secs
    self._save_steps = save_steps
    # Bookkeeping for when/at what step the last checkpoint was written.
    self._last_saved_time = None
    self._last_begin_step = None
    self._last_saved_step = None
    # Exactly one of save_steps / save_secs must be provided.
    if save_steps is None and save_secs is None:
      raise ValueError("Either save_steps or save_secs should be provided")
    if (save_steps is not None) and (save_secs is not None):
      raise ValueError("Can not provide both save_steps and save_secs.")
  def begin(self, max_steps=None):
    """Resets bookkeeping so a fresh run starts with an immediate save."""
    super(CheckpointSaver, self).begin(max_steps)
    self._last_saved_time = None
    self._last_begin_step = None
    self._last_saved_step = None
  def step_begin(self, step):
    # Remember the most recent step so end() can save at the final step.
    super(CheckpointSaver, self).step_begin(step)
    self._last_begin_step = step
  def post_step(self, step, session):
    """Saves when this is the first step, or the step/time budget elapsed."""
    super(CheckpointSaver, self).post_step(step, session)
    # First call after begin(): always save once to establish a baseline.
    if self._last_saved_time is None:
      self._save(step, session)
    if self._save_steps is not None:
      if step >= self._last_saved_step + self._save_steps:
        self._save(step, session)
    if self._save_secs is not None:
      if time.time() >= self._last_saved_time + self._save_secs:
        self._save(step, session)
  def end(self, session=None):
    """Saves a final checkpoint at the last seen step."""
    super(CheckpointSaver, self).end(session)
    self._save(self._last_begin_step, session)
  def _save(self, step, session):
    """Saves the latest checkpoint."""
    # De-duplicate: never write two checkpoints for the same step.
    if step == self._last_saved_step:
      return
    logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
    self._last_saved_time = time.time()
    self._last_saved_step = step
    if self._saver is None:
      self._scaffold.saver.save(session, self._save_path, global_step=step)
    else:
      self._saver.save(session, self._save_path, global_step=step)
    # Record the checkpoint event so downstream tools (e.g. TensorBoard)
    # can find the latest checkpoint path.
    self._summary_writer.add_session_log(
        SessionLog(
            status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
        step)
class StepCounter(EveryN):
  """Steps per second monitor."""
  def __init__(self, every_n_steps=100, output_dir=None,
               summary_writer=None):
    super(StepCounter, self).__init__(every_n_steps=every_n_steps)
    self._summary_tag = "global_step/sec"
    self._last_reported_step = None
    self._last_reported_time = None
    if summary_writer is None and output_dir:
      self._summary_writer = core_summary.FileWriterCache.get(output_dir)
    else:
      self._summary_writer = summary_writer
  def set_estimator(self, estimator):
    """Falls back to the estimator's model dir when no writer was supplied."""
    super(StepCounter, self).set_estimator(estimator)
    if self._summary_writer is None:
      self._summary_writer = core_summary.FileWriterCache.get(
          estimator.model_dir)
  def every_n_step_end(self, current_step, outputs):
    """Emits a global_step/sec summary based on wall time since last report."""
    now = time.time()
    # Nothing to report on the first invocation or without a writer.
    if self._last_reported_time is not None and self._summary_writer:
      steps_per_sec = ((current_step - self._last_reported_step) /
                       (now - self._last_reported_time))
      summary = Summary(value=[Summary.Value(tag=self._summary_tag,
                                             simple_value=steps_per_sec)])
      self._summary_writer.add_summary(summary, current_step)
    self._last_reported_step = current_step
    self._last_reported_time = now
class NanLossDuringTrainingError(RuntimeError):
  """Raised when training diverges because the loss became NaN."""
  def __str__(self):
    return "NaN loss during training."
class NanLoss(EveryN):
  """NaN Loss monitor.
  Monitors loss and stops training if loss is NaN.
  Can either fail with exception or just stop training.
  """
  def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True):
    """Initializes NanLoss monitor.
    Args:
      loss_tensor: `Tensor`, the loss tensor.
      every_n_steps: `int`, run check every this many steps.
      fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
    """
    super(NanLoss, self).__init__(every_n_steps=every_n_steps)
    self._loss_tensor = loss_tensor
    self._fail_on_nan_loss = fail_on_nan_loss
  def every_n_step_begin(self, step):
    """Requests the loss tensor as the single fetch for this step."""
    super(NanLoss, self).every_n_step_begin(step)
    return [self._loss_tensor]
  def every_n_step_end(self, step, outputs):
    """Raises or requests a stop when the fetched loss value is NaN."""
    super(NanLoss, self).every_n_step_end(step, outputs)
    loss_value = _extract_output(outputs, self._loss_tensor)
    if not np.isnan(loss_value):
      return
    failure_message = "Model diverged with loss = NaN."
    if self._fail_on_nan_loss:
      logging.error(failure_message)
      raise NanLossDuringTrainingError
    logging.warning(failure_message)
    # Return "should stop" so training halts without an exception.
    return True
class RunHookAdapterForMonitors(session_run_hook.SessionRunHook):
  """Wraps monitors into a SessionRunHook."""
  def __init__(self, monitors):
    # List of deprecated Monitor instances to drive from hook callbacks.
    self._monitors = monitors
  def begin(self):
    self._last_step = None
    self._global_step_tensor = training_util.get_global_step()
    for m in self._monitors:
      m.begin(max_steps=None)
  def before_run(self, run_context):
    # Lazily initialize the step counter from the session's global step; +1
    # because monitors are notified about the step that is about to run.
    if self._last_step is None:
      self._last_step = run_context.session.run(self._global_step_tensor) + 1
    # Always fetch the global step; monitor fetches are grouped under the
    # "monitors" key so after_run can find them.
    request = {self._global_step_tensor: self._global_step_tensor}
    monitor_fetches = []
    for m in self._monitors:
      monitor_requests = m.step_begin(self._last_step)
      if monitor_requests:
        if not isinstance(monitor_requests, list):
          raise ValueError("Monitor.step_begin should return a list.")
        monitor_fetches.extend(monitor_requests)
    if monitor_fetches:
      request["monitors"] = dict(
          zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches]))
    return session_run_hook.SessionRunArgs(request)
  def after_run(self, run_context, run_values):
    result = run_values.results[
        "monitors"] if "monitors" in run_values.results else {}
    # Any monitor returning True from step_end requests that training stop.
    for m in self._monitors:
      induce_stop = m.step_end(self._last_step, result)
      if induce_stop:
        run_context.request_stop()
    for m in self._monitors:
      m.post_step(self._last_step, run_context.session)
    # Advance to the step that will run next.
    self._last_step = run_values.results[self._global_step_tensor] + 1
  def end(self, session):
    self._last_step = None
    for m in self._monitors:
      # Older monitors may not accept a `session` kwarg; inspect to stay
      # compatible with both signatures.
      if "session" in tf_inspect.getargspec(m.end).args:
        m.end(session=session)
      else:
        m.end()
def replace_monitors_with_hooks(monitors_or_hooks, estimator):
  """Wraps monitors with a hook.
  `Monitor` is deprecated in favor of `SessionRunHook`. If you're using a
  monitor, you can wrap it with a hook using function. It is recommended to
  implement hook version of your monitor.
  Args:
    monitors_or_hooks: A `list` may contain both monitors and hooks.
    estimator: An `Estimator` that monitor will be used with.
  Returns:
    Returns a list of hooks. If there is any monitor in the given list, it is
    replaced by a hook.
  """
  # Split the incoming list into native hooks and deprecated monitors.
  hooks = []
  deprecated_monitors = []
  for item in (monitors_or_hooks or []):
    if isinstance(item, session_run_hook.SessionRunHook):
      hooks.append(item)
    else:
      deprecated_monitors.append(item)
  if not estimator.config.is_chief:
    # Prune list of monitor to the ones runnable on all workers.
    deprecated_monitors = [
        m for m in deprecated_monitors if m.run_on_all_workers
    ]
  # Setup monitors.
  for monitor in deprecated_monitors:
    monitor.set_estimator(estimator)
  # All remaining monitors are driven through a single adapter hook.
  if deprecated_monitors:
    hooks.append(RunHookAdapterForMonitors(deprecated_monitors))
  return hooks
def _as_graph_element(obj):
  """Retrieves Graph element for `obj` (a name string or graph object)."""
  graph = ops.get_default_graph()
  if not isinstance(obj, six.string_types):
    # Non-string objects must already belong to the current default graph.
    if not hasattr(obj, "graph") or obj.graph != graph:
      raise ValueError("Passed %s should have graph attribute that is equal "
                       "to current graph %s." % (obj, graph))
    return obj
  # A bare op name refers to its first (and only) output tensor.
  lookup_name = obj if ":" in obj else obj + ":0"
  element = graph.as_graph_element(lookup_name)
  # Check that there is no :1 (e.g. it's single output).
  try:
    graph.as_graph_element(obj + ":1")
  except (KeyError, ValueError):
    pass
  else:
    raise ValueError("Name %s is ambiguous, "
                     "as this `Operation` has multiple outputs "
                     "(at least 2)." % obj)
  return element
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.compress;
import org.apache.kafka.common.compress.Lz4BlockOutputStream.BD;
import org.apache.kafka.common.compress.Lz4BlockOutputStream.FLG;
import org.apache.kafka.common.utils.BufferSupplier;
import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Exception;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4SafeDecompressor;
import net.jpountz.xxhash.XXHash32;
import net.jpountz.xxhash.XXHashFactory;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import static org.apache.kafka.common.compress.Lz4BlockOutputStream.LZ4_FRAME_INCOMPRESSIBLE_MASK;
import static org.apache.kafka.common.compress.Lz4BlockOutputStream.MAGIC;
/**
* A partial implementation of the v1.5.1 LZ4 Frame format.
*
* @see <a href="https://github.com/lz4/lz4/wiki/lz4_Frame_format.md">LZ4 Frame Format</a>
*
* This class is not thread-safe.
*/
public final class Lz4BlockInputStream extends InputStream {
    // Error messages surfaced via IOException from header/block parsing.
    public static final String PREMATURE_EOS = "Stream ended prematurely";
    public static final String NOT_SUPPORTED = "Stream unsupported (invalid magic bytes)";
    public static final String BLOCK_HASH_MISMATCH = "Block checksum mismatch";
    public static final String DESCRIPTOR_HASH_MISMATCH = "Stream frame descriptor corrupted";
    private static final LZ4SafeDecompressor DECOMPRESSOR = LZ4Factory.fastestInstance().safeDecompressor();
    private static final XXHash32 CHECKSUM = XXHashFactory.fastestInstance().hash32();
    private static final RuntimeException BROKEN_LZ4_EXCEPTION;
    // https://issues.apache.org/jira/browse/KAFKA-9203
    // detect buggy lz4 libraries on the classpath
    static {
        RuntimeException exception = null;
        try {
            detectBrokenLz4Version();
        } catch (RuntimeException e) {
            exception = e;
        }
        // Capture rather than throw here so merely loading the class never
        // fails; the constructor rethrows on first use.
        BROKEN_LZ4_EXCEPTION = exception;
    }
    // Little-endian duplicate of the caller's buffer holding the raw frame.
    private final ByteBuffer in;
    private final boolean ignoreFlagDescriptorChecksum;
    private final BufferSupplier bufferSupplier;
    private final ByteBuffer decompressionBuffer;
    // `flg` and `maxBlockSize` are effectively final, they are initialised in the `readHeader` method that is only
    // invoked from the constructor
    private FLG flg;
    private int maxBlockSize;
    // If a block is compressed, this is the same as `decompressionBuffer`. If a block is not compressed, this is
    // a slice of `in` to avoid unnecessary copies.
    private ByteBuffer decompressedBuffer;
    // Set once the frame EndMark (zero-length block) has been consumed.
    private boolean finished;
    /**
     * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm.
     *
     * @param in The byte buffer to decompress
     * @param bufferSupplier Pool used to obtain the block-sized decompression buffer (released in {@link #close()})
     * @param ignoreFlagDescriptorChecksum for compatibility with old kafka clients, ignore incorrect HC byte
     * @throws IOException if the frame header is truncated, has invalid magic bytes, or fails its checksum
     */
    public Lz4BlockInputStream(ByteBuffer in, BufferSupplier bufferSupplier, boolean ignoreFlagDescriptorChecksum) throws IOException {
        if (BROKEN_LZ4_EXCEPTION != null) {
            throw BROKEN_LZ4_EXCEPTION;
        }
        this.ignoreFlagDescriptorChecksum = ignoreFlagDescriptorChecksum;
        // Duplicate so position changes never affect the caller's buffer; the LZ4 frame format is little-endian.
        this.in = in.duplicate().order(ByteOrder.LITTLE_ENDIAN);
        this.bufferSupplier = bufferSupplier;
        readHeader();
        // readHeader() initialised maxBlockSize, so the pooled buffer can be sized now.
        decompressionBuffer = bufferSupplier.get(maxBlockSize);
        finished = false;
    }
    /**
     * Check whether this {@code Lz4BlockInputStream} is configured to ignore the
     * Frame Descriptor checksum, which is useful for compatibility with
     * old client implementations that use incorrect checksum calculations.
     */
    public boolean ignoreFlagDescriptorChecksum() {
        return this.ignoreFlagDescriptorChecksum;
    }
    /**
     * Reads the magic number and frame descriptor from input buffer, initialising
     * {@code flg} and {@code maxBlockSize} and (unless disabled) validating the
     * descriptor's HC checksum byte.
     *
     * @throws IOException if the buffer is truncated, the magic bytes are wrong,
     *                     or the descriptor checksum does not match
     */
    private void readHeader() throws IOException {
        // read first 6 bytes into buffer to check magic and FLG/BD descriptor flags
        if (in.remaining() < 6) {
            throw new IOException(PREMATURE_EOS);
        }
        if (MAGIC != in.getInt()) {
            throw new IOException(NOT_SUPPORTED);
        }
        // mark start of data to checksum
        in.mark();
        flg = FLG.fromByte(in.get());
        maxBlockSize = BD.fromByte(in.get()).getBlockMaximumSize();
        if (flg.isContentSizeSet()) {
            if (in.remaining() < 8) {
                throw new IOException(PREMATURE_EOS);
            }
            // Skip the 8-byte optional content-size field; it is not used here.
            in.position(in.position() + 8);
        }
        // Final byte of Frame Descriptor is HC checksum
        // Old implementations produced incorrect HC checksums
        if (ignoreFlagDescriptorChecksum) {
            in.position(in.position() + 1);
            return;
        }
        // reset() rewinds to the mark (start of the descriptor); the difference
        // gives the number of descriptor bytes covered by the HC checksum.
        int len = in.position() - in.reset().position();
        int hash = CHECKSUM.hash(in, in.position(), len, 0);
        // Re-skip the descriptor bytes that reset() rewound over.
        in.position(in.position() + len);
        // HC is the second byte of the xxhash32 of the descriptor.
        if (in.get() != (byte) ((hash >> 8) & 0xFF)) {
            throw new IOException(DESCRIPTOR_HASH_MISMATCH);
        }
    }
    /**
     * Decompresses (if necessary) buffered data, optionally computes and validates a XXHash32 checksum, and writes the
     * result to a buffer.
     *
     * @throws IOException if the input ends prematurely, a block exceeds the declared
     *                     maximum size, decompression fails, or a block checksum mismatches
     */
    private void readBlock() throws IOException {
        if (in.remaining() < 4) {
            throw new IOException(PREMATURE_EOS);
        }
        int blockSize = in.getInt();
        // The high bit of the block size flags an uncompressed (stored) block.
        boolean compressed = (blockSize & LZ4_FRAME_INCOMPRESSIBLE_MASK) == 0;
        blockSize &= ~LZ4_FRAME_INCOMPRESSIBLE_MASK;
        // Check for EndMark
        if (blockSize == 0) {
            finished = true;
            if (flg.isContentChecksumSet())
                in.getInt(); // TODO: verify this content checksum
            return;
        } else if (blockSize > maxBlockSize) {
            throw new IOException(String.format("Block size %d exceeded max: %d", blockSize, maxBlockSize));
        }
        if (in.remaining() < blockSize) {
            throw new IOException(PREMATURE_EOS);
        }
        if (compressed) {
            try {
                final int bufferSize = DECOMPRESSOR.decompress(in, in.position(), blockSize, decompressionBuffer, 0,
                    maxBlockSize);
                decompressionBuffer.position(0);
                decompressionBuffer.limit(bufferSize);
                decompressedBuffer = decompressionBuffer;
            } catch (LZ4Exception e) {
                throw new IOException(e);
            }
        } else {
            // Stored block: expose a slice of the input directly to avoid a copy.
            decompressedBuffer = in.slice();
            decompressedBuffer.limit(blockSize);
        }
        // verify checksum
        if (flg.isBlockChecksumSet()) {
            // The checksum covers the on-the-wire (possibly compressed) bytes.
            int hash = CHECKSUM.hash(in, in.position(), blockSize, 0);
            in.position(in.position() + blockSize);
            if (hash != in.getInt()) {
                throw new IOException(BLOCK_HASH_MISMATCH);
            }
        } else {
            in.position(in.position() + blockSize);
        }
    }
/**
 * Reads a single byte, refilling from the next frame block when the current
 * one is exhausted.
 *
 * @return the next byte as an unsigned value, or -1 at end of frame
 */
@Override
public int read() throws IOException {
    if (!finished && available() == 0) {
        readBlock();
    }
    if (finished) {
        return -1;
    }
    return Byte.toUnsignedInt(decompressedBuffer.get());
}
/**
 * Reads up to {@code len} bytes into {@code b}, refilling from the next frame
 * block when the current one is exhausted. Returns at most one block's worth
 * of data per call.
 *
 * @return the number of bytes copied, or -1 at end of frame
 */
@Override
public int read(byte[] b, int off, int len) throws IOException {
    net.jpountz.util.SafeUtils.checkRange(b, off, len);
    if (!finished && available() == 0) {
        readBlock();
    }
    if (finished) {
        return -1;
    }
    final int count = Math.min(len, available());
    decompressedBuffer.get(b, off, count);
    return count;
}
/**
 * Skips up to {@code n} bytes of decompressed data.
 *
 * BUG FIX: the original computed {@code (int) Math.min(n, available())}
 * without guarding against {@code n <= 0}. For a negative {@code n} that
 * yields a negative "skipped" count, which rewinds the buffer position and
 * corrupts the stream state. Per the {@code InputStream.skip} contract a
 * non-positive request must skip nothing.
 *
 * @return the number of bytes actually skipped (0 when finished or n <= 0)
 */
@Override
public long skip(long n) throws IOException {
    if (n <= 0 || finished) {
        return 0;
    }
    if (available() == 0) {
        readBlock();
    }
    if (finished) {
        return 0;
    }
    int skipped = (int) Math.min(n, available());
    decompressedBuffer.position(decompressedBuffer.position() + skipped);
    return skipped;
}
/**
 * Returns the number of decompressed bytes that can be read without
 * triggering another block read (0 before the first block is loaded).
 */
@Override
public int available() {
    if (decompressedBuffer == null) {
        return 0;
    }
    return decompressedBuffer.remaining();
}
/**
 * Returns the internal decompression buffer to {@code bufferSupplier}.
 * NOTE(review): the input buffer {@code in} is not released here; presumably
 * the caller owns it — confirm against call sites.
 */
@Override
public void close() {
    bufferSupplier.release(decompressionBuffer);
}
/**
 * Mark/reset is not supported by this stream.
 *
 * Improvement: throw {@link UnsupportedOperationException} — the idiomatic
 * exception for an unsupported operation — instead of a bare
 * {@code RuntimeException}. It is a subclass of RuntimeException, so any
 * existing catch blocks continue to work.
 */
@Override
public void mark(int readlimit) {
    throw new UnsupportedOperationException("mark not supported");
}
/**
 * Mark/reset is not supported by this stream.
 *
 * Improvement: throw {@link UnsupportedOperationException} — the idiomatic
 * exception for an unsupported operation — instead of a bare
 * {@code RuntimeException}. It is a subclass of RuntimeException, so any
 * existing catch blocks continue to work.
 */
@Override
public void reset() {
    throw new UnsupportedOperationException("reset not supported");
}
/**
 * Checks whether the version of lz4 on the classpath has the fix for reading from ByteBuffers with
 * non-zero array offsets (see https://github.com/lz4/lz4-java/pull/65).
 *
 * Round-trips a small payload through an array-backed ByteBuffer whose array
 * offset is non-zero; a buggy lz4-java (&lt; 1.4.x) reads from the start of
 * the backing array instead, fails to decompress, and a descriptive
 * RuntimeException is raised.
 */
static void detectBrokenLz4Version() {
    // Highly compressible sample payload.
    byte[] source = new byte[]{1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3};
    final LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor();
    final byte[] compressed = new byte[compressor.maxCompressedLength(source.length)];
    final int compressedLength = compressor.compress(source, 0, source.length, compressed, 0,
        compressed.length);
    // allocate an array-backed ByteBuffer with non-zero array-offset containing the compressed data
    // a buggy decompressor will read the data from the beginning of the underlying array instead of
    // the beginning of the ByteBuffer, failing to decompress the invalid data.
    final byte[] zeroes = {0, 0, 0, 0, 0};
    ByteBuffer nonZeroOffsetBuffer = ByteBuffer
        .allocate(zeroes.length + compressed.length) // allocates the backing array with extra space to offset the data
        .put(zeroes) // prepend invalid bytes (zeros) before the compressed data in the array
        .slice() // create a new ByteBuffer sharing the underlying array, offset to start on the compressed data
        .put(compressed); // write the compressed data at the beginning of this new buffer
    ByteBuffer dest = ByteBuffer.allocate(source.length);
    try {
        // Index 0 here is relative to the slice, i.e. arrayOffset() != 0 in the backing array.
        DECOMPRESSOR.decompress(nonZeroOffsetBuffer, 0, compressedLength, dest, 0, source.length);
    } catch (Exception e) {
        throw new RuntimeException("Kafka has detected a buggy lz4-java library (< 1.4.x) on the classpath."
            + " If you are using Kafka client libraries, make sure your application does not"
            + " accidentally override the version provided by Kafka or include multiple versions"
            + " of the library on the classpath. The lz4-java version on the classpath should"
            + " match the version the Kafka client libraries depend on. Adding -verbose:class"
            + " to your JVM arguments may help understand which lz4-java version is getting loaded.", e);
    }
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java |
---
title: svelte
---
> MODULE: svelte | unknown | github | https://github.com/sveltejs/svelte | documentation/docs/98-reference/20-svelte.md |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cl_interface
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a front panel port, loopback or
management port on Cumulus Linux.
description:
- Configures a front panel, sub-interface, SVI, management or loopback port
on a Cumulus Linux switch. For bridge ports use the cl_bridge module. For
bond ports use the cl_bond module. When configuring bridge related
features like the "vid" option, please follow the guidelines for
configuring "vlan aware" bridging. For more details review the Layer2
Interface Guide at http://docs.cumulusnetworks.com
options:
name:
description:
- name of the interface
required: true
alias_name:
description:
- add a port description
ipv4:
description:
- list of IPv4 addresses to configure on the interface.
use X.X.X.X/YY syntax.
ipv6:
description:
- list of IPv6 addresses to configure on the interface.
use X:X:X::X/YYY syntax
addr_method:
description:
- can be loopback for loopback interfaces or dhcp for dhcp
interfaces.
speed:
description:
- set speed of the swp(front panel) or management(eth0) interface.
speed is in MB
mtu:
description:
- set MTU. Configure Jumbo Frame by setting MTU to 9000.
virtual_ip:
description:
- define IPv4 virtual IP used by the Cumulus VRR feature
virtual_mac:
description:
- define Ethernet mac associated with Cumulus VRR feature
vids:
description:
- in vlan aware mode, lists vlans defined under the interface
mstpctl_bpduguard:
description:
- Enables BPDU Guard on a port in vlan-aware mode
mstpctl_portnetwork:
description:
- Enables bridge assurance in vlan-aware mode
mstpctl_portadminedge:
description:
- Enables admin edge port
clagd_enable:
description:
- Enables the clagd daemon. This command should only be applied to
the clag peerlink interface
clagd_priority:
description:
- Integer that changes the role the switch has in the clag domain.
The lower priority switch will assume the primary role. The number
can be between 0 and 65535
clagd_peer_ip:
description:
- IP address of the directly connected peer switch interface
clagd_sys_mac:
description:
- Clagd system mac address. Recommended to use the range starting
with 44:38:39:ff. Needs to be the same between 2 Clag switches
pvid:
description:
- in vlan aware mode, defines vlan that is the untagged vlan
location:
description:
- interface directory location
default:
- /etc/network/interfaces.d
requirements: [ Alternate Debian network interface manager - \
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
- because the module writes the interface directory location. Ensure that
``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or
whatever path is mentioned in the ``location`` attribute.
- For the config to be activated, i.e installed in the kernel,
"service networking reload" needs to be executed. See EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a front panel port with an IP
cl_interface: name=swp1 ipv4=10.1.1.1/24
notify: reload networking
# configure front panel to use DHCP
cl_interface: name=swp2 addr_method=dhcp
notify: reload networking
# configure a SVI for vlan 100 interface with an IP
cl_interface: name=bridge.100 ipv4=10.1.1.1/24
notify: reload networking
# configure subinterface with an IP
cl_interface: name=bond0.100 alias_name='my bond' ipv4=10.1.1.1/24
notify: reload networking
# define cl_interfaces once in tasks
# then write interfaces in variables file
# with just the options you want.
cl_interface:
name: "{{ item.key }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
speed: "{{ item.value.link_speed|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
clagd_enable: "{{ item.value.clagd_enable|default(omit) }}"
clagd_peer_ip: "{{ item.value.clagd_peer_ip|default(omit) }}"
clagd_sys_mac: "{{ item.value.clagd_sys_mac|default(omit) }}"
clagd_priority: "{{ item.value.clagd_priority|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
with_dict: cl_interfaces
notify: reload networking
# In vars file
# ============
cl_interfaces:
swp1:
alias_name: 'uplink to isp'
ipv4: '10.1.1.1/24'
swp2:
alias_name: 'l2 trunk connection'
vids: [1, 50]
swp3:
speed: 1000
alias_name: 'connects to 1G link'
##########
# br0 interface is configured by cl_bridge
##########
br0.100:
alias_name: 'SVI for vlan 100'
ipv4: '10.2.2.2/24'
ipv6: '10:2:2::2/127'
virtual_ip: '10.2.2.254'
virtual_mac: '00:00:5E:00:10:10'
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
def run_cmd(module, exec_path):
    """Execute an external command and return its stdout.

    :param module: AnsibleModule instance (provides run_command/fail_json)
    :param exec_path: full command line, e.g. "/sbin/ip -o link show"
    :returns: stdout on success; the placeholder '[{}]' when ifquery reports
        "cannot find interface"; otherwise fails the module.
    """
    rc, stdout, stderr = module.run_command(exec_path)
    if rc > 0:
        # ifquery on a missing interface is not fatal: hand back an empty
        # JSON array so callers can treat it as "no current config".
        if re.search('cannot find interface', stderr):
            return '[{}]'
        module.fail_json(msg="Failed; %s Error: %s" % (exec_path, stderr))
        return None  # fail_json normally exits; mirror original fall-through
    return stdout
def current_iface_config(module):
    """Load the interface's existing ifupdown2 config into
    module.custom_current_config (left as {} when no config file exists).

    The explicit file-existence check works around an ifquery bug; once that
    bug is fixed upstream, ifquery alone would suffice.
    """
    iface_name = module.params.get('name')
    iface_dir = module.params.get('location')
    module.custom_current_config = {}
    if os.path.exists(iface_dir + '/' + iface_name):
        _cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
        module.custom_current_config = module.from_json(
            run_cmd(module, _cmd))[0]
def build_address(module):
    """Collect the ipv4/ipv6 params into a single space-separated 'address'
    config entry. Skipped entirely for DHCP interfaces, which acquire their
    address at runtime.
    """
    if module.params.get('addr_method') == 'dhcp':
        return
    addresses = []
    for key in ('ipv4', 'ipv6'):
        values = module.params.get(key)
        if values:
            addresses.extend(values)
    if addresses:
        module.custom_desired_config['config']['address'] = ' '.join(addresses)
def build_vids(module):
    """Set 'bridge-vids' from the vids parameter (vlan-aware mode).

    BUG FIX: vids may arrive as integers — the module's own EXAMPLES use
    ``vids: [1, 50]`` — which made the original ``' '.join(_vids)`` raise
    TypeError. Coerce each entry to str before joining; string vids behave
    exactly as before.
    """
    vids = module.params.get('vids')
    if vids:
        module.custom_desired_config['config']['bridge-vids'] = \
            ' '.join(str(v) for v in vids)
def build_pvid(module):
    """Set 'bridge-pvid' (the untagged vlan) when the pvid param is given."""
    pvid = module.params.get('pvid')
    if not pvid:
        return
    module.custom_desired_config['config']['bridge-pvid'] = str(pvid)
def build_speed(module):
    """Set 'link-speed' (in Mb) and force full duplex when speed is given."""
    speed = module.params.get('speed')
    if not speed:
        return
    config = module.custom_desired_config['config']
    config['link-speed'] = str(speed)
    config['link-duplex'] = 'full'
def conv_bool_to_str(value):
    """Map True/False to 'yes'/'no'; pass any non-bool value through as-is."""
    if isinstance(value, bool):
        return 'yes' if value else 'no'
    return value
def build_generic_attr(module, attr):
    """Copy a module param into the desired config: snake_case param names
    become kebab-case ifupdown2 keys, and booleans become 'yes'/'no'."""
    value = conv_bool_to_str(module.params.get(attr))
    if value:
        key = attr.replace('_', '-')
        module.custom_desired_config['config'][key] = str(value)
def build_alias_name(module):
    """Set the port description ('alias') when alias_name is provided."""
    alias = module.params.get('alias_name')
    if not alias:
        return
    module.custom_desired_config['config']['alias'] = alias
def build_addr_method(module):
    """Record the address method (e.g. 'dhcp', 'loopback') and the implied
    'inet' address family when addr_method is set."""
    method = module.params.get('addr_method')
    if not method:
        return
    module.custom_desired_config['addr_method'] = method
    module.custom_desired_config['addr_family'] = 'inet'
def build_vrr(module):
    """Configure Cumulus VRR ('address-virtual') as "<mac> <ip>".

    virtual_ip and virtual_mac are declared required_together in main(), so
    when the IP is present the MAC is expected to be as well.
    """
    virtual_ip = module.params.get('virtual_ip')
    if not virtual_ip:
        return
    parts = [module.params.get('virtual_mac'), virtual_ip]
    module.custom_desired_config.get('config')['address-virtual'] = \
        ' '.join(parts)
def build_desired_iface_config(module):
    """Assemble the ifupdown2-compatible desired config hash from the module
    parameters and store it on module.custom_desired_config.

    Key order of the base dict is preserved from the original so the JSON
    later fed to ifquery is byte-identical.
    """
    module.custom_desired_config = {
        'addr_family': None,
        'auto': True,
        'config': {},
        'name': module.params.get('name')
    }
    # Parameter-specific builders, in the original invocation order.
    for builder in (build_addr_method, build_address, build_vids, build_pvid,
                    build_speed, build_alias_name, build_vrr):
        builder(module)
    # Simple pass-through attributes: snake_case -> kebab-case keys.
    for attr in ('mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge',
                 'mstpctl_bpduguard', 'clagd_enable',
                 'clagd_priority', 'clagd_peer_ip',
                 'clagd_sys_mac', 'clagd_args'):
        build_generic_attr(module, attr)
def config_dict_changed(module):
    """Return True when the desired 'config' sub-dict differs from the
    current one (i.e. the interface file needs rewriting)."""
    desired = module.custom_desired_config.get('config')
    current = module.custom_current_config.get('config')
    return desired != current
def config_changed(module):
    """Return True when either the config dict or the addr_method differs
    between the desired and current interface configuration."""
    if config_dict_changed(module):
        return True
    # Config dicts match; a change of address method alone still counts.
    desired_method = module.custom_desired_config.get('addr_method')
    current_method = module.custom_current_config.get('addr_method')
    return desired_method != current_method
def replace_config(module):
    """Render the desired config through ifquery and write it to the
    interface file (by default /etc/network/interfaces.d/<name>).

    BUG FIX: the original opened the destination with mode 'w' — truncating
    any existing config — *before* running ifquery. If ifquery failed (which
    exits the module via fail_json), the interface file was left empty.
    The rendered text is now produced first, and both files are managed with
    context-manager/finally semantics so handles cannot leak.
    """
    desired_config = module.custom_desired_config
    final_location = module.params.get('location') + '/' + \
        module.params.get('name')
    # ifquery expects a JSON *list* of config hashes, hence [desired_config].
    temp = tempfile.NamedTemporaryFile(mode='w+')
    try:
        temp.write(module.jsonify([desired_config]))
        # seek(0) flushes the buffer so ifquery sees the data just written.
        temp.seek(0)
        final_text = run_cmd(module, "/sbin/ifquery -a -i %s -t json" % temp.name)
    finally:
        temp.close()
    # Only now — with the rendered text in hand — touch the real config file.
    with open(final_location, 'w') as fh:
        fh.write(final_text)
def main():
    """Module entry point: parse parameters, compare the desired interface
    config against the current one, and rewrite the interface file (reporting
    changed=True) only when they differ."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str'),
            ipv4=dict(type='list'),
            ipv6=dict(type='list'),
            alias_name=dict(type='str'),
            addr_method=dict(type='str',
                             choices=['', 'loopback', 'dhcp']),
            speed=dict(type='str'),
            mtu=dict(type='str'),
            virtual_ip=dict(type='str'),
            virtual_mac=dict(type='str'),
            vids=dict(type='list'),
            pvid=dict(type='str'),
            mstpctl_portnetwork=dict(type='bool', choices=BOOLEANS),
            mstpctl_portadminedge=dict(type='bool', choices=BOOLEANS),
            mstpctl_bpduguard=dict(type='bool', choices=BOOLEANS),
            clagd_enable=dict(type='bool', choices=BOOLEANS),
            clagd_priority=dict(type='str'),
            clagd_peer_ip=dict(type='str'),
            clagd_sys_mac=dict(type='str'),
            clagd_args=dict(type='str'),
            location=dict(type='str',
                          default='/etc/network/interfaces.d')
        ),
        required_together=[
            ['virtual_ip', 'virtual_mac'],
            ['clagd_enable', 'clagd_priority',
             'clagd_peer_ip', 'clagd_sys_mac']
        ]
    )
    # if using the jinja default filter, this resolves to
    # create an list with an empty string ['']. The following
    # checks all lists and removes it, so that functions expecting
    # an empty list, get this result. May upstream this fix into
    # the AnsibleModule code to have it check for this.
    # NOTE(review): iteritems() is Python 2 only — this module predates the
    # Python 3 migration; would need items() for Python 3.
    for k, _param in module.params.iteritems():
        if isinstance(_param, list):
            module.params[k] = [x for x in _param if x]
    # The configured interfaces directory must exist before we try to
    # read/write interface files inside it.
    _location = module.params.get('location')
    if not os.path.exists(_location):
        _msg = "%s does not exist." % (_location)
        module.fail_json(msg=_msg)
        return  # for testing purposes only
    ifacename = module.params.get('name')
    _changed = False
    _msg = "interface %s config not changed" % (ifacename)
    # Compare current (from ifquery) vs. desired (from params) configuration.
    current_iface_config(module)
    build_desired_iface_config(module)
    if config_changed(module):
        replace_config(module)
        _msg = "interface %s config updated" % (ifacename)
        _changed = True
    module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.