code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
# random util functions I've written
# mostly taken from https://github.com/DanielOaks/goshu but I don't care about the license
import getpass
def true_or_false(in_str):
    """Map a string to True/False by its prefix, or None if neither.

    Recognizes the usual truthy prefixes ('true', 'y', '1', 'on') and
    falsy ones ('false', 'n', '0', 'off'), case-insensitively.
    """
    lowered = in_str.lower()
    for verdict, prefixes in ((True, ('true', 'y', '1', 'on')),
                              (False, ('false', 'n', '0', 'off'))):
        if lowered.startswith(prefixes):
            return verdict
    return None
def is_ok(prompt, blank=''):
    """Prompt the user for yes/no and returns True/False
    Arguments:
    prompt -- Prompt for the user
    blank -- If True, a blank response will return True, ditto for False, the default ''
             will not accept blank responses and ask until the user gives an appropriate
             response
    Returns:
    True if user accepts, False if user does not"""
    while True:
        answer = input(prompt).lower().strip()
        if answer:
            first = answer[0]
            if first in ('y', 't', '1'):  # yes, true, 1
                return True
            if first in ('n', 'f', '0'):  # no, false, 0
                return False
        elif blank is True:
            return True
        elif blank is False:
            return False
        # otherwise keep asking until we get something usable
class GuiManager:
    """Handles generic gui stuff: console prompting for strings/numbers/bools."""

    # input functions
    def get_string(self, prompt, repeating_prompt=None, default=None,
                   confirm_prompt=None, blank_allowed=False,
                   password=False, validate=None):
        """Prompt for a string and return it.

        Arguments:
        prompt -- initial prompt shown to the user
        repeating_prompt -- prompt used when re-asking (defaults to prompt)
        default -- value returned when the user enters a blank response
        confirm_prompt -- if given, the value must be typed twice and match
        blank_allowed -- whether a blank response is acceptable as-is
        password -- if True, don't echo typed characters (uses getpass)
        validate -- optional callable; re-ask until it returns truthy
        """
        if repeating_prompt is None:
            repeating_prompt = prompt
        # echo typed chars vs not doing that
        fn = getpass.getpass if password else input

        def acceptable(value):
            # A blank answer is fine when a default will replace it or when
            # blanks are explicitly allowed; non-blank answers (and allowed
            # blanks) must still pass the validator, if one was given.
            if value.strip() == '':
                if default is not None:
                    return True
                if not blank_allowed:
                    return False
            return not validate or validate(value)

        if confirm_prompt is not None:
            # Confirmed entry: value must be typed identically twice.
            value = fn(prompt)
            while value != fn(confirm_prompt) or not acceptable(value):
                value = fn(repeating_prompt)
        else:
            # FIX: the old code overwrote whatever the user typed with
            # `default` whenever (not blank_allowed or validate) held;
            # the default is only meant to replace a *blank* response.
            value = fn(prompt)
            while not acceptable(value):
                value = fn(repeating_prompt)
        if value.strip() == '' and default is not None:
            return default
        return value

    def get_number(self, prompt, repeating_prompt=None, default=None, force_int=False, password=False):
        """Prompt for a number; force_int forces an integer result.

        A blank response returns ``default`` when one is given; otherwise
        the question is repeated until the input parses.
        """
        if repeating_prompt is None:
            repeating_prompt = prompt

        # parse function, since we repeat it
        def parse_value(val):
            # Returns int/float on success, the default for a blank response
            # (when one was given), or '' to signal "ask again".
            try:
                if force_int or '.' not in val:
                    return int(val)
                return float(val)
            except (ValueError, TypeError):
                if (default is not None) and (val.strip() == ''):
                    # we take blank as 'use the default'
                    return default
                return ''  # user screwed up, we'll ask for another value

        # get initial value
        value = parse_value(self.get_string(
            prompt, repeating_prompt, blank_allowed=True,
            password=password).strip())
        # repeat if required
        while not isinstance(value, (int, float)):
            # FIX: the old repeat call dropped password/blank_allowed and
            # skipped stripping, so password prompts suddenly echoed.
            value = parse_value(self.get_string(
                repeating_prompt, repeating_prompt, blank_allowed=True,
                password=password).strip())
        return value

    def get_bool(self, prompt, repeating_prompt=None, default=None, allow_none=False, password=False):
        """Prompt for a bool; allow_none permits a None result.

        A blank response yields ``default`` when one is given (or None when
        allow_none is set); otherwise the question is repeated.
        """
        if repeating_prompt is None:
            repeating_prompt = prompt
        _ask_again = ''  # sentinel: response not understood, re-prompt

        # parse function, since we repeat it
        def parse_value(val):
            if val == '':
                if default is not None or allow_none:
                    return default
                # FIX: a blank answer with no default used to fall through
                # and implicitly return None, which the acceptance check
                # below treated as a valid answer even with allow_none=False.
                return _ask_again
            parsed = true_or_false(val)
            return _ask_again if parsed is None else parsed

        # get initial value
        value = parse_value(self.get_string(
            prompt, repeating_prompt, blank_allowed=True,
            password=password).strip())
        # repeat if needed
        while value not in (True, False, None):
            value = parse_value(self.get_string(
                repeating_prompt, repeating_prompt, blank_allowed=True,
                password=password).strip())
        return value
# Copyright 2025 Arcee AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure

if TYPE_CHECKING:
    # Static type checkers see the real symbols directly.
    from .configuration_arcee import *
    from .modeling_arcee import *
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported when one of their attributes is accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
# -*- coding: utf-8 -*-
import os
import codecs
import re
'''
This file contains all phonetics related functions. The phonetic
transcription is obtained using eSpeak speech synthesizer
(http://espeak.sourceforge.net/).
For English the list of available phonetic vowels can be found here:
http://espeak.sourceforge.net/phonemes.html
'''
def is_vow(c, language='fi'):
    '''
    Tell whether the given (lowercase) character is a vowel.
    '''
    if language == 'fi':  # Finnish
        return c in u'aeiouyäöå'
    if len(language) >= 2 and language[:2] == 'en':  # English
        # In order to increase recall for the rhyme detection, we
        # ignore the schwa vowel '@' as it can be rhymed with several
        # different vowels. However, in BattleBot we do not ignore it
        # in order to get a higher precision.
        return c in u'3L5aAeEiI0VuUoO'
    raise Exception("Unknown language: %s" % language)
def map_vow(c, language):
    '''
    Map a vowel to a similar sounding canonical vowel (English only);
    any other character or language returns the input unchanged.
    '''
    # This mapping is somewhat arbitrary, so some native English speaker
    # who knows about phonetics might be able to improve it.
    equivalents = {
        '0': 'o',
        'O': 'o',
        'I': 'i',
        'E': 'e',
    }
    if len(language) >= 2 and language[:2] == 'en':
        return equivalents.get(c, c)
    return c
def is_space(c):
    '''
    Tell whether the character is a plain space or a newline (all other
    whitespace characters are cleaned in the preprocessing phase).
    '''
    return c in (' ', '\n')
def get_phonetic_transcription(text, language='en-us', output_fname=None):
    '''
    Transcribe text into phonemes by shelling out to the eSpeak synthesizer.

    Arguments:
    text -- the text to transcribe (unicode)
    language -- an eSpeak voice name passed to -v (e.g. 'en-us')
    output_fname -- optional cache file; if it already exists its contents
                    are reused instead of re-running eSpeak

    Returns the transcription with a few eSpeak artifacts stripped out.
    '''
    if output_fname is None:
        fname2 = u'temp_transcription.txt'
    else:
        fname2 = output_fname
    if output_fname is None or not os.path.exists(fname2):
        print("Transcribing: %s" % fname2)
        fname = u'temp_lyrics.txt'
        # FIX: use context managers so the temp/result files are always closed
        # (the old code leaked the read handle entirely).
        f = codecs.open(fname, 'w', 'utf8')
        try:
            f.write(text)
        finally:
            f.close()
        # NOTE(review): fname/fname2 are interpolated straight into a shell
        # command; fine for the fixed temp names, unsafe if output_fname ever
        # comes from untrusted input.
        cmd = u'espeak -xq -v%s -f %s > %s' % (language, fname, fname2)
        os.system(cmd)
    f2 = codecs.open(fname2, 'r', 'utf8')
    try:
        new_text = f2.read()
    finally:
        f2.close()
    # Remove some unwanted stuff from the transcription
    new_text = re.sub("_:'Ekskl@m,eIS@n_:", "", new_text)
    new_text = re.sub("'", "", new_text)
    new_text = re.sub(",", "", new_text)
    return new_text
import logging
from autotest.client.shared import error
from virttest import env_process
@error.context_aware
def run(test, params, env):
    """
    Qemu invalid parameter in qemu command line test:
    1) Try boot up guest with invalid parameters
    2) Catch the error message shows by qemu process

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm_name = params["main_vm"]
    params['start_vm'] = "yes"
    try:
        error.context("Start guest with invalid parameters.")
        env_process.preprocess_vm(test, params, env, vm_name)
        vm = env.get_vm(vm_name)
        vm.destroy()
    # FIX: use the modern `except ... as` form instead of the legacy
    # comma syntax removed in Python 3.
    except Exception as emsg:
        error.context("Check guest exit status.")
        # NOTE(review): assumes the raised exception carries a `.reason`
        # attribute (virt error types) — plain exceptions would raise
        # AttributeError here; confirm against env_process behavior.
        if "(core dumped)" in emsg.reason:
            raise error.TestFail("Guest core dumped with invalid parameters.")
        # Lazy %-style args so formatting only happens if the record is emitted.
        logging.info("Guest quit as expect: %s", emsg.reason)
        return
    raise error.TestFail("Guest start normally, didn't quit as expect.")
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler weights.
"""
from oslo.serialization import jsonutils
from nova import context
from nova import exception
from nova.openstack.common.fixture import mockpatch
from nova.scheduler import weights
from nova import test
from nova.tests import matchers
from nova.tests.scheduler import fakes
class TestWeighedHost(test.NoDBTestCase):
    """Sanity checks for WeighedHost and the weigher registry."""

    def test_dict_conversion(self):
        # to_dict() should expose exactly the host name and its weight.
        state = fakes.FakeHostState('somehost', None, {})
        weighed = weights.WeighedHost(state, 'someweight')
        self.assertThat(weighed.to_dict(),
                        matchers.DictMatches({'host': 'somehost',
                                              'weight': 'someweight'}))

    def test_all_weighers(self):
        # The three stock weighers must all be discoverable.
        names = [cls.__name__ for cls in weights.all_weighers()]
        self.assertEqual(len(names), 3)
        for expected in ('RAMWeigher', 'MetricsWeigher', 'IoOpsWeigher'):
            self.assertIn(expected, names)
class RamWeigherTestCase(test.NoDBTestCase):
    """Tests for nova.scheduler.weights.ram.RAMWeigher.

    Per the assertions below, the host with the most free RAM ends up with
    weight 1.0 scaled by the ram_weight_multiplier flag.
    """

    def setUp(self):
        super(RamWeigherTestCase, self).setUp()
        # Stub the DB call so host states come from the canned fixtures.
        self.useFixture(mockpatch.Patch(
            'nova.db.compute_node_get_all',
            return_value=fakes.COMPUTE_NODES))
        self.host_manager = fakes.FakeHostManager()
        self.weight_handler = weights.HostWeightHandler()
        # Load only the RAM weigher so no other weigher affects the results.
        self.weight_classes = self.weight_handler.get_matching_classes(
            ['nova.scheduler.weights.ram.RAMWeigher'])

    def _get_weighed_host(self, hosts, weight_properties=None):
        # Returns the top-weighted host (result list is ordered best-first).
        if weight_properties is None:
            weight_properties = {}
        return self.weight_handler.get_weighed_objects(self.weight_classes,
                hosts, weight_properties)[0]

    def _get_all_hosts(self):
        # Host states are built from the patched compute_node_get_all fixture.
        ctxt = context.get_admin_context()
        return self.host_manager.get_all_host_states(ctxt)

    def test_default_of_spreading_first(self):
        hostinfo_list = self._get_all_hosts()
        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192
        # so, host4 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(weighed_host.weight, 1.0)
        self.assertEqual(weighed_host.obj.host, 'host4')

    def test_ram_filter_multiplier1(self):
        # Multiplier 0 zeroes out every weight.
        self.flags(ram_weight_multiplier=0.0)
        hostinfo_list = self._get_all_hosts()
        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192
        # We do not know the host, all have same weight.
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(weighed_host.weight, 0.0)

    def test_ram_filter_multiplier2(self):
        # The winner's weight scales linearly with the multiplier.
        self.flags(ram_weight_multiplier=2.0)
        hostinfo_list = self._get_all_hosts()
        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192
        # so, host4 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(weighed_host.weight, 1.0 * 2)
        self.assertEqual(weighed_host.obj.host, 'host4')

    def test_ram_filter_negative(self):
        # A host with negative free RAM must rank last, with weight 0.
        self.flags(ram_weight_multiplier=1.0)
        hostinfo_list = self._get_all_hosts()
        host_attr = {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}
        host_state = fakes.FakeHostState('negative', 'negative', host_attr)
        hostinfo_list = list(hostinfo_list) + [host_state]
        # host1: free_ram_mb=512
        # host2: free_ram_mb=1024
        # host3: free_ram_mb=3072
        # host4: free_ram_mb=8192
        # negativehost: free_ram_mb=-512
        # so, host4 should win
        weights = self.weight_handler.get_weighed_objects(self.weight_classes,
                hostinfo_list, {})
        weighed_host = weights[0]
        self.assertEqual(weighed_host.weight, 1)
        self.assertEqual(weighed_host.obj.host, "host4")
        # and negativehost should lose
        weighed_host = weights[-1]
        self.assertEqual(weighed_host.weight, 0)
        self.assertEqual(weighed_host.obj.host, "negative")
class MetricsWeigherTestCase(test.NoDBTestCase):
    """Tests for nova.scheduler.weights.metrics.MetricsWeigher.

    The weigher is driven by the [metrics]weight_setting flag, a list of
    'name=ratio' strings; the assertions below pin both the weighing and
    the parsing of that setting.
    """

    def setUp(self):
        super(MetricsWeigherTestCase, self).setUp()
        # Stub the DB call so host states come from the metrics fixtures.
        self.useFixture(mockpatch.Patch(
            'nova.db.compute_node_get_all',
            return_value=fakes.COMPUTE_NODES_METRICS))
        self.host_manager = fakes.FakeHostManager()
        self.weight_handler = weights.HostWeightHandler()
        # Load only the metrics weigher.
        self.weight_classes = self.weight_handler.get_matching_classes(
            ['nova.scheduler.weights.metrics.MetricsWeigher'])

    def _get_weighed_host(self, hosts, setting, weight_properties=None):
        # Apply the given weight_setting flag, then return the best host.
        if not weight_properties:
            weight_properties = {}
        self.flags(weight_setting=setting, group='metrics')
        return self.weight_handler.get_weighed_objects(self.weight_classes,
                hosts, weight_properties)[0]

    def _get_all_hosts(self):
        ctxt = context.get_admin_context()
        return self.host_manager.get_all_host_states(ctxt)

    def _do_test(self, settings, expected_weight, expected_host):
        # Shared driver: weigh all hosts under `settings` and check the winner.
        hostinfo_list = self._get_all_hosts()
        weighed_host = self._get_weighed_host(hostinfo_list, settings)
        self.assertEqual(weighed_host.weight, expected_weight)
        self.assertEqual(weighed_host.obj.host, expected_host)

    def test_single_resource(self):
        # host1: foo=512
        # host2: foo=1024
        # host3: foo=3072
        # host4: foo=8192
        # so, host4 should win:
        setting = ['foo=1']
        self._do_test(setting, 1.0, 'host4')

    def test_multiple_resource(self):
        # host1: foo=512, bar=1
        # host2: foo=1024, bar=2
        # host3: foo=3072, bar=1
        # host4: foo=8192, bar=0
        # so, host2 should win:
        setting = ['foo=0.0001', 'bar=1']
        self._do_test(setting, 1.0, 'host2')

    def test_single_resourcenegtive_ratio(self):
        # A negative ratio inverts the preference: least foo wins.
        # host1: foo=512
        # host2: foo=1024
        # host3: foo=3072
        # host4: foo=8192
        # so, host1 should win:
        setting = ['foo=-1']
        self._do_test(setting, 1.0, 'host1')

    def test_multiple_resource_missing_ratio(self):
        # An entry without '=ratio' is ignored by the parser (see
        # test_parse_setting below), leaving only 'foo'.
        # host1: foo=512, bar=1
        # host2: foo=1024, bar=2
        # host3: foo=3072, bar=1
        # host4: foo=8192, bar=0
        # so, host4 should win:
        setting = ['foo=0.0001', 'bar']
        self._do_test(setting, 1.0, 'host4')

    def test_multiple_resource_wrong_ratio(self):
        # A non-numeric ratio ('2.0t') is likewise dropped by the parser.
        # host1: foo=512, bar=1
        # host2: foo=1024, bar=2
        # host3: foo=3072, bar=1
        # host4: foo=8192, bar=0
        # so, host4 should win:
        setting = ['foo=0.0001', 'bar = 2.0t']
        self._do_test(setting, 1.0, 'host4')

    def _check_parsing_result(self, weigher, setting, results):
        # Parse `setting` into (name, ratio) pairs and compare to `results`.
        self.flags(weight_setting=setting, group='metrics')
        weigher._parse_setting()
        self.assertEqual(len(weigher.setting), len(results))
        for item in results:
            self.assertIn(item, weigher.setting)

    def test_parse_setting(self):
        weigher = self.weight_classes[0]()
        self._check_parsing_result(weigher,
                                   ['foo=1'],
                                   [('foo', 1.0)])
        self._check_parsing_result(weigher,
                                   ['foo=1', 'bar=-2.1'],
                                   [('foo', 1.0), ('bar', -2.1)])
        # Malformed entries (bad number, missing ratio, missing name)
        # are silently skipped.
        self._check_parsing_result(weigher,
                                   ['foo=a1', 'bar=-2.1'],
                                   [('bar', -2.1)])
        self._check_parsing_result(weigher,
                                   ['foo', 'bar=-2.1'],
                                   [('bar', -2.1)])
        self._check_parsing_result(weigher,
                                   ['=5', 'bar=-2.1'],
                                   [('bar', -2.1)])

    def test_metric_not_found_required(self):
        # With [metrics]required left at its default, a metric missing from
        # every host ('zot') must raise.
        setting = ['foo=1', 'zot=2']
        self.assertRaises(exception.ComputeHostMetricNotFound,
                          self._do_test,
                          setting,
                          8192,
                          'host4')

    def test_metric_not_found_non_required(self):
        # host1: foo=512, bar=1
        # host2: foo=1024, bar=2
        # host3: foo=3072, bar=1
        # host4: foo=8192, bar=0
        # host5: foo=768, bar=0, zot=1
        # host6: foo=2048, bar=0, zot=2
        # so, host5 should win:
        self.flags(required=False, group='metrics')
        setting = ['foo=0.0001', 'zot=-1']
        self._do_test(setting, 1.0, 'host5')
# Canned compute-node rows for IoOpsWeigherTestCase below. The weigher reads
# 'io_workload' out of the JSON-encoded `stats` blob of each node.
COMPUTE_NODES_IO_OPS = [
    # host1: num_io_ops=1
    dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
         disk_available_least=None, free_ram_mb=512, vcpus_used=1,
         free_disk_gb=512, local_gb_used=0, updated_at=None,
         service=dict(host='host1', disabled=False),
         hypervisor_hostname='node1', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         stats=jsonutils.dumps({'io_workload': '1'})),
    # host2: num_io_ops=2
    dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
         disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
         free_disk_gb=1024, local_gb_used=0, updated_at=None,
         service=dict(host='host2', disabled=True),
         hypervisor_hostname='node2', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         stats=jsonutils.dumps({'io_workload': '2'})),
    # host3: num_io_ops=0, so host3 should win in the case of default
    # io_ops_weight_multiplier configure.
    dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
         disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
         free_disk_gb=3072, local_gb_used=0, updated_at=None,
         service=dict(host='host3', disabled=False),
         hypervisor_hostname='node3', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         stats=jsonutils.dumps({'io_workload': '0'})),
    # host4: num_io_ops=4, so host4 should win in the case of positive
    # io_ops_weight_multiplier configure.
    dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
         disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
         free_disk_gb=8888, local_gb_used=0, updated_at=None,
         service=dict(host='host4', disabled=False),
         hypervisor_hostname='node4', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         stats=jsonutils.dumps({'io_workload': '4'})),
    # Broken entry
    dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
]
class IoOpsWeigherTestCase(test.NoDBTestCase):
    """Tests for nova.scheduler.weights.io_ops.IoOpsWeigher.

    Uses the COMPUTE_NODES_IO_OPS fixtures above; the winner flips between
    the least-loaded host (negative/default multiplier, per the default
    test) and the most-loaded one (positive multiplier).
    """

    def setUp(self):
        super(IoOpsWeigherTestCase, self).setUp()
        # Stub the DB call so host states come from the io-ops fixtures.
        self.useFixture(mockpatch.Patch(
            'nova.db.compute_node_get_all',
            return_value=COMPUTE_NODES_IO_OPS))
        self.host_manager = fakes.FakeHostManager()
        self.weight_handler = weights.HostWeightHandler()
        self.weight_classes = self.weight_handler.get_matching_classes(
            ['nova.scheduler.weights.io_ops.IoOpsWeigher'])

    def _get_weighed_host(self, hosts, io_ops_weight_multiplier):
        # None means "leave the flag at its default".
        if io_ops_weight_multiplier is not None:
            self.flags(io_ops_weight_multiplier=io_ops_weight_multiplier)
        return self.weight_handler.get_weighed_objects(self.weight_classes,
                hosts, {})[0]

    def _get_all_hosts(self):
        ctxt = context.get_admin_context()
        return self.host_manager.get_all_host_states(ctxt)

    def _do_test(self, io_ops_weight_multiplier, expected_weight,
                 expected_host):
        # Shared driver; expected_host=None skips the host check (used when
        # all hosts tie).
        hostinfo_list = self._get_all_hosts()
        weighed_host = self._get_weighed_host(hostinfo_list,
                                              io_ops_weight_multiplier)
        self.assertEqual(weighed_host.weight, expected_weight)
        if expected_host:
            self.assertEqual(weighed_host.obj.host, expected_host)

    def test_io_ops_weight_multiplier_by_default(self):
        self._do_test(io_ops_weight_multiplier=None,
                      expected_weight=0.0,
                      expected_host='host3')

    def test_io_ops_weight_multiplier_zero_value(self):
        # We do not know the host, all have same weight.
        self._do_test(io_ops_weight_multiplier=0.0,
                      expected_weight=0.0,
                      expected_host=None)

    def test_io_ops_weight_multiplier_positive_value(self):
        self._do_test(io_ops_weight_multiplier=2.0,
                      expected_weight=2.0,
                      expected_host='host4')
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.validation.beanvalidation;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import jakarta.validation.ConstraintViolation;
import jakarta.validation.ElementKind;
import jakarta.validation.Path;
import jakarta.validation.ValidationException;
import jakarta.validation.executable.ExecutableValidator;
import jakarta.validation.metadata.BeanDescriptor;
import jakarta.validation.metadata.ConstraintDescriptor;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.InvalidPropertyException;
import org.springframework.beans.NotReadablePropertyException;
import org.springframework.context.MessageSourceResolvable;
import org.springframework.context.support.DefaultMessageSourceResolvable;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.StringUtils;
import org.springframework.validation.BindingResult;
import org.springframework.validation.Errors;
import org.springframework.validation.FieldError;
import org.springframework.validation.ObjectError;
import org.springframework.validation.SmartValidator;
/**
* Adapter that takes a JSR-303 {@code javax.validator.Validator} and
* exposes it as a Spring {@link org.springframework.validation.Validator}
* while also exposing the original JSR-303 Validator interface itself.
*
* <p>Can be used as a programmatic wrapper. Also serves as base class for
* {@link CustomValidatorBean} and {@link LocalValidatorFactoryBean},
* and as the primary implementation of the {@link SmartValidator} interface.
*
* @author Juergen Hoeller
* @author Sam Brannen
* @since 3.0
* @see SmartValidator
* @see CustomValidatorBean
* @see LocalValidatorFactoryBean
*/
public class SpringValidatorAdapter implements SmartValidator, jakarta.validation.Validator {
	/** Constraint annotation attributes that are never exposed as message arguments. */
	private static final Set<String> internalAnnotationAttributes = Set.of("message", "groups", "payload");

	/** The wrapped JSR-303 Validator; {@code null} only until set via the package-private path. */
	private jakarta.validation.@Nullable Validator targetValidator;
	/**
	 * Create a new SpringValidatorAdapter for the given JSR-303 Validator.
	 * @param targetValidator the JSR-303 Validator to wrap
	 * @throws IllegalArgumentException if the given Validator is {@code null}
	 */
	public SpringValidatorAdapter(jakarta.validation.Validator targetValidator) {
		Assert.notNull(targetValidator, "Target Validator must not be null");
		this.targetValidator = targetValidator;
	}
	/**
	 * Create a new SpringValidatorAdapter without a target Validator,
	 * expecting it to be supplied later via {@link #setTargetValidator}.
	 */
	SpringValidatorAdapter() {
	}
	/**
	 * Set the JSR-303 Validator that all operations delegate to.
	 */
	void setTargetValidator(jakarta.validation.Validator targetValidator) {
		this.targetValidator = targetValidator;
	}
//---------------------------------------------------------------------
// Implementation of Spring Validator interface
//---------------------------------------------------------------------
	@Override
	public boolean supports(Class<?> clazz) {
		// Any class is supported as long as a target Validator is present;
		// constraint metadata lookup happens at validate() time.
		return (this.targetValidator != null);
	}
	@Override
	public void validate(Object target, Errors errors) {
		// Delegate to the JSR-303 Validator, then translate the resulting
		// ConstraintViolations into Spring Errors entries.
		if (this.targetValidator != null) {
			processConstraintViolations(this.targetValidator.validate(target), errors);
		}
	}
	@Override
	public void validate(Object target, Errors errors, Object... validationHints) {
		// Hints that are Class objects are interpreted as JSR-303 validation groups.
		if (this.targetValidator != null) {
			processConstraintViolations(
					this.targetValidator.validate(target, asValidationGroups(validationHints)), errors);
		}
	}
	@SuppressWarnings({"rawtypes", "unchecked"})
	@Override
	public void validateValue(
			Class<?> targetType, @Nullable String fieldName, @Nullable Object value, Errors errors, Object... validationHints) {

		// Raw Class cast is needed because the JSR-303 signature is generic
		// in the bean type while our SmartValidator signature is not.
		if (this.targetValidator != null) {
			processConstraintViolations(this.targetValidator.validateValue(
					(Class) targetType, fieldName, value, asValidationGroups(validationHints)), errors);
		}
	}
/**
* Turn the specified validation hints into JSR-303 validation groups.
* @since 5.1
*/
private Class<?>[] asValidationGroups(Object... validationHints) {
Set<Class<?>> groups = new LinkedHashSet<>(4);
for (Object hint : validationHints) {
if (hint instanceof Class<?> clazz) {
groups.add(clazz);
}
}
return ClassUtils.toClassArray(groups);
}
	/**
	 * Process the given JSR-303 ConstraintViolations, adding corresponding errors to
	 * the provided Spring {@link Errors} object.
	 * @param violations the JSR-303 ConstraintViolation results
	 * @param errors the Spring errors object to register to
	 */
	@SuppressWarnings("serial")
	protected void processConstraintViolations(Set<ConstraintViolation<Object>> violations, Errors errors) {
		for (ConstraintViolation<Object> violation : violations) {
			String field = determineField(violation);
			FieldError fieldError = errors.getFieldError(field);
			// Skip a violation when a binding failure is already recorded
			// for the same field: the bind error takes precedence.
			if (fieldError == null || !fieldError.isBindingFailure()) {
				try {
					ConstraintDescriptor<?> cd = violation.getConstraintDescriptor();
					String errorCode = determineErrorCode(cd);
					Object[] errorArgs = getArgumentsForConstraint(errors.getObjectName(), field, cd);
					if (errors instanceof BindingResult bindingResult) {
						// Can do custom FieldError registration with invalid value from ConstraintViolation,
						// as necessary for Hibernate Validator compatibility (non-indexed set path in field)
						String nestedField = bindingResult.getNestedPath() + field;
						if (nestedField.isEmpty()) {
							// Class-level (object) constraint: register an ObjectError.
							String[] errorCodes = bindingResult.resolveMessageCodes(errorCode);
							ObjectError error = new ViolationObjectError(
									errors.getObjectName(), errorCodes, errorArgs, violation, this);
							bindingResult.addError(error);
						}
						else {
							// Property-level constraint: register a FieldError carrying the rejected value.
							Object rejectedValue = getRejectedValue(field, violation, bindingResult);
							String[] errorCodes = bindingResult.resolveMessageCodes(errorCode, field);
							FieldError error = new ViolationFieldError(errors.getObjectName(), nestedField,
									rejectedValue, errorCodes, errorArgs, violation, this);
							bindingResult.addError(error);
						}
					}
					else {
						// Got no BindingResult - can only do standard rejectValue call
						// with automatic extraction of the current field value
						errors.rejectValue(field, errorCode, errorArgs, violation.getMessage());
					}
				}
				catch (NotReadablePropertyException ex) {
					throw new IllegalStateException("JSR-303 validated property '" + field +
							"' does not have a corresponding accessor for Spring data binding - " +
							"check your DataBinder's configuration (bean property versus direct field access)", ex);
				}
			}
		}
	}
	/**
	 * Determine a field for the given constraint violation.
	 * <p>The default implementation returns the stringified property path.
	 * @param violation the current JSR-303 ConstraintViolation
	 * @return the Spring-reported field (for use with {@link Errors})
	 * @since 4.2
	 * @see jakarta.validation.ConstraintViolation#getPropertyPath()
	 * @see org.springframework.validation.FieldError#getField()
	 */
	protected String determineField(ConstraintViolation<Object> violation) {
		Path path = violation.getPropertyPath();
		StringBuilder sb = new StringBuilder();
		boolean first = true;
		// Walk the property path, appending "[index]" (or "[key]") for
		// iterable entries and dot-joining named property nodes.
		for (Path.Node node : path) {
			if (node.isInIterable() && !first) {
				sb.append('[');
				Object index = node.getIndex();
				if (index == null) {
					// No numeric index -> fall back to the map key, if any.
					index = node.getKey();
				}
				if (index != null) {
					sb.append(index);
				}
				sb.append(']');
			}
			String name = node.getName();
			// Only PROPERTY nodes with non-synthetic names (not starting
			// with '<') contribute to the dotted field path.
			if (name != null && node.getKind() == ElementKind.PROPERTY && !name.startsWith("<")) {
				if (!first) {
					sb.append('.');
				}
				first = false;
				sb.append(name);
			}
		}
		return sb.toString();
	}
	/**
	 * Determine a Spring-reported error code for the given constraint descriptor.
	 * <p>The default implementation returns the simple class name of the descriptor's
	 * annotation type. Note that the configured
	 * {@link org.springframework.validation.MessageCodesResolver} will automatically
	 * generate error code variations which include the object name and the field name.
	 * @param descriptor the JSR-303 ConstraintDescriptor for the current violation
	 * @return a corresponding error code (for use with {@link Errors})
	 * @since 4.2
	 * @see jakarta.validation.metadata.ConstraintDescriptor#getAnnotation()
	 * @see org.springframework.validation.MessageCodesResolver
	 */
	protected String determineErrorCode(ConstraintDescriptor<?> descriptor) {
		// e.g. an @NotNull violation yields the error code "NotNull"
		return descriptor.getAnnotation().annotationType().getSimpleName();
	}
	/**
	 * Return FieldError arguments for a validation error on the given field.
	 * Invoked for each violated constraint.
	 * <p>The default implementation returns a first argument indicating the field name
	 * (see {@link #getResolvableField}). Afterwards, it adds all actual constraint
	 * annotation attributes (i.e. excluding "message", "groups" and "payload") in
	 * alphabetical order of their attribute names.
	 * <p>Can be overridden to, for example, add further attributes from the constraint descriptor.
	 * @param objectName the name of the target object
	 * @param field the field that caused the binding error
	 * @param descriptor the JSR-303 constraint descriptor
	 * @return the Object array that represents the FieldError arguments
	 * @see org.springframework.validation.FieldError#getArguments
	 * @see org.springframework.context.support.DefaultMessageSourceResolvable
	 * @see org.springframework.validation.DefaultBindingErrorProcessor#getArgumentsForBindError
	 */
	protected Object[] getArgumentsForConstraint(String objectName, String field, ConstraintDescriptor<?> descriptor) {
		List<Object> arguments = new ArrayList<>();
		arguments.add(getResolvableField(objectName, field));
		// Using a TreeMap for alphabetical ordering of attribute names
		Map<String, Object> attributesToExpose = new TreeMap<>();
		descriptor.getAttributes().forEach((attributeName, attributeValue) -> {
			if (!internalAnnotationAttributes.contains(attributeName)) {
				if (attributeValue instanceof String str) {
					// Wrap plain String attribute values so they can be
					// resolved against a MessageSource themselves.
					attributeValue = new ResolvableAttribute(str);
				}
				attributesToExpose.put(attributeName, attributeValue);
			}
		});
		arguments.addAll(attributesToExpose.values());
		return arguments.toArray();
	}
/**
* Build a resolvable wrapper for the specified field, allowing to resolve the field's
* name in a {@code MessageSource}.
* <p>The default implementation returns a first argument indicating the field:
* of type {@code DefaultMessageSourceResolvable}, with "objectName.field" and "field"
* as codes, and with the plain field name as default message.
* @param objectName the name of the target object
* @param field the field that caused the binding error
* @return a corresponding {@code MessageSourceResolvable} for the specified field
* @since 4.3
* @see #getArgumentsForConstraint
*/
protected MessageSourceResolvable getResolvableField(String objectName, String field) {
String[] codes = (StringUtils.hasText(field) ?
new String[] {objectName + Errors.NESTED_PATH_SEPARATOR + field, field} :
new String[] {objectName});
return new DefaultMessageSourceResolvable(codes, field);
}
	/**
	 * Extract the rejected value behind the given constraint violation,
	 * for exposure through the Spring errors representation.
	 * @param field the field that caused the binding error
	 * @param violation the corresponding JSR-303 ConstraintViolation
	 * @param bindingResult a Spring BindingResult for the backing object
	 * which contains the current field's value
	 * @return the invalid value to expose as part of the field error
	 * @since 4.2
	 * @see jakarta.validation.ConstraintViolation#getInvalidValue()
	 * @see org.springframework.validation.FieldError#getRejectedValue()
	 */
	protected @Nullable Object getRejectedValue(String field, ConstraintViolation<Object> violation, BindingResult bindingResult) {
		Object invalidValue = violation.getInvalidValue();
		if (!field.isEmpty() && !field.contains("[]") &&
				(invalidValue == violation.getLeafBean() || field.contains("[") || field.contains("."))) {
			// Possibly a bean constraint with property path: retrieve the actual property value.
			// However, explicitly avoid this for "address[]" style paths that we can't handle.
			try {
				invalidValue = bindingResult.getRawFieldValue(field);
			}
			catch (InvalidPropertyException ex) {
				// Bean validation uses ValueExtractor's to unwrap container values
				// in which cases we can't access the raw value.
			}
		}
		return invalidValue;
	}
	/**
	 * Indicate whether this violation's interpolated message has remaining
	 * placeholders and therefore requires {@link java.text.MessageFormat}
	 * to be applied to it. Called for a Bean Validation defined message
	 * (coming out {@code ValidationMessages.properties}) when rendered
	 * as the default message in Spring's MessageSource.
	 * <p>The default implementation considers a Spring-style "{0}" placeholder
	 * for the field name as an indication for {@link java.text.MessageFormat}.
	 * Any other placeholder or escape syntax occurrences are typically a
	 * mismatch, coming out of regex pattern values or the like. Note that
	 * standard Bean Validation does not support "{0}" style placeholders at all;
	 * this is a feature typically used in Spring MessageSource resource bundles.
	 * @param violation the Bean Validation constraint violation, including
	 * BV-defined interpolation of named attribute references in its message
	 * @return {@code true} if {@code java.text.MessageFormat} is to be applied,
	 * or {@code false} if the violation's message should be used as-is
	 * @since 5.1.8
	 * @see #getArgumentsForConstraint
	 */
	protected boolean requiresMessageFormat(ConstraintViolation<?> violation) {
		// Presence of the Spring-style "{0}" placeholder is the trigger.
		return containsSpringStylePlaceholder(violation.getMessage());
	}
private static boolean containsSpringStylePlaceholder(@Nullable String message) {
return (message != null && message.contains("{0}"));
}
//---------------------------------------------------------------------
// Implementation of JSR-303 Validator interface
//---------------------------------------------------------------------
@Override
public <T> Set<ConstraintViolation<T>> validate(T object, Class<?>... groups) {
Assert.state(this.targetValidator != null, "No target Validator set");
return this.targetValidator.validate(object, groups);
}
@Override
public <T> Set<ConstraintViolation<T>> validateProperty(T object, String propertyName, Class<?>... groups) {
Assert.state(this.targetValidator != null, "No target Validator set");
return this.targetValidator.validateProperty(object, propertyName, groups);
}
@Override
public <T> Set<ConstraintViolation<T>> validateValue(
Class<T> beanType, String propertyName, Object value, Class<?>... groups) {
Assert.state(this.targetValidator != null, "No target Validator set");
return this.targetValidator.validateValue(beanType, propertyName, value, groups);
}
@Override
public BeanDescriptor getConstraintsForClass(Class<?> clazz) {
Assert.state(this.targetValidator != null, "No target Validator set");
return this.targetValidator.getConstraintsForClass(clazz);
}
@Override
@SuppressWarnings("unchecked")
public <T> T unwrap(@Nullable Class<T> type) {
Assert.state(this.targetValidator != null, "No target Validator set");
try {
return (type != null ? this.targetValidator.unwrap(type) : (T) this.targetValidator);
}
catch (ValidationException ex) {
// Ignore if just being asked for plain JSR-303 Validator
if (jakarta.validation.Validator.class == type) {
return (T) this.targetValidator;
}
throw ex;
}
}
@Override
public ExecutableValidator forExecutables() {
Assert.state(this.targetValidator != null, "No target Validator set");
return this.targetValidator.forExecutables();
}
/**
* Wrapper for a String attribute which can be resolved via a {@code MessageSource},
* falling back to the original attribute as a default value otherwise.
*/
@SuppressWarnings("serial")
private static class ResolvableAttribute implements MessageSourceResolvable, Serializable {
private final String resolvableString;
public ResolvableAttribute(String resolvableString) {
this.resolvableString = resolvableString;
}
@Override
public String[] getCodes() {
return new String[] {this.resolvableString};
}
@Override
public Object @Nullable [] getArguments() {
return null;
}
@Override
public String getDefaultMessage() {
return this.resolvableString;
}
@Override
public String toString() {
return this.resolvableString;
}
}
/**
* Subclass of {@code ObjectError} with Spring-style default message rendering.
*/
@SuppressWarnings("serial")
private static class ViolationObjectError extends ObjectError implements Serializable {
private @Nullable transient SpringValidatorAdapter adapter;
private @Nullable transient ConstraintViolation<?> violation;
public ViolationObjectError(String objectName, String[] codes, Object[] arguments,
ConstraintViolation<?> violation, SpringValidatorAdapter adapter) {
super(objectName, codes, arguments, violation.getMessage());
this.adapter = adapter;
this.violation = violation;
wrap(violation);
}
@Override
public boolean shouldRenderDefaultMessage() {
return (this.adapter != null && this.violation != null ?
this.adapter.requiresMessageFormat(this.violation) :
containsSpringStylePlaceholder(getDefaultMessage()));
}
}
/**
* Subclass of {@code FieldError} with Spring-style default message rendering.
*/
@SuppressWarnings("serial")
private static class ViolationFieldError extends FieldError implements Serializable {
private @Nullable transient SpringValidatorAdapter adapter;
private @Nullable transient ConstraintViolation<?> violation;
public ViolationFieldError(String objectName, String field, @Nullable Object rejectedValue, String[] codes,
Object[] arguments, ConstraintViolation<?> violation, SpringValidatorAdapter adapter) {
super(objectName, field, rejectedValue, false, codes, arguments, violation.getMessage());
this.adapter = adapter;
this.violation = violation;
wrap(violation);
}
@Override
public boolean shouldRenderDefaultMessage() {
return (this.adapter != null && this.violation != null ?
this.adapter.requiresMessageFormat(this.violation) :
containsSpringStylePlaceholder(getDefaultMessage()));
}
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/main/java/org/springframework/validation/beanvalidation/SpringValidatorAdapter.java |
use std::cell::RefCell;
use std::task::Waker;
pub(crate) struct Defer {
deferred: RefCell<Vec<Waker>>,
}
impl Defer {
pub(crate) fn new() -> Defer {
Defer {
deferred: RefCell::default(),
}
}
pub(crate) fn defer(&self, waker: &Waker) {
let mut deferred = self.deferred.borrow_mut();
// If the same task adds itself a bunch of times, then only add it once.
if let Some(last) = deferred.last() {
if last.will_wake(waker) {
return;
}
}
deferred.push(waker.clone());
}
pub(crate) fn is_empty(&self) -> bool {
self.deferred.borrow().is_empty()
}
pub(crate) fn wake(&self) {
while let Some(waker) = self.deferred.borrow_mut().pop() {
waker.wake();
}
}
#[cfg(feature = "taskdump")]
pub(crate) fn take_deferred(&self) -> Vec<Waker> {
let mut deferred = self.deferred.borrow_mut();
std::mem::take(&mut *deferred)
}
} | rust | github | https://github.com/tokio-rs/tokio | tokio/src/runtime/scheduler/defer.rs |
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v1beta1",
"metadata": {
"name": "v40.refresh_true.v42"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"liveNow": false,
"preload": false,
"refresh": "",
"schemaVersion": 42,
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "Boolean Refresh Test Dashboard"
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v2beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dashboards_from_v0_to_v2/v2beta1.v40.refresh_true.v1beta1.json |
<?php declare(strict_types=1);
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer\Test\Repository;
use Composer\Repository\RepositoryFactory;
use Composer\Test\TestCase;
class RepositoryFactoryTest extends TestCase
{
public function testManagerWithAllRepositoryTypes(): void
{
$manager = RepositoryFactory::manager(
$this->getMockBuilder('Composer\IO\IOInterface')->getMock(),
$this->getMockBuilder('Composer\Config')->getMock(),
$this->getMockBuilder('Composer\Util\HttpDownloader')->disableOriginalConstructor()->getMock(),
$this->getMockBuilder('Composer\EventDispatcher\EventDispatcher')->disableOriginalConstructor()->getMock()
);
$ref = new \ReflectionProperty($manager, 'repositoryClasses');
(\PHP_VERSION_ID < 80100) and $ref->setAccessible(true);
$repositoryClasses = $ref->getValue($manager);
self::assertEquals([
'composer',
'vcs',
'package',
'pear',
'git',
'bitbucket',
'git-bitbucket',
'github',
'gitlab',
'svn',
'fossil',
'perforce',
'hg',
'artifact',
'path',
], array_keys($repositoryClasses));
}
/**
* @dataProvider generateRepositoryNameProvider
*
* @param int|string $index
* @param array<string, string> $config
* @param array<string, mixed> $existingRepos
*
* @phpstan-param array{url?: string} $config
*/
public function testGenerateRepositoryName($index, array $config, array $existingRepos, string $expected): void
{
self::assertSame($expected, RepositoryFactory::generateRepositoryName($index, $config, $existingRepos));
}
public static function generateRepositoryNameProvider(): array
{
return [
[0, [], [], '0'],
[0, [], [[]], '02'],
[0, ['url' => 'https://example.org'], [], 'example.org'],
[0, ['url' => 'https://example.org'], ['example.org' => []], 'example.org2'],
['example.org', ['url' => 'https://example.org/repository'], [], 'example.org'],
['example.org', ['url' => 'https://example.org/repository'], ['example.org' => []], 'example.org2'],
];
}
} | php | github | https://github.com/composer/composer | tests/Composer/Test/Repository/RepositoryFactoryTest.php |
services:
app.static_token_provider:
class: Symfony\Bundle\SecurityBundle\Tests\Functional\Bundle\RememberMeBundle\Security\StaticTokenProvider
arguments: ['@kernel']
security:
firewalls:
default:
remember_me:
always_remember_me: true
secret: key
token_provider: app.static_token_provider | unknown | github | https://github.com/symfony/symfony | src/Symfony/Bundle/SecurityBundle/Tests/Functional/app/RememberMe/config_persistent.yml |
- hosts: localhost
gather_facts: false
tasks:
- command: echo
notify: handler
handlers:
- name: handler
include_tasks: test_block_as_handler-include_import-handlers.yml | unknown | github | https://github.com/ansible/ansible | test/integration/targets/handlers/test_block_as_handler-include.yml |
from __future__ import unicode_literals
import json
from six.moves.urllib.parse import urlencode
import re
import sure # noqa
import moto.server as server
'''
Test the different server responses
'''
def test_cloudformation_server_get():
backend = server.create_backend_app("cloudformation")
stack_name = 'test stack'
test_client = backend.test_client()
template_body = {
"Resources": {},
}
create_stack_resp = test_client.action_data("CreateStack", StackName=stack_name,
TemplateBody=json.dumps(template_body))
create_stack_resp.should.match(
r"<CreateStackResponse>.*<CreateStackResult>.*<StackId>.*</StackId>.*</CreateStackResult>.*</CreateStackResponse>", re.DOTALL)
stack_id_from_create_response = re.search(
"<StackId>(.*)</StackId>", create_stack_resp).groups()[0]
list_stacks_resp = test_client.action_data("ListStacks")
stack_id_from_list_response = re.search(
"<StackId>(.*)</StackId>", list_stacks_resp).groups()[0]
stack_id_from_create_response.should.equal(stack_id_from_list_response) | unknown | codeparrot/codeparrot-clean | ||
from django.conf import settings
from django.db.models.signals import post_init, pre_save, post_save
from django.utils.importlib import import_module
URL_ROUTE_HANDLERS = getattr(settings, 'URL_ROUTE_HANDLERS', ())
handlers = {}
def load_handlers():
for name in URL_ROUTE_HANDLERS:
module_name, class_name = name.rsplit('.', 1)
handler = getattr(import_module(module_name), class_name)()
add_handler(handler)
def add_handler(handler):
assert handler.name not in handlers, (
'An URL handler with the name "%s" already exists!') % handler.name
handlers[handler.name] = handler
dispatch_uid = 'urlrouter:%s' % id(handler.model)
post_init.connect(backup_urls, sender=handler.model,
dispatch_uid=dispatch_uid)
pre_save.connect(reclaim_urls, sender=handler.model,
dispatch_uid=dispatch_uid)
post_save.connect(add_new_urls, sender=handler.model,
dispatch_uid=dispatch_uid)
def backup_urls(sender, instance, **kwargs):
if instance.pk is None:
instance._urlrouter_urls_backup_ = set()
return
urls = set()
for handler in handlers.values():
if sender is not handler.model:
continue
urls |= set(handler.get_urls(instance))
instance._urlrouter_urls_backup_ = urls
def reclaim_urls(sender, instance, **kwargs):
from .models import URLRoute
reclaim = instance._urlrouter_urls_backup_
for handler in handlers.values():
if sender is not handler.model:
continue
reclaim -= set(handler.get_urls(instance))
URLRoute.objects.filter(url__in=reclaim, target=unicode(instance.pk)).delete()
instance._urlrouter_urls_backup_ -= reclaim
def add_new_urls(sender, instance, **kwargs):
from .models import URLRoute
query = URLRoute.objects.filter(url__in=instance._urlrouter_urls_backup_)
query = query.filter(target=unicode(instance.pk))
assigned_urls = set(url for url in query)
add = {}
for handler in handlers.values():
if sender is not handler.model:
continue
for url in handler.get_urls(instance):
if url in assigned_urls:
continue
assert url not in add, (
'URL handler "%s" wants to add URL "%s" which was already '
'added by handler "%s".') % (handle.name, url, add[url])
add[url] = handler.name
for url in add:
URLRoute(url=url, handler=add[url], target=unicode(instance.pk)).save()
instance._urlrouter_urls_backup_ = assigned_urls | set(add.keys())
load_handlers() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The page cycler measurement.
This measurement registers a window load handler in which is forces a layout and
then records the value of performance.now(). This call to now() measures the
time from navigationStart (immediately after the previous page's beforeunload
event) until after the layout in the page's load event. In addition, two garbage
collections are performed in between the page loads (in the beforeunload event).
This extra garbage collection time is not included in the measurement times.
Finally, various memory and IO statistics are gathered at the very end of
cycling all pages.
"""
import collections
import os
from metrics import cpu
from metrics import iometric
from metrics import memory
from metrics import power
from metrics import speedindex
from metrics import v8_object_stats
from telemetry.core import util
from telemetry.page import page_test
from telemetry.value import scalar
class PageCycler(page_test.PageTest):
options = {'pageset_repeat': 10}
def __init__(self, *args, **kwargs):
super(PageCycler, self).__init__(*args, **kwargs)
with open(os.path.join(os.path.dirname(__file__),
'page_cycler.js'), 'r') as f:
self._page_cycler_js = f.read()
self._speedindex_metric = speedindex.SpeedIndexMetric()
self._memory_metric = None
self._power_metric = None
self._cpu_metric = None
self._v8_object_stats_metric = None
self._has_loaded_page = collections.defaultdict(int)
@classmethod
def AddCommandLineArgs(cls, parser):
parser.add_option('--v8-object-stats',
action='store_true',
help='Enable detailed V8 object statistics.')
parser.add_option('--report-speed-index',
action='store_true',
help='Enable the speed index metric.')
parser.add_option('--cold-load-percent', type='int', default=50,
help='%d of page visits for which a cold load is forced')
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
cls._record_v8_object_stats = args.v8_object_stats
cls._report_speed_index = args.report_speed_index
cold_runs_percent_set = (args.cold_load_percent != None)
# Handle requests for cold cache runs
if (cold_runs_percent_set and
(args.cold_load_percent < 0 or args.cold_load_percent > 100)):
raise Exception('--cold-load-percent must be in the range [0-100]')
# Make sure _cold_run_start_index is an integer multiple of page_repeat.
# Without this, --pageset_shuffle + --page_repeat could lead to
# assertion failures on _started_warm in WillNavigateToPage.
if cold_runs_percent_set:
number_warm_pageset_runs = int(
(int(args.pageset_repeat) - 1) * (100 - args.cold_load_percent) / 100)
number_warm_runs = number_warm_pageset_runs * args.page_repeat
cls._cold_run_start_index = number_warm_runs + args.page_repeat
cls.discard_first_result = (not args.cold_load_percent or
cls.discard_first_result)
else:
cls._cold_run_start_index = args.pageset_repeat * args.page_repeat
def WillStartBrowser(self, browser):
"""Initialize metrics once right before the browser has been launched."""
self._power_metric = power.PowerMetric(browser)
def DidStartBrowser(self, browser):
"""Initialize metrics once right after the browser has been launched."""
self._memory_metric = memory.MemoryMetric(browser)
self._cpu_metric = cpu.CpuMetric(browser)
if self._record_v8_object_stats:
self._v8_object_stats_metric = v8_object_stats.V8ObjectStatsMetric()
def DidStartHTTPServer(self, tab):
# Avoid paying for a cross-renderer navigation on the first page on legacy
# page cyclers which use the filesystem.
tab.Navigate(tab.browser.http_server.UrlOf('nonexistent.html'))
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = self._page_cycler_js
if self.ShouldRunCold(page.url):
tab.ClearCache(force=True)
if self._report_speed_index:
self._speedindex_metric.Start(page, tab)
self._cpu_metric.Start(page, tab)
self._power_metric.Start(page, tab)
def DidNavigateToPage(self, page, tab):
self._memory_metric.Start(page, tab)
if self._record_v8_object_stats:
self._v8_object_stats_metric.Start(page, tab)
def CustomizeBrowserOptions(self, options):
memory.MemoryMetric.CustomizeBrowserOptions(options)
power.PowerMetric.CustomizeBrowserOptions(options)
iometric.IOMetric.CustomizeBrowserOptions(options)
options.AppendExtraBrowserArgs('--js-flags=--expose_gc')
if self._record_v8_object_stats:
v8_object_stats.V8ObjectStatsMetric.CustomizeBrowserOptions(options)
if self._report_speed_index:
self._speedindex_metric.CustomizeBrowserOptions(options)
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression('__pc_load_time', 60)
chart_name_prefix = ('cold_' if self.IsRunCold(page.url) else
'warm_')
results.AddValue(scalar.ScalarValue(
results.current_page, '%stimes.page_load_time' % chart_name_prefix,
'ms', tab.EvaluateJavaScript('__pc_load_time'),
description='Average page load time. Measured from '
'performance.timing.navigationStart until the completion '
'time of a layout after the window.load event. Cold times '
'are the times when the page is loaded cold, i.e. without '
'loading it before, and warm times are times when the '
'page is loaded after being loaded previously.'))
self._has_loaded_page[page.url] += 1
self._power_metric.Stop(page, tab)
self._memory_metric.Stop(page, tab)
self._memory_metric.AddResults(tab, results)
self._power_metric.AddResults(tab, results)
self._cpu_metric.Stop(page, tab)
self._cpu_metric.AddResults(tab, results)
if self._record_v8_object_stats:
self._v8_object_stats_metric.Stop(page, tab)
self._v8_object_stats_metric.AddResults(tab, results)
if self._report_speed_index:
def SpeedIndexIsFinished():
return self._speedindex_metric.IsFinished(tab)
util.WaitFor(SpeedIndexIsFinished, 60)
self._speedindex_metric.Stop(page, tab)
self._speedindex_metric.AddResults(
tab, results, chart_name=chart_name_prefix+'speed_index')
def DidRunTest(self, browser, results):
iometric.IOMetric().AddSummaryResults(browser, results)
def IsRunCold(self, url):
return (self.ShouldRunCold(url) or
self._has_loaded_page[url] == 0)
def ShouldRunCold(self, url):
# We do the warm runs first for two reasons. The first is so we can
# preserve any initial profile cache for as long as possible.
# The second is that, if we did cold runs first, we'd have a transition
# page set during which we wanted the run for each URL to both
# contribute to the cold data and warm the catch for the following
# warm run, and clearing the cache before the load of the following
# URL would eliminate the intended warmup for the previous URL.
return (self._has_loaded_page[url] >= self._cold_run_start_index) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: iso-8859-1 -*-
# mosq.py
# Implementation of the square-law MOS transistor model
# Copyright 2012 Giuseppe Venturini
#
# This file is part of the ahkab simulator.
#
# Ahkab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# Ahkab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License v2
# along with ahkab. If not, see <http://www.gnu.org/licenses/>.
"""
The Square Law Mos Model
------------------------
This module defines two classes:
- :class:`mosq_device`, the device
- :class:`mosq_model`, the model
Implementation details
----------------------
Assuming :math:`V_{ds} > 0` and a transistor type N in the
following, we have the following regions implemented:
1. No subthreshold conduction.
- :math:`V_{gs} < V_T`
- :math:`I_D = 0`
2. Ohmic region
- :math:`V_{GS} > V_T` and :math:`V_{GD} > V_T`
- :math:`I_D = k_n W/L ((V_{GS}-V_{T})V_{DS} - V_{DS}^2/2)`
3. Saturation region
- :math:`V_{GS} > V_T` and :math:`V_{DS} > V_{GS} - V_{T}`
- :math:`V_{GS} < V_{T}`
- :math:`I_D = 1/2 k_n W/L (V_{GS}-V_T)^2 * [1 + \lambda*(V_{DS}-V_{GS}+V_T)]`
Module reference
----------------
"""
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import math
import numpy as np
from . import constants
from . import options
from . import utilities
from . import printing
# DEFAULT VALUES FOR 500n CH LENGTH
COX_DEFAULT = .7e-3
VTO_DEFAULT = .5
GAMMA_DEFAULT = 1.0
PHI_DEFAULT = .7
KP_DEFAULT = 50e-6
LAMBDA_DEFAULT = .5
AVT_DEFAULT = 7.1e-3 * 1e-6
AKP_DEFAULT = 1.8e-2 * 1e-6
TCV_DEFAULT = 1e-3
BEX_DEFAULT = -1.5
ISMALL_GUESS_MIN = 1e-10
class mosq_device(object):
def __init__(self, part_id, nd, ng, ns, nb, W, L, model, M=1, N=1):
"""Quadratic Law MOSFET device
**Parameters:**
part_id : string
The part ID of the model. Eg. ``'M1'`` or ``'Mlow'``, the first
letter should always be ``'M'``.
nd : int
drain node
ng : int
gate node
ns : int
source node
nb : int
bulk node
L : float
element width [m]
W : float
element length [m]
model : mosq_mos_model instance
the model for the device
M : int, optional
shunt multiplier (n. of shunt devices)
N : int, optional
series multiplier (n. of series devices)
"""
self.ng = ng
self.nb = nb
self.n1 = nd
self.n2 = ns
self.ports = ((self.n1, self.n2), (
self.ng, self.n2), (self.nb, self.n2))
class dev_class(object):
pass # empty class to hold device parameters
self.device = dev_class()
self.device.L = float(L) # channel length -
self.device.W = float(W) # channel width -
self.device.M = int(M) # parallel multiple device number
self.device.N = int(N) # series multiple device number
self.device.mckey = None
self.device.part_id = part_id
self.mosq_model = model
self.mc_enabled = False
self.opdict = {}
self.opdict.update( {'state':(float('nan'), float('nan'),
float('nan'))})
self.part_id = part_id
self.is_nonlinear = True
self.is_symbolic = True
self.dc_guess = [self.mosq_model.VTO*0.4*self.mosq_model.NPMOS,
self.mosq_model.VTO*1.1*self.mosq_model.NPMOS,
0]
devcheck, reason = self.mosq_model.device_check(self.device)
if not devcheck:
raise ValueError(reason + " out of boundaries.")
def get_drive_ports(self, op):
"""Get the circuit ports that drive the device.
**Returns:**
tp : a tuple of tuples of nodes, each node being a drive port of the
device.
Eg. ``tp`` might be defined as:
::
tp = (port0, port1, port2...)
Where each port in the tuple is of the form:
::
port0 = (nplus, nminus)
In the case of a MOSQ device, the method returns:
::
((nd, nb), (ng, nb), (ns, nb))
Where:
* ``nd`` is the internal identifier of the drain node,
* ``ng`` is the internal identifier of the gate node,
* ``ns`` is the internal identifier of the source node.
* ``nb`` is the internal identifier of the bulk node,
"""
return self.ports # d,g,b
def get_output_ports(self):
"""Get the circuit ports where the device injects current.
**Returns:**
ports : a tuple of tuples of nodes, such as as:
::
(port0, port1, port2...)
Where each port in the tuple is itself a tuple, made of two nodes, eg.
::
port0 = (nplus, nminus)
In the case of a MOS device, the method returns:
::
((nd, ns),)
Where:
* ``nd`` is the internal identifier of the drain node,
* ``ns`` is the internal identifier of the source node.
"""
return ((self.n1, self.n2),)
def __str__(self):
rep = self.part_id + " %(nd)s %(ng)s %(ns)s %(nb)%s " + \
self.mosq_model.name + " w=" + str(self.device.W) + " l=" + \
str(self.device.L) + " M=" + str(self.device.M) + " N=" + \
str(self.device.N)
return rep
def _get_mos_type(self):
"""Returns N or P (capitalized), depending on the device type.
"""
mtype = 'N' if self.mosq_model.NPMOS == 1 else 'P'
return mtype
def istamp(self, ports_v, time=0, reduced=True):
"""Get the current stamp matrix
A stamp matrix corresponding to the current flowing in the element
with the voltages applied as specified in the ``ports_v`` vector.
**Parameters:**
ports_v : list
A list in the form: ``[voltage_across_port0, voltage_across_port1,
...]``
time: float
the simulation time at which the evaluation is performed.
It has no effect here. Set it to ``None`` during DC analysis.
"""
sw_vect, CS = self.mosq_model.get_voltages(*ports_v)
ids = self.mosq_model.get_ids(self.mosq_model, self.device, sw_vect)
istamp = np.array((CS*ids, -CS*ids), dtype=np.float64)
indices = ((self.n1 - 1*reduced, self.n2 - 1*reduced), (0, 0))
if reduced:
delete_i = [pos for pos, i in enumerate(indices[0]) if i == -1]
istamp = np.delete(istamp, delete_i, axis=0)
indices = tuple(zip(*[(i, j) for i, j in zip(*indices) if i != -1]))
return indices, istamp
def update_status_dictionary(self, ports_v):
"""Update the status dictionary
The status dictionary may be accessed at ``elem.opdict`` and contains
several pieces of information that may be of interest regarding the
biasing of the MOS device.
"""
if self.opdict is None:
self.opdict = {}
if not (self.opdict['state'] == ports_v[0]).all() or 'gmd' not in self.opdict \
or 'gm' not in self.opdict or 'gmb' not in self.opdict \
or 'Ids' not in self.opdict or 'SAT' not in self.opdict:
vds, vgs, _ = ports_v[0]
self.opdict['state'] = ports_v[0]
gstamp = self.gstamp(ports_v[0], reduced=False)[1]
self.opdict['gmd'] = gstamp[0, 0]
self.opdict['gm'] = gstamp[0, 1]
self.opdict['gmb'] = gstamp[0, 3]
self.opdict['Ids'] = self.istamp(ports_v[0], reduced=False)[1][0]
self.opdict.update({'VTH':self.mosq_model.get_VT(ports_v[0], self.device)})
self.opdict.update({'W':self.device.W, 'L':self.device.L,
'ON':(vgs >= self.opdict['VTH'])})
self.opdict.update({'beta':.5*self.mosq_model.KP*self.device.W/self.device.L})
self.opdict.update({'VOD':self.mosq_model.NPMOS*(vgs - self.opdict['VTH']),
'SAT':vds > vgs - self.opdict['VTH']})
else:
pass
#already up to date
def get_op_info(self, ports_v):
"""Information regarding the Operating Point (OP)
**Parameters:**
ports_v : list of lists
The voltages applied to all the driving ports, grouped by output
port.
i.e.
::
[<list of voltages for the drive ports of output port 0>,
<list of voltages for the drive ports of output port 1>,
...,
<list of voltages for the drive ports of output port N>]
Usually, this method returns ``op_keys`` and the corresponding
``op_info``, two lists, one holding the labels, the other the
corresponding values.
In the case of MOSFETs, the values are way too many to be shown in a
linear table. For this reason, we return ``None`` as ``op_keys``, and we
return for ``op_info`` a list which holds both labels and values in a
table-like manner, spanning the vertical and horizontal dimension.
For this reason, each MOSFET has to have its OP info printed alone, not
grouped as it happens with most other elements.
**Returns:**
op_keys : ``None``
See above for why this value is always ``None``.
op_info : list of floats
The OP information ready to be passed to :func:`printing.table` for
arranging it in a pretty table to display.
"""
self.update_status_dictionary(ports_v)
sat_status = "SATURATION" if self.opdict['SAT'] else "LINEAR"
if not self.opdict["ON"]:
status = "OFF"
else:
status = "ON"
arr = [[self.part_id + " ch", status, "", "", sat_status, "", "", "",
"", "", "", ""], ]
arr.append(["beta", "[A/V^2]:", self.opdict['beta'], "Weff", "[m]:",
str(self.opdict['W']) + " (" + str(self.device.W) + ")",
"L", "[m]:", str(self.opdict['L']) + " (" +
str(self.device.L) + ")", "M/N:", "", str(self.device.M) +
"/" + str(self.device.N)])
arr.append(["Vds", "[V]:", float(ports_v[0][0]), "Vgs", "[V]:",
float(ports_v[0][1]), "Vbs", "[V]:", float(ports_v[0][2]), "",
"", ""])
arr.append(["VTH", "[V]:", self.opdict['VTH'], "VOD", "[V]:",
self.opdict['VOD'], "", "", "", "VA", "[V]:",
str(self.opdict['Ids'] / self.opdict['gmd'])])
arr.append(["Ids", "[A]:", self.opdict['Ids'], "", "", "", "", "", "",
"", "", ''])
arr.append(["gm", "[S]:", self.opdict['gm'], "gmb", "[S]:",
self.opdict['gmb'], "ro", u"[\u2126]:",
1./self.opdict['gmd'], "", "", ""])
return None, arr
def gstamp(self, ports_v, time=0, reduced=True):
"""Get the transconductance stamp matrix
**Parameters:**
ports_v : sequence
a sequence of the form: ``[voltage_across_port0,
voltage_across_port1, ...]``
time : float, optional
the simulation time at which the evaluation is performed. Set it to
``None`` during DC analysis. Defaults to 0.
reduced : bool, optional
Whether the returned matrix should be in reduced form or not.
Defaults to ``True``, corresponding to reduced form.
**Returns:**
indices : sequence of sequences
The indices corresponding to the stamp matrix.
stamp : ndarray
The stamp matrix.
"""
indices = ([self.n1 - 1]*4 + [self.ng - 1]*4 + [self.n2 - 1]*4 + [self.nb - 1]*4,
[self.n1 - 1, self.ng - 1, self.n2 - 1, self.nb - 1]*4)
sw_vect, CS = self.mosq_model.get_voltages(*ports_v)
gmd = self.mosq_model.get_gmd(self.mosq_model, self.device, sw_vect)
gmg = self.mosq_model.get_gm(self.mosq_model, self.device, sw_vect)
gmb = self.mosq_model.get_gmb(self.mosq_model, self.device, sw_vect)
if gmd == 0:
gmd = options.gmin*2
if gmg == 0:
gmg = options.gmin*2
if gmb == 0:
gmb = -2*options.gmin
stamp = np.array(((gmd, gmg, -gmd-gmb-gmg, gmb),
(0, 0, 0, 0),
(-gmd, -gmg, gmd + gmg + gmb, -gmb),
(0, 0, 0, 0)), dtype=np.float64)
if CS == -1:
stamp = self.mosq_model.T1*stamp*self.mosq_model.T2
if (self.opdict['state'] != ports_v[0]).any():
self.opdict = {'state':ports_v[0]}
self.opdict.update({'gmd': stamp[0, 0]})
self.opdict.update({'gm': stamp[0, 1]})
self.opdict.update({'gmb': stamp[0, 3]})
if reduced:
zap_rc = [pos for pos, i in enumerate(indices[1][:4]) if i == -1]
stamp = np.delete(stamp, zap_rc, axis=0)
stamp = np.delete(stamp, zap_rc, axis=1)
indices = tuple(zip(*[(i, y) for i, y in zip(*indices) if (i != -1 and y != -1)]))
stamp_flat = stamp.reshape(-1)
stamp_folded = []
indices_folded = []
for ix, it in enumerate([(i, y) for i, y in zip(*indices)]):
if it not in indices_folded:
indices_folded.append(it)
stamp_folded.append(stamp_flat[ix])
else:
w = indices_folded.index(it)
stamp_folded[w] += stamp_flat[ix]
indices = tuple(zip(*indices_folded))
stamp = np.array(stamp_folded)
return indices, stamp
def get_value_function(self, identifier):
def get_value(self):
return self.opdict[identifier]
return get_value
def get_mc_requirements(self):
return True, 2
def setup_mc(self, status, mckey):
self.mc_enabled = status
if self.mc_enabled:
self.device.mckey = mckey
else:
self.device.mckey = None
def get_netlist_elem_line(self, nodes_dict):
"""Get the element netlist entry"""
mos_type = self._get_mos_type()
return "%s %s %s %s %s %s type=%s w=%g l=%g m=%g n=%g" % \
(self.part_id, nodes_dict[self.n1], nodes_dict[self.ng],
nodes_dict[self.n2], nodes_dict[self.nb], self.mosq_model.name,
mos_type, self.device.W, self.device.L, self.device.M,
self.device.N)
class mosq_mos_model(object):
    """Square-law (SPICE level 1 style) MOS transistor model.

    Holds the process parameters (threshold, transconductance factor,
    body effect, channel-length modulation, temperature coefficients)
    and provides the current/conductance evaluation methods used to
    stamp the device into the MNA matrices.
    """
    def __init__(self, name=None, TYPE='n', TNOM=None, COX=None,
                 GAMMA=None, NSUB=None, PHI=None, VTO=None, KP=None,
                 LAMBDA=None, AKP=None, AVT=None,
                 TOX=None, VFB=None, U0=None, TCV=None, BEX=None):
        """Build the model, deriving missing parameters where possible.

        COX, GAMMA, PHI, VTO and KP fall back to derivations from
        TOX/NSUB/VFB/U0 when given, else to module-level defaults.
        P-type devices are handled by flipping signs through NPMOS = -1.
        """
        self.name = "model_mosq0" if name is None else name
        self.TNOM = float(TNOM) if TNOM is not None else constants.Tref
        if TYPE.lower() == 'n':
            self.NPMOS = 1
        elif TYPE.lower() == 'p':
            self.NPMOS = -1
        else:
            raise ValueError("Unknown MOS type %s" % TYPE)
        # optional parameters (no defaults)
        self.TOX = float(TOX) if TOX is not None else None
        self.NSUB = float(NSUB) if NSUB is not None else None
        self.VFB = self.NPMOS * float(VFB) if VFB is not None else None
        self.U0 = float(U0) if U0 is not None else None
        # crucial parameters: each may be given directly, derived from a
        # related physical parameter, or defaulted.
        if COX is not None:
            self.COX = float(COX)
        elif TOX is not None:
            # oxide capacitance per unit area from oxide thickness
            self.COX = constants.si.eox / TOX
        else:
            self.COX = COX_DEFAULT
        if GAMMA is not None:
            self.GAMMA = float(GAMMA)
        elif NSUB is not None:
            # body-effect coefficient from substrate doping (NSUB in cm^-3,
            # hence the 10**6 conversion to m^-3)
            self.GAMMA = math.sqrt(
                2 * constants.e * constants.si.esi * NSUB * 10 ** 6 / self.COX)
        else:
            self.GAMMA = GAMMA_DEFAULT
        if PHI is not None:
            self.PHI = float(PHI)
        elif NSUB is not None:
            # surface potential from doping at nominal temperature
            self.PHI = 2 * constants.Vth(self.TNOM) * math.log(
                NSUB * 10 ** 6 / constants.si.ni(self.TNOM))
        else:
            self.PHI = PHI_DEFAULT
        if VTO is not None:
            self.VTO = self.NPMOS * float(VTO)
            if self.VTO < 0:
                print("(W): model %s has internal negative VTO (%f V)." % (self.name, self.VTO))
        elif VFB is not None:
            self.VTO = VFB + PHI + GAMMA * PHI  # inv here??
        else:
            self.VTO = VTO_DEFAULT
        if KP is not None:
            self.KP = float(KP)
        elif U0 is not None:
            # transconductance factor from mobility (U0 in cm^2/(V*s))
            self.KP = (U0 * 10 ** -4) * self.COX
        else:
            self.KP = KP_DEFAULT
        self.LAMBDA = LAMBDA if LAMBDA is not None else LAMBDA_DEFAULT
        # Intrinsic model temperature parameters
        self.TCV = self.NPMOS * \
            float(TCV) if TCV is not None else self.NPMOS * TCV_DEFAULT
        self.BEX = float(BEX) if BEX is not None else BEX_DEFAULT
        # Monte carlo mismatch coefficients (Pelgrom-style area scaling)
        self.AVT = AVT if AVT is not None else AVT_DEFAULT
        self.AKP = AKP if AKP is not None else AKP_DEFAULT
        self.set_device_temperature(constants.T)
        sc, sc_reason = self._self_check()
        if not sc:
            raise Exception(sc_reason + " out of range")
        # Permutation matrices used to re-map the stamp when drain and
        # source are swapped (see get_voltages / the stamping code).
        self.T1 = np.array(((0, 0, 1, 0),
                            (0, 1, 0, 0),
                            (1, 0, 0, 0),
                            (0, 0, 0, 1)))
        self.T2 = np.array(((0, 0, 1, 0),
                            (0, 1, 0, 0),
                            (1, 0, 0, 0),
                            (0, 0, 0, 1)))
    def set_device_temperature(self, T):
        """Change the temperature of the device.
        Correspondingly, ``VTO``, ``KP`` and ``PHI`` get updated.
        """
        self.TEMP = T
        self.VTO = self.VTO - self.TCV * (T - self.TNOM)
        self.KP = self.KP * (T / self.TNOM) ** self.BEX
        self.PHI = (self.PHI * T / self.TNOM + 3 * constants.Vth(self.TNOM) *
                    math.log(T / self.TNOM) - constants.si.Eg(self.TNOM) * T /
                    self.TNOM + constants.si.Eg(T))
    def get_device_temperature(self):
        """Returns the temperature of the device - in K.
        """
        return self.TEMP
    def print_model(self):
        """Print out the model
        All the internal parameters of the model get printed out, for visual
        inspection. Notice some can be set to ``None`` (ie not available) if
        they were not provided and some of those not provided are calculated
        from the others.
        """
        arr = []
        TYPE = 'N' if self.NPMOS == 1 else "P"
        arr.append([self.name, "", "", TYPE + " MOS", "SQUARE MODEL", "", "",
                    "", "", "", "", ""])
        arr.append(["KP", "[A/V^2]", self.KP, "VTO", "[V]:", self.VTO,
                    "TOX", "[m]", self.TOX, "COX", "[F/m^2]:", self.COX])
        arr.append(["PHI", "[V]:", self.PHI, "GAMMA", "sqrt(V)", self.GAMMA,
                    "NSUB", "[cm^-3]", self.NSUB, "VFB", "[V]:", self.VFB])
        arr.append(["U0", "[cm^2/(V*s)]:", self.U0, "TCV", "[V/K]", self.TCV,
                    "BEX", "", self.BEX, "", "", ""])
        print(printing.table(arr))
    def get_voltages(self, vds, vgs, vbs):
        """Performs the D <-> S swap if needed.
        **Returns:**
        voltages : tuple
            A tuple containing ``(VDS, VGS, VBS)`` after the swap
        CS : int
            ``CS`` is an integer which equals to:
            * +1 if no swap was necessary,
            * -1 if VD and VS have been swapped.
        """
        # vd / vs swap: normalize to NPMOS sign first so the model always
        # works with an equivalent n-channel device, then ensure vds >= 0
        # by re-referencing the voltages to the other terminal.
        vds = float(vds)
        vgs = float(vgs)
        vbs = float(vbs)
        vds = vds * self.NPMOS
        vgs = vgs * self.NPMOS
        vbs = vbs * self.NPMOS
        if vds < 0:
            vds_new = -vds
            vgs_new = vgs - vds
            vbs_new = vbs - vds
            cs = -1
        else:
            vds_new = vds
            vgs_new = vgs
            vbs_new = vbs
            cs = +1
        return (float(vds_new), float(vgs_new), float(vbs_new)), cs
    def get_svt_skp(self, device, debug=False):
        """Return the Monte Carlo (mismatch) deltas for VT and KP.

        Both scale with 1/sqrt(2*W*L) (Pelgrom area law); they are zero
        when Monte Carlo is disabled (no ``device.mckey``).
        """
        if device.mckey and debug:
            print("Monte carlo enabled. key:", device.mckey)
        if device.mckey:
            svt = device.mckey[0] * self.AVT / math.sqrt(
                2 * device.W * device.L)
            skp = device.mckey[1] * self.AKP / math.sqrt(
                2 * device.W * device.L)
        else:
            svt, skp = 0, 0
        return svt, skp
    def get_VT(self, voltages, device):
        """Get the threshold voltage"""
        # only the bulk-source voltage matters for the body effect
        _, _, vbs = voltages
        # clamp the sqrt arguments to zero to avoid math domain errors
        vsqrt1 = max(-vbs + 2*self.PHI, 0.)
        vsqrt2 = max(2*self.PHI, 0.)
        svt, _ = self.get_svt_skp(device)
        VT = self.VTO + svt + self.GAMMA * (math.sqrt(vsqrt1) -
                                            math.sqrt(vsqrt2))
        return VT
    @utilities.memoize
    def get_ids(self, device, voltages):
        """Get the drain-source current
        **Parameters:**
        device : object
            The device object holding the device parameters
            as attributes.
        voltages : tuple
            A tuple containing the voltages applied to the driving ports.
            In this case, the tuple is ``(vds, vgs, vbs)``.
        **Returns:**
        ids : float
            The drain-source current
        """
        (vds, vgs, vbs) = voltages
        debug = False
        if debug:
            print("=== %s (%sch) current for vds: %g, vgs: %g, vbs: %g" \
                  % (device.part_id, 'n'*(self.NPMOS == 1) +
                     'p'*(self.NPMOS == -1), vds, vgs, vbs))
        if debug:
            print("PHI:", self.PHI, "vbs:", vbs)
        VT = self.get_VT((vds, vgs, vbs), device)
        _, skp = self.get_svt_skp(device)
        if vgs < VT:
            # subthreshold: tiny linear leakage keeps the Jacobian non-singular
            ids = options.iea * (vgs / VT + vds / VT) / 100
            if debug:
                print("OFF: %g" % ids)
        else:
            # the -0.5*LAMBDA*(VT-vgs)**2 term shifts the ohmic/saturation
            # boundary so the two branches join continuously
            if vds < vgs - VT -0.5*self.LAMBDA*(VT - vgs)**2:
                ids = (skp + 1) * self.KP * device.W / \
                    device.L * ((vgs - VT) * vds - .5 * vds ** 2)
                if debug:
                    print("OHMIC: %g" % ids)
            else:
                ids = (skp + 1) * .5 * self.KP * device.W / device.L * (
                    vgs - VT) ** 2 * (1 + self.LAMBDA * (vds - vgs + VT + 0.25*self.LAMBDA*(VT - vgs)**2))
                if debug:
                    print("SAT: %g" % ids)
        # restore device polarity and account for parallel/series multiplicity
        Ids = self.NPMOS * device.M / device.N * ids
        return Ids
    @utilities.memoize
    def get_gmb(self, device, voltages):
        """Get the bulk-source transconductance
        Mathematically:
        .. math::
            g_{mb} = \\frac{dI_{DS}}{d(VS-VB)}
        **Parameters:**
        device : object
            The device object holding the device parameters
            as attributes.
        voltages : tuple
            A tuple containing the voltages applied to the driving ports.
            In this case, the tuple is ``(vds, vgs, vbs)``.
        **Returns:**
        gmb : float
            The source-bulk transconductace.
        """
        (vds, vgs, vbs) = voltages
        debug = False
        svt, skp = self.get_svt_skp(device, debug=False)
        assert vds >= 0
        vsqrt1 = max(-vbs + 2*self.PHI, 0.)
        vsqrt2 = max(2*self.PHI, 0.)
        VT = self.VTO + svt + self.GAMMA * \
            (math.sqrt(vsqrt1) - math.sqrt(vsqrt2))
        gmb = 0
        if vgs < VT:
            pass # gmb = 0
        else:
            if vds < vgs - VT:
                # ohmic region: body effect enters only through dVT/dvbs
                if vsqrt1 > 0:
                    gmb = self.KP * self.GAMMA * vds * device.W / \
                        (2 * device.L * vsqrt1 ** .5)
            else:
                # saturation: two terms, from the square law and from the
                # channel-length-modulation factor respectively
                if vsqrt1 > 0:
                    gmb += -0.25*self.KP*self.GAMMA*self.LAMBDA*device.W * (vsqrt1 > 0) * \
                           (-self.GAMMA*(-vsqrt2**.5 + vsqrt1**.5) + vgs - self.VTO)**2 / \
                           (device.L * vsqrt1**.5)
                    gmb += +0.5*self.KP*self.GAMMA*device.W*(self.LAMBDA* \
                           (self.GAMMA * (vsqrt2**.5 + vsqrt1**.5) + vds - vgs + self.VTO) + 1.0) *\
                           (-self.GAMMA * (vsqrt2**.5 + vsqrt1**.5) \
                           + vgs - self.VTO) / (device.L * vsqrt1**.5)
        gmb = self.NPMOS * (1 + skp) * gmb * device.M / device.N
        if debug:
            print("gmb %g" % gmb)
        return gmb
    @utilities.memoize
    def get_gmd(self, device, voltages):
        """Get the drain-source transconductance
        Mathematically:
        .. math::
            g_{md} = \\frac{dI_{DS}}{d(VD-VS)}
        **Parameters:**
        device : object
            The device object holding the device parameters
            as attributes.
        voltages : tuple
            A tuple containing the voltages applied to the driving ports.
            In this case, the tuple is ``(vds, vgs, vbs)``.
        **Returns:**
        gmb : float
            The drain-source transconductace.
        """
        (vds, vgs, vbs) = voltages
        debug = False
        svt, skp = self.get_svt_skp(device, debug=False)
        assert vds >= 0
        vsqrt1 = max(-vbs + 2*self.PHI, 0.)
        vsqrt2 = max(2*self.PHI, 0.)
        VT = self.VTO + svt + self.GAMMA * \
            (math.sqrt(vsqrt1) - math.sqrt(vsqrt2))
        if vgs < VT:
            # derivative of the subthreshold leakage expression in get_ids
            gmd = options.iea / VT / 100
        else:
            if vds < vgs -VT -0.5*self.LAMBDA*(VT - vgs)**2: # correction term disc. due to LAMBDA
                gmd = self.KP * device.W / device.L * (vgs - vds - VT)
            else:
                gmd = 0.5 * self.KP * self.LAMBDA * device.W / device.L * \
                    (vgs - VT)**2
        gmd = (1 + skp) * gmd * device.M / device.N
        if debug:
            print("gmd %g" % gmd)
        return gmd
    @utilities.memoize
    def get_gm(self, device, voltages):
        """Get the gate-source transconductance
        Mathematically:
        .. math::
            g_{ms} = \\frac{dI_{DS}}{d(VG-VS)}
        Often this is referred to as just :math:`g_m`.
        **Parameters:**
        device : object
            The device object holding the device parameters
            as attributes.
        voltages : tuple
            A tuple containing the voltages applied to the driving ports.
            In this case, the tuple is ``(vds, vgs, vbs)``.
        **Returns:**
        gmb : float
            The gate-source transconductace.
        """
        (vds, vgs, vbs) = voltages
        debug = False
        svt, skp = self.get_svt_skp(device, debug=False)
        assert vds >= 0
        vsqrt1 = max(-vbs + 2*self.PHI, 0.)
        vsqrt2 = max(2*self.PHI, 0.)
        VT = self.VTO + svt + self.GAMMA * \
            (math.sqrt(vsqrt1) - math.sqrt(vsqrt2))
        if vgs < VT:
            gm = options.iea / VT / 100
        else:
            if vds < vgs - VT:
                gm = self.KP * device.W / device.L * vds
            else:
                # saturation: product rule over the square law times the
                # channel-length-modulation factor
                gm = -0.5*self.KP*self.LAMBDA * device.W/device.L * (-self.GAMMA*(-vsqrt2**.5 + vsqrt1**.5) + vgs - self.VTO)**2 \
                     +0.5*self.KP * device.W/device.L *(self.LAMBDA*( self.GAMMA*(-vsqrt2**.5 + vsqrt1**.5) + vds - vgs + self.VTO) + 1.0) *\
                     (-2 * self.GAMMA * (-vsqrt2**.5 + vsqrt1**.5) + 2*vgs - 2*self.VTO)
        gm = (1 + skp) * gm * device.M / device.N
        if debug:
            print("gmg %g" % gm)
        return gm
    def _self_check(self):
        """Performs sanity check on the model parameters."""
        ret = True, ""
        if self.NSUB is not None and self.NSUB < 0:
            ret = (False, "NSUB " + str(self.NSUB))
        elif self.U0 is not None and not self.U0 > 0:
            ret = (False, "UO " + str(self.U0))
        elif not self.GAMMA > 0:
            ret = (False, "GAMMA " + str(self.GAMMA))
        elif not self.PHI > 0.1:
            ret = (False, "PHI " + str(self.PHI))
        elif self.AVT and self.AVT < 0:
            ret = (False, "AVT " + str(self.AVT))
        elif self.AKP and self.AKP < 0:
            ret = (False, "AKP " + str(self.AKP))
        return ret
    def device_check(self, adev):
        """Performs sanity check on the device parameters."""
        if not adev.L > 0:
            ret = (False, "L")
        elif not adev.W > 0:
            ret = (False, "W")
        elif not adev.N > 0:
            ret = (False, "N")
        elif not adev.M > 0:
            ret = (False, "M")
        else:
            ret = (True, "")
        return ret
scrape_configs:
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints
selectors:
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running" | unknown | github | https://github.com/prometheus/prometheus | config/testdata/kubernetes_selectors_duplicated_role.bad.yml |
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/qcom,gcc-sc8180x.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Global Clock & Reset Controller on SC8180x
maintainers:
- Bjorn Andersson <bjorn.andersson@linaro.org>
description: |
Qualcomm global clock control module provides the clocks, resets and power
domains on SC8180x.
See also: include/dt-bindings/clock/qcom,gcc-sc8180x.h
properties:
compatible:
const: qcom,gcc-sc8180x
clocks:
items:
- description: Board XO source
- description: Board active XO source
- description: Sleep clock source
clock-names:
items:
- const: bi_tcxo
- const: bi_tcxo_ao
- const: sleep_clk
power-domains:
items:
- description: CX domain
required:
- compatible
- clocks
- clock-names
- power-domains
- '#power-domain-cells'
allOf:
- $ref: qcom,gcc.yaml#
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/power/qcom-rpmpd.h>
clock-controller@100000 {
compatible = "qcom,gcc-sc8180x";
reg = <0x00100000 0x1f0000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&rpmhcc RPMH_CXO_CLK_A>,
<&sleep_clk>;
clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk";
power-domains = <&rpmhpd SC8180X_CX>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/clock/qcom,gcc-sc8180x.yaml |
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os
import (
"internal/poll"
"io"
"runtime"
"syscall"
)
func (f *File) writeTo(w io.Writer) (written int64, handled bool, err error) {
	// There is no sendfile-style fast path for writing out on this
	// platform; report the request as unhandled so the caller falls back
	// to the generic copy loop. The named results are all zero values.
	return
}
// readFrom is basically a refactor of net.sendFile, but adapted to work for
// the target of *File. It tries to copy from r into f using the platform
// sendfile(2) fast path, reporting handled=false whenever the generic copy
// should be used instead.
func (f *File) readFrom(r io.Reader) (written int64, handled bool, err error) {
	var remain int64 = 0 // 0 indicates sending until EOF
	// Unwrap an io.LimitedReader so the byte budget can be passed down to
	// sendfile; the budget is restored into lr.N before returning.
	lr, ok := r.(*io.LimitedReader)
	if ok {
		remain, r = lr.N, lr.R
		if remain <= 0 {
			return 0, true, nil
		}
	}
	// Only *File sources (possibly wrapped to hide WriteTo) can use sendfile.
	var src *File
	switch v := r.(type) {
	case *File:
		src = v
	case fileWithoutWriteTo:
		src = v.File
	default:
		return 0, false, nil
	}
	if src.checkValid("ReadFrom") != nil {
		// Avoid returning the error as we report handled as false,
		// leave further error handling as the responsibility of the caller.
		return 0, false, nil
	}
	// If fd_in and fd_out refer to the same file and the source and target ranges overlap,
	// sendfile(2) on SunOS will allow this kind of overlapping and work like a memmove,
	// in this case the file content remains the same after copying, which is not what we want.
	// Thus, we just bail out here and leave it to generic copy when it's a file copying itself.
	if f.pfd.Sysfd == src.pfd.Sysfd {
		return 0, false, nil
	}
	// sendfile() on illumos seems to incur intermittent failures when the
	// target file is a standard stream (stdout/stderr), we hereby skip any
	// anything other than regular files conservatively and leave them to generic copy.
	// Check out https://go.dev/issue/68863 for more details.
	if runtime.GOOS == "illumos" {
		fi, err := f.Stat()
		if err != nil {
			return 0, false, nil
		}
		st, ok := fi.Sys().(*syscall.Stat_t)
		if !ok {
			return 0, false, nil
		}
		if typ := st.Mode & syscall.S_IFMT; typ != syscall.S_IFREG {
			return 0, false, nil
		}
	}
	sc, err := src.SyscallConn()
	if err != nil {
		return
	}
	// System call sendfile()s on Solaris and illumos support file-to-file copying.
	// Check out https://docs.oracle.com/cd/E86824_01/html/E54768/sendfile-3ext.html and
	// https://docs.oracle.com/cd/E88353_01/html/E37843/sendfile-3c.html and
	// https://illumos.org/man/3EXT/sendfile for more details.
	rerr := sc.Read(func(fd uintptr) bool {
		written, err, handled = poll.SendFile(&f.pfd, fd, remain)
		return true
	})
	if lr != nil {
		lr.N = remain - written
	}
	// The sendfile error takes precedence; rerr only surfaces when the
	// raw-conn Read callback machinery itself failed.
	if err == nil {
		err = rerr
	}
	return written, handled, wrapSyscallError("sendfile", err)
}
# swift_build_support/products/__init__.py ----------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
from .benchmarks import Benchmarks
from .cmark import CMark
from .earlyswiftdriver import EarlySwiftDriver
from .foundation import Foundation
from .indexstoredb import IndexStoreDB
from .libcxx import LibCXX
from .libdispatch import LibDispatch
from .libicu import LibICU
from .llbuild import LLBuild
from .lldb import LLDB
from .llvm import LLVM
from .ninja import Ninja
from .playgroundsupport import PlaygroundSupport
from .skstresstester import SKStressTester
from .sourcekitlsp import SourceKitLSP
from .swift import Swift
from .swiftdriver import SwiftDriver
from .swiftevolve import SwiftEvolve
from .swiftformat import SwiftFormat
from .swiftinspect import SwiftInspect
from .swiftpm import SwiftPM
from .swiftsyntax import SwiftSyntax
from .tsan_libdispatch import TSanLibDispatch
from .xctest import XCTest
# Public API of the products package. Kept alphabetized and duplicate-free:
# the previous revision listed 'Ninja' twice.
__all__ = [
    'Benchmarks',
    'CMark',
    'EarlySwiftDriver',
    'Foundation',
    'IndexStoreDB',
    'LLBuild',
    'LLDB',
    'LLVM',
    'LibCXX',
    'LibDispatch',
    'LibICU',
    'Ninja',
    'PlaygroundSupport',
    'SKStressTester',
    'SourceKitLSP',
    'Swift',
    'SwiftDriver',
    'SwiftEvolve',
    'SwiftFormat',
    'SwiftInspect',
    'SwiftPM',
    'SwiftSyntax',
    'TSanLibDispatch',
    'XCTest',
]
// Static imports.
import rootConst from "/static/absolute_root.js";
import testConst from "./module_test.js";
import * as NewModule from "./module_test.js";
import*as m from "./module_test.js";
import *as m from "./module_test.js";
import* as m from "./module_test.js";
import* as m from "./module_test.js";
import { testConst as alias } from "./module_test.js";
import { firstConst, secondConst } from "./module_test.js";
import {
firstVar1 as firstVarAlias,
$second_var_2 as secondVarAlias
} from "./module_test.js";
import relativeModule from "../nested/js/nested.js";
// Dynamic imports.
const dynamicModule = import("./module_test.js");
// Modules exports to aggregate modules.
export * from "./module_test.js";
export { testConst } from "./module_test.js";
export {
firstVar as firstVarAlias,
secondVar as secondVarAlias
} from "./module_test.js"; | javascript | github | https://github.com/django/django | tests/staticfiles_tests/project/documents/cached/module.js |
packages:
- 'apps/*'
- 'packages/*'
- 'bench/*'
- 'crates/*/js'
- 'turbopack/crates/*/js'
- 'turbopack/crates/turbopack-tests/tests/execution'
- 'turbopack/packages/*'
updateNotifier: false | unknown | github | https://github.com/vercel/next.js | pnpm-workspace.yaml |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import tempfile
import unittest
from pants.java.jar.shader import Shader
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_delete
class ShaderTest(unittest.TestCase):
  """Unit tests for pants' jarjar-based Shader helper.

  The jarjar jar and output jar paths are deliberately fake: the tests
  only inspect the rules and command line the Shader assembles, they
  never actually execute jarjar.
  """
  def setUp(self):
    # Fake paths; no file is ever opened at these locations.
    self.jarjar = '/not/really/jarjar.jar'
    self.shader = Shader(jarjar=self.jarjar)
    self.output_jar = '/not/really/shaded.jar'
  def populate_input_jar(self, *entries):
    """Create a temp jar containing the given entry names and return its path.

    Each entry is written with dummy content; the jar is deleted on cleanup.
    """
    fd, input_jar_path = tempfile.mkstemp()
    os.close(fd)
    self.addCleanup(safe_delete, input_jar_path)
    with open_zip(input_jar_path, 'w') as jar:
      for entry in entries:
        jar.writestr(entry, '0xCAFEBABE')
    return input_jar_path
  def test_assemble_default_rules(self):
    """The main class's package is excluded first, shading rules come last."""
    input_jar = self.populate_input_jar('org/pantsbuild/tools/fake/Main.class',
                                        'com/google/common/base/Function.class')
    rules = self.shader.assemble_binary_rules('org.pantsbuild.tools.fake.Main', input_jar)
    self.assertEqual(Shader.exclude_package('org.pantsbuild.tools.fake'), rules[0])
    self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1])
    self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1])
  def test_assemble_default_rules_default_package(self):
    """A main class in the default (unnamed) package excludes that package."""
    input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class')
    rules = self.shader.assemble_binary_rules('main', input_jar)
    self.assertEqual(Shader.exclude_package(), rules[0])
    self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1])
    self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1])
  def test_assemble_custom_rules(self):
    """Caller-supplied custom rules are placed before the defaults."""
    input_jar = self.populate_input_jar('main.class')
    rules = self.shader.assemble_binary_rules('main', input_jar,
                                              custom_rules=[Shader.shade_class('bob'),
                                                            Shader.exclude_class('fred')])
    self.assertEqual(Shader.shade_class('bob'), rules[0])
    self.assertEqual(Shader.exclude_class('fred'), rules[1])
    self.assertEqual(Shader.exclude_package(), rules[2])
    self.assertIn(Shader.exclude_package('javax.annotation'), rules[3:])
  def test_runner_command(self):
    """The generated java command invokes jarjar with a rules file on disk."""
    input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class')
    custom_rules = [Shader.exclude_package('log4j', recursive=True)]
    with self.shader.binary_shader(self.output_jar, 'main', input_jar,
                                   custom_rules=custom_rules) as shader:
      command = shader.command
      self.assertTrue(command.pop(0).endswith('java'))
      jar_or_cp = command.pop(0)
      # The java launch style may vary (classpath vs executable jar).
      self.assertIn(jar_or_cp, {'-cp', 'classpath', '-jar'})
      self.assertEqual(self.jarjar, os.path.abspath(command.pop(0)))
      if jar_or_cp != '-jar':
        # We don't really care what the name of the jarjar main class is - shader.command[2]
        command.pop(0)
      self.assertEqual('process', command.pop(0))
      rules_file = command.pop(0)
      self.assertTrue(os.path.exists(rules_file))
      with open(rules_file) as fp:
        lines = fp.read().splitlines()
        self.assertEqual('rule log4j.** log4j.@1', lines[0]) # The custom rule.
        self.assertEqual('rule * @1', lines[1]) # Exclude main's package.
        self.assertIn('rule javax.annotation.* javax.annotation.@1', lines) # Exclude system.
        self.assertEqual('rule com.google.common.base.* {}com.google.common.base.@1'
                         .format(Shader.SHADE_PREFIX), lines[-1]) # Shade the rest.
      self.assertEqual(input_jar, command.pop(0))
      self.assertEqual(self.output_jar, command.pop(0))
# -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.exif, implementing the exif namespace in gdata
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module maps elements from the {EXIF} namespace[1] to GData objects.
These elements describe image data, using exif attributes[2].
Picasa Web Albums uses the exif namespace to represent Exif data encoded
in a photo [3].
Picasa Web Albums uses the following exif elements:
exif:distance
exif:exposure
exif:flash
exif:focallength
exif:fstop
exif:imageUniqueID
exif:iso
exif:make
exif:model
exif:tags
exif:time
[1]: http://schemas.google.com/photos/exif/2007.
[2]: http://en.wikipedia.org/wiki/Exif
[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference
"""
# Module metadata kept as plain attributes (legacy gdata convention).
__author__ = 'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
class ExifBaseElement(atom.AtomBase):
  """Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag
  """ % EXIF_NAMESPACE
  # XML tag name handled by the element; subclasses override this.
  _tag = ''
  _namespace = EXIF_NAMESPACE
  # Copy the parent's parsing tables so subclass additions don't mutate
  # atom.AtomBase's shared dictionaries.
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  def __init__(self, name=None, extension_elements=None,
               extension_attributes=None, text=None):
    # Fresh list/dict defaults are created per instance (the ``or``
    # fallback) so no mutable state is shared between elements.
    self.name = name
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class Distance(ExifBaseElement):
  "(float) The distance to the subject, e.g. 0.0"
  _tag = 'distance'  # serialized as <exif:distance>
def DistanceFromString(xml_string):
  """Deserialize an <exif:distance> element from its XML string form."""
  return atom.CreateClassFromXMLString(Distance, xml_string)
class Exposure(ExifBaseElement):
  "(float) The exposure time used, e.g. 0.025 or 8.0E4"
  _tag = 'exposure'  # serialized as <exif:exposure>
def ExposureFromString(xml_string):
  """Deserialize an <exif:exposure> element from its XML string form."""
  return atom.CreateClassFromXMLString(Exposure, xml_string)
class Flash(ExifBaseElement):
  """(string) Boolean value indicating whether the flash was used.

  The .text attribute will either be `true' or `false'

  As a convenience, this object's .bool method will return what you want,
  so you can say:

  flash_used = bool(Flash)
  """
  _tag = 'flash'  # serialized as <exif:flash>
  def __bool__(self):
    # ``__bool__`` must return a bool. The previous implementation
    # returned None (a TypeError under ``bool()``) whenever .text was
    # anything other than 'true'/'false', and raised AttributeError when
    # .text was unset. Treat only the literal 'true' (case-insensitive)
    # as truthy; everything else, including missing text, is False.
    return self.text is not None and self.text.lower() == 'true'
def FlashFromString(xml_string):
  """Deserialize an <exif:flash> element from its XML string form."""
  return atom.CreateClassFromXMLString(Flash, xml_string)
class Focallength(ExifBaseElement):
  "(float) The focal length used, e.g. 23.7"
  _tag = 'focallength'  # serialized as <exif:focallength>
def FocallengthFromString(xml_string):
  """Deserialize an <exif:focallength> element from its XML string form."""
  return atom.CreateClassFromXMLString(Focallength, xml_string)
class Fstop(ExifBaseElement):
  "(float) The fstop value used, e.g. 5.0"
  _tag = 'fstop'  # serialized as <exif:fstop>
def FstopFromString(xml_string):
  """Deserialize an <exif:fstop> element from its XML string form."""
  return atom.CreateClassFromXMLString(Fstop, xml_string)
class ImageUniqueID(ExifBaseElement):
  "(string) The unique image ID for the photo. Generated by Google Photo servers"
  _tag = 'imageUniqueID'  # serialized as <exif:imageUniqueID>
def ImageUniqueIDFromString(xml_string):
  """Deserialize an <exif:imageUniqueID> element from its XML string form."""
  return atom.CreateClassFromXMLString(ImageUniqueID, xml_string)
class Iso(ExifBaseElement):
  "(int) The iso equivalent value used, e.g. 200"
  _tag = 'iso'  # serialized as <exif:iso>
def IsoFromString(xml_string):
  """Deserialize an <exif:iso> element from its XML string form."""
  return atom.CreateClassFromXMLString(Iso, xml_string)
class Make(ExifBaseElement):
  "(string) The make of the camera used, e.g. Fictitious Camera Company"
  _tag = 'make'  # serialized as <exif:make>
def MakeFromString(xml_string):
  """Deserialize an <exif:make> element from its XML string form."""
  return atom.CreateClassFromXMLString(Make, xml_string)
class Model(ExifBaseElement):
  "(string) The model of the camera used,e.g AMAZING-100D"
  _tag = 'model'  # serialized as <exif:model>
def ModelFromString(xml_string):
  """Deserialize an <exif:model> element from its XML string form."""
  return atom.CreateClassFromXMLString(Model, xml_string)
class Time(ExifBaseElement):
  """(int) The date/time the photo was taken, e.g. 1180294337000.

  Represented as the number of milliseconds since January 1st, 1970.
  The value of this element will always be identical to the value
  of the <gphoto:timestamp>.

  Look at this object's .isoformat() for a human friendly datetime string:

  photo_epoch = Time.text # 1180294337000
  photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'

  Alternatively:
  photo_datetime = Time.datetime() # (requires python >= 2.3)
  """
  _tag = 'time'  # serialized as <exif:time>
  def isoformat(self):
    """(string) Return the timestamp as a ISO 8601 formatted string,
    e.g. '2007-05-27T19:32:17.000Z'
    """
    import time
    epoch = float(self.text)/1000
    return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))
  def datetime(self):
    """(datetime.datetime) Return the timestamp as a datetime.datetime object

    Requires python 2.3
    """
    import datetime
    epoch = float(self.text)/1000
    # Use UTC here to stay consistent with isoformat(), which formats via
    # time.gmtime(); datetime.fromtimestamp() would apply the local
    # timezone and yield a different wall-clock value.
    return datetime.datetime.utcfromtimestamp(epoch)
def TimeFromString(xml_string):
  """Deserialize an <exif:time> element from its XML string form."""
  return atom.CreateClassFromXMLString(Time, xml_string)
class Tags(ExifBaseElement):
  """The container for all exif elements.
  The <exif:tags> element can appear as a child of a photo entry.
  """
  _tag = 'tags'
  # Map each child tag in the exif namespace to (attribute name, class)
  # so the atom parser knows how to build and attach child elements.
  _children = atom.AtomBase._children.copy()
  _children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop)
  _children['{%s}make' % EXIF_NAMESPACE] = ('make', Make)
  _children['{%s}model' % EXIF_NAMESPACE] = ('model', Model)
  _children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance)
  _children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure)
  _children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash)
  _children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength)
  _children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso)
  _children['{%s}time' % EXIF_NAMESPACE] = ('time', Time)
  _children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID)
  def __init__(self, extension_elements=None, extension_attributes=None, text=None):
    ExifBaseElement.__init__(self, extension_elements=extension_elements,
                            extension_attributes=extension_attributes,
                            text=text)
    # Child attributes default to None until populated by the parser.
    self.fstop=None
    self.make=None
    self.model=None
    self.distance=None
    self.exposure=None
    self.flash=None
    self.focallength=None
    self.iso=None
    self.time=None
    self.imageUniqueID=None
def TagsFromString(xml_string):
  """Deserialize an <exif:tags> container element from its XML string form."""
  return atom.CreateClassFromXMLString(Tags, xml_string)
#include <ATen/native/transformers/xpu/flash_attn/flash_api.h>
#include <ATen/native/transformers/xpu/sdp_utils.h>
#include <c10/util/Array.h>
namespace sdp {
bool is_flash_attention_available() {
return sycltla::is_flash_attention_available();
}
inline bool is_flash_attention_available(sdp_params const& params, bool debug) {
if (!is_flash_attention_available()) {
if (debug) {
TORCH_WARN("Torch XPU was not compiled with flash attention.");
}
return false;
}
return true;
}
bool check_flash_attention_hardware_support(
sdp_params const& params,
bool debug) {
if (!at::xpu::is_available()) {
TORCH_CHECK(false, "FlashAttentionXPU: XPU device is not available.");
}
constexpr auto supported_architectures =
c10::array_of<sycl::ext::oneapi::experimental::architecture>(
sycl::ext::oneapi::experimental::architecture::intel_gpu_pvc,
sycl::ext::oneapi::experimental::architecture::intel_gpu_pvc_vg,
sycl::ext::oneapi::experimental::architecture::intel_gpu_bmg_g21);
auto* device_prop = at::xpu::getCurrentDeviceProperties();
auto device_architecture = device_prop->architecture;
if (std::find(
supported_architectures.begin(),
supported_architectures.end(),
device_architecture) == supported_architectures.end()) {
if (debug) {
TORCH_WARN(
"XPU device architecture does not support flash attention. Supported architectures are: intel_gpu_pvc, intel_gpu_pvc_vg, intel_gpu_bmg_g21.");
}
return false;
}
return true;
}
inline bool check_flash_attention_datatype(
sdp_params const& params,
bool debug) {
constexpr auto supported_dtypes =
c10::array_of<at::ScalarType>(at::kBFloat16, at::kHalf);
auto query_dtype = params.query.dtype();
if (!(query_dtype == params.key.dtype() &&
query_dtype == params.value.dtype() &&
(std::find(
supported_dtypes.begin(), supported_dtypes.end(), query_dtype) !=
supported_dtypes.end()))) {
if (debug) {
TORCH_WARN(
"FlashAttentionXPU expected query, key, and value to all be of dtype: {",
"bfloat16, half",
"}. Got ",
"Query dtype: ",
params.query.dtype(),
", Key dtype: ",
params.key.dtype(),
", and Value dtype: ",
params.value.dtype(),
" instead.");
}
return false;
}
return true;
}
inline bool check_flash_attention_head_dim_size(
sdp_params const& params,
bool debug) {
const int query_size_last = params.query.size(3);
const int key_size_last = params.key.size(3);
const int value_size_last = params.value.size(3);
const bool head_dims_equal = (query_size_last == key_size_last) &&
(query_size_last == value_size_last);
if (!head_dims_equal) {
if (debug) {
TORCH_WARN(
"FlashAttentionXPU requires q,k,v to have the same last dimension.",
" Got Query.size(-1): ",
query_size_last,
", Key.size(-1): ",
key_size_last,
", Value.size(-1): ",
value_size_last,
" instead.");
}
return false;
}
constexpr auto max_supported_headdim = 192;
if (query_size_last > max_supported_headdim) {
if (debug) {
TORCH_WARN(
"FlashAttentionXPU supports head dimension up to ",
max_supported_headdim,
". ",
"Got head dimension: ",
query_size_last,
" instead.");
}
return false;
}
return true;
}
inline bool check_flash_attention_layout(sdp_params const& params, bool debug) {
return sycltla::check_flash_attention_layout(params, debug);
}
inline bool check_flash_causal_non_square_seqlens(
sdp_params const& params,
bool debug) {
// FlashAttention 2 updated the default mask meaning for causal in this PR:
// 9e5e8bc91e it is now aligned to lower_right which would be a BC break
// for non-square masks. We will not support non-square masks for causal w/
// FAV2
if (params.is_causal && !params.query.is_nested() &&
!params.key.is_nested() &&
params.query.sym_size(-2) != params.key.sym_size(-2)) {
if (debug) {
TORCH_WARN(
"Flash attention XPU does not support the is_causal flag when seqlen_q != seqlen_k. ",
"Got seqlen_q: ",
params.query.sym_size(-2),
" seqlen_k: ",
params.key.sym_size(-2),
". If you would like to use causal attention with non-square masks, please see CausalAttnMask.");
}
return false;
}
return true;
}
// Flash attention on XPU is not deterministic, so it is ineligible whenever
// the global deterministic-algorithms mode is enabled.  Returns true when
// eligible; with debug=true the rejection emits a TORCH_WARN.
inline bool check_flash_attention_deterministic(
    const sdp_params& params,
    bool debug) {
  // Fast path: deterministic mode off -> no constraint violated.
  if (!at::globalContext().deterministicAlgorithms()) {
    return true;
  }
  if (debug) {
    TORCH_WARN("Flash attention XPU is not deterministic.");
  }
  return false;
}
// Top-level eligibility check for the XPU flash-attention SDPA backend.
// Runs every constraint in order and returns false at the first failure;
// with debug=true that failing constraint warns with the reason.
bool can_use_flash_attention(sdp_params const& params, bool debug) {
  // The array size (14) must match the number of entries below.
  constexpr auto constraints =
      std::array<bool (*)(sdp_params const&, bool), 14>{
          is_flash_attention_available,
          check_flash_attention_hardware_support,
          check_for_attn_mask,
          check_for_dropout,
          check_nested_tensor,
          check_tensor_shapes,
          check_batch_size_and_num_heads_dense<true /*supports GQA*/>,
          check_nonzero_sequence_lengths_dense,
          check_last_dim_stride_equals_1_dense<false /*ignore_singleton_dim*/>,
          check_flash_causal_non_square_seqlens,
          check_flash_attention_datatype,
          check_flash_attention_head_dim_size,
          check_flash_attention_layout,
          check_flash_attention_deterministic};
  for (auto& constraint : constraints) {
    if (!constraint(params, debug)) {
      return false;
    }
  }
  return true;
}
} // namespace sdp | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/transformers/xpu/sdp_utils.cpp |
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
nbaPGData = [
[ 15.8, 8.2, 8.1, 1.7 ],
[ 25.4, 10.3, 10.1, 1.8 ],
[ 22.1, 5.6, 3.1, 1.1 ],
[ 16.7, 3.4, 3.7, 1.0 ],
[ 16.2, 6.9, 5.6, 1.1 ],
[ 13.1, 5.3, 4.6, 1.6 ],
[ 17.3, 4.8, 4.1, 0.8 ],
[ 17.7, 5.0, 3.8, 2.0 ],
[ 26.9, 6.6, 4.5, 1.1 ],
[ 14.2, 7.0, 3.0, 1.5 ],
[ 15.2, 5.2, 3.8, 1.0 ],
[ 19.4, 6.2, 3.1, 1.1 ],
[ 12.4, 5.3, 2.6, 1.3 ],
[ 12.7, 6.2, 4.3, 1.3 ],
[ 8.3, 8.2, 4.0, 1.1 ],
[ 24.4, 5.1, 3.8, 1.1 ],
[ 11.6, 4.4, 2.8, 1.0 ],
[ 10.0, 2.8, 2.7, 0.9 ],
[ 18.6, 7.9, 5.4, 1.7 ],
[ 12.6, 6.6, 3.2, 0.9 ],
[ 7.5, 5.6, 3.1, 0.6 ],
[ 26.4, 6.1, 5.1, 1.6 ],
[ 10.2, 7.2, 6.9, 1.7 ],
[ 8.1, 2.9, 5.7, 1.2 ],
[ 9.5, 3.2, 2.3, 0.7 ],
[ 14.6, 5.3, 2.8, 0.6 ],
[ 13.4, 6.0, 4.3, 2.0 ],
[ 7.8, 4.4, 1.8, 1.0 ],
[ 19.4, 9.6, 3.7, 1.4 ],
[ 15.3, 7.8, 4.0, 1.2 ],
[ 29.1, 11.2, 8.1, 1.5 ],
[ 31.6, 10.4, 10.7, 1.6 ],
[ 25.3, 6.6, 4.5, 1.8 ],
[ 23.2, 5.5, 3.9, 1.1 ],
[ 17.9, 6.3, 3.1, 0.9 ],
[ 23.1, 10.7, 4.2, 2.0 ],
[ 28.9, 5.9, 2.7, 0.9 ],
[ 27.0, 5.9, 4.9, 0.9 ],
[ 11.1, 9.1, 4.1, 1.7 ],
[ 20.3, 5.8, 3.8, 1.2 ],
[ 25.2, 5.8, 3.2, 1.2 ],
[ 20.5, 6.3, 3.5, 1.3 ],
[ 21.1, 6.3, 4.8, 1.4 ],
[ 13.2, 4.6, 2.2, 1.0 ],
[ 18.0, 4.4, 3.8, 0.7 ],
[ 10.1, 4.5, 1.8, 0.5 ],
[ 15.4, 7.3, 3.9, 1.5 ],
[ 18.1, 9.2, 5.0, 2.0 ],
[ 22.4, 7.0, 4.8, 1.5 ],
[ 15.6, 4.8, 3.5, 1.4 ],
[ 12.8, 6.5, 4.7, 1.1 ],
[ 7.6, 4.7, 1.9, 0.7 ],
[ 6.9, 6.6, 3.1, 1.7 ],
[ 14.5, 5.2, 2.2, 0.7 ],
[ 16.9, 4.2, 3.4, 1.0 ],
[ 11.0, 5.6, 2.3, 0.5 ],
[ 12.8, 2.7, 2.6, 1.1 ],
[ 7.8, 6.7, 5.1, 1.4 ],
[ 11.0, 3.9, 3.2, 0.7 ],
[ 20.9, 5.2, 4.4, 1.6 ],
[ 23.5, 10.4, 7.8, 2.0 ],
[ 16.9, 4.3, 7.7, 1.2 ],
[ 30.1, 6.7, 5.4, 2.1 ],
[ 18.8, 6.2, 3.0, 1.1 ],
[ 22.2, 6.2, 3.0, 1.1 ],
[ 15.7, 5.9, 2.7, 1.2 ],
[ 21.2, 6.4, 4.7, 2.1 ],
[ 19.9, 10.2, 4.9, 1.9 ],
[ 10.1, 8.7, 4.3, 2.1 ],
[ 25.1, 6.8, 4.0, 0.9 ],
[ 19.5, 10.0, 4.2, 2.1 ],
[ 12.1, 3.5, 4.0, 1.1 ],
[ 19.0, 4.6, 4.1, 1.1 ],
[ 7.6, 4.1, 3.2, 0.9 ],
[ 14.1, 5.8, 3.8, 1.0 ],
[ 11.9, 5.3, 2.4, 0.8 ],
[ 11.9, 11.7, 6.0, 2.0 ],
[ 10.7, 6.4, 3.6, 1.2 ],
[ 12.8, 5.5, 3.4, 1.0 ],
[ 16.4, 4.7, 3.4, 0.7 ],
[ 9.9, 3.4, 3.5, 1.3 ],
[ 14.1, 5.8, 2.9, 0.9 ],
[ 15.3, 6.1, 2.9, 1.2 ],
[ 19.6, 4.7, 3.0, 1.1 ],
[ 12.6, 6.5, 4.0, 3.4 ],
[ 13.2, 3.3, 3.4, 1.2 ],
[ 10.3, 5.4, 2.2, 0.5 ],
[ 15.6, 10.2, 4.3, 1.3 ],
[ 12.2, 6.4, 3.4, 1.5 ],
[ 17.6, 5.6, 4.0, 1.2 ],
[ 15.5, 7.9, 8.7, 1.4 ],
[ 15.9, 7.6, 3.0, 0.7 ],
[ 15.0, 6.0, 4.5, 1.3 ],
[ 9.0, 4.8, 2.3, 1.5 ],
[ 12.6, 2.3, 1.8, 0.7 ],
[ 27.1, 6.0, 5.3, 0.8 ],
[ 27.4, 6.3, 4.3, 1.3 ],
[ 21.5, 8.1, 3.6, 1.7 ],
[ 20.3, 6.6, 3.4, 1.2 ],
[ 17.5, 7.5, 4.0, 1.2 ],
[ 22.0, 6.4, 4.9, 1.9 ],
[ 17.5, 4.5, 4.3, 0.9 ],
[ 8.2, 4.5, 5.6, 1.0 ],
[ 16.0, 4.2, 2.7, 0.7 ],
[ 13.9, 3.9, 2.8, 1.2 ],
[ 6.7, 4.0, 4.0, 0.7 ],
[ 12.6, 7.6, 2.3, 1.3 ],
[ 7.5, 3.3, 2.6, 0.6 ],
]
# Scale each player's stat row to unit L2 norm (sklearn defaults:
# norm='l2', axis=1), making rows comparable regardless of magnitude.
normalized_data = preprocessing.normalize(nbaPGData)
# Named example datasets (raw vs. row-normalized stats), each paired with a
# list 'k' of values to try -- presumably cluster counts for a clustering
# demo; TODO confirm against the consumer of this dict.
Examples = {
    'pgNotNormalized': {
        'data': nbaPGData,
        'k': [3, 2, 4],
    },
    'pgNormalized': {
        'data': normalized_data,
        'k': [2, 4, 3],
    },
} | unknown | codeparrot/codeparrot-clean | ||
/* gzread.c -- zlib functions for reading gzip files
* Copyright (C) 2004-2017 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "gzguts.h"
/* Use read() to load a buffer -- return -1 on error, otherwise 0.  Read from
   state->fd, and update state->eof, state->err, and state->msg as appropriate.
   This function needs to loop on read(), since read() is not guaranteed to
   read the number of bytes requested, depending on the type of descriptor. */
local int gz_load(gz_statep state, unsigned char *buf, unsigned len,
                  unsigned *have) {
    int ret;
    /* cap each read() request so it stays well within the range that
       read()'s signed return value can report */
    unsigned get, max = ((unsigned)-1 >> 2) + 1;

    *have = 0;                  /* bytes accumulated in buf so far */
    do {
        get = len - *have;
        if (get > max)
            get = max;
        ret = read(state->fd, buf + *have, get);
        if (ret <= 0)           /* error (<0) or end of file (==0) */
            break;
        *have += (unsigned)ret;
    } while (*have < len);
    if (ret < 0) {
        gz_error(state, Z_ERRNO, zstrerror());
        return -1;
    }
    if (ret == 0)
        state->eof = 1;         /* remember end of file for gz_avail() */
    return 0;
}
/* Load up input buffer and set eof flag if last data loaded -- return -1 on
   error, 0 otherwise.  Note that the eof flag is set when the end of the input
   file is reached, even though there may be unused data in the buffer.  Once
   that data has been used, no more attempts will be made to read the file.
   If strm->avail_in != 0, then the current data is moved to the beginning of
   the input buffer, and then the remainder of the buffer is loaded with the
   available data from the input file. */
local int gz_avail(gz_statep state) {
    unsigned got;
    z_streamp strm = &(state->strm);

    /* give up immediately if a fatal (non-recoverable) error is recorded */
    if (state->err != Z_OK && state->err != Z_BUF_ERROR)
        return -1;
    if (state->eof == 0) {
        if (strm->avail_in) {   /* copy what's there to the start */
            /* forward byte copy is safe: destination (buffer start) is at or
               before the source within the same buffer */
            unsigned char *p = state->in;
            unsigned const char *q = strm->next_in;
            unsigned n = strm->avail_in;
            do {
                *p++ = *q++;
            } while (--n);
        }
        if (gz_load(state, state->in + strm->avail_in,
                    state->size - strm->avail_in, &got) == -1)
            return -1;
        strm->avail_in += got;
        strm->next_in = state->in;
    }
    return 0;
}
/* Look for gzip header, set up for inflate or copy.  state->x.have must be 0.
   If this is the first time in, allocate required memory.  state->how will be
   left unchanged if there is no more input data available, will be set to COPY
   if there is no gzip header and direct copying will be performed, or it will
   be set to GZIP for decompression.  If direct copying, then leftover input
   data from the input buffer will be copied to the output buffer.  In that
   case, all further file reads will be directly to either the output buffer or
   a user buffer.  If decompressing, the inflate state will be initialized.
   gz_look() will return 0 on success or -1 on failure. */
local int gz_look(gz_statep state) {
    z_streamp strm = &(state->strm);

    /* allocate read buffers and inflate memory */
    if (state->size == 0) {
        /* allocate buffers: input buffer of state->want bytes, output buffer
           twice that (the 2x slack is relied on elsewhere, e.g. gzungetc) */
        state->in = (unsigned char *)malloc(state->want);
        state->out = (unsigned char *)malloc(state->want << 1);
        if (state->in == NULL || state->out == NULL) {
            free(state->out);
            free(state->in);
            gz_error(state, Z_MEM_ERROR, "out of memory");
            return -1;
        }
        state->size = state->want;

        /* allocate inflate memory */
        state->strm.zalloc = Z_NULL;
        state->strm.zfree = Z_NULL;
        state->strm.opaque = Z_NULL;
        state->strm.avail_in = 0;
        state->strm.next_in = Z_NULL;
        if (inflateInit2(&(state->strm), 15 + 16) != Z_OK) {    /* gunzip */
            free(state->out);
            free(state->in);
            state->size = 0;        /* mark buffers as not allocated */
            gz_error(state, Z_MEM_ERROR, "out of memory");
            return -1;
        }
    }

    /* get at least the magic bytes in the input buffer */
    if (strm->avail_in < 2) {
        if (gz_avail(state) == -1)
            return -1;
        if (strm->avail_in == 0)
            return 0;
    }

    /* look for gzip magic bytes -- if there, do gzip decoding (note: there is
       a logical dilemma here when considering the case of a partially written
       gzip file, to wit, if a single 31 byte is written, then we cannot tell
       whether this is a single-byte file, or just a partially written gzip
       file -- for here we assume that if a gzip file is being written, then
       the header will be written in a single operation, so that reading a
       single byte is sufficient indication that it is not a gzip file) */
    if (strm->avail_in > 1 &&
            strm->next_in[0] == 31 && strm->next_in[1] == 139) {
        inflateReset(strm);
        state->how = GZIP;
        state->direct = 0;
        return 0;
    }

    /* no gzip header -- if we were decoding gzip before, then this is trailing
       garbage.  Ignore the trailing garbage and finish. */
    if (state->direct == 0) {
        strm->avail_in = 0;
        state->eof = 1;
        state->x.have = 0;
        return 0;
    }

    /* doing raw i/o, copy any leftover input to output -- this assumes that
       the output buffer is larger than the input buffer, which also assures
       space for gzungetc() */
    state->x.next = state->out;
    memcpy(state->x.next, strm->next_in, strm->avail_in);
    state->x.have = strm->avail_in;
    strm->avail_in = 0;
    state->how = COPY;
    state->direct = 1;
    return 0;
}
/* Decompress from input to the provided next_out and avail_out in the state.
   On return, state->x.have and state->x.next point to the just decompressed
   data.  If the gzip stream completes, state->how is reset to LOOK to look for
   the next gzip stream or raw data, once state->x.have is depleted.  Returns 0
   on success, -1 on failure. */
local int gz_decomp(gz_statep state) {
    int ret = Z_OK;
    unsigned had;
    z_streamp strm = &(state->strm);

    /* fill output buffer up to end of deflate stream */
    had = strm->avail_out;      /* remember starting space to compute have */
    do {
        /* get more input for inflate() */
        if (strm->avail_in == 0 && gz_avail(state) == -1)
            return -1;
        if (strm->avail_in == 0) {
            /* truncated stream: report but keep what was decompressed */
            gz_error(state, Z_BUF_ERROR, "unexpected end of file");
            break;
        }

        /* decompress and handle errors */
        ret = inflate(strm, Z_NO_FLUSH);
        if (ret == Z_STREAM_ERROR || ret == Z_NEED_DICT) {
            gz_error(state, Z_STREAM_ERROR,
                     "internal error: inflate stream corrupt");
            return -1;
        }
        if (ret == Z_MEM_ERROR) {
            gz_error(state, Z_MEM_ERROR, "out of memory");
            return -1;
        }
        if (ret == Z_DATA_ERROR) {              /* deflate stream invalid */
            gz_error(state, Z_DATA_ERROR,
                     strm->msg == NULL ? "compressed data error" : strm->msg);
            return -1;
        }
    } while (strm->avail_out && ret != Z_STREAM_END);

    /* update available output */
    state->x.have = had - strm->avail_out;
    state->x.next = strm->next_out - state->x.have;

    /* if the gzip stream completed successfully, look for another */
    if (ret == Z_STREAM_END)
        state->how = LOOK;

    /* good decompression */
    return 0;
}
/* Fetch data and put it in the output buffer.  Assumes state->x.have is 0.
   Data is either copied from the input file or decompressed from the input
   file depending on state->how.  If state->how is LOOK, then a gzip header is
   looked for to determine whether to copy or decompress.  Returns -1 on error,
   otherwise 0.  gz_fetch() will leave state->how as COPY or GZIP unless the
   end of the input file has been reached and all data has been processed. */
local int gz_fetch(gz_statep state) {
    z_streamp strm = &(state->strm);

    /* loop until some output is produced or the input is exhausted */
    do {
        switch(state->how) {
        case LOOK:      /* -> LOOK, COPY (only if never GZIP), or GZIP */
            if (gz_look(state) == -1)
                return -1;
            if (state->how == LOOK)
                return 0;       /* no input available yet -- nothing to do */
            break;
        case COPY:      /* -> COPY */
            if (gz_load(state, state->out, state->size << 1, &(state->x.have))
                    == -1)
                return -1;
            state->x.next = state->out;
            return 0;
        case GZIP:      /* -> GZIP or LOOK (if end of gzip stream) */
            strm->avail_out = state->size << 1;
            strm->next_out = state->out;
            if (gz_decomp(state) == -1)
                return -1;
        }
    } while (state->x.have == 0 && (!state->eof || strm->avail_in));
    return 0;
}
/* Skip len uncompressed bytes of output.  Return -1 on error, 0 on success. */
local int gz_skip(gz_statep state, z_off64_t len) {
    unsigned n;

    /* skip over len bytes or reach end-of-file, whichever comes first */
    while (len)
        /* skip over whatever is in output buffer */
        if (state->x.have) {
            /* n = min(len, state->x.have), guarded against len overflowing
               an unsigned (GT_OFF checks the off_t/unsigned width mismatch) */
            n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > len ?
                (unsigned)len : state->x.have;
            state->x.have -= n;
            state->x.next += n;
            state->x.pos += n;
            len -= n;
        }

        /* output buffer empty -- return if we're at the end of the input */
        else if (state->eof && state->strm.avail_in == 0)
            break;

        /* need more data to skip -- load up output buffer */
        else {
            /* get more output, looking for header if required */
            if (gz_fetch(state) == -1)
                return -1;
        }
    return 0;
}
/* Read len bytes into buf from file, or less than len up to the end of the
   input.  Return the number of bytes read.  If zero is returned, either the
   end of file was reached, or there was an error.  state->err must be
   consulted in that case to determine which. */
local z_size_t gz_read(gz_statep state, voidp buf, z_size_t len) {
    z_size_t got;
    unsigned n;

    /* if len is zero, avoid unnecessary operations */
    if (len == 0)
        return 0;

    /* process a skip request (deferred from gzseek) */
    if (state->seek) {
        state->seek = 0;
        if (gz_skip(state, state->skip) == -1)
            return 0;
    }

    /* get len bytes to buf, or less than len if at the end */
    got = 0;
    do {
        /* set n to the maximum amount of len that fits in an unsigned int */
        n = (unsigned)-1;
        if (n > len)
            n = (unsigned)len;

        /* first just try copying data from the output buffer */
        if (state->x.have) {
            if (state->x.have < n)
                n = state->x.have;
            memcpy(buf, state->x.next, n);
            state->x.next += n;
            state->x.have -= n;
        }

        /* output buffer empty -- return if we're at the end of the input */
        else if (state->eof && state->strm.avail_in == 0) {
            state->past = 1;        /* tried to read past end */
            break;
        }

        /* need output data -- for small len or new stream load up our output
           buffer */
        else if (state->how == LOOK || n < (state->size << 1)) {
            /* get more output, looking for header if required */
            if (gz_fetch(state) == -1)
                return 0;
            continue;       /* no progress yet -- go back to copy above */
            /* the copy above assures that we will leave with space in the
               output buffer, allowing at least one gzungetc() to succeed */
        }

        /* large len -- read directly into user buffer */
        else if (state->how == COPY) {      /* read directly */
            if (gz_load(state, (unsigned char *)buf, n, &n) == -1)
                return 0;
        }

        /* large len -- decompress directly into user buffer */
        else {  /* state->how == GZIP */
            state->strm.avail_out = n;
            state->strm.next_out = (unsigned char *)buf;
            if (gz_decomp(state) == -1)
                return 0;
            n = state->x.have;
            state->x.have = 0;
        }

        /* update progress */
        len -= n;
        buf = (char *)buf + n;
        got += n;
        state->x.pos += n;      /* track uncompressed position */
    } while (len);

    /* return number of bytes read into user buffer */
    return got;
}
/* -- see zlib.h -- */
/* Public int-returning wrapper over gz_read(); rejects requests whose byte
   count cannot be represented in the int return value. */
int ZEXPORT gzread(gzFile file, voidp buf, unsigned len) {
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return -1;
    state = (gz_statep)file;

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return -1;

    /* since an int is returned, make sure len fits in one, otherwise return
       with an error (this avoids a flaw in the interface) */
    if ((int)len < 0) {
        gz_error(state, Z_STREAM_ERROR, "request does not fit in an int");
        return -1;
    }

    /* read len or fewer bytes to buf */
    len = (unsigned)gz_read(state, buf, len);

    /* check for an error */
    if (len == 0 && state->err != Z_OK && state->err != Z_BUF_ERROR)
        return -1;

    /* return the number of bytes read (this is assured to fit in an int) */
    return (int)len;
}
/* -- see zlib.h -- */
/* fread()-style wrapper over gz_read(): reads up to nitems items of size
   bytes, returning the number of *complete* items read. */
z_size_t ZEXPORT gzfread(voidp buf, z_size_t size, z_size_t nitems, gzFile file) {
    z_size_t len;
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return 0;
    state = (gz_statep)file;

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return 0;

    /* compute bytes to read -- error on overflow (the division check detects
       wrap-around in the size * nitems multiplication) */
    len = nitems * size;
    if (size && len / size != nitems) {
        gz_error(state, Z_STREAM_ERROR, "request does not fit in a size_t");
        return 0;
    }

    /* read len or fewer bytes to buf, return the number of full items read */
    return len ? gz_read(state, buf, len) / size : 0;
}
/* -- see zlib.h -- */
/* zlib.h defines gzgetc as a macro for speed; undefine it here so this
   translation unit provides the real function under the plain name. */
#ifdef Z_PREFIX_SET
#  undef z_gzgetc
#else
#  undef gzgetc
#endif

/* Read a single byte, returning it as an unsigned char value (0..255), or
   -1 on end of file or error. */
int ZEXPORT gzgetc(gzFile file) {
    unsigned char buf[1];
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return -1;
    state = (gz_statep)file;

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return -1;

    /* try output buffer (no need to check for skip request) -- this is the
       fast path that the gzgetc macro in zlib.h mirrors */
    if (state->x.have) {
        state->x.have--;
        state->x.pos++;
        return *(state->x.next)++;
    }

    /* nothing there -- try gz_read() */
    return gz_read(state, buf, 1) < 1 ? -1 : buf[0];
}
/* Exported alias used by the gzgetc() macro in zlib.h when the buffer fast
   path cannot be taken. */
int ZEXPORT gzgetc_(gzFile file) {
    return gzgetc(file);
}
/* -- see zlib.h -- */
/* Push back one byte c so the next read returns it.  Returns c on success,
   -1 on failure (bad state, c < 0, or no room left). */
int ZEXPORT gzungetc(int c, gzFile file) {
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return -1;
    state = (gz_statep)file;

    /* in case this was just opened, set up the input buffer (buffers are
       allocated lazily by gz_look) */
    if (state->mode == GZ_READ && state->how == LOOK && state->x.have == 0)
        (void)gz_look(state);

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return -1;

    /* process a skip request */
    if (state->seek) {
        state->seek = 0;
        if (gz_skip(state, state->skip) == -1)
            return -1;
    }

    /* can't push EOF */
    if (c < 0)
        return -1;

    /* if output buffer empty, put byte at end (allows more pushing) */
    if (state->x.have == 0) {
        state->x.have = 1;
        state->x.next = state->out + (state->size << 1) - 1;
        state->x.next[0] = (unsigned char)c;
        state->x.pos--;         /* logical position moves back one byte */
        state->past = 0;        /* pushing back clears the past-EOF flag */
        return c;
    }

    /* if no room, give up (must have already done a gzungetc()) */
    if (state->x.have == (state->size << 1)) {
        gz_error(state, Z_DATA_ERROR, "out of room to push characters");
        return -1;
    }

    /* slide output data if needed and insert byte before existing data */
    if (state->x.next == state->out) {
        /* shift the buffered data to the far end to open space in front */
        unsigned char *src = state->out + state->x.have;
        unsigned char *dest = state->out + (state->size << 1);
        while (src > state->out)
            *--dest = *--src;
        state->x.next = dest;
    }
    state->x.have++;
    state->x.next--;
    state->x.next[0] = (unsigned char)c;
    state->x.pos--;
    state->past = 0;
    return c;
}
/* -- see zlib.h -- */
/* Read a line into buf (at most len - 1 bytes, NUL-terminated), stopping
   after a newline.  Returns buf, or NULL on error or immediate EOF. */
char * ZEXPORT gzgets(gzFile file, char *buf, int len) {
    unsigned left, n;
    char *str;
    unsigned char *eol;
    gz_statep state;

    /* check parameters and get internal structure */
    if (file == NULL || buf == NULL || len < 1)
        return NULL;
    state = (gz_statep)file;

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return NULL;

    /* process a skip request */
    if (state->seek) {
        state->seek = 0;
        if (gz_skip(state, state->skip) == -1)
            return NULL;
    }

    /* copy output bytes up to new line or len - 1, whichever comes first --
       append a terminating zero to the string (we don't check for a zero in
       the contents, let the user worry about that) */
    str = buf;                  /* remember start to detect empty result */
    left = (unsigned)len - 1;   /* reserve one byte for the terminator */
    if (left) do {
        /* assure that something is in the output buffer */
        if (state->x.have == 0 && gz_fetch(state) == -1)
            return NULL;                /* error */
        if (state->x.have == 0) {       /* end of file */
            state->past = 1;            /* read past end */
            break;                      /* return what we have */
        }

        /* look for end-of-line in current output buffer */
        n = state->x.have > left ? left : state->x.have;
        eol = (unsigned char *)memchr(state->x.next, '\n', n);
        if (eol != NULL)
            n = (unsigned)(eol - state->x.next) + 1;    /* include the '\n' */

        /* copy through end-of-line, or remainder if not found */
        memcpy(buf, state->x.next, n);
        state->x.have -= n;
        state->x.next += n;
        state->x.pos += n;
        left -= n;
        buf += n;
    } while (left && eol == NULL);

    /* return terminated string, or if nothing, end of file */
    if (buf == str)
        return NULL;
    buf[0] = 0;
    return str;
}
/* -- see zlib.h -- */
/* Report whether the file is being read transparently (1) or as a gzip
   stream (0); probes the header first if it hasn't been examined yet. */
int ZEXPORT gzdirect(gzFile file) {
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return 0;
    state = (gz_statep)file;

    /* if the state is not known, but we can find out, then do so (this is
       mainly for right after a gzopen() or gzdopen()) */
    if (state->mode == GZ_READ && state->how == LOOK && state->x.have == 0)
        (void)gz_look(state);

    /* return 1 if transparent, 0 if processing a gzip stream */
    return state->direct;
}
/* -- see zlib.h -- */
/* Close a file opened for reading: release inflate state and buffers, close
   the descriptor, and free the state.  Returns Z_OK, Z_BUF_ERROR (if the
   stream ended prematurely), Z_ERRNO (close failed), or Z_STREAM_ERROR. */
int ZEXPORT gzclose_r(gzFile file) {
    int ret, err;
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return Z_STREAM_ERROR;
    state = (gz_statep)file;

    /* check that we're reading */
    if (state->mode != GZ_READ)
        return Z_STREAM_ERROR;

    /* free memory and close file (size != 0 means buffers were allocated) */
    if (state->size) {
        inflateEnd(&(state->strm));
        free(state->out);
        free(state->in);
    }
    /* preserve a pending Z_BUF_ERROR (truncated input) in the return value */
    err = state->err == Z_BUF_ERROR ? Z_BUF_ERROR : Z_OK;
    gz_error(state, Z_OK, NULL);
    free(state->path);
    ret = close(state->fd);
    free(state);
    return ret ? Z_ERRNO : err;
} | c | github | https://github.com/opencv/opencv | 3rdparty/zlib/gzread.c |
from __future__ import unicode_literals
from datetime import datetime, timedelta
from django.template.defaultfilters import timeuntil_filter
from django.test import SimpleTestCase
from django.test.utils import requires_tz_support
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeuntilTests(TimezoneTestCase):
    """Template-rendering tests for the ``timeuntil`` filter.

    Expected outputs use '\xa0' (non-breaking space) between the number and
    the unit, matching the filter's formatting.
    """

    # Default compare with datetime.now()
    @setup({'timeuntil01': '{{ a|timeuntil }}'})
    def test_timeuntil01(self):
        output = self.engine.render_to_string('timeuntil01', {'a': datetime.now() + timedelta(minutes=2, seconds=10)})
        self.assertEqual(output, '2\xa0minutes')

    @setup({'timeuntil02': '{{ a|timeuntil }}'})
    def test_timeuntil02(self):
        output = self.engine.render_to_string('timeuntil02', {'a': (datetime.now() + timedelta(days=1, seconds=10))})
        self.assertEqual(output, '1\xa0day')

    @setup({'timeuntil03': '{{ a|timeuntil }}'})
    def test_timeuntil03(self):
        output = self.engine.render_to_string(
            'timeuntil03', {'a': (datetime.now() + timedelta(hours=8, minutes=10, seconds=10))}
        )
        self.assertEqual(output, '8\xa0hours, 10\xa0minutes')

    # Compare to a given parameter
    @setup({'timeuntil04': '{{ a|timeuntil:b }}'})
    def test_timeuntil04(self):
        output = self.engine.render_to_string(
            'timeuntil04',
            {'a': self.now - timedelta(days=1), 'b': self.now - timedelta(days=2)},
        )
        self.assertEqual(output, '1\xa0day')

    @setup({'timeuntil05': '{{ a|timeuntil:b }}'})
    def test_timeuntil05(self):
        output = self.engine.render_to_string(
            'timeuntil05',
            {'a': self.now - timedelta(days=2), 'b': self.now - timedelta(days=2, minutes=1)},
        )
        self.assertEqual(output, '1\xa0minute')

    # Regression for #7443: a time in the past renders as '0 minutes'
    @setup({'timeuntil06': '{{ earlier|timeuntil }}'})
    def test_timeuntil06(self):
        output = self.engine.render_to_string('timeuntil06', {'earlier': self.now - timedelta(days=7)})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil07': '{{ earlier|timeuntil:now }}'})
    def test_timeuntil07(self):
        output = self.engine.render_to_string(
            'timeuntil07', {'now': self.now, 'earlier': self.now - timedelta(days=7)}
        )
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil08': '{{ later|timeuntil }}'})
    def test_timeuntil08(self):
        output = self.engine.render_to_string('timeuntil08', {'later': self.now + timedelta(days=7, hours=1)})
        self.assertEqual(output, '1\xa0week')

    @setup({'timeuntil09': '{{ later|timeuntil:now }}'})
    def test_timeuntil09(self):
        output = self.engine.render_to_string('timeuntil09', {'now': self.now, 'later': self.now + timedelta(days=7)})
        self.assertEqual(output, '1\xa0week')

    # Differing timezones are calculated correctly.
    @requires_tz_support
    @setup({'timeuntil10': '{{ a|timeuntil }}'})
    def test_timeuntil10(self):
        output = self.engine.render_to_string('timeuntil10', {'a': self.now_tz})
        self.assertEqual(output, '0\xa0minutes')

    @requires_tz_support
    @setup({'timeuntil11': '{{ a|timeuntil }}'})
    def test_timeuntil11(self):
        output = self.engine.render_to_string('timeuntil11', {'a': self.now_tz_i})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil12': '{{ a|timeuntil:b }}'})
    def test_timeuntil12(self):
        output = self.engine.render_to_string('timeuntil12', {'a': self.now_tz_i, 'b': self.now_tz})
        self.assertEqual(output, '0\xa0minutes')

    # Regression for #9065 (two date objects).
    @setup({'timeuntil13': '{{ a|timeuntil:b }}'})
    def test_timeuntil13(self):
        output = self.engine.render_to_string('timeuntil13', {'a': self.today, 'b': self.today})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil14': '{{ a|timeuntil:b }}'})
    def test_timeuntil14(self):
        output = self.engine.render_to_string('timeuntil14', {'a': self.today, 'b': self.today - timedelta(hours=24)})
        self.assertEqual(output, '1\xa0day')
class FunctionTests(SimpleTestCase):
    """Direct unit tests calling the timeuntil filter function."""

    def test_until_now(self):
        # timedelta(1, 1) == 1 day and 1 second in the future
        self.assertEqual(timeuntil_filter(datetime.now() + timedelta(1, 1)), '1\xa0day')
def test_explicit_date(self):
self.assertEqual(timeuntil_filter(datetime(2005, 12, 30), datetime(2005, 12, 29)), '1\xa0day') | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
from openerp import workflow
class res_company(osv.Model):
    """Extend res.company with a default terms-and-conditions text used on
    quotations."""
    _inherit = "res.company"

    _columns = {
        'sale_note': fields.text('Default Terms and Conditions', translate=True, help="Default terms and conditions for quotations."),
    }
class sale_order(osv.osv):
_name = "sale.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Order"
_track = {
'state': {
'sale.mt_order_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state in ['manual'],
'sale.mt_order_sent': lambda self, cr, uid, obj, ctx=None: obj.state in ['sent']
},
}
def _amount_line_tax(self, cr, uid, line, context=None):
val = 0.0
for c in self.pool.get('account.tax').compute_all(cr, uid, line.tax_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.product_uom_qty, line.product_id, line.order_id.partner_id)['taxes']:
val += c.get('amount', 0.0)
return val
    def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
        """ Wrapper because of direct method passing as parameter for function fields """
        # Delegates straight to _amount_all so modules can override either.
        return self._amount_all(cr, uid, ids, field_name, arg, context=context)
    def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
        """Compute the untaxed, tax and total amounts of each order.

        Returns {order_id: {'amount_untaxed', 'amount_tax', 'amount_total'}},
        with amounts rounded in the order's pricelist currency.
        """
        cur_obj = self.pool.get('res.currency')
        res = {}
        for order in self.browse(cr, uid, ids, context=context):
            res[order.id] = {
                'amount_untaxed': 0.0,
                'amount_tax': 0.0,
                'amount_total': 0.0,
            }
            # val1 accumulates line subtotals, val accumulates line taxes
            val = val1 = 0.0
            cur = order.pricelist_id.currency_id
            for line in order.order_line:
                val1 += line.price_subtotal
                val += self._amount_line_tax(cr, uid, line, context=context)
            res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
            res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
            res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
        return res
    def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
        """Compute the invoiced percentage of each order (0.0 to 100.0),
        based on the untaxed amount of its non-draft, non-cancelled invoices.
        """
        res = {}
        for sale in self.browse(cursor, user, ids, context=context):
            if sale.invoiced:
                # fully invoiced orders short-circuit to 100%
                res[sale.id] = 100.0
                continue
            tot = 0.0
            for invoice in sale.invoice_ids:
                if invoice.state not in ('draft', 'cancel'):
                    tot += invoice.amount_untaxed
            if tot:
                # 'or 1.00' guards against division by zero on free orders
                res[sale.id] = min(100.0, tot * 100.0 / (sale.amount_untaxed or 1.00))
            else:
                res[sale.id] = 0.0
        return res
def _invoice_exists(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = False
if sale.invoice_ids:
res[sale.id] = True
return res
    def _invoiced(self, cursor, user, ids, name, arg, context=None):
        """Functional field: True when every non-cancelled invoice of the
        order is paid and at least one such invoice exists (orders still in
        'manual' state are never considered invoiced)."""
        res = {}
        for sale in self.browse(cursor, user, ids, context=context):
            res[sale.id] = True
            invoice_existence = False
            for invoice in sale.invoice_ids:
                if invoice.state!='cancel':
                    invoice_existence = True
                    if invoice.state != 'paid':
                        res[sale.id] = False
                        break
            if not invoice_existence or sale.state == 'manual':
                res[sale.id] = False
        return res
    def _invoiced_search(self, cursor, user, obj, name, args, context=None):
        """Search implementation for the 'invoiced' functional field.

        Translates ('invoiced', '=', value) domain terms into a direct SQL
        query over sale_order_invoice_rel / account_invoice.  The SQL is
        built only from constant fragments; no user input is interpolated.
        """
        if not len(args):
            return []
        clause = ''
        sale_clause = ''
        no_invoiced = False
        for arg in args:
            if (arg[1] == '=' and arg[2]) or (arg[1] == '!=' and not arg[2]):
                # searching for invoiced orders: any paid invoice qualifies
                clause += 'AND inv.state = \'paid\''
            else:
                # searching for not-invoiced orders: unpaid invoices, plus
                # (below) orders with no invoice at all
                clause += 'AND inv.state != \'cancel\' AND sale.state != \'cancel\'  AND inv.state <> \'paid\'  AND rel.order_id = sale.id '
                sale_clause = ',  sale_order AS sale '
                no_invoiced = True

        cursor.execute('SELECT rel.order_id ' \
                'FROM sale_order_invoice_rel AS rel, account_invoice AS inv '+ sale_clause + \
                'WHERE rel.invoice_id = inv.id ' + clause)
        res = cursor.fetchall()
        if no_invoiced:
            # also include orders that were never linked to any invoice
            cursor.execute('SELECT sale.id ' \
                    'FROM sale_order AS sale ' \
                    'WHERE sale.id NOT IN ' \
                        '(SELECT rel.order_id ' \
                        'FROM sale_order_invoice_rel AS rel) and sale.state != \'cancel\'')
            res.extend(cursor.fetchall())
        if not res:
            return [('id', '=', 0)]
        return [('id', 'in', [x[0] for x in res])]
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
    def _get_default_company(self, cr, uid, context=None):
        """Return the current user's company id, raising a user-facing error
        when the user has no default company."""
        company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
        if not company_id:
            raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
        return company_id
    def _get_default_section_id(self, cr, uid, context=None):
        """ Gives default section by checking if present in the context """
        # context takes priority; fall back to the user's own default team
        section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
        if not section_id:
            section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
        return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'name': fields.char('Order Reference', required=True, copy=False,
readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True),
'origin': fields.char('Source Document', help="Reference of the document that generated this sales order request."),
'client_order_ref': fields.char('Customer Reference', copy=False),
'state': fields.selection([
('draft', 'Draft Quotation'),
('sent', 'Quotation Sent'),
('cancel', 'Cancelled'),
('waiting_date', 'Waiting Schedule'),
('progress', 'Sales Order'),
('manual', 'Sale to Invoice'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
], 'Status', readonly=True, copy=False, help="Gives the status of the quotation or sales order.\
\nThe exception status is automatically set when a cancel operation occurs \
in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
but waiting for the scheduler to run on the order date.", select=True),
'date_order': fields.datetime('Date', required=True, readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=False),
'validity_date': fields.date('Expiration Date', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'create_date': fields.datetime('Creation Date', readonly=True, select=True, help="Date on which sales order is created."),
'date_confirm': fields.date('Confirmation Date', readonly=True, select=True, help="Date on which sales order is confirmed.", copy=False),
'user_id': fields.many2one('res.users', 'Salesperson', states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, select=True, track_visibility='always'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order."),
'partner_shipping_id': fields.many2one('res.partner', 'Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order."),
'order_policy': fields.selection([
('manual', 'On Demand'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""This field controls how invoice and delivery operations are synchronized."""),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency", readonly=True, required=True),
'project_id': fields.many2one('account.analytic.account', 'Contract / Analytic', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order."),
'order_line': fields.one2many('sale.order.line', 'order_id', 'Order Lines', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=True),
'invoice_ids': fields.many2many('account.invoice', 'sale_order_invoice_rel', 'order_id', 'invoice_id', 'Invoices', readonly=True, copy=False, help="This is the list of invoices that have been generated for this sales order. The same sales order may have been invoiced in several times (by line for example)."),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Paid',
fnct_search=_invoiced_search, type='boolean', help="It indicates that an invoice has been paid."),
'invoice_exists': fields.function(_invoice_exists, string='Invoiced',
fnct_search=_invoiced_search, type='boolean', help="It indicates that sales order has at least one invoice."),
'note': fields.text('Terms and conditions'),
'amount_untaxed': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The amount without tax.", track_visibility='always'),
'amount_tax': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The tax amount."),
'amount_total': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Total',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The total amount."),
'payment_term': fields.many2one('account.payment.term', 'Payment Term'),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'company_id': fields.many2one('res.company', 'Company'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', change_default=True),
'procurement_group_id': fields.many2one('procurement.group', 'Procurement group', copy=False),
}
_defaults = {
'date_order': fields.datetime.now,
'order_policy': 'manual',
'company_id': _get_default_company,
'state': 'draft',
'user_id': lambda obj, cr, uid, context: uid,
'name': lambda obj, cr, uid, context: '/',
'partner_invoice_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['invoice'])['invoice'],
'partner_shipping_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['delivery'])['delivery'],
'note': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.sale_note,
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_order = 'date_order desc, id desc'
# Form filling
def unlink(self, cr, uid, ids, context=None):
sale_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in sale_orders:
if s['state'] in ['draft', 'cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a confirmed sales order, you must cancel it before!'))
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def copy_quotation(self, cr, uid, ids, context=None):
id = self.copy(cr, uid, ids[0], context=context)
view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
view_id = view_ref and view_ref[1] or False,
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': id,
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def onchange_pricelist_id(self, cr, uid, ids, pricelist_id, order_lines, context=None):
context = context or {}
if not pricelist_id:
return {}
value = {
'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id
}
if not order_lines or order_lines == [(6, 0, [])]:
return {'value': value}
warning = {
'title': _('Pricelist Warning!'),
'message' : _('If you change the pricelist of this order (and eventually the currency), prices of existing order lines will not be updated.')
}
return {'warning': warning, 'value': value}
def get_salenote(self, cr, uid, ids, partner_id, context=None):
context_lang = context.copy()
if partner_id:
partner_lang = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).lang
context_lang.update({'lang': partner_lang})
return self.pool.get('res.users').browse(cr, uid, uid, context=context_lang).company_id.sale_note
def onchange_delivery_id(self, cr, uid, ids, company_id, partner_id, delivery_id, fiscal_position, context=None):
r = {'value': {}}
if not fiscal_position:
if not company_id:
company_id = self._get_default_company(cr, uid, context=context)
fiscal_position = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, delivery_id, context=context)
if fiscal_position:
r['value']['fiscal_position'] = fiscal_position
return r
    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        """Fill addresses, payment term, salesman, pricelist, fiscal position
        and sale note from the selected customer.

        :param part: res.partner id (falsy clears the dependent fields)
        :return: onchange dict {'value': {...}}
        """
        if not part:
            return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'payment_term': False, 'fiscal_position': False}}

        part = self.pool.get('res.partner').browse(cr, uid, part, context=context)
        addr = self.pool.get('res.partner').address_get(cr, uid, [part.id], ['delivery', 'invoice', 'contact'])
        pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False
        payment_term = part.property_payment_term and part.property_payment_term.id or False
        # Fall back on the current user when the partner has no dedicated salesman.
        dedicated_salesman = part.user_id and part.user_id.id or uid
        val = {
            'partner_invoice_id': addr['invoice'],
            'partner_shipping_id': addr['delivery'],
            'payment_term': payment_term,
            'user_id': dedicated_salesman,
        }
        # Recompute the fiscal position for the new delivery address.
        delivery_onchange = self.onchange_delivery_id(cr, uid, ids, False, part.id, addr['delivery'], False, context=context)
        val.update(delivery_onchange['value'])
        if pricelist:
            val['pricelist_id'] = pricelist
        sale_note = self.get_salenote(cr, uid, ids, part.id, context=context)
        if sale_note: val.update({'note': sale_note})
        return {'value': val}
    def create(self, cr, uid, vals, context=None):
        """Create a sale order, assigning the 'sale.order' sequence when no
        name was given and completing partner-dependent defaults that the
        caller did not provide."""
        if context is None:
            context = {}
        if vals.get('name', '/') == '/':
            vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'sale.order') or '/'
        if vals.get('partner_id') and any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id', 'fiscal_position']):
            defaults = self.onchange_partner_id(cr, uid, [], vals['partner_id'], context=context)['value']
            if not vals.get('fiscal_position') and vals.get('partner_shipping_id'):
                # NOTE(review): these positional arguments map vals['partner_id']
                # onto the delivery_id parameter and partner_shipping_id onto
                # fiscal_position of onchange_delivery_id, which looks
                # inconsistent with that method's signature -- TODO confirm
                # the intended argument order before changing anything.
                delivery_onchange = self.onchange_delivery_id(cr, uid, [], vals.get('company_id'), None, vals['partner_id'], vals.get('partner_shipping_id'), context=context)
                defaults.update(delivery_onchange['value'])
            # Explicit vals take precedence over the onchange defaults.
            vals = dict(defaults, **vals)
        # Avoid logging the automatic "created" message; we post our own below.
        ctx = dict(context or {}, mail_create_nolog=True)
        new_id = super(sale_order, self).create(cr, uid, vals, context=ctx)
        self.message_post(cr, uid, [new_id], body=_("Quotation created"), context=ctx)
        return new_id
def button_dummy(self, cr, uid, ids, context=None):
return True
# FIXME: deprecated method, overriders should be using _prepare_invoice() instead.
# can be removed after 6.1.
def _inv_get(self, cr, uid, order, context=None):
return {}
    def _prepare_invoice(self, cr, uid, order, lines, context=None):
        """Prepare the dict of values to create the new invoice for a
        sales order. This method may be overridden to implement custom
        invoice generation (making sure to call super() to establish
        a clean extension chain).

        :param browse_record order: sale.order record to invoice
        :param list(int) lines: list of invoice line IDs that must be
                                attached to the invoice
        :raise osv.except_osv: when the company has no sale journal
        :return: dict of value to create() the invoice
        """
        if context is None:
            context = {}
        # The invoice must be posted on the company's sale journal.
        journal_ids = self.pool.get('account.journal').search(cr, uid,
            [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],
            limit=1)
        if not journal_ids:
            raise osv.except_osv(_('Error!'),
                _('Please define sales journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
        invoice_vals = {
            'name': order.client_order_ref or '',
            'origin': order.name,
            'type': 'out_invoice',
            'reference': order.client_order_ref or order.name,
            'account_id': order.partner_id.property_account_receivable.id,
            'partner_id': order.partner_invoice_id.id,
            'journal_id': journal_ids[0],
            'invoice_line': [(6, 0, lines)],
            'currency_id': order.pricelist_id.currency_id.id,
            'comment': order.note,
            'payment_term': order.payment_term and order.payment_term.id or False,
            # Order's fiscal position wins; fall back on the partner's default.
            'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,
            'date_invoice': context.get('date_invoice', False),
            'company_id': order.company_id.id,
            'user_id': order.user_id and order.user_id.id or False,
            'section_id' : order.section_id.id
        }

        # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1
        invoice_vals.update(self._inv_get(cr, uid, order, context=context))
        return invoice_vals
    def _make_invoice(self, cr, uid, order, lines, context=None):
        """Create one invoice for `order` out of the given invoice line ids.

        Invoices already attached to the order that were NOT generated from
        order lines (e.g. advance invoices — presumably; confirm with the
        advance-payment wizard) get their lines copied with a negated
        price_unit so the new invoice deducts them.

        :return: id of the created account.invoice
        """
        inv_obj = self.pool.get('account.invoice')
        obj_invoice_line = self.pool.get('account.invoice.line')
        if context is None:
            context = {}
        invoiced_sale_line_ids = self.pool.get('sale.order.line').search(cr, uid, [('order_id', '=', order.id), ('invoiced', '=', True)], context=context)
        from_line_invoice_ids = []
        # Collect the invoices that were generated from order lines.
        for invoiced_sale_line_id in self.pool.get('sale.order.line').browse(cr, uid, invoiced_sale_line_ids, context=context):
            for invoice_line_id in invoiced_sale_line_id.invoice_lines:
                if invoice_line_id.invoice_id.id not in from_line_invoice_ids:
                    from_line_invoice_ids.append(invoice_line_id.invoice_id.id)
        # Deduct pre-existing, non-cancelled invoices not built from lines.
        for preinv in order.invoice_ids:
            if preinv.state not in ('cancel',) and preinv.id not in from_line_invoice_ids:
                for preline in preinv.invoice_line:
                    inv_line_id = obj_invoice_line.copy(cr, uid, preline.id, {'invoice_id': False, 'price_unit': -preline.price_unit})
                    lines.append(inv_line_id)
        inv = self._prepare_invoice(cr, uid, order, lines, context=context)
        inv_id = inv_obj.create(cr, uid, inv, context=context)
        # Recompute due dates and totals for the chosen payment term.
        data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv['payment_term'], time.strftime(DEFAULT_SERVER_DATE_FORMAT))
        if data.get('value', False):
            inv_obj.write(cr, uid, [inv_id], data['value'], context=context)
        inv_obj.button_compute(cr, uid, [inv_id])
        return inv_id
def print_quotation(self, cr, uid, ids, context=None):
'''
This function prints the sales order and mark it as sent, so that we can see more easily the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_workflow(cr, uid, ids, 'quotation_sent')
return self.pool['report'].get_action(cr, uid, ids, 'sale.report_saleorder', context=context)
def manual_invoice(self, cr, uid, ids, context=None):
""" create invoices for the given sales orders (ids), and open the form
view of one of the newly created invoices
"""
mod_obj = self.pool.get('ir.model.data')
# create invoices through the sales orders' workflow
inv_ids0 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
self.signal_workflow(cr, uid, ids, 'manual_invoice')
inv_ids1 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
# determine newly created invoices
new_inv_ids = list(inv_ids1 - inv_ids0)
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
res_id = res and res[1] or False,
return {
'name': _('Customer Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': new_inv_ids and new_inv_ids[0] or False,
}
    def action_view_invoice(self, cr, uid, ids, context=None):
        '''
        This function returns an action that display existing invoices of given sales order ids. It can either be a in a list or in a form view, if there is only one invoice to show.
        '''
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')

        # Start from the generic customer-invoices action and restrict it.
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        #compute the number of invoices to display
        inv_ids = []
        for so in self.browse(cr, uid, ids, context=context):
            inv_ids += [invoice.id for invoice in so.invoice_ids]
        #choose the view_mode accordingly
        if len(inv_ids)>1:
            # Several invoices: keep the list view, filtered on them.
            result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
        else:
            # Zero or one invoice: open the form view directly.
            res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
            result['views'] = [(res and res[1] or False, 'form')]
            result['res_id'] = inv_ids and inv_ids[0] or False
        return result
def test_no_product(self, cr, uid, order, context):
for line in order.order_line:
if line.product_id and (line.product_id.type<>'service'):
return False
return True
    def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice = False, context=None):
        """Create invoices for the given sale orders.

        :param grouped: when True, one invoice is made per invoicing address
                        covering all the given orders for that address
        :param states: order-line states that may be invoiced
                       (default: confirmed/done/exception)
        :param date_invoice: forced invoice date, propagated via context
        :raise osv.except_osv: when grouping orders of one partner that use
                               different currencies
        :return: id of the last created invoice, or the id of an existing
                 draft invoice when nothing was left to invoice
        """
        if states is None:
            states = ['confirmed', 'done', 'exception']
        res = False
        invoices = {}
        invoice_ids = []
        invoice = self.pool.get('account.invoice')
        obj_sale_order_line = self.pool.get('sale.order.line')
        partner_currency = {}
        # If date was specified, use it as date invoiced, usefull when invoices are generated this month and put the
        # last day of the last month as invoice date
        if date_invoice:
            context = dict(context or {}, date_invoice=date_invoice)
        for o in self.browse(cr, uid, ids, context=context):
            currency_id = o.pricelist_id.currency_id.id
            # Grouping orders with mixed currencies for one partner is invalid.
            if (o.partner_id.id in partner_currency) and (partner_currency[o.partner_id.id] <> currency_id):
                raise osv.except_osv(
                    _('Error!'),
                    _('You cannot group sales having different currencies for the same partner.'))

            partner_currency[o.partner_id.id] = currency_id
            lines = []
            for line in o.order_line:
                # Skip lines already invoiced or not in an invoicable state.
                if line.invoiced:
                    continue
                elif (line.state in states):
                    lines.append(line.id)
            created_lines = obj_sale_order_line.invoice_line_create(cr, uid, lines)
            if created_lines:
                # Bucket by invoicing address (fallback: the partner itself).
                invoices.setdefault(o.partner_invoice_id.id or o.partner_id.id, []).append((o, created_lines))
        if not invoices:
            # Everything already invoiced: return an existing draft invoice if any.
            for o in self.browse(cr, uid, ids, context=context):
                for i in o.invoice_ids:
                    if i.state == 'draft':
                        return i.id
        for val in invoices.values():
            if grouped:
                # One invoice for all the bucket's orders; concatenate lines.
                res = self._make_invoice(cr, uid, val[0][0], reduce(lambda x, y: x + y, [l for o, l in val], []), context=context)
                invoice_ref = ''
                origin_ref = ''
                for o, l in val:
                    invoice_ref += (o.client_order_ref or o.name) + '|'
                    origin_ref += (o.origin or o.name) + '|'
                    self.write(cr, uid, [o.id], {'state': 'progress'})
                    # Attach the shared invoice to every grouped order.
                    cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (o.id, res))
                    self.invalidate_cache(cr, uid, ['invoice_ids'], [o.id], context=context)
                #remove last '|' in invoice_ref
                if len(invoice_ref) >= 1:
                    invoice_ref = invoice_ref[:-1]
                if len(origin_ref) >= 1:
                    origin_ref = origin_ref[:-1]
                invoice.write(cr, uid, [res], {'origin': origin_ref, 'name': invoice_ref})
            else:
                # One invoice per order.
                for order, il in val:
                    res = self._make_invoice(cr, uid, order, il, context=context)
                    invoice_ids.append(res)
                    self.write(cr, uid, [order.id], {'state': 'progress'})
                    cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (order.id, res))
                    self.invalidate_cache(cr, uid, ['invoice_ids'], [order.id], context=context)
        return res
def action_invoice_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'invoice_except'}, context=context)
return True
def action_invoice_end(self, cr, uid, ids, context=None):
for this in self.browse(cr, uid, ids, context=context):
for line in this.order_line:
if line.state == 'exception':
line.write({'state': 'confirmed'})
if this.state == 'invoice_except':
this.write({'state': 'progress'})
return True
    def action_cancel(self, cr, uid, ids, context=None):
        """Cancel the sale orders and their lines.

        Draft/cancelled invoices attached to the orders are cancelled through
        their workflow; any other invoice state blocks the cancellation.

        :raise osv.except_osv: when a non-draft, non-cancelled invoice exists
        :return: True
        """
        if context is None:
            context = {}
        sale_order_line_obj = self.pool.get('sale.order.line')
        account_invoice_obj = self.pool.get('account.invoice')
        for sale in self.browse(cr, uid, ids, context=context):
            for inv in sale.invoice_ids:
                if inv.state not in ('draft', 'cancel'):
                    raise osv.except_osv(
                        _('Cannot cancel this sales order!'),
                        _('First cancel all invoices attached to this sales order.'))
                # Cancel the remaining draft invoices via their workflow.
                inv.signal_workflow('invoice_cancel')
            sale_order_line_obj.write(cr, uid, [l.id for l in sale.order_line],
                    {'state': 'cancel'})
        self.write(cr, uid, ids, {'state': 'cancel'})
        return True
def action_button_confirm(self, cr, uid, ids, context=None):
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
self.signal_workflow(cr, uid, ids, 'order_confirm')
return True
    def action_wait(self, cr, uid, ids, context=None):
        """Confirm the quotations: orders with on-demand invoicing or with
        only service lines go to 'manual' (ready to invoice), the others go
        to 'progress'. Lines are confirmed as well.

        :raise osv.except_osv: when an order has no line
        :return: True
        """
        context = context or {}
        for o in self.browse(cr, uid, ids):
            if not o.order_line:
                raise osv.except_osv(_('Error!'),_('You cannot confirm a sales order which has no line.'))
            noprod = self.test_no_product(cr, uid, o, context)
            if (o.order_policy == 'manual') or noprod:
                self.write(cr, uid, [o.id], {'state': 'manual', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
            else:
                self.write(cr, uid, [o.id], {'state': 'progress', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
            self.pool.get('sale.order.line').button_confirm(cr, uid, [x.id for x in o.order_line])
        return True
def action_quotation_send(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict()
ctx.update({
'default_model': 'sale.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def action_done(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
self.pool.get('sale.order.line').write(cr, uid, [line.id for line in order.order_line], {'state': 'done'}, context=context)
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
    def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
        """Values used to create the procurement.order of one sale order line.

        :param browse_record order: the sale order
        :param browse_record line: the order line to supply
        :param group_id: procurement.group id linking the order's procurements
        :return: dict of values for procurement.order create()
        """
        date_planned = self._get_date_planned(cr, uid, order, line, order.date_order, context=context)
        return {
            'name': line.name,
            'origin': order.name,
            'date_planned': date_planned,
            'product_id': line.product_id.id,
            'product_qty': line.product_uom_qty,
            'product_uom': line.product_uom.id,
            # Fall back on the base UoM quantities when no UoS is set.
            'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty,
            'product_uos': (line.product_uos and line.product_uos.id) or line.product_uom.id,
            'company_id': order.company_id.id,
            'group_id': group_id,
            'invoice_state': (order.order_policy == 'picking') and '2binvoiced' or 'none',
            'sale_line_id': line.id
        }
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
date_planned = datetime.strptime(start_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=line.delay or 0.0)
return date_planned
def _prepare_procurement_group(self, cr, uid, order, context=None):
return {'name': order.name, 'partner_id': order.partner_shipping_id.id}
def procurement_needed(self, cr, uid, ids, context=None):
#when sale is installed only, there is no need to create procurements, that's only
#further installed modules (sale_service, sale_stock) that will change this.
sale_line_obj = self.pool.get('sale.order.line')
res = []
for order in self.browse(cr, uid, ids, context=context):
res.append(sale_line_obj.need_procurement(cr, uid, [line.id for line in order.order_line], context=context))
return any(res)
def action_ignore_delivery_exception(self, cr, uid, ids, context=None):
for sale_order in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, ids, {'state': 'progress' if sale_order.invoice_exists else 'manual'}, context=context)
return True
def action_ship_create(self, cr, uid, ids, context=None):
"""Create the required procurements to supply sales order lines, also connecting
the procurements to appropriate stock moves in order to bring the goods to the
sales order's requested location.
:return: True
"""
procurement_obj = self.pool.get('procurement.order')
sale_line_obj = self.pool.get('sale.order.line')
for order in self.browse(cr, uid, ids, context=context):
proc_ids = []
vals = self._prepare_procurement_group(cr, uid, order, context=context)
if not order.procurement_group_id:
group_id = self.pool.get("procurement.group").create(cr, uid, vals, context=context)
order.write({'procurement_group_id': group_id}, context=context)
for line in order.order_line:
#Try to fix exception procurement (possible when after a shipping exception the user choose to recreate)
if line.procurement_ids:
#first check them to see if they are in exception or not (one of the related moves is cancelled)
procurement_obj.check(cr, uid, [x.id for x in line.procurement_ids if x.state not in ['cancel', 'done']])
line.refresh()
#run again procurement that are in exception in order to trigger another move
proc_ids += [x.id for x in line.procurement_ids if x.state in ('exception', 'cancel')]
elif sale_line_obj.need_procurement(cr, uid, [line.id], context=context):
if (line.state == 'done') or not line.product_id:
continue
vals = self._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
proc_id = procurement_obj.create(cr, uid, vals, context=context)
proc_ids.append(proc_id)
#Confirm procurement order such that rules will be applied on it
#note that the workflow normally ensure proc_ids isn't an empty list
procurement_obj.run(cr, uid, proc_ids, context=context)
#if shipping was in exception and the user choose to recreate the delivery order, write the new status of SO
if order.state == 'shipping_except':
val = {'state': 'progress', 'shipped': False}
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
order.write(val)
return True
    def onchange_fiscal_position(self, cr, uid, ids, fiscal_position, order_lines, context=None):
        '''Update taxes of order lines for each line where a product is defined

        :param list ids: not used
        :param int fiscal_position: sale order fiscal position
        :param list order_lines: command list for one2many write method
        :return: onchange dict with the rewritten order_line commands
        '''
        order_line = []
        fiscal_obj = self.pool.get('account.fiscal.position')
        product_obj = self.pool.get('product.product')
        line_obj = self.pool.get('sale.order.line')

        fpos = False
        if fiscal_position:
            fpos = fiscal_obj.browse(cr, uid, fiscal_position, context=context)

        for line in order_lines:
            # create    (0, 0,  { fields })
            # update    (1, ID, { fields })
            if line[0] in [0, 1]:
                prod = None
                if line[2].get('product_id'):
                    prod = product_obj.browse(cr, uid, line[2]['product_id'], context=context)
                elif line[1]:
                    prod = line_obj.browse(cr, uid, line[1], context=context).product_id
                if prod and prod.taxes_id:
                    # Remap the product's default taxes through the fiscal position.
                    line[2]['tax_id'] = [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]
                order_line.append(line)

            # link      (4, ID)
            # link all  (6, 0, IDS)
            elif line[0] in [4, 6]:
                line_ids = line[0] == 4 and [line[1]] or line[2]
                for line_id in line_ids:
                    prod = line_obj.browse(cr, uid, line_id, context=context).product_id
                    if prod and prod.taxes_id:
                        # Turn the link into an update carrying the remapped taxes.
                        order_line.append([1, line_id, {'tax_id': [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]}])
                    else:
                        order_line.append([4, line_id])
            else:
                # Other commands (delete/unlink/...) pass through unchanged.
                order_line.append(line)
        return {'value': {'order_line': order_line}}
def test_procurements_done(self, cr, uid, ids, context=None):
for sale in self.browse(cr, uid, ids, context=context):
for line in sale.order_line:
if not all([x.state == 'done' for x in line.procurement_ids]):
return False
return True
def test_procurements_except(self, cr, uid, ids, context=None):
for sale in self.browse(cr, uid, ids, context=context):
for line in sale.order_line:
if any([x.state == 'cancel' for x in line.procurement_ids]):
return True
return False
# TODO add a field price_unit_uos
# - update it on change product and unit price
# - use it in report if there is a uos
class sale_order_line(osv.osv):
def need_procurement(self, cr, uid, ids, context=None):
#when sale is installed only, there is no need to create procurements, that's only
#further installed modules (sale_service, sale_stock) that will change this.
prod_obj = self.pool.get('product.product')
for line in self.browse(cr, uid, ids, context=context):
if prod_obj.need_procurement(cr, uid, [line.product_id.id], context=context):
return True
return False
    def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
        """Compute each line's tax-excluded subtotal (function field
        'price_subtotal'): discounted unit price x quantity, rounded in the
        order's pricelist currency.

        :return: dict {line_id: subtotal}
        """
        tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')
        res = {}
        if context is None:
            context = {}
        for line in self.browse(cr, uid, ids, context=context):
            # Apply the line discount before tax computation.
            price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
            taxes = tax_obj.compute_all(cr, uid, line.tax_id, price, line.product_uom_qty, line.product_id, line.order_id.partner_id)
            cur = line.order_id.pricelist_id.currency_id
            res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
        return res
def _get_uom_id(self, cr, uid, *args):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
def _fnct_line_invoiced(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for this in self.browse(cr, uid, ids, context=context):
res[this.id] = this.invoice_lines and \
all(iline.invoice_id.state != 'cancel' for iline in this.invoice_lines)
return res
def _order_lines_from_invoice(self, cr, uid, ids, context=None):
# direct access to the m2m table is the less convoluted way to achieve this (and is ok ACL-wise)
cr.execute("""SELECT DISTINCT sol.id FROM sale_order_invoice_rel rel JOIN
sale_order_line sol ON (sol.order_id = rel.order_id)
WHERE rel.invoice_id = ANY(%s)""", (list(ids),))
return [i[0] for i in cr.fetchall()]
_name = 'sale.order.line'
_description = 'Sales Order Line'
_columns = {
'order_id': fields.many2one('sale.order', 'Order Reference', required=True, ondelete='cascade', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'name': fields.text('Description', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of sales order lines."),
'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], change_default=True, readonly=True, states={'draft': [('readonly', False)]}, ondelete='restrict'),
'invoice_lines': fields.many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True, copy=False),
'invoiced': fields.function(_fnct_line_invoiced, string='Invoiced', type='boolean',
store={
'account.invoice': (_order_lines_from_invoice, ['state'], 10),
'sale.order.line': (lambda self,cr,uid,ids,ctx=None: ids, ['invoice_lines'], 10)
}),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price'), readonly=True, states={'draft': [('readonly', False)]}),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'sale_order_tax', 'order_line_id', 'tax_id', 'Taxes', readonly=True, states={'draft': [('readonly', False)]}),
'address_allotment_id': fields.many2one('res.partner', 'Allotment Partner',help="A partner to whom the particular product needs to be allotted."),
'product_uom_qty': fields.float('Quantity', digits_compute= dp.get_precision('Product UoS'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Unit of Measure ', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Quantity (UoS)' ,digits_compute= dp.get_precision('Product UoS'), readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS'),
'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount'), readonly=True, states={'draft': [('readonly', False)]}),
'th_weight': fields.float('Weight', readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection(
[('cancel', 'Cancelled'),('draft', 'Draft'),('confirmed', 'Confirmed'),('exception', 'Exception'),('done', 'Done')],
'Status', required=True, readonly=True, copy=False,
help='* The \'Draft\' status is set when the related sales order in draft status. \
\n* The \'Confirmed\' status is set when the related sales order is confirmed. \
\n* The \'Exception\' status is set when the related sales order is set as exception. \
\n* The \'Done\' status is set when the sales order line has been picked. \
\n* The \'Cancelled\' status is set when a user cancel the sales order related.'),
'order_partner_id': fields.related('order_id', 'partner_id', type='many2one', relation='res.partner', store=True, string='Customer'),
'salesman_id':fields.related('order_id', 'user_id', type='many2one', relation='res.users', store=True, string='Salesperson'),
'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'delay': fields.float('Delivery Lead Time', required=True, help="Number of days between the order confirmation and the shipping of the products to the customer", readonly=True, states={'draft': [('readonly', False)]}),
'procurement_ids': fields.one2many('procurement.order', 'sale_line_id', 'Procurements'),
}
_order = 'order_id desc, sequence, id'
_defaults = {
'product_uom' : _get_uom_id,
'discount': 0.0,
'product_uom_qty': 1,
'product_uos_qty': 1,
'sequence': 10,
'state': 'draft',
'price_unit': 0.0,
'delay': 0.0,
}
def _get_line_qty(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos_qty or 0.0
return line.product_uom_qty
def _get_line_uom(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos.id
return line.product_uom.id
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Prepare the dict of values to create the new invoice line for a
sales order line. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record line: sale.order.line record to invoice
:param int account_id: optional ID of a G/L account to force
(this is used for returning products including service)
:return: dict of values to create() the invoice line
"""
res = {}
if not line.invoiced:
if not account_id:
if line.product_id:
account_id = line.product_id.property_account_income.id
if not account_id:
account_id = line.product_id.categ_id.property_account_income_categ.id
if not account_id:
raise osv.except_osv(_('Error!'),
_('Please define income account for this product: "%s" (id:%d).') % \
(line.product_id.name, line.product_id.id,))
else:
prop = self.pool.get('ir.property').get(cr, uid,
'property_account_income_categ', 'product.category',
context=context)
account_id = prop and prop.id or False
uosqty = self._get_line_qty(cr, uid, line, context=context)
uos_id = self._get_line_uom(cr, uid, line, context=context)
pu = 0.0
if uosqty:
pu = round(line.price_unit * line.product_uom_qty / uosqty,
self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Price'))
fpos = line.order_id.fiscal_position or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, account_id)
if not account_id:
raise osv.except_osv(_('Error!'),
_('There is no Fiscal Position defined or Income category account defined for default properties of Product categories.'))
res = {
'name': line.name,
'sequence': line.sequence,
'origin': line.order_id.name,
'account_id': account_id,
'price_unit': pu,
'quantity': uosqty,
'discount': line.discount,
'uos_id': uos_id,
'product_id': line.product_id.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in line.tax_id])],
'account_analytic_id': line.order_id.project_id and line.order_id.project_id.id or False,
}
return res
def invoice_line_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
create_ids = []
sales = set()
for line in self.browse(cr, uid, ids, context=context):
vals = self._prepare_order_line_invoice_line(cr, uid, line, False, context)
if vals:
inv_id = self.pool.get('account.invoice.line').create(cr, uid, vals, context=context)
self.write(cr, uid, [line.id], {'invoice_lines': [(4, inv_id)]}, context=context)
sales.add(line.order_id.id)
create_ids.append(inv_id)
# Trigger workflow events
for sale_id in sales:
workflow.trg_write(uid, 'sale.order', sale_id, cr)
return create_ids
def button_cancel(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.invoiced:
raise osv.except_osv(_('Invalid Action!'), _('You cannot cancel a sales order line that has already been invoiced.'))
return self.write(cr, uid, ids, {'state': 'cancel'})
def button_confirm(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'confirmed'})
def button_done(self, cr, uid, ids, context=None):
res = self.write(cr, uid, ids, {'state': 'done'})
for line in self.browse(cr, uid, ids, context=context):
workflow.trg_write(uid, 'sale.order', line.order_id.id, cr)
return res
def uos_change(self, cr, uid, ids, product_uos, product_uos_qty=0, product_id=None):
product_obj = self.pool.get('product.product')
if not product_id:
return {'value': {'product_uom': product_uos,
'product_uom_qty': product_uos_qty}, 'domain': {}}
product = product_obj.browse(cr, uid, product_id)
value = {
'product_uom': product.uom_id.id,
}
# FIXME must depend on uos/uom of the product and not only of the coeff.
try:
value.update({
'product_uom_qty': product_uos_qty / product.uos_coeff,
'th_weight': product_uos_qty / product.uos_coeff * product.weight
})
except ZeroDivisionError:
pass
return {'value': value}
def create(self, cr, uid, values, context=None):
if values.get('order_id') and values.get('product_id') and any(f not in values for f in ['name', 'price_unit', 'type', 'product_uom_qty', 'product_uom']):
order = self.pool['sale.order'].read(cr, uid, values['order_id'], ['pricelist_id', 'partner_id', 'date_order', 'fiscal_position'], context=context)
defaults = self.product_id_change(cr, uid, [], order['pricelist_id'][0], values['product_id'],
qty=float(values.get('product_uom_qty', False)),
uom=values.get('product_uom', False),
qty_uos=float(values.get('product_uos_qty', False)),
uos=values.get('product_uos', False),
name=values.get('name', False),
partner_id=order['partner_id'][0],
date_order=order['date_order'],
fiscal_position=order['fiscal_position'][0] if order['fiscal_position'] else False,
flag=False, # Force name update
context=context
)['value']
if defaults.get('tax_id'):
defaults['tax_id'] = [[6, 0, defaults['tax_id']]]
values = dict(defaults, **values)
return super(sale_order_line, self).create(cr, uid, values, context=context)
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
if context is None:
context = {}
Partner = self.pool['res.partner']
ProductUom = self.pool['product.uom']
Product = self.pool['product.product']
ctx_product = dict(context)
partner = False
if partner_id:
partner = Partner.browse(cr, uid, partner_id, context=context)
ctx_product['lang'] = partner.lang
ctx_product['partner_id'] = partner_id
elif lang:
ctx_product['lang'] = lang
if not product:
return {'value': {'th_weight': 0,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
if not date_order:
date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
result = {}
product_obj = Product.browse(cr, uid, product, context=ctx_product)
uom2 = False
if uom:
uom2 = ProductUom.browse(cr, uid, uom, context=context)
if product_obj.uom_id.category_id.id != uom2.category_id.id:
uom = False
if uos:
if product_obj.uos_id:
uos2 = ProductUom.browse(cr, uid, uos, context=context)
if product_obj.uos_id.category_id.id != uos2.category_id.id:
uos = False
else:
uos = False
fpos = False
if not fiscal_position:
fpos = partner and partner.property_account_position or False
else:
fpos = self.pool['account.fiscal.position'].browse(cr, uid, fiscal_position)
if update_tax: # The quantity only have changed
result['tax_id'] = self.pool['account.fiscal.position'].map_tax(cr, uid, fpos, product_obj.taxes_id)
if not flag:
result['name'] = Product.name_get(cr, uid, [product_obj.id], context=ctx_product)[0][1]
if product_obj.description_sale:
result['name'] += '\n'+product_obj.description_sale
domain = {}
if (not uom) and (not uos):
result['product_uom'] = product_obj.uom_id.id
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
uos_category_id = product_obj.uos_id.category_id.id
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
uos_category_id = False
result['th_weight'] = qty * product_obj.weight
domain = {'product_uom':
[('category_id', '=', product_obj.uom_id.category_id.id)],
'product_uos':
[('category_id', '=', uos_category_id)]}
elif uos and not uom: # only happens if uom is False
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id
result['product_uom_qty'] = qty_uos / product_obj.uos_coeff
result['th_weight'] = result['product_uom_qty'] * product_obj.weight
elif uom: # whether uos is set or not
default_uom = product_obj.uom_id and product_obj.uom_id.id
q = ProductUom._compute_qty(cr, uid, uom, qty, default_uom)
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
result['th_weight'] = q * product_obj.weight # Round the quantity up
if not uom2:
uom2 = product_obj.uom_id
if pricelist and partner_id:
price = self.pool['product.pricelist'].price_get(cr, uid, [pricelist],
product, qty or 1.0, partner_id, {
'uom': uom or result.get('product_uom'),
'date': date_order,
})[pricelist]
else:
price = Product.price_get(cr, uid, [product], ptype='list_price', context=ctx_product)[product] or False
result.update({'price_unit': price})
return {'value': result, 'domain': domain}
def product_uom_change(self, cursor, user, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, context=None):
context = context or {}
lang = lang or ('lang' in context and context['lang'])
if not uom:
return {'value': {'price_unit': 0.0, 'product_uom' : uom or False}}
return self.product_id_change(cursor, user, ids, pricelist, product,
qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name,
partner_id=partner_id, lang=lang, update_tax=update_tax,
date_order=date_order, context=context)
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
"""Allows to delete sales order lines in draft,cancel states"""
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a sales order line which is in state \'%s\'.') %(rec.state,))
return super(sale_order_line, self).unlink(cr, uid, ids, context=context)
class mail_compose_message(osv.Model):
    """Hook into the mail composer to advance the sale workflow when a
    quotation is emailed to the customer."""
    _inherit = 'mail.compose.message'

    def send_mail(self, cr, uid, ids, context=None):
        """Fire 'quotation_sent' on the related sale order (when the composer
        was opened on one with the mark_so_as_sent flag), then send as usual."""
        context = context or {}
        res_id = context.get('default_res_id')
        sending_quotation = (context.get('default_model') == 'sale.order'
                             and res_id
                             and context.get('mark_so_as_sent'))
        if sending_quotation:
            # Auto-subscribe the recipient to the order thread.
            context = dict(context, mail_post_autofollow=True)
            self.pool.get('sale.order').signal_workflow(cr, uid, [res_id], 'quotation_sent')
        return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
    """Extend invoices with a Sales Team and with sale-workflow integration
    (payment notification and cancel-on-delete)."""
    _inherit = 'account.invoice'

    def _get_default_section_id(self, cr, uid, context=None):
        """ Gives default section by checking if present in the context """
        section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
        if not section_id:
            # Fall back to the current user's default sales team.
            section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
        return section_id

    def _resolve_section_id_from_context(self, cr, uid, context=None):
        """ Returns ID of section based on the value of 'section_id'
            context key, or None if it cannot be resolved to a single
            Sales Team.
        """
        if context is None:
            context = {}
        if type(context.get('default_section_id')) in (int, long):
            return context.get('default_section_id')
        if isinstance(context.get('default_section_id'), basestring):
            # A name was passed: resolve it, but only if it is unambiguous.
            section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
            if len(section_ids) == 1:
                return int(section_ids[0][0])
        return None

    _columns = {
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
    _defaults = {
        'section_id': lambda self, cr, uid, c=None: self._get_default_section_id(cr, uid, context=c)
    }

    def confirm_paid(self, cr, uid, ids, context=None):
        """On payment confirmation, log an 'Invoice paid' message on every
        sale order linked to the paid invoices."""
        sale_order_obj = self.pool.get('sale.order')
        res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
        so_ids = sale_order_obj.search(cr, uid, [('invoice_ids', 'in', ids)], context=context)
        for so_id in so_ids:
            sale_order_obj.message_post(cr, uid, so_id, body=_("Invoice paid"), context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        """ Overwrite unlink method of account invoice to send a trigger to the sale workflow upon invoice deletion """
        invoice_ids = self.search(cr, uid, [('id', 'in', ids), ('state', 'in', ['draft', 'cancel'])], context=context)
        #if we can't cancel all invoices, do nothing
        if len(invoice_ids) == len(ids):
            #Cancel invoice(s) first before deleting them so that if any sale order is associated with them
            #it will trigger the workflow to put the sale order in an 'invoice exception' state
            for id in ids:
                workflow.trg_validate(uid, 'account.invoice', id, 'invoice_cancel', cr)
        return super(account_invoice, self).unlink(cr, uid, ids, context=context)
class procurement_order(osv.osv):
    """Link procurements back to the sale order line that generated them, and
    propagate procurement state changes to the sale order workflow."""
    _inherit = 'procurement.order'
    _columns = {
        'sale_line_id': fields.many2one('sale.order.line', string='Sale Order Line'),
    }

    def write(self, cr, uid, ids, vals, context=None):
        """Standard write, plus: when a procurement reaches a terminal state
        ('done', 'cancel' or 'exception'), re-evaluate the related sale
        order(s) and fire 'ship_end' / 'ship_except' workflow signals.

        :param vals: values being written; only 'state' is inspected here
        :return: result of the parent write()
        """
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = super(procurement_order, self).write(cr, uid, ids, vals, context=context)
        # NOTE: the redundant in-function `from openerp import workflow` was
        # removed; `workflow` is already imported at module level (it is used
        # directly by the other methods of this module).
        if vals.get('state') in ['done', 'cancel', 'exception']:
            # Hoisted out of the loop: the model proxy never changes.
            sale_order_obj = self.pool.get('sale.order')
            for proc in self.browse(cr, uid, ids, context=context):
                if proc.sale_line_id and proc.sale_line_id.order_id:
                    order_id = proc.sale_line_id.order_id.id
                    if sale_order_obj.test_procurements_done(cr, uid, [order_id], context=context):
                        workflow.trg_validate(uid, 'sale.order', order_id, 'ship_end', cr)
                    if sale_order_obj.test_procurements_except(cr, uid, [order_id], context=context):
                        workflow.trg_validate(uid, 'sale.order', order_id, 'ship_except', cr)
        return res
class product_product(osv.Model):
    """Add a '# Sales' counter on product variants."""
    _inherit = 'product.product'

    def _sales_count(self, cr, uid, ids, field_name, arg, context=None):
        """Function field: number of sale order lines referencing each product."""
        line_obj = self.pool['sale.order.line']
        counts = {}
        for product_id in ids:
            counts[product_id] = line_obj.search_count(
                cr, uid, [('product_id', '=', product_id)], context=context)
        return counts

    _columns = {
        'sales_count': fields.function(_sales_count, string='# Sales', type='integer'),
    }
class product_template(osv.Model):
    """Aggregate the per-variant sales counter on the template and provide an
    action to browse the related sale order lines."""
    _inherit = 'product.template'

    def _sales_count(self, cr, uid, ids, field_name, arg, context=None):
        """Function field: total sales count over all variants of each template."""
        res = dict.fromkeys(ids, 0)
        for template in self.browse(cr, uid, ids, context=context):
            # Generator instead of a throwaway list inside sum().
            res[template.id] = sum(p.sales_count for p in template.product_variant_ids)
        return res

    def action_view_sales(self, cr, uid, ids, context=None):
        """Return the 'sale order lines by product' action, filtered to the
        variants of the selected templates."""
        act_obj = self.pool.get('ir.actions.act_window')
        mod_obj = self.pool.get('ir.model.data')
        product_ids = []
        for template in self.browse(cr, uid, ids, context=context):
            product_ids.extend(x.id for x in template.product_variant_ids)
        result = mod_obj.xmlid_to_res_id(cr, uid, 'sale.action_order_line_product_tree',raise_if_not_found=True)
        result = act_obj.read(cr, uid, [result], context=context)[0]
        # Domain is stored as a string; restrict it to the collected variants.
        result['domain'] = "[('product_id','in',[" + ','.join(map(str, product_ids)) + "])]"
        return result

    _columns = {
        'sales_count': fields.function(_sales_count, string='# Sales', type='integer'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#include "mt19937.h"
#include "mt19937-jump.h"
/*
 * Seed the Mersenne Twister state from a single 32-bit value using Knuth's
 * linear-congruential recurrence, as in the MT reference implementation.
 * Marks the output pool exhausted so the first draw regenerates it.
 */
void mt19937_seed(mt19937_state *state, uint32_t seed) {
  int i;

  seed &= 0xffffffffUL;
  for (i = 0; i < RK_STATE_LEN; i++) {
    state->key[i] = seed;
    /* Knuth's PRNG as used in the Mersenne Twister reference implementation */
    seed = (1812433253UL * (seed ^ (seed >> 30)) + i + 1) & 0xffffffffUL;
  }
  state->pos = RK_STATE_LEN;
}
/* initializes mt[RK_STATE_LEN] with a seed */
/* initializes mt[RK_STATE_LEN] with a seed */
/* This is the upstream MT19937 `init_genrand`; it differs from mt19937_seed
 * above in the `+ mti` term (vs `+ pos + 1`) and in where the word is stored
 * relative to the recurrence step. */
static void init_genrand(mt19937_state *state, uint32_t s) {
  int mti;
  uint32_t *mt = state->key;

  mt[0] = s & 0xffffffffUL;
  for (mti = 1; mti < RK_STATE_LEN; mti++) {
    /*
     * See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier.
     * In the previous versions, MSBs of the seed affect
     * only MSBs of the array mt[].
     * 2002/01/09 modified by Makoto Matsumoto
     */
    mt[mti] = (1812433253UL * (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + mti);
    /* for > 32 bit machines */
    mt[mti] &= 0xffffffffUL;
  }
  /* mti == RK_STATE_LEN here: pool is marked exhausted. */
  state->pos = mti;
  return;
}
/*
* initialize by an array with array-length
* init_key is the array for initializing keys
* key_length is its length
*/
/*
 * initialize by an array with array-length
 * init_key is the array for initializing keys
 * key_length is its length
 *
 * Follows the reference MT19937 `init_by_array`: seed with a fixed constant,
 * then run two mixing passes over the state — the first folds in the caller's
 * key material, the second decouples neighbouring words.
 */
void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key,
                           int key_length) {
  /* was signed in the original code. RDH 12/16/2002 */
  int i = 1;
  int j = 0;
  uint32_t *mt = state->key;
  int k;

  init_genrand(state, 19650218UL);
  /* First pass: long enough to visit every state word and consume the whole
   * key at least once, whichever is larger. */
  k = (RK_STATE_LEN > key_length ? RK_STATE_LEN : key_length);
  for (; k; k--) {
    /* non linear */
    mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL)) +
            init_key[j] + j;
    /* for > 32 bit machines */
    mt[i] &= 0xffffffffUL;
    i++;
    j++;
    if (i >= RK_STATE_LEN) {
      /* Wrap around, carrying the last word into slot 0. */
      mt[0] = mt[RK_STATE_LEN - 1];
      i = 1;
    }
    if (j >= key_length) {
      j = 0;
    }
  }
  /* Second pass: one more sweep of the state, without key material. */
  for (k = RK_STATE_LEN - 1; k; k--) {
    mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1566083941UL)) -
            i;             /* non linear */
    mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
    i++;
    if (i >= RK_STATE_LEN) {
      mt[0] = mt[RK_STATE_LEN - 1];
      i = 1;
    }
  }

  mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */
}
/*
 * Regenerate the whole state array in place (the MT19937 "twist").
 * Each new word combines the UPPER_MASK bit of key[i] with the LOWER_MASK
 * bits of key[i + 1], shifts right, and conditionally XORs MATRIX_A:
 * -(y & 1) is an all-ones mask exactly when y is odd.  The three loops/lines
 * only differ in how the i + _MT19937_M offset wraps around the array.
 */
void mt19937_gen(mt19937_state *state) {
  uint32_t y;
  int i;

  for (i = 0; i < _MT19937_N - _MT19937_M; i++) {
    y = (state->key[i] & UPPER_MASK) | (state->key[i + 1] & LOWER_MASK);
    state->key[i] = state->key[i + _MT19937_M] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);
  }
  for (; i < _MT19937_N - 1; i++) {
    y = (state->key[i] & UPPER_MASK) | (state->key[i + 1] & LOWER_MASK);
    state->key[i] = state->key[i + (_MT19937_M - _MT19937_N)] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);
  }
  /* Last word wraps to key[0]. */
  y = (state->key[_MT19937_N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK);
  state->key[_MT19937_N - 1] = state->key[_MT19937_M - 1] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);

  /* Fresh pool: next draw starts at key[0]. */
  state->pos = 0;
}
/* Emit out-of-line instantiations of the inline draw helpers (presumably
 * defined in mt19937.h) so other translation units can link against them —
 * standard C99 `extern inline` usage. */
extern inline uint64_t mt19937_next64(mt19937_state *state);
extern inline uint32_t mt19937_next32(mt19937_state *state);
extern inline double mt19937_next_double(mt19937_state *state);

/* Advance the state by a fixed large jump; the actual polynomial arithmetic
 * lives in mt19937-jump.c (see mt19937-jump.h). */
void mt19937_jump(mt19937_state *state) { mt19937_jump_state(state); }
package client
import (
"context"
"github.com/moby/moby/api/types/network"
)
// NetworkConnectOptions represents the data to be used to connect a container to the
// network.
type NetworkConnectOptions struct {
	// Container is the identifier of the container to connect.
	Container string
	// EndpointConfig optionally configures the endpoint created on the network.
	EndpointConfig *network.EndpointSettings
}

// NetworkConnectResult represents the result of a NetworkConnect operation.
type NetworkConnectResult struct {
	// Currently empty; placeholder for future fields.
}
// NetworkConnect connects a container to an existent network in the docker host.
func (cli *Client) NetworkConnect(ctx context.Context, networkID string, options NetworkConnectOptions) (NetworkConnectResult, error) {
networkID, err := trimID("network", networkID)
if err != nil {
return NetworkConnectResult{}, err
}
containerID, err := trimID("container", options.Container)
if err != nil {
return NetworkConnectResult{}, err
}
nc := network.ConnectRequest{
Container: containerID,
EndpointConfig: options.EndpointConfig,
}
resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
defer ensureReaderClosed(resp)
return NetworkConnectResult{}, err
} | go | github | https://github.com/moby/moby | client/network_connect.go |
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import EtherscanLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps the legacy name exported here to the package that now provides it.
DEPRECATED_LOOKUP = {"EtherscanLoader": "langchain_community.document_loaders"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): delegates to the
    deprecation-aware importer built above.
    """
    return _import_attribute(name)


__all__ = [
    "EtherscanLoader",
]
#ifndef DATE_TIME_LOCALE_CONFIG_HPP___
#define DATE_TIME_LOCALE_CONFIG_HPP___
/* Copyright (c) 2002-2020 CrystalClear Software, Inc.
* Use, modification and distribution is subject to the
* Boost Software License, Version 1.0. (See accompanying
* file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
* Author: Jeff Garland
* $Date$
*/
// This file configures whether the library will support locales and hence
// iostream based i/o. Even if a compiler has some support for locales,
// any failure to be compatible gets the compiler on the exclusion list.
//
// At the moment this is defined for MSVC 6 and any compiler that
// defines BOOST_NO_STD_LOCALE (gcc 2.95.x)
#include "boost/config.hpp" //sets BOOST_NO_STD_LOCALE
#include "boost/config/workaround.hpp"

//This file basically becomes a noop if locales are not properly supported
#if (defined(BOOST_NO_STD_LOCALE) \
 || (BOOST_WORKAROUND( BOOST_MSVC, < 1300)) \
 || (BOOST_WORKAROUND( BOOST_BORLANDC, BOOST_TESTED_AT( 0x581 )) ) \
 || (BOOST_WORKAROUND( BOOST_XLCPP_ZOS, BOOST_TESTED_AT( 0x42010000 )) ) /* <cctype> "shadows" the locale enabled overloads from <locale> */ \
)
// Downstream date_time headers test this macro to disable locale/iostream i/o.
#define BOOST_DATE_TIME_NO_LOCALE
#endif
#endif | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/date_time/locale_config.hpp |
#ifndef NUMPY_CORE_SRC_COMMON_NPY_NUMPYOS_H_
#define NUMPY_CORE_SRC_COMMON_NPY_NUMPYOS_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Locale-independent ("C"-locale / ASCII) formatting of floating-point
 * values into `buffer` (at most `buf_size` bytes) according to `format`;
 * the `decimal` flag's exact semantics are defined in the implementation. */
NPY_NO_EXPORT char*
NumPyOS_ascii_formatd(char *buffer, size_t buf_size,
                      const char *format,
                      double val, int decimal);

NPY_NO_EXPORT char*
NumPyOS_ascii_formatf(char *buffer, size_t buf_size,
                      const char *format,
                      float val, int decimal);

NPY_NO_EXPORT char*
NumPyOS_ascii_formatl(char *buffer, size_t buf_size,
                      const char *format,
                      long double val, int decimal);

/* Locale-independent string-to-float parsing (strtod/strtold analogues). */
NPY_NO_EXPORT double
NumPyOS_ascii_strtod(const char *s, char** endptr);

NPY_NO_EXPORT long double
NumPyOS_ascii_strtold(const char *s, char** endptr);

/* Read one floating-point value directly from a FILE stream. */
NPY_NO_EXPORT int
NumPyOS_ascii_ftolf(FILE *fp, double *value);

NPY_NO_EXPORT int
NumPyOS_ascii_ftoLf(FILE *fp, long double *value);

/* ASCII-only analogues of the <ctype.h> character classification helpers
 * (unaffected by the current locale). */
NPY_NO_EXPORT int
NumPyOS_ascii_isspace(int c);

NPY_NO_EXPORT int
NumPyOS_ascii_isalpha(char c);

NPY_NO_EXPORT int
NumPyOS_ascii_isdigit(char c);

NPY_NO_EXPORT int
NumPyOS_ascii_isalnum(char c);

NPY_NO_EXPORT int
NumPyOS_ascii_islower(char c);

NPY_NO_EXPORT int
NumPyOS_ascii_isupper(char c);

NPY_NO_EXPORT int
NumPyOS_ascii_tolower(int c);

/* Convert a string to an int in an arbitrary base */
NPY_NO_EXPORT npy_longlong
NumPyOS_strtoll(const char *str, char **endptr, int base);

/* Convert a string to an int in an arbitrary base */
NPY_NO_EXPORT npy_ulonglong
NumPyOS_strtoull(const char *str, char **endptr, int base);
#ifdef __cplusplus
}
#endif
#endif /* NUMPY_CORE_SRC_COMMON_NPY_NUMPYOS_H_ */ | c | github | https://github.com/numpy/numpy | numpy/_core/src/common/numpyos.h |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Wishart."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
distributions = distributions_lib
def make_pd(start, n):
    """Deterministically create an n x n positive definite matrix.

    Builds the lower-triangular part of a circulant matrix of
    arange(start, start + n) and returns L @ L.T, which is PD by construction.
    """
    lower = np.tril(linalg.circulant(np.arange(start, start + n)))
    return lower.dot(lower.T)
def chol(x):
    """Return the lower-triangular Cholesky factor L of x (x = L @ L.T)."""
    # scipy.linalg.cholesky returns the upper factor by default; transpose it.
    upper = linalg.cholesky(x)
    return upper.T
def wishart_var(df, x):
    """Compute Wishart variance for numpy scale matrix.

    With s = sqrt(df) * x, returns s**2 + outer(diag(s), diag(s)).
    """
    scaled = np.sqrt(df) * np.asarray(x)
    diag_col = np.diag(scaled)[:, np.newaxis]
    return scaled**2 + np.dot(diag_col, diag_col.T)
class WishartCholeskyTest(test.TestCase):
  def testEntropy(self):
    """Entropy matches hard-coded scipy.stats.wishart reference values."""
    with self.test_session():
      scale = make_pd(1., 2)
      df = 4
      w = distributions.WishartCholesky(df, chol(scale))
      # sp.stats.wishart(df=4, scale=make_pd(1., 2)).entropy()
      self.assertAllClose(6.301387092430769, w.entropy().eval())

      # Scalar (1x1) case.
      w = distributions.WishartCholesky(df=1, scale=[[1.]])
      # sp.stats.wishart(df=1,scale=1).entropy()
      self.assertAllClose(0.78375711047393404, w.entropy().eval())
  def testMeanLogDetAndLogNormalizingConstant(self):
    """Checks the identity
    entropy == log_normalization - (df - dim - 1)/2 * mean_log_det + df*dim/2,
    tying three distribution statistics together."""
    with self.test_session():

      def entropy_alt(w):
        # Alternative entropy formula built from the other statistics.
        return (
            w.log_normalization()
            - 0.5 * (w.df - w.dimension - 1.) * w.mean_log_det()
            + 0.5 * w.df * w.dimension).eval()

      w = distributions.WishartCholesky(df=4,
                                        scale=chol(make_pd(1., 2)))
      self.assertAllClose(w.entropy().eval(), entropy_alt(w))

      w = distributions.WishartCholesky(df=5, scale=[[1.]])
      self.assertAllClose(w.entropy().eval(), entropy_alt(w))
  def testMean(self):
    """Mean of Wishart(df, scale) is df * scale."""
    with self.test_session():
      scale = make_pd(1., 2)
      df = 4
      w = distributions.WishartCholesky(df, chol(scale))
      self.assertAllEqual(df * scale, w.mean().eval())
  def testMode(self):
    """Mode of Wishart(df, scale) is (df - dim - 1) * scale (dim == 2 here)."""
    with self.test_session():
      scale = make_pd(1., 2)
      df = 4
      w = distributions.WishartCholesky(df, chol(scale))
      self.assertAllEqual((df - 2. - 1.) * scale, w.mode().eval())
  def testStd(self):
    """stddev() equals the Cholesky factor of the elementwise variance."""
    with self.test_session():
      scale = make_pd(1., 2)
      df = 4
      w = distributions.WishartCholesky(df, chol(scale))
      self.assertAllEqual(chol(wishart_var(df, scale)), w.stddev().eval())
  def testVariance(self):
    """variance() matches the numpy reference wishart_var(df, scale)."""
    with self.test_session():
      scale = make_pd(1., 2)
      df = 4
      w = distributions.WishartCholesky(df, chol(scale))
      self.assertAllEqual(wishart_var(df, scale), w.variance().eval())
  def testSample(self):
    """Sampling: WishartCholesky and WishartFull agree under a fixed seed,
    cholesky_input_output_matrices round-trips, sampled factors have positive
    diagonals, and empirical moments match mean()/variance()."""
    with self.test_session():
      scale = make_pd(1., 2)
      df = 4

      chol_w = distributions.WishartCholesky(
          df, chol(scale), cholesky_input_output_matrices=False)

      x = chol_w.sample(1, seed=42).eval()
      chol_x = [chol(x[0])]

      # Same seed through the full-matrix parameterization must reproduce x.
      full_w = distributions.WishartFull(
          df, scale, cholesky_input_output_matrices=False)
      self.assertAllClose(x, full_w.sample(1, seed=42).eval())

      chol_w_chol = distributions.WishartCholesky(
          df, chol(scale), cholesky_input_output_matrices=True)
      self.assertAllClose(chol_x, chol_w_chol.sample(1, seed=42).eval())
      # Cholesky factors of PD samples must have a strictly positive diagonal.
      eigen_values = array_ops.matrix_diag_part(
          chol_w_chol.sample(
              1000, seed=42))
      np.testing.assert_array_less(0., eigen_values.eval())

      full_w_chol = distributions.WishartFull(
          df, scale, cholesky_input_output_matrices=True)
      self.assertAllClose(chol_x, full_w_chol.sample(1, seed=42).eval())
      eigen_values = array_ops.matrix_diag_part(
          full_w_chol.sample(
              1000, seed=42))
      np.testing.assert_array_less(0., eigen_values.eval())

      # Check first and second moments.
      df = 4.
      chol_w = distributions.WishartCholesky(
          df=df,
          scale=chol(make_pd(1., 3)),
          cholesky_input_output_matrices=False)
      x = chol_w.sample(10000, seed=42)
      self.assertAllEqual((10000, 3, 3), x.get_shape())

      moment1_estimate = math_ops.reduce_mean(x, reduction_indices=[0]).eval()
      self.assertAllClose(chol_w.mean().eval(), moment1_estimate, rtol=0.05)

      # The Variance estimate uses the squares rather than outer-products
      # because Wishart.Variance is the diagonal of the Wishart covariance
      # matrix.
      variance_estimate = (math_ops.reduce_mean(
          math_ops.square(x), reduction_indices=[0]) -
                           math_ops.square(moment1_estimate)).eval()
      self.assertAllClose(
          chol_w.variance().eval(), variance_estimate, rtol=0.05)
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
with self.test_session():
df = 4.
n_val = 100
random_seed.set_random_seed(654321)
chol_w1 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart1")
samples1 = chol_w1.sample(n_val, seed=123456).eval()
random_seed.set_random_seed(654321)
chol_w2 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart2")
samples2 = chol_w2.sample(n_val, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testProb(self):
with self.test_session():
# Generate some positive definite (pd) matrices and their Cholesky
# factorizations.
x = np.array(
[make_pd(1., 2), make_pd(2., 2), make_pd(3., 2), make_pd(4., 2)])
chol_x = np.array([chol(x[0]), chol(x[1]), chol(x[2]), chol(x[3])])
# Since Wishart wasn"t added to SciPy until 0.16, we'll spot check some
# pdfs with hard-coded results from upstream SciPy.
log_prob_df_seq = np.array([
# math.log(stats.wishart.pdf(x[0], df=2+0, scale=x[0]))
-3.5310242469692907,
# math.log(stats.wishart.pdf(x[1], df=2+1, scale=x[1]))
-7.689907330328961,
# math.log(stats.wishart.pdf(x[2], df=2+2, scale=x[2]))
-10.815845159537895,
# math.log(stats.wishart.pdf(x[3], df=2+3, scale=x[3]))
-13.640549882916691,
])
# This test checks that batches don't interfere with correctness.
w = distributions.WishartCholesky(
df=[2, 3, 4, 5],
scale=chol_x,
cholesky_input_output_matrices=True)
self.assertAllClose(log_prob_df_seq, w.log_prob(chol_x).eval())
# Now we test various constructions of Wishart with different sample
# shape.
log_prob = np.array([
# math.log(stats.wishart.pdf(x[0], df=4, scale=x[0]))
-4.224171427529236,
# math.log(stats.wishart.pdf(x[1], df=4, scale=x[0]))
-6.3378770664093453,
# math.log(stats.wishart.pdf(x[2], df=4, scale=x[0]))
-12.026946850193017,
# math.log(stats.wishart.pdf(x[3], df=4, scale=x[0]))
-20.951582705289454,
])
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=False),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=False)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=True),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=True)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(chol_x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(chol_x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
def testBatchShape(self):
with self.test_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([], w.batch_shape)
self.assertAllEqual([], w.batch_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2], w.batch_shape)
self.assertAllEqual([2], w.batch_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[], sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2],
sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testEventShape(self):
with self.test_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testValidateArgs(self):
with self.test_session() as sess:
df_deferred = array_ops.placeholder(dtypes.float32)
chol_scale_deferred = array_ops.placeholder(dtypes.float32)
x = make_pd(1., 3)
chol_scale = chol(x)
# Check expensive, deferred assertions.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"cannot be less than"):
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=True)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 2.,
chol_scale_deferred: chol_scale})
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful"):
chol_w = distributions.WishartFull(
df=df_deferred, scale=chol_scale_deferred)
# np.ones((3, 3)) is not positive, definite.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={
df_deferred: 4.,
chol_scale_deferred: np.ones(
(3, 3), dtype=np.float32)
})
# Ensure no assertions.
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=False)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: chol_scale})
# Bogus log_prob, but since we have no checks running... c"est la vie.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: np.ones((3, 3))})
# Still has these assertions because they're resolveable at graph
# construction
with self.assertRaisesRegexp(ValueError, "cannot be less than"):
chol_w = distributions.WishartCholesky(
df=2, scale=chol_scale, validate_args=False)
with self.assertRaisesRegexp(TypeError, "not a floating-point type"):
chol_w = distributions.WishartCholesky(
df=4.,
scale=np.asarray(
chol_scale, dtype=np.int32),
validate_args=False)
if __name__ == "__main__":
test.main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for pivx utils.
Runs automatically during `make check`.
Can also be run manually."""
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
# That stderr is empty if no errors are expected. However, pivx-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package planfile
import (
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/collections"
"github.com/hashicorp/terraform/internal/configs/configload"
"github.com/hashicorp/terraform/internal/depsfile"
"github.com/hashicorp/terraform/internal/getproviders/providerreqs"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/states/statefile"
tfversion "github.com/hashicorp/terraform/version"
)
func TestRoundtrip(t *testing.T) {
fixtureDir := filepath.Join("testdata", "test-config")
loader, err := configload.NewLoader(&configload.Config{
ModulesDir: filepath.Join(fixtureDir, ".terraform", "modules"),
})
if err != nil {
t.Fatal(err)
}
_, snapIn, diags := loader.LoadConfigWithSnapshot(fixtureDir)
if diags.HasErrors() {
t.Fatal(diags.Error())
}
// Just a minimal state file so we can test that it comes out again at all.
// We don't need to test the entire thing because the state file
// serialization is already tested in its own package.
stateFileIn := &statefile.File{
TerraformVersion: tfversion.SemVer,
Serial: 2,
Lineage: "abc123",
State: states.NewState(),
}
prevStateFileIn := &statefile.File{
TerraformVersion: tfversion.SemVer,
Serial: 1,
Lineage: "abc123",
State: states.NewState(),
}
// Minimal plan too, since the serialization of the tfplan portion of the
// file is tested more fully in tfplan_test.go .
planIn := &plans.Plan{
Changes: &plans.ChangesSrc{
Resources: []*plans.ResourceInstanceChangeSrc{},
Outputs: []*plans.OutputChangeSrc{},
},
DriftedResources: []*plans.ResourceInstanceChangeSrc{},
DeferredResources: []*plans.DeferredResourceInstanceChangeSrc{},
VariableValues: map[string]plans.DynamicValue{
"foo": plans.DynamicValue([]byte("foo placeholder")),
},
Backend: &plans.Backend{
Type: "local",
Config: plans.DynamicValue([]byte("config placeholder")),
Workspace: "default",
},
Checks: &states.CheckResults{},
// Due to some historical oddities in how we've changed modelling over
// time, we also include the states (without the corresponding file
// headers) in the plans.Plan object. This is currently ignored by
// Create but will be returned by ReadPlan and so we need to include
// it here so that we'll get a match when we compare input and output
// below.
PrevRunState: prevStateFileIn.State,
PriorState: stateFileIn.State,
}
locksIn := depsfile.NewLocks()
locksIn.SetProvider(
addrs.NewDefaultProvider("boop"),
providerreqs.MustParseVersion("1.0.0"),
providerreqs.MustParseVersionConstraints(">= 1.0.0"),
[]providerreqs.Hash{
providerreqs.MustParseHash("fake:hello"),
},
)
planFn := filepath.Join(t.TempDir(), "tfplan")
err = Create(planFn, CreateArgs{
ConfigSnapshot: snapIn,
PreviousRunStateFile: prevStateFileIn,
StateFile: stateFileIn,
Plan: planIn,
DependencyLocks: locksIn,
})
if err != nil {
t.Fatalf("failed to create plan file: %s", err)
}
wpf, err := OpenWrapped(planFn)
if err != nil {
t.Fatalf("failed to open plan file for reading: %s", err)
}
pr, ok := wpf.Local()
if !ok {
t.Fatalf("failed to open plan file as a local plan file")
}
if wpf.IsCloud() {
t.Fatalf("wrapped plan claims to be both kinds of plan at once")
}
t.Run("ReadPlan", func(t *testing.T) {
planOut, err := pr.ReadPlan()
if err != nil {
t.Fatalf("failed to read plan: %s", err)
}
if diff := cmp.Diff(planIn, planOut, collections.CmpOptions); diff != "" {
t.Errorf("plan did not survive round-trip\n%s", diff)
}
})
t.Run("ReadStateFile", func(t *testing.T) {
stateFileOut, err := pr.ReadStateFile()
if err != nil {
t.Fatalf("failed to read state: %s", err)
}
if diff := cmp.Diff(stateFileIn, stateFileOut); diff != "" {
t.Errorf("state file did not survive round-trip\n%s", diff)
}
})
t.Run("ReadPrevStateFile", func(t *testing.T) {
prevStateFileOut, err := pr.ReadPrevStateFile()
if err != nil {
t.Fatalf("failed to read state: %s", err)
}
if diff := cmp.Diff(prevStateFileIn, prevStateFileOut); diff != "" {
t.Errorf("state file did not survive round-trip\n%s", diff)
}
})
t.Run("ReadConfigSnapshot", func(t *testing.T) {
snapOut, err := pr.ReadConfigSnapshot()
if err != nil {
t.Fatalf("failed to read config snapshot: %s", err)
}
if diff := cmp.Diff(snapIn, snapOut); diff != "" {
t.Errorf("config snapshot did not survive round-trip\n%s", diff)
}
})
t.Run("ReadConfig", func(t *testing.T) {
// Reading from snapshots is tested in the configload package, so
// here we'll just test that we can successfully do it, to see if the
// glue code in _this_ package is correct.
_, diags := pr.ReadConfig(false)
if diags.HasErrors() {
t.Errorf("when reading config: %s", diags.Err())
}
})
t.Run("ReadDependencyLocks", func(t *testing.T) {
locksOut, diags := pr.ReadDependencyLocks()
if diags.HasErrors() {
t.Fatalf("when reading config: %s", diags.Err())
}
got := locksOut.AllProviders()
want := locksIn.AllProviders()
if diff := cmp.Diff(want, got, cmp.AllowUnexported(depsfile.ProviderLock{})); diff != "" {
t.Errorf("provider locks did not survive round-trip\n%s", diff)
}
})
}
func TestWrappedError(t *testing.T) {
// Open something that isn't a cloud or local planfile: should error
wrongFile := "not a valid zip file"
_, err := OpenWrapped(filepath.Join("testdata", "test-config", "root.tf"))
if !strings.Contains(err.Error(), wrongFile) {
t.Fatalf("expected %q, got %q", wrongFile, err)
}
// Open something that doesn't exist: should error
missingFile := "no such file or directory"
_, err = OpenWrapped(filepath.Join("testdata", "absent.tfplan"))
if !strings.Contains(err.Error(), missingFile) {
t.Fatalf("expected %q, got %q", missingFile, err)
}
}
func TestWrappedCloud(t *testing.T) {
// Loading valid cloud plan results in a wrapped cloud plan
wpf, err := OpenWrapped(filepath.Join("testdata", "cloudplan.json"))
if err != nil {
t.Fatalf("failed to open valid cloud plan: %s", err)
}
if !wpf.IsCloud() {
t.Fatalf("failed to open cloud file as a cloud plan")
}
if wpf.IsLocal() {
t.Fatalf("wrapped plan claims to be both kinds of plan at once")
}
} | go | github | https://github.com/hashicorp/terraform | internal/plans/planfile/planfile_test.go |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import numpy as np
from multiprocessing.pool import ThreadPool
from pyspark import since, keyword_only
from pyspark.ml import Estimator, Model
from pyspark.ml.common import _py2java
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.param.shared import HasParallelism, HasSeed
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaParams
from pyspark.sql.functions import rand
__all__ = ['ParamGridBuilder', 'CrossValidator', 'CrossValidatorModel', 'TrainValidationSplit',
'TrainValidationSplitModel']
class ParamGridBuilder(object):
    r"""
    Builder for a param grid used in grid search-based model selection.
    >>> from pyspark.ml.classification import LogisticRegression
    >>> lr = LogisticRegression()
    >>> output = ParamGridBuilder() \
    ...     .baseOn({lr.labelCol: 'l'}) \
    ...     .baseOn([lr.predictionCol, 'p']) \
    ...     .addGrid(lr.regParam, [1.0, 2.0]) \
    ...     .addGrid(lr.maxIter, [1, 5]) \
    ...     .build()
    >>> expected = [
    ...     {lr.regParam: 1.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
    ...     {lr.regParam: 2.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
    ...     {lr.regParam: 1.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'},
    ...     {lr.regParam: 2.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}]
    >>> len(output) == len(expected)
    True
    >>> all([m in expected for m in output])
    True
    .. versionadded:: 1.4.0
    """
    def __init__(self):
        # Maps each param to the list of candidate values to search over.
        self._param_grid = {}
    @since("1.4.0")
    def addGrid(self, param, values):
        """
        Adds the given param with the given list of candidate values to the
        grid (overwriting any values previously set for that param).
        """
        self._param_grid[param] = values
        return self
    @since("1.4.0")
    def baseOn(self, *args):
        """
        Sets the given parameters in this grid to fixed values.
        Accepts either a parameter dictionary or a list of (parameter, value) pairs.
        """
        # Normalize both accepted forms into (param, value) pairs, then record
        # each one as a single-value grid.
        pairs = args[0].items() if isinstance(args[0], dict) else args
        for param, value in pairs:
            self.addGrid(param, [value])
        return self
    @since("1.4.0")
    def build(self):
        """
        Builds and returns all combinations of parameters specified
        by the param grid, as a list of param maps (dicts).
        """
        # Fix the param order once so keys and value lists stay aligned.
        params = list(self._param_grid)
        value_lists = [self._param_grid[p] for p in params]
        return [dict(zip(params, combo)) for combo in itertools.product(*value_lists)]
class ValidatorParams(HasSeed):
    """
    Common params for TrainValidationSplit and CrossValidator.

    Provides the estimator to tune, the grid of param maps to try, and the
    evaluator used to score each candidate, plus helpers for converting these
    three pieces to/from their Java counterparts for ML persistence.
    """
    # Params are declared against a dummy parent; the shared Params machinery
    # re-parents them to each concrete instance at construction time.
    estimator = Param(Params._dummy(), "estimator", "estimator to be cross-validated")
    estimatorParamMaps = Param(Params._dummy(), "estimatorParamMaps", "estimator param maps")
    evaluator = Param(
        Params._dummy(), "evaluator",
        "evaluator used to select hyper-parameters that maximize the validator metric")
    def setEstimator(self, value):
        """
        Sets the value of :py:attr:`estimator`.

        :param value: the Estimator whose hyper-parameters will be tuned.
        :return: self, for call chaining.
        """
        return self._set(estimator=value)
    def getEstimator(self):
        """
        Gets the value of estimator or its default value.
        """
        return self.getOrDefault(self.estimator)
    def setEstimatorParamMaps(self, value):
        """
        Sets the value of :py:attr:`estimatorParamMaps`.

        :param value: list of param maps (dicts of Param -> value) to evaluate.
        :return: self, for call chaining.
        """
        return self._set(estimatorParamMaps=value)
    def getEstimatorParamMaps(self):
        """
        Gets the value of estimatorParamMaps or its default value.
        """
        return self.getOrDefault(self.estimatorParamMaps)
    def setEvaluator(self, value):
        """
        Sets the value of :py:attr:`evaluator`.

        :param value: the Evaluator used to score each candidate model.
        :return: self, for call chaining.
        """
        return self._set(evaluator=value)
    def getEvaluator(self):
        """
        Gets the value of evaluator or its default value.
        """
        return self.getOrDefault(self.evaluator)
    @classmethod
    def _from_java_impl(cls, java_stage):
        """
        Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams.

        Used by subclasses' _from_java during ML persistence loading.
        """
        # Load information from java_stage to the instance.
        estimator = JavaParams._from_java(java_stage.getEstimator())
        evaluator = JavaParams._from_java(java_stage.getEvaluator())
        # Each Java ParamMap is translated using the Python estimator's params,
        # so Param identities line up with the Python-side estimator.
        epms = [estimator._transfer_param_map_from_java(epm)
                for epm in java_stage.getEstimatorParamMaps()]
        return estimator, epms, evaluator
    def _to_java_impl(self):
        """
        Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.

        Used by subclasses' _to_java during ML persistence saving.
        """
        gateway = SparkContext._gateway
        cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap
        # Build a Java ParamMap[] array with one entry per Python param map.
        java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps()))
        for idx, epm in enumerate(self.getEstimatorParamMaps()):
            java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm)
        java_estimator = self.getEstimator()._to_java()
        java_evaluator = self.getEvaluator()._to_java()
        return java_estimator, java_epms, java_evaluator
class CrossValidator(Estimator, ValidatorParams, HasParallelism, MLReadable, MLWritable):
    """
    K-fold cross validation performs model selection by splitting the dataset into a set of
    non-overlapping randomly partitioned folds which are used as separate training and test datasets
    e.g., with k=3 folds, K-fold cross validation will generate 3 (training, test) dataset pairs,
    each of which uses 2/3 of the data for training and 1/3 for testing. Each fold is used as the
    test set exactly once.
    >>> from pyspark.ml.classification import LogisticRegression
    >>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
    >>> from pyspark.ml.linalg import Vectors
    >>> dataset = spark.createDataFrame(
    ...     [(Vectors.dense([0.0]), 0.0),
    ...      (Vectors.dense([0.4]), 1.0),
    ...      (Vectors.dense([0.5]), 0.0),
    ...      (Vectors.dense([0.6]), 1.0),
    ...      (Vectors.dense([1.0]), 1.0)] * 10,
    ...     ["features", "label"])
    >>> lr = LogisticRegression()
    >>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
    >>> evaluator = BinaryClassificationEvaluator()
    >>> cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
    ...     parallelism=2)
    >>> cvModel = cv.fit(dataset)
    >>> cvModel.avgMetrics[0]
    0.5
    >>> evaluator.evaluate(cvModel.transform(dataset))
    0.8333...
    .. versionadded:: 1.4.0
    """
    # Number of folds K; each fold is held out as the validation set exactly once.
    numFolds = Param(Params._dummy(), "numFolds", "number of folds for cross validation",
                     typeConverter=TypeConverters.toInt)
    @keyword_only
    def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
                 seed=None, parallelism=1):
        """
        __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
                 seed=None, parallelism=1)
        """
        super(CrossValidator, self).__init__()
        self._setDefault(numFolds=3, parallelism=1)
        # @keyword_only captures the caller's keyword arguments in _input_kwargs.
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
                  seed=None, parallelism=1):
        """
        setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
                  seed=None, parallelism=1):
        Sets params for cross validator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.4.0")
    def setNumFolds(self, value):
        """
        Sets the value of :py:attr:`numFolds`.
        """
        return self._set(numFolds=value)
    @since("1.4.0")
    def getNumFolds(self):
        """
        Gets the value of numFolds or its default value.
        """
        return self.getOrDefault(self.numFolds)
    def _fit(self, dataset):
        """
        Run K-fold cross validation over the param grid and return a
        CrossValidatorModel holding the best model (refit on the full dataset)
        and the per-param-map metrics averaged across folds.
        """
        # Pull the configured estimator, param grid, evaluator and fold settings.
        est = self.getOrDefault(self.estimator)
        epm = self.getOrDefault(self.estimatorParamMaps)
        numModels = len(epm)
        eva = self.getOrDefault(self.evaluator)
        nFolds = self.getOrDefault(self.numFolds)
        seed = self.getOrDefault(self.seed)
        # Assign each row to a fold by attaching a uniform random column and
        # bucketing [0, 1) into nFolds equal-width intervals of width h.
        h = 1.0 / nFolds
        randCol = self.uid + "_rand"
        df = dataset.select("*", rand(seed).alias(randCol))
        # metrics[j] accumulates the cross-fold average metric for param map j.
        metrics = [0.0] * numModels
        # Candidate models within one fold are fit concurrently, bounded by the
        # parallelism param (threads, not processes: fitting releases the GIL
        # while the JVM does the work).
        pool = ThreadPool(processes=min(self.getParallelism(), numModels))
        for i in range(nFolds):
            validateLB = i * h
            validateUB = (i + 1) * h
            condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)
            validation = df.filter(condition).cache()
            train = df.filter(~condition).cache()
            def singleTrain(paramMap):
                model = est.fit(train, paramMap)
                # TODO: duplicate evaluator to take extra params from input
                metric = eva.evaluate(model.transform(validation, paramMap))
                return metric
            # pool.map blocks until every param map for this fold is evaluated,
            # so unpersisting train/validation below is safe.
            currentFoldMetrics = pool.map(singleTrain, epm)
            for j in range(numModels):
                metrics[j] += (currentFoldMetrics[j] / nFolds)
            validation.unpersist()
            train.unpersist()
        # Pick the best param map; direction depends on whether the evaluator's
        # metric should be maximized or minimized.
        if eva.isLargerBetter():
            bestIndex = np.argmax(metrics)
        else:
            bestIndex = np.argmin(metrics)
        # Refit the winning configuration on the full dataset.
        bestModel = est.fit(dataset, epm[bestIndex])
        return self._copyValues(CrossValidatorModel(bestModel, metrics))
    @since("1.4.0")
    def copy(self, extra=None):
        """
        Creates a copy of this instance with a randomly generated uid
        and some extra params. This copies creates a deep copy of
        the embedded paramMap, and copies the embedded and extra parameters over.
        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        newCV = Params.copy(self, extra)
        if self.isSet(self.estimator):
            newCV.setEstimator(self.getEstimator().copy(extra))
        # estimatorParamMaps remain the same
        if self.isSet(self.evaluator):
            newCV.setEvaluator(self.getEvaluator().copy(extra))
        return newCV
    @since("2.3.0")
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        return JavaMLWriter(self)
    @classmethod
    @since("2.3.0")
    def read(cls):
        """Returns an MLReader instance for this class."""
        return JavaMLReader(cls)
    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java CrossValidator, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
        numFolds = java_stage.getNumFolds()
        seed = java_stage.getSeed()
        parallelism = java_stage.getParallelism()
        # Create a new instance of this stage.
        py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
                       numFolds=numFolds, seed=seed, parallelism=parallelism)
        # Keep the Java stage's uid so the two sides refer to the same stage.
        py_stage._resetUid(java_stage.uid())
        return py_stage
    def _to_java(self):
        """
        Transfer this instance to a Java CrossValidator. Used for ML persistence.
        :return: Java object equivalent to this instance.
        """
        estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl()
        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid)
        _java_obj.setEstimatorParamMaps(epms)
        _java_obj.setEvaluator(evaluator)
        _java_obj.setEstimator(estimator)
        _java_obj.setSeed(self.getSeed())
        _java_obj.setNumFolds(self.getNumFolds())
        _java_obj.setParallelism(self.getParallelism())
        return _java_obj
class CrossValidatorModel(Model, ValidatorParams, MLReadable, MLWritable):
    """
    CrossValidatorModel contains the model with the highest average cross-validation
    metric across folds and uses this model to transform input data. CrossValidatorModel
    also tracks the metrics for each param map evaluated.
    .. versionadded:: 1.4.0
    """
    def __init__(self, bestModel, avgMetrics=None):
        """
        :param bestModel: the model fitted with the best-performing param map
            (refit on the full dataset by CrossValidator).
        :param avgMetrics: average cross-validation metrics for each paramMap in
            CrossValidator.estimatorParamMaps, in the corresponding order.
            Defaults to an empty list.
        """
        super(CrossValidatorModel, self).__init__()
        #: best model from cross validation
        self.bestModel = bestModel
        #: Average cross-validation metrics for each paramMap in
        #: CrossValidator.estimatorParamMaps, in the corresponding order.
        # Default is None rather than [] so a fresh list is created per
        # instance (a [] default would be one list shared by all instances).
        self.avgMetrics = avgMetrics if avgMetrics is not None else []
    def _transform(self, dataset):
        # Delegate transformation to the best model found during cross-validation.
        return self.bestModel.transform(dataset)
    @since("1.4.0")
    def copy(self, extra=None):
        """
        Creates a copy of this instance with a randomly generated uid
        and some extra params. This copies the underlying bestModel,
        creates a deep copy of the embedded paramMap, and
        copies the embedded and extra parameters over.
        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        bestModel = self.bestModel.copy(extra)
        # Copy the metrics list so the new instance does not alias this one's
        # list (mutating one copy must not affect the other).
        avgMetrics = list(self.avgMetrics)
        return CrossValidatorModel(bestModel, avgMetrics)
    @since("2.3.0")
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        return JavaMLWriter(self)
    @classmethod
    @since("2.3.0")
    def read(cls):
        """Returns an MLReader instance for this class."""
        return JavaMLReader(cls)
    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java CrossValidatorModel, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        bestModel = JavaParams._from_java(java_stage.bestModel())
        estimator, epms, evaluator = super(CrossValidatorModel, cls)._from_java_impl(java_stage)
        py_stage = cls(bestModel=bestModel).setEstimator(estimator)
        py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator)
        # Keep the Java stage's uid so the two sides refer to the same stage.
        py_stage._resetUid(java_stage.uid())
        return py_stage
    def _to_java(self):
        """
        Transfer this instance to a Java CrossValidatorModel. Used for ML persistence.
        :return: Java object equivalent to this instance.
        """
        sc = SparkContext._active_spark_context
        # TODO: persist average metrics as well
        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidatorModel",
                                             self.uid,
                                             self.bestModel._to_java(),
                                             _py2java(sc, []))
        estimator, epms, evaluator = super(CrossValidatorModel, self)._to_java_impl()
        _java_obj.set("evaluator", evaluator)
        _java_obj.set("estimator", estimator)
        _java_obj.set("estimatorParamMaps", epms)
        return _java_obj
class TrainValidationSplit(Estimator, ValidatorParams, HasParallelism, MLReadable, MLWritable):
    """
    .. note:: Experimental

    Validation for hyper-parameter tuning. Randomly splits the input dataset into train and
    validation sets, and uses evaluation metric on the validation set to select the best model.
    Similar to :class:`CrossValidator`, but only splits the set once.

    >>> from pyspark.ml.classification import LogisticRegression
    >>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
    >>> from pyspark.ml.linalg import Vectors
    >>> dataset = spark.createDataFrame(
    ...     [(Vectors.dense([0.0]), 0.0),
    ...      (Vectors.dense([0.4]), 1.0),
    ...      (Vectors.dense([0.5]), 0.0),
    ...      (Vectors.dense([0.6]), 1.0),
    ...      (Vectors.dense([1.0]), 1.0)] * 10,
    ...     ["features", "label"])
    >>> lr = LogisticRegression()
    >>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
    >>> evaluator = BinaryClassificationEvaluator()
    >>> tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
    ...     parallelism=2)
    >>> tvsModel = tvs.fit(dataset)
    >>> evaluator.evaluate(tvsModel.transform(dataset))
    0.8333...

    .. versionadded:: 2.0.0
    """

    trainRatio = Param(Params._dummy(), "trainRatio", "Param for ratio between train and\
     validation data. Must be between 0 and 1.", typeConverter=TypeConverters.toFloat)

    @keyword_only
    def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
                 parallelism=1, seed=None):
        """
        __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
                 parallelism=1, seed=None)
        """
        super(TrainValidationSplit, self).__init__()
        self._setDefault(trainRatio=0.75, parallelism=1)
        kwargs = self._input_kwargs
        self._set(**kwargs)

    @since("2.0.0")
    @keyword_only
    def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
                  parallelism=1, seed=None):
        """
        setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
                  parallelism=1, seed=None):
        Sets params for the train validation split.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setTrainRatio(self, value):
        """
        Sets the value of :py:attr:`trainRatio`.
        """
        return self._set(trainRatio=value)

    @since("2.0.0")
    def getTrainRatio(self):
        """
        Gets the value of trainRatio or its default value.
        """
        return self.getOrDefault(self.trainRatio)

    def _fit(self, dataset):
        est = self.getOrDefault(self.estimator)
        epm = self.getOrDefault(self.estimatorParamMaps)
        numModels = len(epm)
        eva = self.getOrDefault(self.evaluator)
        tRatio = self.getOrDefault(self.trainRatio)
        seed = self.getOrDefault(self.seed)
        # Split rows by comparing a seeded random column against trainRatio.
        randCol = self.uid + "_rand"
        df = dataset.select("*", rand(seed).alias(randCol))
        condition = (df[randCol] >= tRatio)
        validation = df.filter(condition).cache()
        train = df.filter(~condition).cache()

        def singleTrain(paramMap):
            model = est.fit(train, paramMap)
            metric = eva.evaluate(model.transform(validation, paramMap))
            return metric

        pool = ThreadPool(processes=min(self.getParallelism(), numModels))
        try:
            metrics = pool.map(singleTrain, epm)
        finally:
            # BUGFIX: release the worker threads; previously the pool was
            # never closed, leaking a thread per fit() call.
            pool.close()
            pool.join()
        train.unpersist()
        validation.unpersist()

        if eva.isLargerBetter():
            bestIndex = np.argmax(metrics)
        else:
            bestIndex = np.argmin(metrics)
        # Refit the winning param map on the full dataset.
        bestModel = est.fit(dataset, epm[bestIndex])
        return self._copyValues(TrainValidationSplitModel(bestModel, metrics))

    @since("2.0.0")
    def copy(self, extra=None):
        """
        Creates a copy of this instance with a randomly generated uid
        and some extra params. This copies creates a deep copy of
        the embedded paramMap, and copies the embedded and extra parameters over.

        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        newTVS = Params.copy(self, extra)
        if self.isSet(self.estimator):
            newTVS.setEstimator(self.getEstimator().copy(extra))
        # estimatorParamMaps remain the same
        if self.isSet(self.evaluator):
            newTVS.setEvaluator(self.getEvaluator().copy(extra))
        return newTVS

    @since("2.3.0")
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        return JavaMLWriter(self)

    @classmethod
    @since("2.3.0")
    def read(cls):
        """Returns an MLReader instance for this class."""
        return JavaMLReader(cls)

    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java TrainValidationSplit, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage)
        trainRatio = java_stage.getTrainRatio()
        seed = java_stage.getSeed()
        parallelism = java_stage.getParallelism()
        # Create a new instance of this stage.
        py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
                       trainRatio=trainRatio, seed=seed, parallelism=parallelism)
        py_stage._resetUid(java_stage.uid())
        return py_stage

    def _to_java(self):
        """
        Transfer this instance to a Java TrainValidationSplit. Used for ML persistence.

        :return: Java object equivalent to this instance.
        """
        estimator, epms, evaluator = super(TrainValidationSplit, self)._to_java_impl()
        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.TrainValidationSplit",
                                             self.uid)
        _java_obj.setEstimatorParamMaps(epms)
        _java_obj.setEvaluator(evaluator)
        _java_obj.setEstimator(estimator)
        _java_obj.setTrainRatio(self.getTrainRatio())
        _java_obj.setSeed(self.getSeed())
        _java_obj.setParallelism(self.getParallelism())
        return _java_obj
class TrainValidationSplitModel(Model, ValidatorParams, MLReadable, MLWritable):
    """
    .. note:: Experimental

    Model from train validation split.

    .. versionadded:: 2.0.0
    """

    def __init__(self, bestModel, validationMetrics=None):
        super(TrainValidationSplitModel, self).__init__()
        #: best model from train validation split
        self.bestModel = bestModel
        #: evaluated validation metrics
        # BUGFIX: the default used to be a mutable `[]`, which Python shares
        # across every call — metrics appended to one model would leak into
        # all models constructed without explicit metrics.
        self.validationMetrics = [] if validationMetrics is None else validationMetrics

    def _transform(self, dataset):
        return self.bestModel.transform(dataset)

    @since("2.0.0")
    def copy(self, extra=None):
        """
        Creates a copy of this instance with a randomly generated uid
        and some extra params. This copies the underlying bestModel,
        creates a deep copy of the embedded paramMap, and
        copies the embedded and extra parameters over.
        And, this creates a shallow copy of the validationMetrics.

        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        bestModel = self.bestModel.copy(extra)
        validationMetrics = list(self.validationMetrics)
        return TrainValidationSplitModel(bestModel, validationMetrics)

    @since("2.3.0")
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        return JavaMLWriter(self)

    @classmethod
    @since("2.3.0")
    def read(cls):
        """Returns an MLReader instance for this class."""
        return JavaMLReader(cls)

    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java TrainValidationSplitModel, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        # Load information from java_stage to the instance.
        bestModel = JavaParams._from_java(java_stage.bestModel())
        estimator, epms, evaluator = super(TrainValidationSplitModel,
                                           cls)._from_java_impl(java_stage)
        # Create a new instance of this stage.
        py_stage = cls(bestModel=bestModel).setEstimator(estimator)
        py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator)
        py_stage._resetUid(java_stage.uid())
        return py_stage

    def _to_java(self):
        """
        Transfer this instance to a Java TrainValidationSplitModel. Used for ML persistence.

        :return: Java object equivalent to this instance.
        """
        sc = SparkContext._active_spark_context
        # TODO: persist validation metrics as well
        _java_obj = JavaParams._new_java_obj(
            "org.apache.spark.ml.tuning.TrainValidationSplitModel",
            self.uid,
            self.bestModel._to_java(),
            _py2java(sc, []))
        estimator, epms, evaluator = super(TrainValidationSplitModel, self)._to_java_impl()
        _java_obj.set("evaluator", evaluator)
        _java_obj.set("estimator", estimator)
        _java_obj.set("estimatorParamMaps", epms)
        return _java_obj
if __name__ == "__main__":
    # Run this module's doctests against a small local SparkSession.
    import doctest
    from pyspark.sql import SparkSession
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.tuning tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    # Propagate any doctest failure as a nonzero exit status.
    if failure_count:
        exit(-1) | unknown | codeparrot/codeparrot-clean | ||
//===--- Platform.cpp - Implement platform-related helpers ----------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "swift/Basic/Assertions.h"
#include "swift/Basic/Pack.h"
#include "swift/Basic/Platform.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Support/VersionTuple.h"
using namespace swift;
bool swift::tripleIsiOSSimulator(const llvm::Triple &triple) {
  // An iOS simulator target is an iOS triple with the simulator
  // environment, excluding Mac Catalyst (which also reports isiOS()).
  if (!triple.isiOS())
    return false;
  if (tripleIsMacCatalystEnvironment(triple))
    return false;
  return triple.isSimulatorEnvironment();
}
bool swift::tripleIsAppleTVSimulator(const llvm::Triple &triple) {
  // A tvOS target running in the simulator environment.
  if (!triple.isTvOS())
    return false;
  return triple.isSimulatorEnvironment();
}
bool swift::tripleIsWatchSimulator(const llvm::Triple &triple) {
  // A watchOS target running in the simulator environment.
  if (!triple.isWatchOS())
    return false;
  return triple.isSimulatorEnvironment();
}
bool swift::tripleIsMacCatalystEnvironment(const llvm::Triple &triple) {
  // Mac Catalyst triples look like iOS (but not tvOS) with the MacABI
  // environment component.
  if (triple.getEnvironment() != llvm::Triple::MacABI)
    return false;
  return triple.isiOS() && !triple.isTvOS();
}
bool swift::tripleIsVisionSimulator(const llvm::Triple &triple) {
  // A visionOS (xrOS) target running in the simulator environment.
  if (!triple.isXROS())
    return false;
  return triple.isSimulatorEnvironment();
}
bool swift::tripleInfersSimulatorEnvironment(const llvm::Triple &triple) {
  // Historically, x86 triples for the embedded Darwin OSes without an
  // explicit environment component were assumed to target the simulator.
  auto os = triple.getOS();
  bool embeddedDarwin = os == llvm::Triple::IOS ||
                        os == llvm::Triple::TvOS ||
                        os == llvm::Triple::WatchOS;
  if (!embeddedDarwin)
    return false;

  bool isX86 = triple.getArch() == llvm::Triple::x86 ||
               triple.getArch() == llvm::Triple::x86_64;
  return !triple.hasEnvironment() && isX86 &&
         !tripleIsMacCatalystEnvironment(triple);
}
bool swift::triplesAreValidForZippering(const llvm::Triple &target,
                                        const llvm::Triple &targetVariant) {
  // Zippering requires identical arch, sub-arch, and vendor components.
  bool sameArchAndVendor =
      target.getArchName() == targetVariant.getArchName() &&
      target.getArch() == targetVariant.getArch() &&
      target.getSubArch() == targetVariant.getSubArch() &&
      target.getVendor() == targetVariant.getVendor();
  if (!sameArchAndVendor)
    return false;

  // One side must be macOS and the other Mac Catalyst (iOS-macabi), in
  // either orientation: a macOS library gaining a Catalyst variant (the
  // typical case), or a Catalyst library gaining a macOS variant.
  if (target.isMacOSX() && tripleIsMacCatalystEnvironment(targetVariant))
    return true;
  return targetVariant.isMacOSX() && tripleIsMacCatalystEnvironment(target);
}
/// Returns the earliest OS release of the given target platform that ships
/// the Swift runtime, or std::nullopt when the platform has no such
/// deployment restriction.
const std::optional<llvm::VersionTuple>
swift::minimumAvailableOSVersionForTriple(const llvm::Triple &triple) {
  if (triple.isMacOSX())
    return llvm::VersionTuple(10, 10, 0);

  // Mac Catalyst was introduced with an iOS deployment target of 13.1.
  if (tripleIsMacCatalystEnvironment(triple))
    return llvm::VersionTuple(13, 1);

  // Note: this must come before checking iOS since that returns true for
  // both iOS and tvOS.
  if (triple.isTvOS())
    return llvm::VersionTuple(9, 0);

  if (triple.isiOS())
    return llvm::VersionTuple(8, 0);

  if (triple.isWatchOS())
    return llvm::VersionTuple(2, 0);

  if (triple.isXROS())
    return llvm::VersionTuple(1, 0);

  // Non-Apple platforms: no known restriction.
  return std::nullopt;
}
bool swift::tripleRequiresRPathForSwiftLibrariesInOS(
    const llvm::Triple &triple) {
  // Whether the deployment target predates the OS bundling all of the
  // Swift libraries the compiler may emit references to, in which case
  // they must be loaded via an rpath instead.
  if (triple.isMacOSX()) {
    // macOS versions before 10.14.4 don't have Swift in the OS
    // (the linker still uses an rpath-based install name until 10.15).
    // macOS versions before 12.0 don't have _Concurrency in the OS.
    // macOS versions before 26.0 don't have Span in stdlib.
    return triple.isMacOSXVersionLT(26, 0);
  }

  // iOS before 12.2 / watchOS before 5.2 don't have Swift in the OS;
  // iOS before 15.0 / watchOS before 8.0 don't have _Concurrency; and
  // iOS, watchOS, tvOS, and visionOS before 26.0 don't have Span in
  // stdlib. The effective cutoff is therefore 26.0 for all of them.
  // (isiOS() already covers tvOS, but the explicit checks keep the
  // intent clear.)
  if (triple.isiOS() || triple.isWatchOS() || triple.isTvOS() ||
      triple.isXROS())
    return triple.isOSVersionLT(26, 0);

  // Other platforms don't have Swift installed as part of the OS by default.
  return false;
}
bool swift::tripleBTCFIByDefaultInOpenBSD(const llvm::Triple &triple) {
  // OpenBSD enables branch-target CFI by default on arm64 and amd64.
  if (!triple.isOSOpenBSD())
    return false;
  auto arch = triple.getArch();
  return arch == llvm::Triple::aarch64 || arch == llvm::Triple::x86_64;
}
/// Classify a Darwin triple into a DarwinPlatformKind.
/// Calls llvm_unreachable on non-Darwin triples.
DarwinPlatformKind swift::getDarwinPlatformKind(const llvm::Triple &triple) {
  if (triple.isiOS()) {
    // isiOS() is also true for tvOS, so the tvOS check must come first.
    if (triple.isTvOS()) {
      if (tripleIsAppleTVSimulator(triple))
        return DarwinPlatformKind::TvOSSimulator;
      return DarwinPlatformKind::TvOS;
    }

    if (tripleIsiOSSimulator(triple))
      return DarwinPlatformKind::IPhoneOSSimulator;
    return DarwinPlatformKind::IPhoneOS;
  }

  if (triple.isWatchOS()) {
    if (tripleIsWatchSimulator(triple))
      return DarwinPlatformKind::WatchOSSimulator;
    return DarwinPlatformKind::WatchOS;
  }

  if (triple.isMacOSX())
    return DarwinPlatformKind::MacOS;

  if (triple.isXROS()) {
    if (tripleIsVisionSimulator(triple))
      return DarwinPlatformKind::VisionOSSimulator;
    return DarwinPlatformKind::VisionOS;
  }

  if (triple.isAppleFirmware())
    return DarwinPlatformKind::Firmware;

  llvm_unreachable("Unsupported Darwin platform");
}
/// Map a DarwinPlatformKind to the SDK-style platform name used by Xcode
/// (e.g. "macosx", "iphonesimulator").
static StringRef getPlatformNameForDarwin(const DarwinPlatformKind platform) {
  switch (platform) {
  case DarwinPlatformKind::MacOS:
    return "macosx";
  case DarwinPlatformKind::IPhoneOS:
    return "iphoneos";
  case DarwinPlatformKind::IPhoneOSSimulator:
    return "iphonesimulator";
  case DarwinPlatformKind::TvOS:
    return "appletvos";
  case DarwinPlatformKind::TvOSSimulator:
    return "appletvsimulator";
  case DarwinPlatformKind::WatchOS:
    return "watchos";
  case DarwinPlatformKind::WatchOSSimulator:
    return "watchsimulator";
  case DarwinPlatformKind::VisionOS:
    return "xros";
  case DarwinPlatformKind::VisionOSSimulator:
    return "xrsimulator";
  case DarwinPlatformKind::Firmware:
    return "firmware";
  }
  llvm_unreachable("Unsupported Darwin platform");
}
/// Map a target triple to the platform name used in Swift resource and
/// module directory layouts (e.g. "macosx", "linux", "windows").
/// Returns "" for OSes with no assigned platform name and "none" for
/// unknown or unrecognized configurations.
StringRef swift::getPlatformNameForTriple(const llvm::Triple &triple) {
  switch (triple.getOS()) {
  // OSes Swift has no platform directory name for.
  case llvm::Triple::AIX:
  case llvm::Triple::AMDHSA:
  case llvm::Triple::AMDPAL:
  case llvm::Triple::BridgeOS:
  case llvm::Triple::CUDA:
  case llvm::Triple::DragonFly:
  case llvm::Triple::DriverKit:
  case llvm::Triple::ELFIAMCU:
  case llvm::Triple::Emscripten:
  case llvm::Triple::Fuchsia:
  case llvm::Triple::HermitCore:
  case llvm::Triple::Hurd:
  case llvm::Triple::KFreeBSD:
  case llvm::Triple::Lv2:
  case llvm::Triple::Mesa3D:
  case llvm::Triple::NaCl:
  case llvm::Triple::NetBSD:
  case llvm::Triple::NVCL:
  case llvm::Triple::PS5:
  case llvm::Triple::RTEMS:
  case llvm::Triple::Serenity:
  case llvm::Triple::ShaderModel:
  case llvm::Triple::Solaris:
  case llvm::Triple::Vulkan:
  case llvm::Triple::ZOS:
    return "";
  // Apple platforms delegate to the Darwin-specific classification.
  case llvm::Triple::Darwin:
  case llvm::Triple::MacOSX:
  case llvm::Triple::IOS:
  case llvm::Triple::TvOS:
  case llvm::Triple::WatchOS:
  case llvm::Triple::XROS:
  case llvm::Triple::Firmware:
    return getPlatformNameForDarwin(getDarwinPlatformKind(triple));
  case llvm::Triple::Linux:
    if (triple.isAndroid())
      return "android";
    else if (triple.isMusl()) {
      // The triple for linux-static is <arch>-swift-linux-musl, to distinguish
      // it from a "normal" musl set-up (ala Alpine).
      if (triple.getVendor() == llvm::Triple::Swift)
        return "linux-static";
      else
        return "musl";
    } else
      return "linux";
  case llvm::Triple::FreeBSD:
    return "freebsd";
  case llvm::Triple::OpenBSD:
    return "openbsd";
  case llvm::Triple::Win32:
    // Windows is subdivided by environment/ABI.
    switch (triple.getEnvironment()) {
    case llvm::Triple::Cygnus:
      return "cygwin";
    case llvm::Triple::GNU:
      return "mingw";
    case llvm::Triple::MSVC:
    case llvm::Triple::Itanium:
      return "windows";
    default:
      return "none";
    }
  case llvm::Triple::PS4:
    return "ps4";
  case llvm::Triple::Haiku:
    return "haiku";
  case llvm::Triple::WASI:
    return "wasi";
  case llvm::Triple::UnknownOS:
    return "none";
  case llvm::Triple::UEFI:
  case llvm::Triple::LiteOS:
  case llvm::Triple::Managarm:
    llvm_unreachable("unsupported OS");
  }
  llvm_unreachable("unsupported OS");
}
llvm::VersionTuple swift::getVersionForTriple(const llvm::Triple &triple) {
  // Extract the deployment version encoded in the triple's OS component
  // (or, for Android, in the environment component).
  if (triple.isMacOSX()) {
    // getMacOSXVersion also canonicalizes "darwin"-style versions.
    llvm::VersionTuple osVersion;
    triple.getMacOSXVersion(osVersion);
    return osVersion;
  }
  if (triple.isiOS())
    return triple.getiOSVersion();
  if (triple.isWatchOS() || triple.isXROS() || triple.isOSWindows())
    return triple.getOSVersion();
  if (triple.isAndroid())
    return triple.getEnvironmentVersion();
  // No version information encoded for other platforms.
  return llvm::VersionTuple(/*Major=*/0, /*Minor=*/0, /*Subminor=*/0);
}
StringRef swift::getMajorArchitectureName(const llvm::Triple &Triple) {
  // On Linux, collapse ARM sub-architecture revisions down to the major
  // family name (e.g. "armv7s" -> "armv7").
  if (Triple.isOSLinux()) {
    auto subArch = Triple.getSubArch();
    if (subArch == llvm::Triple::SubArchType::ARMSubArch_v7)
      return "armv7";
    if (subArch == llvm::Triple::SubArchType::ARMSubArch_v6)
      return "armv6";
    if (subArch == llvm::Triple::SubArchType::ARMSubArch_v5)
      return "armv5";
    return Triple.getArchName();
  }

  // OpenBSD spells x86_64 as "amd64"; normalize it.
  if (Triple.isOSOpenBSD() && Triple.getArchName() == "amd64")
    return "x86_64";

  return Triple.getArchName();
}
// The code below is responsible for normalizing target triples into the form
// used to name target-specific swiftmodule, swiftinterface, and swiftdoc files.
// If two triples have incompatible ABIs or can be distinguished by Swift #if
// declarations, they should normalize to different values.
//
// This code is only really used on platforms with toolchains supporting fat
// binaries (a single binary containing multiple architectures). On these
// platforms, this code should strip unnecessary details from target triple
// components and map synonyms to canonical values. Even values which don't need
// any special canonicalization should be documented here as comments.
//
// (Fallback behavior does not belong here; it should be implemented in code
// that calls this function, most importantly in SerializedModuleLoaderBase.)
//
// If you're trying to refer to this code to understand how Swift behaves and
// you're unfamiliar with LLVM internals, here's a cheat sheet for reading it:
//
// * llvm::Triple is the type for a target name. It's a bit of a misnomer,
// because it can contain three or four values: arch-vendor-os[-environment].
//
// * In .Cases and .Case, the last argument is the value the arguments before it
// map to. That is, `.Cases("bar", "baz", "foo")` will return "foo" if it sees
// "bar" or "baz".
//
// * std::optional is similar to a Swift Optional: it either contains a value
//   or represents the absence of one. `std::nullopt` is equivalent to `nil`;
//   leading `*` is equivalent to trailing `!`; conversion to `bool` is a
//   not-`nullopt` check.
/// Canonicalize the architecture component of an Apple triple for use in
/// target-specific module file names (e.g. "aarch64" -> "arm64",
/// "amd64" -> "x86_64", any i?86 spelling -> "i386").
static StringRef
getArchForAppleTargetSpecificModuleTriple(const llvm::Triple &triple) {
  auto tripleArchName = triple.getArchName();

  return llvm::StringSwitch<StringRef>(tripleArchName)
      .Cases({"arm64", "aarch64"}, "arm64")
      .Cases({"arm64_32", "aarch64_32"}, "arm64_32")
      .Cases({"x86_64", "amd64"}, "x86_64")
      .Cases({"i386", "i486", "i586", "i686", "i786", "i886", "i986"}, "i386")
      .Cases({"unknown", ""}, "unknown")
      // These values are also supported, but are handled by the default case
      // below:
      //   .Case ("armv7s", "armv7s")
      //   .Case ("armv7k", "armv7k")
      //   .Case ("armv7", "armv7")
      //   .Case ("arm64e", "arm64e")
      .Default(tripleArchName);
}
static StringRef
getVendorForAppleTargetSpecificModuleTriple(const llvm::Triple &triple) {
  assert(triple.isOSDarwin() &&
         "shouldn't normalize non-Darwin triple to 'apple'");
  // We unconditionally normalize to "apple" because it's relatively common
  // for build systems to omit the vendor name or use an incorrect one like
  // "unknown". Most parts of the compiler ignore the vendor, so you might
  // not notice such a mistake.
  //
  // Please don't depend on this behavior--specify 'apple' if you're
  // building for an Apple platform.
  return "apple";
}
/// Canonicalize the OS component of an Apple triple for use in
/// target-specific module file names, stripping any version suffix
/// and mapping macOS synonyms to "macos".
static StringRef
getOSForAppleTargetSpecificModuleTriple(const llvm::Triple &triple) {
  auto tripleOSName = triple.getOSName();

  // Truncate the OS name before the first digit. "Digit" here is ASCII '0'-'9'.
  auto tripleOSNameNoVersion = tripleOSName.take_until(llvm::isDigit);

  return llvm::StringSwitch<StringRef>(tripleOSNameNoVersion)
      .Cases({"macos", "macosx", "darwin"}, "macos")
      .Cases({"unknown", ""}, "unknown")
      // These values are also supported, but are handled by the default case
      // below:
      //   .Case ("ios", "ios")
      //   .Case ("tvos", "tvos")
      //   .Case ("watchos", "watchos")
      .Default(tripleOSNameNoVersion);
}
/// Canonicalize the environment component of an Apple triple for use in
/// target-specific module file names. Returns std::nullopt when the
/// environment is absent or "unknown", so the caller can emit a
/// three-component triple instead.
static std::optional<StringRef>
getEnvironmentForAppleTargetSpecificModuleTriple(const llvm::Triple &triple) {
  auto tripleEnvironment = triple.getEnvironmentName();

  return llvm::StringSwitch<std::optional<StringRef>>(tripleEnvironment)
      .Cases({"unknown", ""}, std::nullopt)
      // These values are also supported, but are handled by the default case
      // below:
      //   .Case ("simulator", StringRef("simulator"))
      //   .Case ("macabi", StringRef("macabi"))
      .Default(tripleEnvironment);
}
llvm::Triple swift::getTargetSpecificModuleTriple(const llvm::Triple &triple) {
  // Apple platforms: normalize each triple component individually.
  // isOSDarwin() returns true for all Darwin-style OSes, including macOS,
  // iOS, etc.
  if (triple.isOSDarwin()) {
    StringRef arch = getArchForAppleTargetSpecificModuleTriple(triple);
    StringRef vendor = getVendorForAppleTargetSpecificModuleTriple(triple);
    StringRef os = getOSForAppleTargetSpecificModuleTriple(triple);

    if (auto environment =
            getEnvironmentForAppleTargetSpecificModuleTriple(triple))
      // Generate an arch-vendor-os-environment triple.
      return llvm::Triple(arch, vendor, os, *environment);

    // No environment component: generate an arch-vendor-os triple.
    return llvm::Triple(arch, vendor, os);
  }

  // android - drop the API level. That is not pertinent to the module; the
  // API availability is handled by the clang importer.
  if (triple.isAndroid())
    return llvm::Triple(
        triple.getArchName(), triple.getVendorName(), triple.getOSName(),
        llvm::Triple::getEnvironmentTypeName(triple.getEnvironment()));

  if (triple.isOSFreeBSD())
    return swift::getUnversionedTriple(triple);

  if (triple.isOSOpenBSD())
    return llvm::Triple(swift::getMajorArchitectureName(triple),
                        triple.getVendorName(), triple.getOSName());

  // Other platforms get no normalization.
  return triple;
}
llvm::Triple swift::getUnversionedTriple(const llvm::Triple &triple) {
  // Strip any version suffix from the OS component (e.g. "macosx10.15"
  // -> "macosx"), preserving the environment when one is present.
  StringRef osNoVersion = triple.getOSName().take_until(llvm::isDigit);

  if (!triple.getEnvironment())
    return llvm::Triple(triple.getArchName(), triple.getVendorName(),
                        osNoVersion);

  return llvm::Triple(
      triple.getArchName(), triple.getVendorName(), osNoVersion,
      llvm::Triple::getEnvironmentTypeName(triple.getEnvironment()));
}
namespace {

// Here, we statically reflect the entire contents of RuntimeVersions.def
// into the template-argument structure of the type AllStaticSwiftReleases.
// We then use template metaprogramming on this type to synthesize arrays
// of PlatformSwiftRelease for each of the target platforms with
// deployment restrictions. This would be much simpler with the recent
// generalizations of constexpr and non-type template parameters, but
// those remain above our baseline for now, so we have to do this the
// old way.

/// A specific release of a platform that provides a specific Swift
/// runtime version. Ultimately, all the variadic goop below is just
/// building an array of these for each platform, which is what we'll
/// use at runtime.
struct PlatformSwiftRelease {
  // The Swift runtime version first provided by platformVersion.
  llvm::VersionTuple swiftVersion;
  // The platform release that first bundles that Swift runtime.
  llvm::VersionTuple platformVersion;
};

/// A deployment-restricted platform.
enum class PlatformKind {
  macOS,
  iOS,
  watchOS,
  visionOS
};

/// A template which statically reflects a version tuple. Generalized
/// template parameters would theoretically let us just use
/// llvm::VersionTuple.
template <unsigned... Components>
struct StaticVersion;

/// A template which statically reflects a single PLATFORM in
/// RuntimeVersions.def.
template <PlatformKind Kind, class Version>
struct StaticPlatformRelease;

/// A template which statically reflects a single RUNTIME_VERSION in
/// RuntimeVersions.def.
template <class SwiftVersion, class PlatformReleases>
struct StaticSwiftRelease;

/// In the assumed context of a particular platform, the release
/// of the platform that first provided a particular Swift version.
template <class SwiftVersion, class PlatformVersion>
struct StaticPlatformSwiftRelease;

// C++ does not allow template argument lists to have trailing commas,
// so to make the macro metaprogramming side of this work, we have to
// include an extra type here (and special-case it in the transforms
// below) for the sole purpose of terminating the list without a comma.
struct Terminal;

// Strips one level of parentheses from a macro argument: (1, 2) -> 1, 2.
#define UNPARENTHESIZE(...) __VA_ARGS__

using AllStaticSwiftReleases =
  packs::Pack<
#define PLATFORM(NAME, VERSION)                 \
    StaticPlatformRelease<                      \
      PlatformKind::NAME,                       \
      StaticVersion<UNPARENTHESIZE VERSION>     \
    >,
#define FUTURE
#define RUNTIME_VERSION(SWIFT_TUPLE, PROVIDERS)                   \
    StaticSwiftRelease<StaticVersion<UNPARENTHESIZE SWIFT_TUPLE>, \
                       packs::Pack<PROVIDERS Terminal>>,
#include "swift/AST/RuntimeVersions.def"
    Terminal
  >;

#undef UNPARENTHESIZE

/// A template for comparing two StaticVersion type values.
template <class A, class B>
struct StaticVersionGT;

// 0.0 is not strictly greater than any version.
template <class Second>
struct StaticVersionGT<
    StaticVersion<>,
    Second
  > {
  static constexpr bool value = false;
};

// A version is strictly greater than 0.0 if it has any nonzero component.
template <unsigned FirstHead, unsigned... FirstTail>
struct StaticVersionGT<
    StaticVersion<FirstHead, FirstTail...>,
    StaticVersion<>
  > {
  static constexpr bool value =
    (FirstHead > 0) ? true :
      StaticVersionGT<StaticVersion<FirstTail...>,
                      StaticVersion<>>::value;
};

// a.b is strictly greater than c.d if (a > c || (a == c && b > d)).
template <unsigned FirstHead, unsigned... FirstTail,
          unsigned SecondHead, unsigned... SecondTail>
struct StaticVersionGT<
    StaticVersion<FirstHead, FirstTail...>,
    StaticVersion<SecondHead, SecondTail...>
  > {
  static constexpr bool value =
    (FirstHead > SecondHead) ? true :
    (FirstHead < SecondHead) ? false :
      StaticVersionGT<StaticVersion<FirstTail...>,
                      StaticVersion<SecondTail...>>::value;
};

/// A template for turning a StaticVersion into an llvm::VersionTuple.
template <class>
struct BuildVersionTuple;
template <unsigned... Components>
struct BuildVersionTuple<StaticVersion<Components...>> {
  static constexpr llvm::VersionTuple get() {
    return llvm::VersionTuple(Components...);
  }
};

/// A transform that takes a StaticPlatformRelease, checks if it
/// matches the given platform, and turns it into a
/// StaticPlatformSwiftRelease if so. The result is returned as an
/// optional pack which will be empty if the release is for a different
/// platform.
template <class, class>
struct BuildStaticPlatformSwiftReleaseHelper;
template <PlatformKind Platform, class SwiftVersion>
struct BuildStaticPlatformSwiftReleaseHelper_Arg;

// Matching case.
template <PlatformKind Platform, class SwiftVersion, class PlatformVersion>
struct BuildStaticPlatformSwiftReleaseHelper<
    BuildStaticPlatformSwiftReleaseHelper_Arg<Platform, SwiftVersion>,
    StaticPlatformRelease<Platform, PlatformVersion>> {
  using result = packs::Pack<
    StaticPlatformSwiftRelease<SwiftVersion, PlatformVersion>
  >;
};

// Non-matching case.
template <PlatformKind Platform, class SwiftVersion,
          PlatformKind OtherPlatform, class PlatformVersion>
struct BuildStaticPlatformSwiftReleaseHelper<
    BuildStaticPlatformSwiftReleaseHelper_Arg<Platform, SwiftVersion>,
    StaticPlatformRelease<OtherPlatform, PlatformVersion>> {
  using result = packs::Pack<>;
};

// Terminal case (see above).
template <PlatformKind Platform, class SwiftVersion>
struct BuildStaticPlatformSwiftReleaseHelper<
    BuildStaticPlatformSwiftReleaseHelper_Arg<Platform, SwiftVersion>,
    Terminal> {
  using result = packs::Pack<>;
};

/// A transform that takes a StaticSwiftRelease, finds the platform
/// release in it that matches the given platform, and turns it into
/// StaticPlatformSwiftRelease. The result is returned as an optional
/// pack which will be empty if there is no release for the given
/// platform in this SSR.
template <class, class>
struct BuildStaticPlatformSwiftRelease;
template <PlatformKind Platform>
struct BuildStaticPlatformSwiftRelease_Arg;

// Main case: destructure the arguments, then flat-map our helper
// transform above. Note that we assume that there aren't multiple
// entries for the same platform in the platform releases of a given
// Swift release.
template <PlatformKind Platform, class SwiftVersion,
          class StaticPlatformReleases>
struct BuildStaticPlatformSwiftRelease<
    BuildStaticPlatformSwiftRelease_Arg<Platform>,
    StaticSwiftRelease<SwiftVersion, StaticPlatformReleases>>
  : packs::PackFlatMap<
      BuildStaticPlatformSwiftReleaseHelper,
      BuildStaticPlatformSwiftReleaseHelper_Arg<Platform, SwiftVersion>,
      StaticPlatformReleases> {};

// Terminal case (see above).
template <PlatformKind Platform>
struct BuildStaticPlatformSwiftRelease<
    BuildStaticPlatformSwiftRelease_Arg<Platform>,
    Terminal> {
  using result = packs::Pack<>;
};

/// A template for generating a PlatformSwiftRelease array element
/// from a StaticPlatformSwiftRelease type value.
template <class>
struct BuildPlatformSwiftRelease;
template <class SwiftVersion, class PlatformVersion>
struct BuildPlatformSwiftRelease<
    StaticPlatformSwiftRelease<SwiftVersion, PlatformVersion>
  > {
  static constexpr PlatformSwiftRelease get() {
    return { BuildVersionTuple<SwiftVersion>::get(),
             BuildVersionTuple<PlatformVersion>::get() };
  }
};

/// A template for comparing two StaticPlatformSwiftRelease type values,
/// for the purposes of a well-ordered assertion we want to make:
/// We don't call this GT because it's not really a general-purpose
/// comparison.
template <class, class>
struct StaticPlatformSwiftReleaseStrictlyDescend;
template <class FirstSwift, class FirstPlatform,
          class SecondSwift, class SecondPlatform>
struct StaticPlatformSwiftReleaseStrictlyDescend<
    StaticPlatformSwiftRelease<FirstSwift, FirstPlatform>,
    StaticPlatformSwiftRelease<SecondSwift, SecondPlatform>
  > {
  static constexpr bool value =
    StaticVersionGT<FirstSwift, SecondSwift>::value &&
    StaticVersionGT<FirstPlatform, SecondPlatform>::value;
};

/// A helper template for BuildPlatformSwiftReleaseArray, below.
template <class P>
struct BuildPlatformSwiftReleaseArrayHelper;
template <class... StaticPlatformSwiftReleases>
struct BuildPlatformSwiftReleaseArrayHelper<
    packs::Pack<StaticPlatformSwiftReleases...>
  > {
  // After we reverse the entries, we expect them to strictly
  // descend in both the Swift version and the platform version.
  static_assert(packs::PackComponentsAreOrdered<
                  StaticPlatformSwiftReleaseStrictlyDescend,
                  StaticPlatformSwiftReleases...
                >::value,
                "RuntimeVersions.def is not properly ordered?");

  static constexpr PlatformSwiftRelease releases[] = {
    BuildPlatformSwiftRelease<StaticPlatformSwiftReleases>::get()...
  };
};

/// Build a static constexpr array of PlatformRelease objects matching
/// the given platform.
template <PlatformKind Platform>
struct BuildPlatformSwiftReleaseArray
  : BuildPlatformSwiftReleaseArrayHelper<
      // Turn each entry in AllStaticSwiftReleases into an optional
      // StaticPlatformSwiftRelease representing whether there is a
      // platform release providing that Swift release for the given
      // platform. Flatten that pack, then reverse it so that it's in
      // order of descending release versions. Finally, build an array
      // of PlatformRelease objects for the remaining values.
      typename packs::PackReverse<
        typename packs::PackFlatMap<
          BuildStaticPlatformSwiftRelease,
          BuildStaticPlatformSwiftRelease_Arg<Platform>,
          AllStaticSwiftReleases
        >::result
      >::result
    > {};

} // end anonymous namespace
/// Scan \p allReleases (ordered by descending release version) for the
/// newest Swift release whose platform version is at or below
/// \p targetPlatformVersion.
///
/// Returns std::nullopt when the target is at least the notional
/// future-release version (99.99), meaning we aren't deployment-limited;
/// otherwise the result is never older than \p minimumSwiftVersion.
static std::optional<llvm::VersionTuple>
findSwiftRuntimeVersionHelper(llvm::VersionTuple targetPlatformVersion,
                              llvm::VersionTuple minimumSwiftVersion,
                              ArrayRef<PlatformSwiftRelease> allReleases) {
  // (Removed an unused `#define MAX(a, b)` macro that used to live here:
  // it was never referenced -- std::max is used below -- and a macro with
  // such a common name leaks into the rest of the translation unit.)

  // Scan forward in our filtered platform release array for the given
  // platform.
  for (auto &release : allReleases) {
    // If the provider version is <= the deployment target, then
    // the deployment target includes support for the given Swift
    // release. Since we're scanning in reverse order of Swift
    // releases (because of the order of entries in RuntimeVersions.def),
    // this must be the highest supported Swift release.
    if (release.platformVersion <= targetPlatformVersion) {
      return std::max(release.swiftVersion, minimumSwiftVersion);
    }
  }

  // If we didn't find anything, but the target release is at least the
  // notional future-release version, return that we aren't
  // deployment-limited.
  if (targetPlatformVersion >= llvm::VersionTuple(99, 99))
    return std::nullopt;

  // Otherwise, return the minimum Swift version.
  return minimumSwiftVersion;
}
/// Return the highest Swift release that matches the given platform and
/// has a version no greater than the target version. Don't return a version
/// older that the minimum. Returns null if the target version matches the
/// notional future release version.
template <PlatformKind TargetPlatform>
static std::optional<llvm::VersionTuple>
findSwiftRuntimeVersion(llvm::VersionTuple targetPlatformVersion,
                        llvm::VersionTuple minimumSwiftVersion) {
  // The per-platform release table is a compile-time constexpr array;
  // selecting it here costs nothing at runtime.
  auto &allReleases =
      BuildPlatformSwiftReleaseArray<TargetPlatform>::releases;
  return findSwiftRuntimeVersionHelper(targetPlatformVersion,
                                       minimumSwiftVersion,
                                       allReleases);
}
/// Map a Darwin target triple to the Swift runtime version that the
/// targeted OS release is known to ship, i.e. the newest runtime that may
/// be assumed present when back-deploying. Returns std::nullopt for
/// non-Darwin triples and for "future" OS versions that are not
/// deployment-limited.
std::optional<llvm::VersionTuple>
swift::getSwiftRuntimeCompatibilityVersionForTarget(
    const llvm::Triple &Triple) {
  if (Triple.isMacOSX()) {
    llvm::VersionTuple OSVersion;
    Triple.getMacOSXVersion(OSVersion);
    // macOS releases predate the stable ABI, so use Swift 5.0 as our base.
    auto baseRelease = llvm::VersionTuple(5, 0);

    // macOS got its first arm64(e) support in 11.0, which included Swift 5.3.
    if (Triple.isAArch64())
      baseRelease = llvm::VersionTuple(5, 3);

    return findSwiftRuntimeVersion<PlatformKind::macOS>(OSVersion, baseRelease);
  } else if (Triple.isiOS()) { // includes tvOS
    llvm::VersionTuple OSVersion = Triple.getiOSVersion();
    // iOS releases predate the stable ABI, so use Swift 5.0 as our base.
    auto baseRelease = llvm::VersionTuple(5, 0);

    // arm64 simulators and macCatalyst were introduced in iOS 14.0/tvOS 14.0,
    // which included Swift 5.3.
    if (Triple.isAArch64() &&
        (Triple.isSimulatorEnvironment() ||
         Triple.isMacCatalystEnvironment()))
      baseRelease = llvm::VersionTuple(5, 3);

    // iOS first got arm64e support in 12.0, which did not yet support the
    // Swift stable ABI, so it does not provide a useful version bump.

    return findSwiftRuntimeVersion<PlatformKind::iOS>(OSVersion, baseRelease);
  } else if (Triple.isWatchOS()) {
    llvm::VersionTuple OSVersion = Triple.getWatchOSVersion();
    // watchOS releases predate the stable ABI, so use Swift 5.0 as our base.
    auto baseRelease = llvm::VersionTuple(5, 0);

    // 64-bit watchOS was first supported by watchOS 7, which provided
    // Swift 5.3.
    if (Triple.isArch64Bit())
      baseRelease = llvm::VersionTuple(5, 3);

    return findSwiftRuntimeVersion<PlatformKind::watchOS>(OSVersion, baseRelease);
  } else if (Triple.isXROS()) {
    llvm::VersionTuple OSVersion = Triple.getOSVersion();
    // visionOS 1.0 provided Swift 5.9.
    auto baseRelease = llvm::VersionTuple(5, 9);
    return findSwiftRuntimeVersion<PlatformKind::visionOS>(OSVersion, baseRelease);
  }
  return std::nullopt;
}
/// The lowest Mac Catalyst (iOS-on-macOS) deployment target supported.
/// Fix: dropped the former top-level `const` on the by-value return type;
/// for class types it suppresses move semantics and adds no safety
/// (flagged by -Wignored-qualifiers-style diagnostics). Callers are
/// unaffected.
static llvm::VersionTuple minimumMacCatalystDeploymentTarget() {
  return llvm::VersionTuple(13, 1);
}
/// Compute the SDK version to report for the given target.
/// For Mac Catalyst targets the macOS SDK version is translated to the
/// corresponding iOS version; when no mapping exists (or it fails),
/// 0.0.0 is returned.
llvm::VersionTuple swift::getTargetSDKVersion(clang::DarwinSDKInfo &SDKInfo,
                                              const llvm::Triple &triple) {
  // Retrieve the SDK version.
  auto SDKVersion = SDKInfo.getVersion();

  // For the Mac Catalyst environment, we have a macOS SDK with a macOS
  // SDK version. Map that to the corresponding iOS version number to pass
  // down to the linker.
  if (tripleIsMacCatalystEnvironment(triple)) {
    if (const auto *MacOStoMacCatalystMapping = SDKInfo.getVersionMapping(
            clang::DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
      return MacOStoMacCatalystMapping
          ->map(SDKVersion, minimumMacCatalystDeploymentTarget(), std::nullopt)
          .value_or(llvm::VersionTuple(0, 0, 0));
    }
    return llvm::VersionTuple(0, 0, 0);
  }
  return SDKVersion;
}
/// Canonicalize the OS version embedded in a Darwin triple's OS name.
/// Returns the triple unchanged for non-Darwin targets or when the
/// version is already canonical, and std::nullopt when the OS-name
/// string cannot be rewritten safely.
std::optional<llvm::Triple>
swift::getCanonicalTriple(const llvm::Triple &triple) {
  llvm::Triple Result = triple;
  // Non-darwin targets do not require canonicalization.
  if (!triple.isOSDarwin())
    return Result;

  // If the OS versions stay the same, return back the same triple.
  const llvm::VersionTuple inputOSVersion = triple.getOSVersion();
  const bool isOSVersionInValidRange =
      llvm::Triple::isValidVersionForOS(triple.getOS(), inputOSVersion);
  const llvm::VersionTuple canonicalVersion =
      llvm::Triple::getCanonicalVersionForOS(
          triple.getOS(), triple.getOSVersion(), isOSVersionInValidRange);
  if (canonicalVersion == triple.getOSVersion())
    return Result;

  // Rewrite the version suffix of the OS name in place.
  const std::string inputOSName = triple.getOSName().str();
  const std::string inputOSVersionAsStr = inputOSVersion.getAsString();
  // NOTE(review): this subtracts unsigned size_t values before the int
  // conversion; the `<= 0` guard below assumes no wraparound for
  // realistic name/version lengths -- confirm.
  const int platformNameLength =
      inputOSName.size() - inputOSVersionAsStr.size();
  if (!StringRef(inputOSName).ends_with(inputOSVersionAsStr) ||
      (platformNameLength <= 0))
    return std::nullopt;
  llvm::SmallString<64> buffer(inputOSName.substr(0, platformNameLength));
  buffer.append(canonicalVersion.getAsString());
  Result.setOSName(buffer.str());
  return Result;
}
/// Extract the value of \p KeyName from the XML plist at \p Path.
///
/// This is a lightweight line-oriented scan, not a real plist parser: it
/// looks for a line containing "<key>KeyName</key>" and reads a
/// "<string>...</string>" value off the *following* line. Returns an empty
/// string when the file cannot be read or the key/value is not found.
static std::string getPlistEntry(const llvm::Twine &Path, StringRef KeyName) {
  auto BufOrErr = llvm::MemoryBuffer::getFile(Path);
  if (!BufOrErr) {
    // FIXME: diagnose properly
    return {};
  }

  std::string Key = "<key>";
  Key += KeyName;
  Key += "</key>";

  StringRef Lines = BufOrErr.get()->getBuffer();
  while (!Lines.empty()) {
    StringRef CurLine;
    std::tie(CurLine, Lines) = Lines.split('\n');
    if (CurLine.find(Key) != StringRef::npos) {
      std::tie(CurLine, Lines) = Lines.split('\n');
      // Guard against a malformed value line: the previous code did
      // unchecked arithmetic on StringRef::npos when "<string>" or
      // "</string>" was missing, producing garbage substrings.
      size_t Begin = CurLine.find("<string>");
      size_t End = CurLine.find("</string>");
      if (Begin == StringRef::npos || End == StringRef::npos)
        return {};
      Begin += strlen("<string>");
      if (End < Begin)
        return {};
      return CurLine.substr(Begin, End - Begin).str();
    }
  }

  return {};
}
/// Read the ProductBuildVersion entry from a SystemVersion-style plist at
/// \p Path; empty string on failure (see getPlistEntry).
std::string swift::getSDKBuildVersionFromPlist(StringRef Path) {
  return getPlistEntry(Path, "ProductBuildVersion");
}
/// Return the build version of the SDK rooted at \p Path by consulting its
/// CoreServices SystemVersion.plist; empty string on failure.
std::string swift::getSDKBuildVersion(StringRef Path) {
  return getSDKBuildVersionFromPlist((llvm::Twine(Path) +
    "/System/Library/CoreServices/SystemVersion.plist").str());
}
/// Return the canonical name of the SDK rooted at \p Path.
/// Prefers the CanonicalName key in SDKSettings.plist; falls back to the
/// directory name with a trailing ".sdk" stripped.
std::string swift::getSDKName(StringRef Path) {
  std::string Name = getPlistEntry(llvm::Twine(Path)+"/SDKSettings.plist",
                                   "CanonicalName");
  if (Name.empty() && Path.ends_with(".sdk")) {
    Name = llvm::sys::path::filename(Path).drop_back(strlen(".sdk")).str();
  }
  return Name;
}
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import sys
from collections import defaultdict
from contextlib import contextmanager
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot, get_scm
from pants.base.worker_pool import SubprocPool
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.target import Target
from pants.goal.products import Products
from pants.goal.workspace import ScmWorkspace
from pants.process.lock import OwnerPrintingInterProcessFileLock
from pants.reporting.report import Report
from pants.source.source_root import SourceRootConfig
class Context(object):
    """Contains the context for a single run of pants.

    Task implementations can access configuration data from pants.ini and any flags they have exposed
    here as well as information about the targets involved in the run.

    Advanced uses of the context include adding new targets to it for upstream or downstream goals to
    operate on and mapping of products a goal creates to the targets the products are associated with.

    :API: public
    """

    class Log(object):
        """A logger facade that logs into the pants reporting framework."""

        def __init__(self, run_tracker):
            self._run_tracker = run_tracker

        def debug(self, *msg_elements):
            self._run_tracker.log(Report.DEBUG, *msg_elements)

        def info(self, *msg_elements):
            self._run_tracker.log(Report.INFO, *msg_elements)

        def warn(self, *msg_elements):
            self._run_tracker.log(Report.WARN, *msg_elements)

        def error(self, *msg_elements):
            self._run_tracker.log(Report.ERROR, *msg_elements)

        def fatal(self, *msg_elements):
            self._run_tracker.log(Report.FATAL, *msg_elements)

    # TODO: Figure out a more structured way to construct and use context than this big flat
    # repository of attributes?
    def __init__(self, options, run_tracker, target_roots,
                 requested_goals=None, target_base=None, build_graph=None,
                 build_file_parser=None, address_mapper=None, console_outstream=None, scm=None,
                 workspace=None, invalidation_report=None):
        self._options = options
        self.build_graph = build_graph
        self.build_file_parser = build_file_parser
        self.address_mapper = address_mapper
        self.run_tracker = run_tracker
        self._log = self.Log(run_tracker)
        self._target_base = target_base or Target
        self._products = Products()
        self._buildroot = get_buildroot()
        self._source_roots = SourceRootConfig.global_instance().get_source_roots()
        # Inter-process lock on the workdir; see acquire_lock()/release_lock() below.
        self._lock = OwnerPrintingInterProcessFileLock(os.path.join(self._buildroot, '.pants.workdir.file_lock'))
        self._java_sysprops = None  # Computed lazily.
        self.requested_goals = requested_goals or []
        self._console_outstream = console_outstream or sys.stdout
        self._scm = scm or get_scm()
        self._workspace = workspace or (ScmWorkspace(self._scm) if self._scm else None)
        self._replace_targets(target_roots)
        self._invalidation_report = invalidation_report

    @property
    def options(self):
        """Returns the new-style options.

        :API: public
        """
        return self._options

    @property
    def log(self):
        """Returns the preferred logger for goals to use.

        :API: public
        """
        return self._log

    @property
    def products(self):
        """Returns the Products manager for the current run.

        :API: public
        """
        return self._products

    @property
    def source_roots(self):
        """Returns the :class:`pants.source.source_root.SourceRoots` instance for the current run.

        :API: public
        """
        return self._source_roots

    @property
    def target_roots(self):
        """Returns the targets specified on the command line.

        This set is strictly a subset of all targets in play for the run as returned by self.targets().
        Note that for a command line invocation that uses wildcard selectors : or ::, the targets
        globbed by the wildcards are considered to be target roots.

        :API: public
        """
        return self._target_roots

    @property
    def console_outstream(self):
        """Returns the output stream to write console messages to.

        :API: public
        """
        return self._console_outstream

    @property
    def scm(self):
        """Returns the current workspace's scm, if any.

        :API: public
        """
        return self._scm

    @property
    def workspace(self):
        """Returns the current workspace, if any."""
        return self._workspace

    @property
    def invalidation_report(self):
        # Optional InvalidationReport supplied at construction; may be None.
        return self._invalidation_report

    def __str__(self):
        ident = Target.identify(self.targets())
        return 'Context(id:{}, targets:{})'.format(ident, self.targets())

    def submit_background_work_chain(self, work_chain, parent_workunit_name=None):
        """Submit a chain of work for asynchronous execution under the background root workunit.

        :API: public
        """
        background_root_workunit = self.run_tracker.get_background_root_workunit()
        if parent_workunit_name:
            # We have to keep this workunit alive until all its child work is done, so
            # we manipulate the context manually instead of using it as a contextmanager.
            # This is slightly funky, but the with-context usage is so pervasive and
            # useful elsewhere that it's worth the funkiness in this one place.
            workunit_parent_ctx = self.run_tracker.new_workunit_under_parent(
                name=parent_workunit_name, labels=[WorkUnitLabel.MULTITOOL], parent=background_root_workunit)
            workunit_parent = workunit_parent_ctx.__enter__()
            done_hook = lambda: workunit_parent_ctx.__exit__(None, None, None)
        else:
            workunit_parent = background_root_workunit  # Run directly under the root.
            done_hook = None
        self.run_tracker.background_worker_pool().submit_async_work_chain(
            work_chain, workunit_parent=workunit_parent, done_hook=done_hook)

    def background_worker_pool(self):
        """Returns the pool to which tasks can submit background work.

        :API: public
        """
        return self.run_tracker.background_worker_pool()

    def subproc_map(self, f, items):
        """Map function `f` over `items` in subprocesses and return the result.

        :API: public

        :param f: A multiproc-friendly (importable) work function.
        :param items: A iterable of pickleable arguments to f.
        """
        try:
            # Pool.map (and async_map().get() w/o timeout) can miss SIGINT.
            # See: http://stackoverflow.com/a/1408476, http://bugs.python.org/issue8844
            # Instead, we map_async(...), wait *with a timeout* until ready, then .get()
            # NB: in 2.x, wait() with timeout wakes up often to check, burning CPU. Oh well.
            res = SubprocPool.foreground().map_async(f, items)
            while not res.ready():
                res.wait(60)  # Repeatedly wait for up to a minute.
                if not res.ready():
                    self.log.debug('subproc_map result still not ready...')
            return res.get()
        except KeyboardInterrupt:
            SubprocPool.shutdown(True)
            raise

    @contextmanager
    def new_workunit(self, name, labels=None, cmd='', log_config=None):
        """Create a new workunit under the calling thread's current workunit.

        :API: public
        """
        with self.run_tracker.new_workunit(name=name, labels=labels, cmd=cmd, log_config=log_config) as workunit:
            yield workunit

    def acquire_lock(self):
        """ Acquire the global lock for the root directory associated with this context. When
        a goal requires serialization, it will call this to acquire the lock.

        :API: public
        """
        if self.options.for_global_scope().lock:
            if not self._lock.acquired:
                self._lock.acquire()

    def release_lock(self):
        """Release the global lock if it's held.

        Returns True if the lock was held before this call.

        :API: public
        """
        if not self._lock.acquired:
            return False
        else:
            self._lock.release()
            return True

    def is_unlocked(self):
        """Whether the global lock object is actively holding the lock.

        :API: public
        """
        return not self._lock.acquired

    def _replace_targets(self, target_roots):
        # Replaces all targets in the context with the given roots and their transitive dependencies.
        #
        # If another task has already retrieved the current targets, mutable state may have been
        # initialized somewhere, making it now unsafe to replace targets. Thus callers of this method
        # must know what they're doing!
        #
        # TODO(John Sirois): This currently has only 1 use (outside ContextTest) in pantsbuild/pants and
        # only 1 remaining known use case in the Foursquare codebase that will be able to go away with
        # the post RoundEngine engine - kill the method at that time.
        self._target_roots = list(target_roots)

    def add_new_target(self, address, target_type, target_base=None, dependencies=None,
                       derived_from=None, **kwargs):
        """Creates a new target, adds it to the context and returns it.

        This method ensures the target resolves files against the given target_base, creating the
        directory if needed and registering a source root.

        :API: public
        """
        rel_target_base = target_base or address.spec_path
        abs_target_base = os.path.join(get_buildroot(), rel_target_base)
        if not os.path.exists(abs_target_base):
            os.makedirs(abs_target_base)
        # TODO: Adding source roots on the fly like this is yucky, but hopefully this
        # method will go away entirely under the new engine. It's primarily used for injecting
        # synthetic codegen targets, and that isn't how codegen will work in the future.
        if not self.source_roots.find_by_path(rel_target_base):
            # TODO: Set the lang and root category (source/test/thirdparty) based on the target type?
            self.source_roots.add_source_root(rel_target_base)
        if dependencies:
            dependencies = [dep.address for dep in dependencies]
        self.build_graph.inject_synthetic_target(address=address,
                                                 target_type=target_type,
                                                 dependencies=dependencies,
                                                 derived_from=derived_from,
                                                 **kwargs)
        new_target = self.build_graph.get_target(address)
        return new_target

    def targets(self, predicate=None, **kwargs):
        """Selects targets in-play in this run from the target roots and their transitive dependencies.

        Also includes any new synthetic targets created from the target roots or their transitive
        dependencies during the course of the run.

        See Target.closure_for_targets for remaining parameters.

        :API: public

        :param predicate: If specified, the predicate will be used to narrow the scope of targets
                          returned.
        :param bool postorder: `True` to gather transitive dependencies with a postorder traversal;
                               `False` or preorder by default.
        :returns: A list of matching targets.
        """
        target_set = self._collect_targets(self.target_roots, **kwargs)
        synthetics = OrderedSet()
        for synthetic_address in self.build_graph.synthetic_addresses:
            if self.build_graph.get_concrete_derived_from(synthetic_address) in target_set:
                synthetics.add(self.build_graph.get_target(synthetic_address))
        target_set.update(self._collect_targets(synthetics, **kwargs))
        # NOTE: under Python 2 (this codebase's target, per the __future__ imports)
        # `filter` returns a list, matching the docstring above.
        return filter(predicate, target_set)

    def _collect_targets(self, root_targets, **kwargs):
        # Thin wrapper so targets() and synthetic expansion share one closure call.
        return Target.closure_for_targets(
            target_roots=root_targets,
            **kwargs
        )

    def dependents(self, on_predicate=None, from_predicate=None):
        """Returns a map from targets that satisfy the from_predicate to targets they depend on that
        satisfy the on_predicate.

        :API: public
        """
        core = set(self.targets(on_predicate))
        dependees = defaultdict(set)
        for target in self.targets(from_predicate):
            for dependency in target.dependencies:
                if dependency in core:
                    dependees[target].add(dependency)
        return dependees

    def resolve(self, spec):
        """Returns an iterator over the target(s) the given address points to.

        :API: public
        """
        return self.build_graph.resolve(spec)

    def scan(self, root=None):
        """Scans and parses all BUILD files found under ``root``.

        Only BUILD files found under ``root`` are parsed as roots in the graph, but any dependencies of
        targets parsed in the root tree's BUILD files will be followed and this may lead to BUILD files
        outside of ``root`` being parsed and included in the returned build graph.

        :API: public

        :param string root: The path to scan; by default, the build root.
        :returns: A new build graph encapsulating the targets found.
        """
        build_graph = self.build_graph.clone_new()
        for address in self.address_mapper.scan_addresses(root):
            build_graph.inject_address_closure(address)
        return build_graph
import asyncore as _asyncore
import random as _random
import threading as _threading
import time as _time
from .confsys import Configurable as _Configurable
from .logger import mkInfoFunction as _mkInfoFunction
from .logger import mkWarnFunction as _mkWarnFunction
# Module-wide log helpers tagged with the 'Proxy' component name.
_proxyInfo = _mkInfoFunction('Proxy')
_proxyWarn = _mkWarnFunction('Proxy')
# Registry of every Proxy subclass, keyed by class name; populated by
# _ProxyMetaclass below.
_definedProxyClasses = {}
class _ProxyMetaclass(type):
    """Metaclass that records every proxy class in _definedProxyClasses.

    Refuses duplicate class names so the registry stays unambiguous.
    """
    def __new__(*args, **kwargs):
        builtClass = type.__new__(*args, **kwargs)
        if builtClass.__name__ in _definedProxyClasses:
            # Fix: the message previously said "message classes", an apparent
            # copy-paste from a message metaclass -- these are proxy classes.
            raise SystemError('Cannot define two proxy classes with the same name.')
        _definedProxyClasses[builtClass.__name__] = builtClass
        return builtClass
class ProxyThread(_threading.Thread):
    """Daemon thread that services a single proxied connection.

    Holds the owning Proxy, the matched rule, the requested destination
    (domain/port), and the already-accepted incoming socket. Subclasses
    implement run().
    """
    def __init__(self, parentProxy, rule, domain, port, incomingSocket):
        self._parentProxy = parentProxy
        self._rule = rule
        self._domain = domain
        self._port = port
        self._incomingSocket = incomingSocket
        self._alive = True
        # Forced destination addresses from the rule, or None to use the
        # requested domain as-is.
        self._destinations = self._rule.getForcedAddresses()
        _threading.Thread.__init__(self, name='Thread for ' + domain + ':' + str(port))
        self.daemon = True

    def getParentProxy(self):
        return self._parentProxy

    def getRule(self):
        return self._rule

    def getDomain(self):
        """Return the destination host: a forced address when the rule has
        any (picked at random when there are several), else the requested
        domain."""
        if self._destinations is not None:
            if len(self._destinations) == 1:
                return self._destinations[0]
            destination = _random.choice(self._destinations)
            _proxyInfo(self, 'to', self._domain, 'picked final address', destination, 'out of the', len(self._destinations), 'choices')
            return destination
        return self._domain

    def getPort(self):
        return self._port

    def getDestination(self):
        # (host, port) pair; note getDomain() may pick randomly per call.
        return self.getDomain(), self.getPort()

    def getIncomingSocket(self):
        return self._incomingSocket

    def isAlive(self):
        # NOTE(review): shadows threading.Thread.isAlive (Python 2) with
        # this class's own liveness flag -- looks intentional, but confirm.
        return self._alive

    def close(self):  # Overriddable
        self._alive = False
        self._parentProxy.notifyProxyClosed(self)

    def run(self):  # Overriddable
        pass
class ForwarderProxyThread(ProxyThread):
    """ProxyThread that shovels bytes both ways between the incoming
    (client) socket and an outgoing socket created by the subclass.

    Each instance runs its own asyncore event loop over a private socket
    map, so forwarders do not interfere with each other.
    """
    def __init__(self, *args, **kwargs):
        ProxyThread.__init__(self, *args, **kwargs)
        self._incomingBuffer = b''  # Client bytes waiting to be sent outward.
        self._outgoingBuffer = b''  # Server bytes waiting to go back to the client.
        # Private socket map so this pair gets its own asyncore loop.
        self._asyncSockets = {}
        self._asyncIncoming = _asyncore.dispatcher(self.getIncomingSocket(), self._asyncSockets)
        self._asyncIncoming.handle_read = self._incomingRead
        self._asyncIncoming.handle_write = self._incomingWrite
        self._asyncIncoming.writable = self._incomingWritable
        self._asyncIncoming.handle_close = self._handleClose
        self._asyncOutgoing = _asyncore.dispatcher(self._mkOutgoingSocket(), self._asyncSockets)
        self._asyncOutgoing.handle_read = self._outgoingRead
        self._asyncOutgoing.handle_write = self._outgoingWrite
        self._asyncOutgoing.writable = self._outgoingWritable
        self._asyncOutgoing.handle_close = self._handleClose
        self._readSize = self._getReadSize()
        self._buffered = self._isBuffered()

    def _incomingRead(self):
        read = self._asyncIncoming.recv(self._readSize)
        if read:
            self._incomingBuffer += read
            if not self._buffered:
                # Unbuffered mode: push everything through immediately.
                while self._incomingBuffer:
                    self._outgoingWrite()

    def _incomingWrite(self):
        sent = self._asyncIncoming.send(self._outgoingBuffer)
        if sent:
            self._outgoingBuffer = self._outgoingBuffer[sent:]

    def _incomingWritable(self):
        # Writable only while server data is pending for the client.
        return self._outgoingBuffer

    def _outgoingRead(self):
        read = self._asyncOutgoing.recv(self._readSize)
        if read:
            self._outgoingBuffer += read
            if not self._buffered:
                while self._outgoingBuffer:
                    self._incomingWrite()

    def _outgoingWrite(self):
        sent = self._asyncOutgoing.send(self._incomingBuffer)
        if sent:
            self._incomingBuffer = self._incomingBuffer[sent:]

    def _outgoingWritable(self):
        return self._incomingBuffer

    def _handleClose(self):
        # Either side closing tears down the whole pair.
        try:
            self._asyncIncoming.close()
        except:
            pass
        try:
            self._asyncOutgoing.close()
        except:
            pass
        self.close()

    def run(self):
        _asyncore.loop(map=self._asyncSockets)

    def _getReadSize(self):  # Overriddable
        # Maximum bytes per recv(). Fix: was 655365, an apparent typo for
        # 64 KiB (65536).
        return 65536

    def _isBuffered(self):  # Overriddable
        return True

    def _mkOutgoingSocket(self):  # Overriddable
        raise NotImplementedError()
class Proxy(_Configurable):
    """Base class for all proxy implementations.

    Subclasses declare protocol support via supportsTCP()/supportsUDP()
    and supply their worker thread classes via _getTCPThreadClass() /
    _getUDPThreadClass().
    """
    __metaclass__ = _ProxyMetaclass

    def __init__(self, name, providedConfig):
        _Configurable.__init__(self, self.__class__.__name__ + u'<' + name + '>', providedConfig, self.__class__._proxyConfig, self.__class__._proxyConfigRequired)

    def supportsTCP(self):  # Overriddable
        return True

    def supportsUDP(self):  # Overriddable
        return False

    def spawnTCP(self, rule, domain, tcpPort, incomingSocket):
        """Spawn a TCP proxy thread; raises SystemError when TCP is unsupported."""
        if not self.supportsTCP():
            raise SystemError(u'Cannot create a TCP connection; ' + str(self) + u' does not support TCP.')
        return self._doSpawnTCP(rule, domain, tcpPort, incomingSocket)

    def spawnUDP(self, rule, domain, udpPort, incomingSocket):
        """Spawn a UDP proxy thread; raises SystemError when UDP is unsupported."""
        if not self.supportsUDP():
            raise SystemError(u'Cannot create a UDP connection; ' + str(self) + u' does not support UDP.')
        # Bug fix: this previously forwarded the undefined name `tcpPort`,
        # so every UDP spawn died with a NameError.
        return self._doSpawnUDP(rule, domain, udpPort, incomingSocket)

    def _doSpawnTCP(self, rule, domain, tcpPort, incomingSocket):  # Overriddable
        self._getTCPThreadClass()(self, rule, domain, tcpPort, incomingSocket).start()
        return True

    def _doSpawnUDP(self, rule, domain, udpPort, incomingSocket):  # Overriddable
        self._getUDPThreadClass()(self, rule, domain, udpPort, incomingSocket).start()
        return True

    def _getTCPThreadClass(self):  # Overriddable
        raise NotImplementedError()

    def _getUDPThreadClass(self):  # Overriddable
        raise NotImplementedError()

    def onRegister(self):  # Overriddable
        pass

    def notifyProxyClosed(self, proxyThread):  # Overriddable
        pass
class MultiplexingProxy(Proxy):
    """Proxy that funnels every connection through one shared socket.

    Subclasses provide the socket (``_mkSocket``) and decide whether it is
    kept open while idle (``_getKeepalivePolicy``). All socket/refcount
    bookkeeping is guarded by a reentrant lock.
    """
    class Error(Exception):
        # Raised by subclasses' _mkSocket() on a failed connection attempt;
        # _mkSocketLoop() catches it and retries.
        pass

    def __init__(self, *args, **kwargs):
        Proxy.__init__(self, *args, **kwargs)
        self._lock = _threading.RLock()
        self._socket = None       # Shared socket, or None while disconnected.
        self._activeCount = 0     # Number of live proxy threads using it.

    def _getKeepalivePolicy(self):  # Overriddable
        raise NotImplementedError()

    def _mkSocket(self):  # Overriddable
        raise NotImplementedError()

    def _disconnectSocket(self):  # Overriddable
        self._socket.close()

    def _autoReconnectSleep(self):  # Overriddable
        # Seconds to sleep between reconnection attempts.
        return 5

    def _mkSocketLoop(self):
        # Retry _mkSocket() until it succeeds, sleeping between attempts.
        socket = None
        while socket is None:
            try:
                socket = self._mkSocket()
            except MultiplexingProxy.Error as e:
                _proxyWarn(e)
            if socket is None:
                _time.sleep(self._autoReconnectSleep())
        return socket

    def acquireSocket(self, countAsActive=True):
        """Return the shared socket, (re)connecting if needed.

        With countAsActive (the default) the caller is counted as an active
        user and must eventually be released via notifyProxyClosed().
        """
        with self._lock:
            if self._socket is None:
                self._activeCount = 0
                self._socket = self._mkSocketLoop()
            if countAsActive:
                self._activeCount += 1
            return self._socket

    def socketBroken(self):
        # Called when the shared socket has failed: drop it and, under a
        # keepalive policy, reconnect immediately.
        with self._lock:
            try:
                self._disconnectSocket()
            except:
                pass
            self._socket = None
            self._activeCount = 0
            if self._getKeepalivePolicy():
                _time.sleep(self._autoReconnectSleep())
                self._socket = self._mkSocketLoop()

    def notifyProxyClosed(self, proxyThread):
        # One user gone; tear down the socket once idle unless keepalive.
        Proxy.notifyProxyClosed(self, proxyThread)
        with self._lock:
            self._activeCount -= 1
            if self._activeCount < 1 and not self._getKeepalivePolicy():
                self._disconnectSocket()
                self._socket = None
                self._activeCount = 0
## Input
```javascript
function Component({a, b, c}) {
const x = [a];
const y = [null, b];
const z = [[], [], [c]];
x[0] = y[1];
z[0][0] = x[0];
return [x, z];
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{a: 1, b: 20, c: 300}],
sequentialRenders: [
{a: 2, b: 20, c: 300},
{a: 3, b: 20, c: 300},
{a: 3, b: 21, c: 300},
{a: 3, b: 22, c: 300},
{a: 3, b: 22, c: 301},
],
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
function Component(t0) {
const $ = _c(6);
const { a, b, c } = t0;
let t1;
if ($[0] !== a || $[1] !== b || $[2] !== c) {
const x = [a];
let t2;
if ($[4] !== b) {
t2 = [null, b];
$[4] = b;
$[5] = t2;
} else {
t2 = $[5];
}
const y = t2;
const z = [[], [], [c]];
x[0] = y[1];
z[0][0] = x[0];
t1 = [x, z];
$[0] = a;
$[1] = b;
$[2] = c;
$[3] = t1;
} else {
t1 = $[3];
}
return t1;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{ a: 1, b: 20, c: 300 }],
sequentialRenders: [
{ a: 2, b: 20, c: 300 },
{ a: 3, b: 20, c: 300 },
{ a: 3, b: 21, c: 300 },
{ a: 3, b: 22, c: 300 },
{ a: 3, b: 22, c: 301 },
],
};
```
### Eval output
(kind: ok) [[20],[[20],[],[300]]]
[[20],[[20],[],[300]]]
[[21],[[21],[],[300]]]
[[22],[[22],[],[300]]]
[[22],[[22],[],[301]]] | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/array-access-assignment.expect.md |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant des fonctions utiles à la manipulation des noeuds"""
from primaires.interpreteur.masque.noeuds.noeud_masque import NoeudMasque
from primaires.interpreteur.masque.noeuds.noeud_optionnel import NoeudOptionnel
def creer_noeud(commande, schema):
    """Recursively build the node tree described by ``schema``.

    ``schema`` is a mutable list of characters, consumed in place:
    - ``(`` opens an optional group (a NoeudOptionnel wrapping the inner
      node and the node that follows the group);
    - ``)`` closes the current group and yields None;
    - anything else starts a NoeudMasque, which consumes its own part of
      the schema and is chained to the node built from the remainder.

    Returns the created node, or None when the schema is exhausted (or a
    closing parenthesis is reached).
    """
    # Consume any leading spaces.
    while schema and schema[0] == " ":
        del schema[0]
    if not schema:
        return None
    premier = schema[0]
    if premier == '(':
        del schema[0]
        interne = creer_noeud(commande, schema)
        suite = creer_noeud(commande, schema)
        return NoeudOptionnel(interne, suite)
    if premier == ')':
        del schema[0]
        return None
    noeud = NoeudMasque(commande, schema)
    noeud.construire_depuis_schema(schema)
    noeud.suivant = creer_noeud(commande, schema)
    return noeud
#!/usr/bin/env python
#
# extras_dictcursor - test if DictCursor extension class works
#
# Copyright (C) 2004-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import time
from datetime import timedelta
import psycopg2
import psycopg2.extras
from .testutils import unittest, ConnectingTestCase, skip_before_postgres
from .testutils import skip_if_no_namedtuple
class ExtrasDictCursorTests(ConnectingTestCase):
"""Test if DictCursor extension class works."""
    def setUp(self):
        # Fresh connection plus a temp table holding one row ('bar') that
        # the fetch helpers below query.
        ConnectingTestCase.setUp(self)
        curs = self.conn.cursor()
        curs.execute("CREATE TEMPORARY TABLE ExtrasDictCursorTests (foo text)")
        curs.execute("INSERT INTO ExtrasDictCursorTests VALUES ('bar')")
        self.conn.commit()
    def testDictConnCursorArgs(self):
        # A DictConnection hands out DictCursor objects by default, and the
        # per-call cursor_factory argument can still override that.
        self.conn.close()
        self.conn = self.connect(connection_factory=psycopg2.extras.DictConnection)
        cur = self.conn.cursor()
        self.assertTrue(isinstance(cur, psycopg2.extras.DictCursor))
        self.assertEqual(cur.name, None)
        # overridable
        cur = self.conn.cursor('foo',
                               cursor_factory=psycopg2.extras.NamedTupleCursor)
        self.assertEqual(cur.name, 'foo')
        self.assertTrue(isinstance(cur, psycopg2.extras.NamedTupleCursor))
    def testDictCursorWithPlainCursorFetchOne(self):
        # DictCursor rows support both key and index access via fetchone().
        self._testWithPlainCursor(lambda curs: curs.fetchone())
    def testDictCursorWithPlainCursorFetchMany(self):
        # Same check through fetchmany() with an explicit batch size.
        self._testWithPlainCursor(lambda curs: curs.fetchmany(100)[0])
    def testDictCursorWithPlainCursorFetchManyNoarg(self):
        # Same check through fetchmany() using the default batch size.
        self._testWithPlainCursor(lambda curs: curs.fetchmany()[0])
    def testDictCursorWithPlainCursorFetchAll(self):
        # Same check through fetchall().
        self._testWithPlainCursor(lambda curs: curs.fetchall()[0])
    def testDictCursorWithPlainCursorIter(self):
        # Same check by iterating the cursor directly.
        def getter(curs):
            for row in curs:
                return row
        self._testWithPlainCursor(getter)
def testUpdateRow(self):
row = self._testWithPlainCursor(lambda curs: curs.fetchone())
row['foo'] = 'qux'
self.assertTrue(row['foo'] == 'qux')
self.assertTrue(row[0] == 'qux')
@skip_before_postgres(8, 0)
def testDictCursorWithPlainCursorIterRowNumber(self):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
self._testIterRowNumber(curs)
def _testWithPlainCursor(self, getter):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.assertTrue(row['foo'] == 'bar')
self.assertTrue(row[0] == 'bar')
return row
def testDictCursorWithPlainCursorRealFetchOne(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchone())
def testDictCursorWithPlainCursorRealFetchMany(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchmany(100)[0])
def testDictCursorWithPlainCursorRealFetchManyNoarg(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchmany()[0])
def testDictCursorWithPlainCursorRealFetchAll(self):
self._testWithPlainCursorReal(lambda curs: curs.fetchall()[0])
def testDictCursorWithPlainCursorRealIter(self):
def getter(curs):
for row in curs:
return row
self._testWithPlainCursorReal(getter)
@skip_before_postgres(8, 0)
def testDictCursorWithPlainCursorRealIterRowNumber(self):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
self._testIterRowNumber(curs)
def _testWithPlainCursorReal(self, getter):
curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.assertTrue(row['foo'] == 'bar')
def testDictCursorWithNamedCursorFetchOne(self):
self._testWithNamedCursor(lambda curs: curs.fetchone())
def testDictCursorWithNamedCursorFetchMany(self):
self._testWithNamedCursor(lambda curs: curs.fetchmany(100)[0])
def testDictCursorWithNamedCursorFetchManyNoarg(self):
self._testWithNamedCursor(lambda curs: curs.fetchmany()[0])
def testDictCursorWithNamedCursorFetchAll(self):
self._testWithNamedCursor(lambda curs: curs.fetchall()[0])
def testDictCursorWithNamedCursorIter(self):
def getter(curs):
for row in curs:
return row
self._testWithNamedCursor(getter)
@skip_before_postgres(8, 2)
def testDictCursorWithNamedCursorNotGreedy(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor)
self._testNamedCursorNotGreedy(curs)
@skip_before_postgres(8, 0)
def testDictCursorWithNamedCursorIterRowNumber(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.DictCursor)
self._testIterRowNumber(curs)
def _testWithNamedCursor(self, getter):
curs = self.conn.cursor('aname', cursor_factory=psycopg2.extras.DictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.assertTrue(row['foo'] == 'bar')
self.assertTrue(row[0] == 'bar')
def testDictCursorRealWithNamedCursorFetchOne(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchone())
def testDictCursorRealWithNamedCursorFetchMany(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchmany(100)[0])
def testDictCursorRealWithNamedCursorFetchManyNoarg(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchmany()[0])
def testDictCursorRealWithNamedCursorFetchAll(self):
self._testWithNamedCursorReal(lambda curs: curs.fetchall()[0])
def testDictCursorRealWithNamedCursorIter(self):
def getter(curs):
for row in curs:
return row
self._testWithNamedCursorReal(getter)
@skip_before_postgres(8, 2)
def testDictCursorRealWithNamedCursorNotGreedy(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor)
self._testNamedCursorNotGreedy(curs)
@skip_before_postgres(8, 0)
def testDictCursorRealWithNamedCursorIterRowNumber(self):
curs = self.conn.cursor('tmp', cursor_factory=psycopg2.extras.RealDictCursor)
self._testIterRowNumber(curs)
def _testWithNamedCursorReal(self, getter):
curs = self.conn.cursor('aname',
cursor_factory=psycopg2.extras.RealDictCursor)
curs.execute("SELECT * FROM ExtrasDictCursorTests")
row = getter(curs)
self.assertTrue(row['foo'] == 'bar')
def _testNamedCursorNotGreedy(self, curs):
curs.itersize = 2
curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""")
recs = []
for t in curs:
time.sleep(0.01)
recs.append(t)
# check that the dataset was not fetched in a single gulp
self.assertTrue(recs[1]['ts'] - recs[0]['ts'] < timedelta(seconds=0.005))
self.assertTrue(recs[2]['ts'] - recs[1]['ts'] > timedelta(seconds=0.0099))
def _testIterRowNumber(self, curs):
# Only checking for dataset < itersize:
# see CursorTests.test_iter_named_cursor_rownumber
curs.itersize = 20
curs.execute("""select * from generate_series(1,10)""")
for i, r in enumerate(curs):
self.assertEqual(i + 1, curs.rownumber)
def testPickleDictRow(self):
import pickle
curs = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
curs.execute("select 10 as a, 20 as b")
r = curs.fetchone()
d = pickle.dumps(r)
r1 = pickle.loads(d)
self.assertEqual(r, r1)
self.assertEqual(r[0], r1[0])
self.assertEqual(r[1], r1[1])
self.assertEqual(r['a'], r1['a'])
self.assertEqual(r['b'], r1['b'])
self.assertEqual(r._index, r1._index)
def testPickleRealDictRow(self):
import pickle
curs = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
curs.execute("select 10 as a, 20 as b")
r = curs.fetchone()
d = pickle.dumps(r)
r1 = pickle.loads(d)
self.assertEqual(r, r1)
self.assertEqual(r['a'], r1['a'])
self.assertEqual(r['b'], r1['b'])
self.assertEqual(r._column_mapping, r1._column_mapping)
class NamedTupleCursorTest(ConnectingTestCase):
    """Tests for NamedTupleCursor/NamedTupleConnection.

    Most tests are skipped when collections.namedtuple is unavailable.
    """

    def setUp(self):
        ConnectingTestCase.setUp(self)
        from psycopg2.extras import NamedTupleConnection
        try:
            from collections import namedtuple  # noqa
        except ImportError:
            # No namedtuple on this interpreter: leave the default
            # connection in place; the tests below are skipped anyway.
            return
        self.conn = self.connect(connection_factory=NamedTupleConnection)
        curs = self.conn.cursor()
        curs.execute("CREATE TEMPORARY TABLE nttest (i int, s text)")
        curs.execute("INSERT INTO nttest VALUES (1, 'foo')")
        curs.execute("INSERT INTO nttest VALUES (2, 'bar')")
        curs.execute("INSERT INTO nttest VALUES (3, 'baz')")
        self.conn.commit()

    @skip_if_no_namedtuple
    def test_cursor_args(self):
        # The per-cursor factory overrides the connection's default one.
        cur = self.conn.cursor('foo', cursor_factory=psycopg2.extras.DictCursor)
        self.assertEqual(cur.name, 'foo')
        self.assertTrue(isinstance(cur, psycopg2.extras.DictCursor))

    @skip_if_no_namedtuple
    def test_fetchone(self):
        curs = self.conn.cursor()
        curs.execute("select * from nttest order by 1")
        t = curs.fetchone()
        self.assertEqual(t[0], 1)
        self.assertEqual(t.i, 1)
        self.assertEqual(t[1], 'foo')
        self.assertEqual(t.s, 'foo')
        self.assertEqual(curs.rownumber, 1)
        self.assertEqual(curs.rowcount, 3)

    @skip_if_no_namedtuple
    def test_fetchmany_noarg(self):
        # With no argument fetchmany() honours cursor.arraysize.
        curs = self.conn.cursor()
        curs.arraysize = 2
        curs.execute("select * from nttest order by 1")
        res = curs.fetchmany()
        self.assertEqual(2, len(res))
        self.assertEqual(res[0].i, 1)
        self.assertEqual(res[0].s, 'foo')
        self.assertEqual(res[1].i, 2)
        self.assertEqual(res[1].s, 'bar')
        self.assertEqual(curs.rownumber, 2)
        self.assertEqual(curs.rowcount, 3)

    @skip_if_no_namedtuple
    def test_fetchmany(self):
        curs = self.conn.cursor()
        curs.execute("select * from nttest order by 1")
        res = curs.fetchmany(2)
        self.assertEqual(2, len(res))
        self.assertEqual(res[0].i, 1)
        self.assertEqual(res[0].s, 'foo')
        self.assertEqual(res[1].i, 2)
        self.assertEqual(res[1].s, 'bar')
        self.assertEqual(curs.rownumber, 2)
        self.assertEqual(curs.rowcount, 3)

    @skip_if_no_namedtuple
    def test_fetchall(self):
        curs = self.conn.cursor()
        curs.execute("select * from nttest order by 1")
        res = curs.fetchall()
        self.assertEqual(3, len(res))
        self.assertEqual(res[0].i, 1)
        self.assertEqual(res[0].s, 'foo')
        self.assertEqual(res[1].i, 2)
        self.assertEqual(res[1].s, 'bar')
        self.assertEqual(res[2].i, 3)
        self.assertEqual(res[2].s, 'baz')
        self.assertEqual(curs.rownumber, 3)
        self.assertEqual(curs.rowcount, 3)

    @skip_if_no_namedtuple
    def test_executemany(self):
        curs = self.conn.cursor()
        curs.executemany("delete from nttest where i = %s",
            [(1,), (2,)])
        curs.execute("select * from nttest order by 1")
        res = curs.fetchall()
        self.assertEqual(1, len(res))
        self.assertEqual(res[0].i, 3)
        self.assertEqual(res[0].s, 'baz')

    @skip_if_no_namedtuple
    def test_iter(self):
        # rownumber/rowcount must track iteration step by step.
        curs = self.conn.cursor()
        curs.execute("select * from nttest order by 1")
        i = iter(curs)
        self.assertEqual(curs.rownumber, 0)

        t = next(i)
        self.assertEqual(t.i, 1)
        self.assertEqual(t.s, 'foo')
        self.assertEqual(curs.rownumber, 1)
        self.assertEqual(curs.rowcount, 3)

        t = next(i)
        self.assertEqual(t.i, 2)
        self.assertEqual(t.s, 'bar')
        self.assertEqual(curs.rownumber, 2)
        self.assertEqual(curs.rowcount, 3)

        t = next(i)
        self.assertEqual(t.i, 3)
        self.assertEqual(t.s, 'baz')
        self.assertRaises(StopIteration, i.__next__)
        self.assertEqual(curs.rownumber, 3)
        self.assertEqual(curs.rowcount, 3)

    def test_error_message(self):
        # Without namedtuple, using a NamedTupleConnection must raise
        # ImportError at fetch time rather than fail obscurely.
        try:
            from collections import namedtuple  # noqa
        except ImportError:
            # an import error somewhere
            from psycopg2.extras import NamedTupleConnection
            try:
                self.conn = self.connect(
                    connection_factory=NamedTupleConnection)
                curs = self.conn.cursor()
                curs.execute("select 1")
                curs.fetchone()
            except ImportError:
                pass
            else:
                self.fail("expecting ImportError")
        else:
            # namedtuple is importable: nothing to verify here
            return self.skipTest("namedtuple available")

    @skip_if_no_namedtuple
    def test_record_updated(self):
        # The cached namedtuple class must be rebuilt after a new execute().
        curs = self.conn.cursor()
        curs.execute("select 1 as foo;")
        r = curs.fetchone()
        self.assertEqual(r.foo, 1)

        curs.execute("select 2 as bar;")
        r = curs.fetchone()
        self.assertEqual(r.bar, 2)
        self.assertRaises(AttributeError, getattr, r, 'foo')

    @skip_if_no_namedtuple
    def test_no_result_no_surprise(self):
        # Fetching from a statement returning no result set must raise.
        curs = self.conn.cursor()
        curs.execute("update nttest set s = s")
        self.assertRaises(psycopg2.ProgrammingError, curs.fetchone)

        curs.execute("update nttest set s = s")
        self.assertRaises(psycopg2.ProgrammingError, curs.fetchall)

    @skip_if_no_namedtuple
    def test_minimal_generation(self):
        # Instrument the class to verify it gets called the minimum number of times.
        from psycopg2.extras import NamedTupleCursor
        f_orig = NamedTupleCursor._make_nt
        calls = [0]

        def f_patched(self_):
            calls[0] += 1
            return f_orig(self_)

        NamedTupleCursor._make_nt = f_patched
        try:
            curs = self.conn.cursor()
            curs.execute("select * from nttest order by 1")
            curs.fetchone()
            curs.fetchone()
            curs.fetchone()
            self.assertEqual(1, calls[0])

            curs.execute("select * from nttest order by 1")
            curs.fetchone()
            curs.fetchall()
            self.assertEqual(2, calls[0])

            curs.execute("select * from nttest order by 1")
            curs.fetchone()
            curs.fetchmany(1)
            self.assertEqual(3, calls[0])
        finally:
            NamedTupleCursor._make_nt = f_orig

    @skip_if_no_namedtuple
    @skip_before_postgres(8, 0)
    def test_named(self):
        # Named cursor: mix fetchmany/fetchone/fetchall over one result set.
        curs = self.conn.cursor('tmp')
        curs.execute("""select i from generate_series(0,9) i""")
        recs = []
        recs.extend(curs.fetchmany(5))
        recs.append(curs.fetchone())
        recs.extend(curs.fetchall())
        self.assertEqual(list(range(10)), [t.i for t in recs])

    @skip_if_no_namedtuple
    def test_named_fetchone(self):
        curs = self.conn.cursor('tmp')
        curs.execute("""select 42 as i""")
        t = curs.fetchone()
        self.assertEqual(t.i, 42)

    @skip_if_no_namedtuple
    def test_named_fetchmany(self):
        curs = self.conn.cursor('tmp')
        curs.execute("""select 42 as i""")
        recs = curs.fetchmany(10)
        self.assertEqual(recs[0].i, 42)

    @skip_if_no_namedtuple
    def test_named_fetchall(self):
        curs = self.conn.cursor('tmp')
        curs.execute("""select 42 as i""")
        recs = curs.fetchall()
        self.assertEqual(recs[0].i, 42)

    @skip_if_no_namedtuple
    @skip_before_postgres(8, 2)
    def test_not_greedy(self):
        # Same batching check as in the Dict cursor tests above.
        curs = self.conn.cursor('tmp')
        curs.itersize = 2
        curs.execute("""select clock_timestamp() as ts from generate_series(1,3)""")
        recs = []
        for t in curs:
            time.sleep(0.01)
            recs.append(t)
        # check that the dataset was not fetched in a single gulp
        self.assertTrue(recs[1].ts - recs[0].ts < timedelta(seconds=0.005))
        self.assertTrue(recs[2].ts - recs[1].ts > timedelta(seconds=0.0099))

    @skip_if_no_namedtuple
    @skip_before_postgres(8, 0)
    def test_named_rownumber(self):
        curs = self.conn.cursor('tmp')
        # Only checking for dataset < itersize:
        # see CursorTests.test_iter_named_cursor_rownumber
        curs.itersize = 4
        curs.execute("""select * from generate_series(1,3)""")
        for i, t in enumerate(curs):
            self.assertEqual(i + 1, curs.rownumber)
def test_suite():
    """Collect every test defined in this module into a single suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingAdmissionPolicyStatusControllerConfiguration) DeepCopyInto(out *ValidatingAdmissionPolicyStatusControllerConfiguration) {
	// Shallow assignment only: deepcopy-gen emitted no per-field copying,
	// which indicates the struct has no reference-typed fields to clone.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatusControllerConfiguration.
func (in *ValidatingAdmissionPolicyStatusControllerConfiguration) DeepCopy() *ValidatingAdmissionPolicyStatusControllerConfiguration {
	// A nil receiver yields nil, per the deepcopy-gen contract.
	if in == nil {
		return nil
	}
	out := new(ValidatingAdmissionPolicyStatusControllerConfiguration)
	in.DeepCopyInto(out)
	return out
}
# Diplomat
Diplomat is an experimental Rust tool for generating FFI definitions allowing many other languages to call Rust code. With Diplomat, you can simply define Rust APIs to be exposed over FFI and get high-level C, C++, and JavaScript bindings automatically!
Diplomat supports generating bindings from Rust to:
- C
- C++
- Dart
- JavaScript/TypeScript
- Kotlin (using JNA)
- Python (using [nanobind](https://nanobind.readthedocs.io/en/latest/index.html))
Diplomat supports languages through a plugin interface that makes it easy to add support for your favourite language. See [the book to get started](https://rust-diplomat.github.io/diplomat/developer.html), and `tool/src/{c, cpp, js}` for examples of existing language plugins.
## Installation
First, install the CLI tool for generating bindings:
```bash
$ cargo install diplomat-tool
```
Then, add the Diplomat macro and runtime as dependencies to your project:
```toml
diplomat = "0.10.0"
diplomat-runtime = "0.10.0"
```
## Getting Started
Documentation on how to use Diplomat can be found [in the book](https://rust-diplomat.github.io/diplomat/).
### Architecture
See the [design doc](docs/design_doc.md) for more details.
### Building and Testing
Simply run `cargo build` to build all the libraries and compile an example. To run unit tests, run `cargo test`.
Diplomat makes use of snapshot tests to check macro and code generation logic. When code generation logic changes and the snapshots need to be updated, run `cargo insta review` (run `cargo install cargo-insta` to get the tool) to view the changes and update the snapshots.
#### Javascript bindings for `wasm32-unknown-unknown`
The JavaScript backend assumes that you are building WebAssembly on the C Spec ABI. This is not currently the default for the `wasm32-unknown-unknown` target in the latest version of Rust, and so until the [new WASM ABI](https://blog.rust-lang.org/2025/04/04/c-abi-changes-for-wasm32-unknown-unknown/) is made stable, you have two options:
1. Build using nightly Rust and enable the [`-Zwasm-c-abi=spec`](https://doc.rust-lang.org/stable/unstable-book/compiler-flags/wasm-c-abi.html) flag.
1. Configure the JS backend to use legacy bindings. There is a [WASM ABI config option](https://github.com/rust-diplomat/diplomat/blob/main/tool/src/js/mod.rs) for this, please read [the guide on configuration in the book](https://rust-diplomat.github.io/diplomat/config) for more on how to configure. | unknown | github | https://github.com/nodejs/node | deps/crates/vendor/diplomat-runtime/README.md |
// Adapted from https://github.com/sunfishcode/mir2cranelift/blob/master/rust-examples/nocore-hello-world.rs
#![feature(
no_core, unboxed_closures, lang_items, never_type, linkage,
extern_types, thread_local
)]
#![no_core]
#![allow(dead_code, internal_features, non_camel_case_types)]
#![rustfmt_skip]
extern crate mini_core;
use mini_core::*;
use mini_core::libc::*;
// FFI shim around libc's `puts`, exercising `extern "C" fn` codegen.
unsafe extern "C" fn my_puts(s: *const u8) {
    puts(s);
}

// Minimal stand-in for std's `Termination` lang item; the exit code is
// derived from the mutated `NUM` static below (read through `NUM_REF`).
#[lang = "termination"]
trait Termination {
    fn report(self) -> i32;
}

impl Termination for () {
    fn report(self) -> i32 {
        unsafe {
            NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
            *NUM_REF as i32
        }
    }
}

// Object-safe trait used throughout to test trait-object codegen.
trait SomeTrait {
    fn object_safe(&self);
}

impl SomeTrait for &'static str {
    fn object_safe(&self) {
        unsafe {
            puts(*self as *const str as *const u8);
        }
    }
}
// Types with observable drop side effects, used to exercise drop glue.
struct NoisyDrop {
    text: &'static str,
    inner: NoisyDropInner,
}

// Unsized variant: the trailing `str` field makes this a DST.
struct NoisyDropUnsized {
    inner: NoisyDropInner,
    text: str,
}

struct NoisyDropInner;

impl Drop for NoisyDrop {
    fn drop(&mut self) {
        unsafe {
            puts(self.text as *const str as *const u8);
        }
    }
}

impl Drop for NoisyDropInner {
    fn drop(&mut self) {
        unsafe {
            puts("Inner got dropped!\0" as *const str as *const u8);
        }
    }
}

impl SomeTrait for NoisyDrop {
    fn object_safe(&self) {}
}

// Enum with explicit discriminants, including a negative one.
enum Ordering {
    Less = -1,
    Equal = 0,
    Greater = 1,
}
// Lang-item entry point replacing std's `start`: echoes argv when exactly
// three arguments are passed, then runs `main` and reports termination.
#[lang = "start"]
fn start<T: Termination + 'static>(
    main: fn() -> T,
    argc: isize,
    argv: *const *const u8,
    _sigpipe: u8,
) -> isize {
    if argc == 3 {
        // Manual pointer arithmetic over the argv array (no slices here).
        unsafe { puts(*argv); }
        unsafe { puts(*((argv as usize + size_of::<*const u8>()) as *const *const u8)); }
        unsafe { puts(*((argv as usize + 2 * size_of::<*const u8>()) as *const *const u8)); }
    }
    main().report();
    0
}

static mut NUM: u8 = 6 * 7;
static NUM_REF: &'static u8 = unsafe { &* &raw const NUM };

// Local assert macros: std's versions are unavailable under #![no_core].
macro_rules! assert {
    ($e:expr) => {
        if !$e {
            panic(stringify!(! $e));
        }
    };
}

macro_rules! assert_eq {
    ($l:expr, $r: expr) => {
        if $l != $r {
            panic(stringify!($l != $r));
        }
    }
}

// Minimal owning-pointer wrapper; the CoerceUnsized impl lets it take part
// in unsizing coercions (e.g. to trait objects) like std's Unique.
struct Unique<T: ?Sized> {
    pointer: *const T,
    _marker: PhantomData<T>,
}

impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}

// Zero-initialize a value in place (mini_core provides no `mem::zeroed`).
unsafe fn zeroed<T>() -> T {
    let mut uninit = MaybeUninit { uninit: () };
    intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
    uninit.value.value
}
// Helpers exercising specific ABI paths: f32 arguments, by-value struct
// arguments, and (u128, u128) return values.
fn take_f32(_f: f32) {}
fn take_unique(_u: Unique<()>) {}

fn return_u128_pair() -> (u128, u128) {
    (0, 0)
}

fn call_return_u128_pair() {
    return_u128_pair();
}
// Smoke test driver: each statement exercises a distinct codegen path
// (casts, intrinsics, DSTs, trait objects, drop glue, TLS, ...).
fn main() {
    take_unique(Unique {
        pointer: 0 as *const (),
        _marker: PhantomData,
    });
    take_f32(0.1);
    //call_return_u128_pair();

    let slice = &[0, 1] as &[i32];
    let slice_ptr = slice as *const [i32] as *const i32;

    let align = align_of::<*const i32>();
    assert_eq!(slice_ptr as usize % align, 0);

    //return;

    unsafe {
        printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);

        let hello: &[u8] = b"Hello\0" as &[u8; 6];
        let ptr: *const u8 = hello as *const [u8] as *const u8;
        puts(ptr);

        let world: Box<&str> = Box::new("World!\0");
        puts(*world as *const str as *const u8);
        world as Box<dyn SomeTrait>;

        // Bit-manipulation intrinsics with known answers.
        assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
        assert_eq!(intrinsics::bitreverse(0xddccu16), 0x33bbu16);
        assert_eq!(intrinsics::bitreverse(0xffee_ddccu32), 0x33bb77ffu32);
        assert_eq!(intrinsics::bitreverse(0x1234_5678_ffee_ddccu64), 0x33bb77ff1e6a2c48u64);

        assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
        assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
        assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
        assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);

        // size_of_val/align_of_val on slices and trait objects.
        assert_eq!(intrinsics::size_of_val(hello) as u8, 6);

        let chars = &['C', 'h', 'a', 'r', 's'];
        let chars = chars as &[char];
        assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);

        let a: &dyn SomeTrait = &"abc\0";
        a.object_safe();

        #[cfg(target_arch="x86_64")]
        assert_eq!(intrinsics::size_of_val(a) as u8, 16);
        #[cfg(target_arch="m68k")]
        assert_eq!(intrinsics::size_of_val(a) as u8, 8);
        assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);

        assert_eq!(align_of::<u16>() as u8, 2);
        assert_eq!(intrinsics::align_of_val(&a) as u8, align_of::<&str>() as u8);

        // needs_drop evaluated in const context, for sized and unsized types.
        let u8_needs_drop = const { intrinsics::needs_drop::<u8>() };
        assert!(!u8_needs_drop);
        let slice_needs_drop = const { intrinsics::needs_drop::<[u8]>() };
        assert!(!slice_needs_drop);
        let noisy_drop = const { intrinsics::needs_drop::<NoisyDrop>() };
        assert!(noisy_drop);
        let noisy_unsized_drop = const { intrinsics::needs_drop::<NoisyDropUnsized>() };
        assert!(noisy_unsized_drop);

        Unique {
            pointer: 0 as *const &str,
            _marker: PhantomData,
        } as Unique<dyn SomeTrait>;

        struct MyDst<T: ?Sized>(T);

        intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);

        struct Foo {
            x: u8,
            y: !,
        }

        unsafe fn uninitialized<T>() -> T {
            MaybeUninit { uninit: () }.value.value
        }

        zeroed::<(u8, u8)>();
        #[allow(unreachable_code)]
        {
            // Dead code on purpose: instantiates zeroed/uninitialized for
            // uninhabited types without ever executing them.
            if false {
                zeroed::<!>();
                zeroed::<Foo>();
                uninitialized::<Foo>();
            }
        }
    }

    let _ = Box::new(NoisyDrop {
        text: "Boxed outer got dropped!\0",
        inner: NoisyDropInner,
    }) as Box<dyn SomeTrait>;

    const FUNC_REF: Option<fn()> = Some(main);
    #[allow(unreachable_code)]
    match FUNC_REF {
        Some(_) => {},
        None => assert!(false),
    }

    match Ordering::Less {
        Ordering::Less => {},
        _ => assert!(false),
    }

    [NoisyDropInner, NoisyDropInner];

    let x = &[0u32, 42u32] as &[u32];
    match x {
        [] => assert_eq!(0u32, 1),
        [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
    }

    assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);

    extern "C" {
        #[linkage = "weak"]
        static ABC: *const u8;
    }

    {
        extern "C" {
            #[linkage = "weak"]
            static ABC: *const u8;
        }
    }

    // TODO(antoyo): to make this work, support weak linkage.
    //unsafe { assert_eq!(ABC as usize, 0); }

    &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;

    // Saturating float-to-int casts.
    let f = 1000.0;
    assert_eq!(f as u8, 255);
    let f2 = -1000.0;
    assert_eq!(f2 as i8, -128);
    assert_eq!(f2 as u8, 0);

    static ANOTHER_STATIC: &u8 = &A_STATIC;
    assert_eq!(*ANOTHER_STATIC, 42);

    check_niche_behavior();

    extern "C" {
        type ExternType;
    }

    struct ExternTypeWrapper {
        _a: ExternType,
    }

    let nullptr = 0 as *const ();
    let extern_nullptr = nullptr as *const ExternTypeWrapper;
    extern_nullptr as *const ();
    let slice_ptr = &[] as *const [u8];
    slice_ptr as *const u8;

    #[cfg(not(jit))]
    test_tls();
}
// Hand-rolled pthread bindings (no libc crate available under no_core).
#[repr(C)]
enum c_void {
    _1,
    _2,
}

type c_int = i32;
type c_ulong = u64;
type pthread_t = c_ulong;

#[repr(C)]
struct pthread_attr_t {
    __size: [u64; 7],
}

#[link(name = "pthread")]
extern "C" {
    fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
    fn pthread_create(
        native: *mut pthread_t,
        attr: *const pthread_attr_t,
        f: extern "C" fn(_: *mut c_void) -> *mut c_void,
        value: *mut c_void
    ) -> c_int;
    fn pthread_join(
        native: pthread_t,
        value: *mut *mut c_void
    ) -> c_int;
}

#[thread_local]
#[cfg(not(jit))]
static mut TLS: u8 = 42;

// Thread body: clobbers its own thread-local copy of TLS.
#[cfg(not(jit))]
extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
    unsafe { TLS = 0; }
    0 as *mut c_void
}

// Spawn a thread that zeroes TLS and verify the main thread's copy is
// untouched, i.e. the static really is thread-local.
#[cfg(not(jit))]
fn test_tls() {
    unsafe {
        let mut attr: pthread_attr_t = zeroed();
        let mut thread: pthread_t = 0;

        assert_eq!(TLS, 42);

        if pthread_attr_init(&mut attr) != 0 {
            assert!(false);
        }
        if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
            assert!(false);
        }

        let mut res = 0 as *mut c_void;
        pthread_join(thread, &mut res);

        // TLS of main thread must not have been changed by the other thread.
        assert_eq!(TLS, 42);
        puts("TLS works!\n\0" as *const str as *const u8);
    }
}
// Copied ui/issues/issue-61696.rs
pub enum Infallible {}

// The check that the `bool` field of `V1` is encoding a "niche variant"
// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
// causing valid `V1` values to be interpreted as other variants.
pub enum E1 {
    V1 { f: bool },
    V2 { f: Infallible },
    V3,
    V4,
}

// Computing the discriminant used to be done using the niche type (here `u8`,
// from the `bool` field of `V1`), overflowing for variants with large enough
// indices (`V3` and `V4`), causing them to be interpreted as other variants.
pub enum E2<X> {
    V1 { f: bool },

    /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
    _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
    _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
    _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
    _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
    _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
    _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
    _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
    _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
    _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
    _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
    _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
    _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
    _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
    _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
    _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
    _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
    _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
    _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
    _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
    _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
    _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
    _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
    _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
    _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
    _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
    _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
    _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
    _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
    _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
    _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
    _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),

    V3,
    V4,
}

// If niche decoding is wrong, a live V1 value matches V2 (or V3/V4 match
// V1) and we abort; running to completion is the pass condition.
#[allow(unreachable_patterns)]
fn check_niche_behavior() {
    if let E1::V2 { .. } = (E1::V1 { f: true }) {
        intrinsics::abort();
    }

    if let E2::V1 { .. } = E2::V3::<Infallible> {
        intrinsics::abort();
    }
}
from collections import defaultdict
import pymongo
from bson import SON
from mongoengine.base.fields import UPDATE_OPERATORS
from mongoengine.connection import get_connection
from mongoengine.common import _import_class
from mongoengine.errors import InvalidQueryError
from mongoengine.python_support import IS_PYMONGO_3
__all__ = ('query', 'update')

# Operators comparing a field against a value; mapped to MongoDB's $<op>.
COMPARISON_OPERATORS = ('ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod',
                        'all', 'size', 'exists', 'not', 'elemMatch', 'type')

# Geospatial query operators, handled by _geo_operator().
GEO_OPERATORS = ('within_distance', 'within_spherical_distance',
                 'within_box', 'within_polygon', 'near', 'near_sphere',
                 'max_distance', 'min_distance', 'geo_within', 'geo_within_box',
                 'geo_within_polygon', 'geo_within_center',
                 'geo_within_sphere', 'geo_intersects')

# String-matching operators; translated to regex queries by StringField.
STRING_OPERATORS = ('contains', 'icontains', 'startswith',
                    'istartswith', 'endswith', 'iendswith',
                    'exact', 'iexact')

# Operators with bespoke handling in query() (currently only $elemMatch).
CUSTOM_OPERATORS = ('match',)

# Union of everything recognised as a trailing __<operator> in a query key.
MATCH_OPERATORS = (COMPARISON_OPERATORS + GEO_OPERATORS +
                   STRING_OPERATORS + CUSTOM_OPERATORS)
def query(_doc_cls=None, **query):
    """Transform a query from Django-style format to Mongo format.

    Keyword arguments take the form ``field__subfield__operator=value``;
    the special key ``__raw__`` is merged into the result verbatim.
    Returns a dict suitable for passing to pymongo's ``find()``.

    Raises InvalidQueryError for unknown field paths and
    NotImplementedError for unimplemented custom operators.
    """
    mongo_query = {}
    merge_query = defaultdict(list)
    for key, value in sorted(query.items()):
        if key == "__raw__":
            mongo_query.update(value)
            continue

        parts = key.rsplit('__')
        # Digit components are positional indexes into list fields; strip
        # them for field lookup and re-insert at the same spots afterwards.
        indices = [(i, p) for i, p in enumerate(parts) if p.isdigit()]
        parts = [part for part in parts if not part.isdigit()]

        # Check for an operator and transform to mongo-style if there is
        op = None
        if len(parts) > 1 and parts[-1] in MATCH_OPERATORS:
            op = parts.pop()

        # Allow escaping an operator-like field name with a trailing __
        if len(parts) > 1 and parts[-1] == "":
            parts.pop()

        negate = False
        if len(parts) > 1 and parts[-1] == 'not':
            parts.pop()
            negate = True

        if _doc_cls:
            # Switch field names to proper names [set in Field(name='foo')]
            try:
                fields = _doc_cls._lookup_field(parts)
            except Exception as e:
                raise InvalidQueryError(e)
            parts = []

            CachedReferenceField = _import_class('CachedReferenceField')

            cleaned_fields = []
            for field in fields:
                append_field = True
                if isinstance(field, basestring):
                    parts.append(field)
                    append_field = False
                # is last and CachedReferenceField: query against the
                # embedded document's _id rather than the whole document
                elif isinstance(field, CachedReferenceField) and fields[-1] == field:
                    parts.append('%s._id' % field.db_field)
                else:
                    parts.append(field.db_field)
                if append_field:
                    cleaned_fields.append(field)

            # Convert value to proper value
            field = cleaned_fields[-1]

            singular_ops = [None, 'ne', 'gt', 'gte', 'lt', 'lte', 'not']
            singular_ops += STRING_OPERATORS
            if op in singular_ops:
                if isinstance(field, basestring):
                    if (op in STRING_OPERATORS and
                            isinstance(value, basestring)):
                        StringField = _import_class('StringField')
                        value = StringField.prepare_query_value(op, value)
                    else:
                        value = field
                else:
                    value = field.prepare_query_value(op, value)

                    if isinstance(field, CachedReferenceField) and value:
                        value = value['_id']
            elif op in ('in', 'nin', 'all', 'near') and not isinstance(value, dict):
                # 'in', 'nin' and 'all' require a list of values
                value = [field.prepare_query_value(op, v) for v in value]

        # if op and op not in COMPARISON_OPERATORS:
        if op:
            if op in GEO_OPERATORS:
                value = _geo_operator(field, op, value)
            elif op in CUSTOM_OPERATORS:
                if op in ('elem_match', 'match'):
                    value = field.prepare_query_value(op, value)
                    value = {"$elemMatch": value}
                else:
                    # BUG FIX: the exception used to be constructed but not
                    # raised, silently ignoring unknown custom operators.
                    raise NotImplementedError(
                        "Custom method '%s' has not "
                        "been implemented" % op)
            elif op not in STRING_OPERATORS:
                value = {'$' + op: value}

        if negate:
            value = {'$not': value}

        # Re-insert the positional indexes stripped earlier.
        for i, part in indices:
            parts.insert(i, part)
        key = '.'.join(parts)

        if op is None or key not in mongo_query:
            mongo_query[key] = value
        elif key in mongo_query:
            if key in mongo_query and isinstance(mongo_query[key], dict):
                mongo_query[key].update(value)
                # $max/minDistance needs to come last - convert to SON
                value_dict = mongo_query[key]
                if ('$maxDistance' in value_dict or '$minDistance' in value_dict) and \
                        ('$near' in value_dict or '$nearSphere' in value_dict):
                    value_son = SON()
                    for k, v in value_dict.items():
                        if k == '$maxDistance' or k == '$minDistance':
                            continue
                        value_son[k] = v
                    # Required for MongoDB >= 2.6, may fail when combining
                    # PyMongo 3+ and MongoDB < 2.6
                    near_embedded = False
                    for near_op in ('$near', '$nearSphere'):
                        if isinstance(value_dict.get(near_op), dict) and (
                                IS_PYMONGO_3 or get_connection().max_wire_version > 1):
                            value_son[near_op] = SON(value_son[near_op])
                            if '$maxDistance' in value_dict:
                                value_son[near_op]['$maxDistance'] = value_dict['$maxDistance']
                            if '$minDistance' in value_dict:
                                value_son[near_op]['$minDistance'] = value_dict['$minDistance']
                            near_embedded = True
                    if not near_embedded:
                        if '$maxDistance' in value_dict:
                            value_son['$maxDistance'] = value_dict['$maxDistance']
                        if '$minDistance' in value_dict:
                            value_son['$minDistance'] = value_dict['$minDistance']
                    mongo_query[key] = value_son
            else:
                # Store for manually merging later
                merge_query[key].append(value)

    # The queryset has been filtered in such a way we must manually merge
    for k, v in merge_query.items():
        merge_query[k].append(mongo_query[k])
        del mongo_query[k]

        if isinstance(v, list):
            value = [{k: val} for val in v]
            if '$and' in mongo_query:
                mongo_query['$and'].extend(value)
            else:
                mongo_query['$and'] = value

    return mongo_query
def update(_doc_cls=None, **update):
    """Transform an update spec from Django-style format to Mongo format.

    Keys are parsed as ``<op>__<field>[__<subfield>...]`` (e.g.
    ``set__name='x'``, ``inc__count=1``) and converted into a MongoDB
    update document such as ``{'$set': {'name': 'x'}}``.

    :param _doc_cls: optional Document class; when given, field parts are
        resolved to their ``db_field`` names and values are coerced via
        ``prepare_query_value``.
    :param update: Django-style update keyword arguments. The special key
        ``__raw__`` merges a pre-built Mongo update fragment unchanged.
    :raises InvalidQueryError: on field lookup failure, on a key with no
        operator, or on a dotted ``pull_all`` path.
    """
    mongo_update = {}
    for key, value in update.items():
        # "__raw__" is an escape hatch: merge the caller's fragment as-is.
        if key == "__raw__":
            mongo_update.update(value)
            continue
        parts = key.split('__')
        # if there is no operator, default to "set"
        if len(parts) < 3 and parts[0] not in UPDATE_OPERATORS:
            parts.insert(0, 'set')
        # Check for an operator and transform to mongo-style if there is
        op = None
        if parts[0] in UPDATE_OPERATORS:
            op = parts.pop(0)
            # Convert Pythonic names to Mongo equivalents
            if op in ('push_all', 'pull_all'):
                op = op.replace('_all', 'All')
            elif op == 'dec':
                # Support decrement by flipping a positive value's sign
                # and using 'inc'
                op = 'inc'
                if value > 0:
                    value = -value
            elif op == 'add_to_set':
                op = 'addToSet'
            elif op == 'set_on_insert':
                op = "setOnInsert"
        # A trailing comparison operator (e.g. pull__sizes__gt=5) becomes a
        # {'$gt': value} match document, applied after value coercion below.
        match = None
        if parts[-1] in COMPARISON_OPERATORS:
            match = parts.pop()
        if _doc_cls:
            # Switch field names to proper names [set in Field(name='foo')]
            try:
                fields = _doc_cls._lookup_field(parts)
            except Exception, e:
                raise InvalidQueryError(e)
            parts = []
            cleaned_fields = []
            appended_sub_field = False
            for field in fields:
                append_field = True
                if isinstance(field, basestring):
                    # Convert the S operator to $
                    if field == 'S':
                        field = '$'
                    parts.append(field)
                    append_field = False
                else:
                    parts.append(field.db_field)
                if append_field:
                    appended_sub_field = False
                    cleaned_fields.append(field)
                    if hasattr(field, 'field'):
                        # Container field (e.g. ListField): remember the
                        # inner field so values are coerced per-item.
                        cleaned_fields.append(field.field)
                        appended_sub_field = True
            # Convert value to proper value
            if appended_sub_field:
                field = cleaned_fields[-2]
            else:
                field = cleaned_fields[-1]
            GeoJsonBaseField = _import_class("GeoJsonBaseField")
            if isinstance(field, GeoJsonBaseField):
                value = field.to_mongo(value)
            if op in (None, 'set', 'push', 'pull'):
                if field.required or value is not None:
                    value = field.prepare_query_value(op, value)
            elif op in ('pushAll', 'pullAll'):
                value = [field.prepare_query_value(op, v) for v in value]
            elif op in ('addToSet', 'setOnInsert'):
                if isinstance(value, (list, tuple, set)):
                    value = [field.prepare_query_value(op, v) for v in value]
                elif field.required or value is not None:
                    value = field.prepare_query_value(op, value)
            elif op == "unset":
                # $unset ignores its value; Mongo convention is 1.
                value = 1
        if match:
            match = '$' + match
            value = {match: value}
        key = '.'.join(parts)
        if not op:
            raise InvalidQueryError("Updates must supply an operation "
                                    "eg: set__FIELD=value")
        if 'pull' in op and '.' in key:
            # Dot operators don't work on pull operations
            # unless they point to a list field
            # Otherwise it uses nested dict syntax
            if op == 'pullAll':
                raise InvalidQueryError("pullAll operations only support "
                                        "a single field depth")
            # Look for the last list field and use dot notation until there
            field_classes = [c.__class__ for c in cleaned_fields]
            field_classes.reverse()
            ListField = _import_class('ListField')
            if ListField in field_classes:
                # Join all fields via dot notation to the last ListField
                # Then process as normal
                last_listField = len(
                    cleaned_fields) - field_classes.index(ListField)
                key = ".".join(parts[:last_listField])
                parts = parts[last_listField:]
                parts.insert(0, key)
            # Build the nested-dict form from the remaining path segments.
            parts.reverse()
            for key in parts:
                value = {key: value}
        elif op == 'addToSet' and isinstance(value, list):
            value = {key: {"$each": value}}
        else:
            value = {key: value}
        key = '$' + op
        # Merge with any previous clause for the same operator.
        if key not in mongo_update:
            mongo_update[key] = value
        elif key in mongo_update and isinstance(mongo_update[key], dict):
            mongo_update[key].update(value)
    return mongo_update
def _geo_operator(field, op, value):
"""Helper to return the query for a given geo query"""
if op == "max_distance":
value = {'$maxDistance': value}
elif op == "min_distance":
value = {'$minDistance': value}
elif field._geo_index == pymongo.GEO2D:
if op == "within_distance":
value = {'$within': {'$center': value}}
elif op == "within_spherical_distance":
value = {'$within': {'$centerSphere': value}}
elif op == "within_polygon":
value = {'$within': {'$polygon': value}}
elif op == "near":
value = {'$near': value}
elif op == "near_sphere":
value = {'$nearSphere': value}
elif op == 'within_box':
value = {'$within': {'$box': value}}
else:
raise NotImplementedError("Geo method '%s' has not "
"been implemented for a GeoPointField" % op)
else:
if op == "geo_within":
value = {"$geoWithin": _infer_geometry(value)}
elif op == "geo_within_box":
value = {"$geoWithin": {"$box": value}}
elif op == "geo_within_polygon":
value = {"$geoWithin": {"$polygon": value}}
elif op == "geo_within_center":
value = {"$geoWithin": {"$center": value}}
elif op == "geo_within_sphere":
value = {"$geoWithin": {"$centerSphere": value}}
elif op == "geo_intersects":
value = {"$geoIntersects": _infer_geometry(value)}
elif op == "near":
value = {'$near': _infer_geometry(value)}
else:
raise NotImplementedError("Geo method '%s' has not "
"been implemented for a %s " % (op, field._name))
return value
def _infer_geometry(value):
"""Helper method that tries to infer the $geometry shape for a given value"""
if isinstance(value, dict):
if "$geometry" in value:
return value
elif 'coordinates' in value and 'type' in value:
return {"$geometry": value}
raise InvalidQueryError("Invalid $geometry dictionary should have "
"type and coordinates keys")
elif isinstance(value, (list, set)):
# TODO: shouldn't we test value[0][0][0][0] to see if it is MultiPolygon?
try:
value[0][0][0]
return {"$geometry": {"type": "Polygon", "coordinates": value}}
except:
pass
try:
value[0][0]
return {"$geometry": {"type": "LineString", "coordinates": value}}
except:
pass
try:
value[0]
return {"$geometry": {"type": "Point", "coordinates": value}}
except:
pass
raise InvalidQueryError("Invalid $geometry data. Can be either a dictionary "
"or (nested) lists of coordinate(s)") | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python3
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script reads config.h.meson, looks for header
checks and writes the corresponding meson declaration.
Copy config.h.in to config.h.meson, replace #undef
with #mesondefine and run this. We can't do this automatically
because some configure scripts have #undef statements
that are unrelated to configure checks.
"""
import sys
print('''cc = meson.get_compiler('c')
cdata = configuration_data()''')

print('check_headers = [')
# Use a context manager so the config file handle is closed deterministically
# instead of leaking until interpreter exit.
with open(sys.argv[1]) as config_file:
    for line in config_file:
        line = line.strip()
        # Header checks look like "#mesondefine HAVE_SYS_TIME_H"; turn the
        # token back into a header path (HAVE_SYS_TIME_H -> sys/time.h).
        if line.startswith('#mesondefine') and \
           line.endswith('_H'):
            token = line.split()[1]
            tarr = token.split('_')[1:-1]
            tarr = [x.lower() for x in tarr]
            hname = '/'.join(tarr) + '.h'
            print(" ['%s', '%s']," % (token, hname))
print(']\n')

print('''foreach h : check_headers
  if cc.has_header(h.get(1))
    cdata.set(h.get(0), 1)
  endif
endforeach
''')
# Add stuff here as it is encountered.
# Maps a config.h HAVE_* token to the (function name, header) pair that the
# generated meson code should probe with cc.has_function().
function_data = \
    {'HAVE_FEENABLEEXCEPT' : ('feenableexcept', 'fenv.h'),
     'HAVE_FECLEAREXCEPT' : ('feclearexcept', 'fenv.h'),
     'HAVE_FEDISABLEEXCEPT' : ('fedisableexcept', 'fenv.h'),
     'HAVE_MMAP' : ('mmap', 'sys/mman.h'),
     'HAVE_GETPAGESIZE' : ('getpagesize', 'unistd.h'),
     'HAVE_GETISAX' : ('getisax', 'sys/auxv.h'),
     'HAVE_GETTIMEOFDAY' : ('gettimeofday', 'sys/time.h'),
     'HAVE_MPROTECT' : ('mprotect', 'sys/mman.h'),
     'HAVE_POSIX_MEMALIGN' : ('posix_memalign', 'stdlib.h'),
     'HAVE_SIGACTION' : ('sigaction', 'signal.h'),
     'HAVE_ALARM' : ('alarm', 'unistd.h'),
     'HAVE_CLOCK_GETTIME' : ('clock_gettime', 'time.h'),
     'HAVE_CTIME_R' : ('ctime_r', 'time.h'),
     'HAVE_DRAND48' : ('drand48', 'stdlib.h'),
     'HAVE_FLOCKFILE' : ('flockfile', 'stdio.h'),
     'HAVE_FORK' : ('fork', 'unistd.h'),
     'HAVE_FUNLOCKFILE' : ('funlockfile', 'stdio.h'),
     'HAVE_GETLINE' : ('getline', 'stdio.h'),
     'HAVE_LINK' : ('link', 'unistd.h'),
     'HAVE_RAISE' : ('raise', 'signal.h'),
     'HAVE_STRNDUP' : ('strndup', 'string.h'),
     'HAVE_SCHED_GETAFFINITY' : ('sched_getaffinity', 'sched.h'),
     'HAVE_WAITPID' : ('waitpid', 'sys/wait.h'),
     'HAVE_XRENDERCREATECONICALGRADIENT' : ('XRenderCreateConicalGradient', 'xcb/render.h'),
     'HAVE_XRENDERCREATELINEARGRADIENT' : ('XRenderCreateLinearGradient', 'xcb/render.h'),
     'HAVE_XRENDERCREATERADIALGRADIENT' : ('XRenderCreateRadialGradient', 'xcb/render.h'),
     'HAVE_XRENDERCREATESOLIDFILL' : ('XRenderCreateSolidFill', 'xcb/render.h'),
     'HAVE_DCGETTEXT': ('dcgettext', 'libintl.h'),
     'HAVE_ENDMNTENT': ('endmntent', 'mntent.h'),
     'HAVE_ENDSERVENT' : ('endservent', 'netdb.h'),
     'HAVE_EVENTFD': ('eventfd', 'sys/eventfd.h'),
     'HAVE_FALLOCATE': ('fallocate', 'fcntl.h'),
     'HAVE_FCHMOD': ('fchmod', 'sys/stat.h'),
     'HAVE_FCHOWN': ('fchown', 'unistd.h'),
     'HAVE_FDWALK': ('fdwalk', 'stdlib.h'),
     'HAVE_FSYNC': ('fsync', 'unistd.h'),
     'HAVE_GETC_UNLOCKED': ('getc_unlocked', 'stdio.h'),
     'HAVE_GETFSSTAT': ('getfsstat', 'sys/mount.h'),
     'HAVE_GETMNTENT_R': ('getmntent_r', 'mntent.h'),
     'HAVE_GETPROTOBYNAME_R': ('getprotobyname_r', 'netdb.h'),
     'HAVE_GETRESUID' : ('getresuid', 'unistd.h'),
     'HAVE_GETVFSSTAT' : ('getvfsstat', 'sys/statvfs.h'),
     'HAVE_GMTIME_R' : ('gmtime_r', 'time.h'),
     'HAVE_HASMNTOPT': ('hasmntopt', 'mntent.h'),
     'HAVE_IF_INDEXTONAME': ('if_indextoname', 'net/if.h'),
     'HAVE_IF_NAMETOINDEX': ('if_nametoindex', 'net/if.h'),
     'HAVE_INOTIFY_INIT1': ('inotify_init1', 'sys/inotify.h'),
     'HAVE_ISSETUGID': ('issetugid', 'unistd.h'),
     'HAVE_KEVENT': ('kevent', 'sys/event.h'),
     'HAVE_KQUEUE': ('kqueue', 'sys/event.h'),
     'HAVE_LCHMOD': ('lchmod', 'sys/stat.h'),
     'HAVE_LCHOWN': ('lchown', 'unistd.h'),
     'HAVE_LSTAT': ('lstat', 'sys/stat.h'),
     'HAVE_MEMCPY': ('memcpy', 'string.h'),
     'HAVE_MEMALIGN': ('memalign', 'stdlib.h'),
     'HAVE_MEMMEM': ('memmem', 'string.h'),
     'HAVE_NEWLOCALE': ('newlocale', 'locale.h'),
     'HAVE_PIPE2': ('pipe2', 'fcntl.h'),
     'HAVE_POLL': ('poll', 'poll.h'),
     'HAVE_PRLIMIT': ('prlimit', 'sys/resource.h'),
     'HAVE_PTHREAD_ATTR_SETSTACKSIZE': ('pthread_attr_setstacksize', 'pthread.h'),
     'HAVE_PTHREAD_CONDATTR_SETCLOCK': ('pthread_condattr_setclock', 'pthread.h'),
     'HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP': ('pthread_cond_timedwait_relative_np', 'pthread.h'),
     'HAVE_READLINK': ('readlink', 'unistd.h'),
     'HAVE_RES_INIT': ('res_init', 'resolv.h'),
     'HAVE_SENDMMSG': ('sendmmsg', 'sys/socket.h'),
     'HAVE_SETENV': ('setenv', 'stdlib.h'),
     'HAVE_SETMNTENT': ('setmntent', 'mntent.h'),
     'HAVE_SNPRINTF': ('snprintf', 'stdio.h'),
     'HAVE_SPLICE': ('splice', 'fcntl.h'),
     'HAVE_STATFS': ('statfs', 'mount.h'),
     'HAVE_STATVFS': ('statvfs', 'sys/statvfs.h'),
     'HAVE_STPCOPY': ('stpcopy', 'string.h'),
     'HAVE_STRCASECMP': ('strcasecmp', 'strings.h'),
     'HAVE_STRLCPY': ('strlcpy', 'string.h'),
     'HAVE_STRNCASECMP': ('strncasecmp', 'strings.h'),
     'HAVE_STRSIGNAL': ('strsignal', 'signal.h'),
     'HAVE_STRTOD_L': ('strtod_l', 'stdlib.h'),
     'HAVE_STRTOLL_L': ('strtoll_l', 'stdlib.h'),
     'HAVE_STRTOULL_L': ('strtoull_l', 'stdlib.h'),
     'HAVE_SYMLINK': ('symlink', 'unistd.h'),
     'HAVE_SYSCTLBYNAME': ('sysctlbyname', 'sys/sysctl.h'),
     'HAVE_TIMEGM': ('timegm', 'time.h'),
     'HAVE_UNSETENV': ('unsetenv', 'stdlib.h'),
     'HAVE_USELOCALE': ('uselocale', 'xlocale.h'),
     'HAVE_UTIMES': ('utimes', 'sys/time.h'),
     'HAVE_VALLOC': ('valloc', 'stdlib.h'),
     'HAVE_VASPRINTF': ('vasprintf', 'stdio.h'),
     'HAVE_VSNPRINTF': ('vsnprintf', 'stdio.h'),
     'HAVE_BCOPY': ('bcopy', 'strings.h'),
     'HAVE_STRERROR': ('strerror', 'string.h'),
     'HAVE_MEMMOVE': ('memmove', 'string.h'),
     'HAVE_STRTOIMAX': ('strtoimax', 'inttypes.h'),
     'HAVE_STRTOLL': ('strtoll', 'stdlib.h'),
     'HAVE_STRTOQ': ('strtoq', 'stdlib.h'),
     'HAVE_ACCEPT4': ('accept4', 'sys/socket.h'),
     'HAVE_CHMOD': ('chmod', 'sys/stat.h'),
     'HAVE_CHOWN': ('chown', 'unistd.h'),
     'HAVE_FSTAT': ('fstat', 'sys/stat.h'),
     'HAVE_GETADDRINFO': ('getaddrinfo', 'netdb.h'),
     'HAVE_GETGRGID_R': ('getgrgid_r', 'grp.h'),
     'HAVE_GETGRNAM_R': ('getgrnam_r', 'grp.h'),
     'HAVE_GETGROUPS': ('getgroups', 'grp.h'),
     'HAVE_GETOPT_LONG': ('getopt_long', 'getopt.h'),
     # Bug fix: this token previously probed 'getpwnam', which would define
     # HAVE_GETPWNAM_R on systems that only provide the non-reentrant
     # getpwnam(). Probe the reentrant function the token names.
     'HAVE_GETPWNAM_R': ('getpwnam_r', 'pwd.h'),
     'HAVE_GETPWUID_R': ('getpwuid_r', 'pwd.h'),
     'HAVE_GETUID': ('getuid', 'unistd.h'),
     'HAVE_LRINTF': ('lrintf', 'math.h'),
     'HAVE_MKFIFO': ('mkfifo', 'sys/stat.h'),
     'HAVE_MLOCK': ('mlock', 'sys/mman.h'),
     'HAVE_NANOSLEEP': ('nanosleep', 'time.h'),
     'HAVE_PIPE': ('pipe', 'unistd.h'),
     'HAVE_PPOLL': ('ppoll', 'poll.h'),
     'HAVE_REGEXEC': ('regexec', 'regex.h'),
     'HAVE_SETEGID': ('setegid', 'unistd.h'),
     'HAVE_SETEUID': ('seteuid', 'unistd.h'),
     'HAVE_SETPGID': ('setpgid', 'unistd.h'),
     'HAVE_SETREGID': ('setregid', 'unistd.h'),
     'HAVE_SETRESGID': ('setresgid', 'unistd.h'),
     'HAVE_SETRESUID': ('setresuid', 'unistd.h'),
     'HAVE_SHM_OPEN': ('shm_open', 'fcntl.h'),
     'HAVE_SLEEP': ('sleep', 'unistd.h'),
     'HAVE_STRERROR_R': ('strerror_r', 'string.h'),
     'HAVE_STRTOF': ('strtof', 'stdlib.h'),
     'HAVE_SYSCONF': ('sysconf', 'unistd.h'),
     'HAVE_USLEEP': ('usleep', 'unistd.h'),
     'HAVE_VFORK': ('vfork', 'unistd.h'),
     }
print('check_functions = [')
# Context manager closes the file handle; the previous bare open() leaked it.
with open(sys.argv[1]) as config_file:
    for line in config_file:
        try:
            token = line.split()[1]
            if token in function_data:
                fdata = function_data[token]
                print(" ['%s', '%s', '#include<%s>']," % (token, fdata[0], fdata[1]))
            elif token.startswith('HAVE_') and not token.endswith('_H'):
                # Unknown function-like token: emit a reminder comment so a
                # human can add it to function_data.
                print('# check token', token)
        except Exception:
            # Deliberate best-effort: lines without a second token (blank
            # lines, plain comments) are simply skipped.
            pass
print(']\n')

print('''foreach f : check_functions
  if cc.has_function(f.get(1), prefix : f.get(2))
    cdata.set(f.get(0), 1)
  endif
endforeach
''')
# Convert sizeof checks.
# Context manager closes the file handle; the previous bare open() leaked it.
with open(sys.argv[1]) as config_file:
    for line in config_file:
        arr = line.strip().split()
        if len(arr) != 2:
            continue
        elem = arr[1]
        if elem.startswith('SIZEOF_'):
            # SIZEOF_VOID_P -> 'void*', SIZEOF_SIZE_T -> 'size_t', etc.
            typename = elem.split('_', 1)[1].replace('_P', '*').replace('_', ' ').lower().replace('size t', 'size_t')
            print("cdata.set('%s', cc.sizeof('%s'))" % (elem, typename))

print('''
configure_file(input : 'config.h.in',
  output : 'config.h',
  configuration : cdata)''')
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\TwigBundle\DependencyInjection;
use Symfony\Bundle\TwigBundle\DependencyInjection\Compiler\AttributeExtensionPass;
use Symfony\Component\AssetMapper\AssetMapper;
use Symfony\Component\Config\FileLocator;
use Symfony\Component\Config\Resource\FileExistenceResource;
use Symfony\Component\Console\Application;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Extension\Extension;
use Symfony\Component\DependencyInjection\Loader\PhpFileLoader;
use Symfony\Component\DependencyInjection\Reference;
use Symfony\Component\Form\Form;
use Symfony\Component\Mailer\Mailer;
use Symfony\Component\Translation\LocaleSwitcher;
use Symfony\Component\Translation\Translator;
use Symfony\Component\Validator\Constraint;
use Twig\Attribute\AsTwigFilter;
use Twig\Attribute\AsTwigFunction;
use Twig\Attribute\AsTwigTest;
use Twig\Extension\ExtensionInterface;
use Twig\Extension\RuntimeExtensionInterface;
use Twig\Loader\LoaderInterface;
/**
* TwigExtension.
*
* @author Fabien Potencier <fabien@symfony.com>
* @author Jeremy Mikola <jmikola@gmail.com>
*/
class TwigExtension extends Extension
{
    /**
     * Loads the TwigBundle service configuration and wires it into the
     * container: conditional integrations (form, console, translation,
     * validator, mailer, asset-mapper), Twig environment options, template
     * paths (user-configured, per-bundle, and the default path), globals,
     * caching, and autoconfiguration for Twig extension classes/attributes.
     */
    public function load(array $configs, ContainerBuilder $container): void
    {
        $loader = new PhpFileLoader($container, new FileLocator(__DIR__.'/../Resources/config'));
        $loader->load('twig.php');
        // Optional integrations: only load wiring for packages that will be
        // installed alongside this bundle.
        if ($container::willBeAvailable('symfony/form', Form::class, ['symfony/twig-bundle'])) {
            $loader->load('form.php');
        }
        if ($container::willBeAvailable('symfony/console', Application::class, ['symfony/twig-bundle'])) {
            $loader->load('console.php');
        }
        if (!$container::willBeAvailable('symfony/translation', Translator::class, ['symfony/twig-bundle'])) {
            $container->removeDefinition('twig.translation.extractor');
        }
        if ($container::willBeAvailable('symfony/validator', Constraint::class, ['symfony/twig-bundle'])) {
            $loader->load('validator.php');
        }
        // Normalize globals declared as ['key' => ...] arrays before the
        // configuration tree processes them.
        foreach ($configs as $key => $config) {
            if (isset($config['globals'])) {
                foreach ($config['globals'] as $name => $value) {
                    if (\is_array($value) && isset($value['key'])) {
                        $configs[$key]['globals'][$name] = [
                            'key' => $name,
                            'value' => $value,
                        ];
                    }
                }
            }
        }
        $configuration = $this->getConfiguration($configs, $container);
        $config = $this->processConfiguration($configuration, $configs);
        if ($container::willBeAvailable('symfony/mailer', Mailer::class, ['symfony/twig-bundle'])) {
            $loader->load('mailer.php');
            if ($htmlToTextConverter = $config['mailer']['html_to_text_converter'] ?? null) {
                $container->getDefinition('twig.mime_body_renderer')->setArgument('$converter', new Reference($htmlToTextConverter));
            }
            if (ContainerBuilder::willBeAvailable('symfony/translation', LocaleSwitcher::class, ['symfony/framework-bundle'])) {
                $container->getDefinition('twig.mime_body_renderer')->setArgument('$localeSwitcher', new Reference('translation.locale_switcher', ContainerBuilder::IGNORE_ON_INVALID_REFERENCE));
            }
        }
        if ($container::willBeAvailable('symfony/asset-mapper', AssetMapper::class, ['symfony/twig-bundle'])) {
            $loader->load('importmap.php');
        }
        $container->setParameter('twig.form.resources', $config['form_themes']);
        $container->setParameter('twig.default_path', $config['default_path']);
        $defaultTwigPath = $container->getParameterBag()->resolveValue($config['default_path']);
        // Twig environment defaults (date/number formatting).
        $envConfiguratorDefinition = $container->getDefinition('twig.configurator.environment');
        $envConfiguratorDefinition->replaceArgument(0, $config['date']['format']);
        $envConfiguratorDefinition->replaceArgument(1, $config['date']['interval_format']);
        $envConfiguratorDefinition->replaceArgument(2, $config['date']['timezone']);
        $envConfiguratorDefinition->replaceArgument(3, $config['number_format']['decimals']);
        $envConfiguratorDefinition->replaceArgument(4, $config['number_format']['decimal_point']);
        $envConfiguratorDefinition->replaceArgument(5, $config['number_format']['thousands_separator']);
        $twigFilesystemLoaderDefinition = $container->getDefinition('twig.loader.native_filesystem');
        // register user-configured paths
        foreach ($config['paths'] as $path => $namespace) {
            if (!$namespace) {
                $twigFilesystemLoaderDefinition->addMethodCall('addPath', [$path]);
            } else {
                $twigFilesystemLoaderDefinition->addMethodCall('addPath', [$path, $namespace]);
            }
        }
        // paths are modified in ExtensionPass if forms are enabled
        $container->getDefinition('twig.template_iterator')->replaceArgument(1, $config['paths']);
        $container->getDefinition('twig.template_iterator')->replaceArgument(3, $config['file_name_pattern']);
        if ($container->hasDefinition('twig.command.lint')) {
            $container->getDefinition('twig.command.lint')->replaceArgument(1, $config['file_name_pattern'] ?: ['*.twig']);
        }
        // Register per-bundle template paths, with an "!" alias pointing at
        // the bundle's own views directory (bypasses app-level overrides).
        foreach ($this->getBundleTemplatePaths($container, $config) as $name => $paths) {
            $namespace = $this->normalizeBundleName($name);
            foreach ($paths as $path) {
                $twigFilesystemLoaderDefinition->addMethodCall('addPath', [$path, $namespace]);
            }
            if ($paths) {
                // the last path must be the bundle views directory
                $twigFilesystemLoaderDefinition->addMethodCall('addPath', [$path, '!'.$namespace]);
            }
        }
        if (file_exists($defaultTwigPath)) {
            $twigFilesystemLoaderDefinition->addMethodCall('addPath', [$defaultTwigPath]);
        }
        // Track existence so the container is rebuilt when the dir appears.
        $container->addResource(new FileExistenceResource($defaultTwigPath));
        if (!empty($config['globals'])) {
            $def = $container->getDefinition('twig');
            foreach ($config['globals'] as $key => $global) {
                if (isset($global['type']) && 'service' === $global['type']) {
                    $def->addMethodCall('addGlobal', [$key, new Reference($global['id'])]);
                } else {
                    $def->addMethodCall('addGlobal', [$key, $global['value']]);
                }
            }
        }
        // cache === true means "choose for me": fall back to the cache dir
        // when auto-reload is on or build dir equals cache dir, otherwise
        // keep the chained (build + runtime) template cache.
        if (true === $config['cache']) {
            $autoReloadOrDefault = $container->getParameterBag()->resolveValue($config['auto_reload'] ?? $config['debug']);
            $buildDir = $container->getParameter('kernel.build_dir');
            $cacheDir = $container->getParameter('kernel.cache_dir');
            if ($autoReloadOrDefault || $cacheDir === $buildDir) {
                $config['cache'] = '%kernel.cache_dir%/twig';
            }
        }
        if (true === $config['cache']) {
            $config['cache'] = new Reference('twig.template_cache.chain');
        } else {
            $container->removeDefinition('twig.template_cache.chain');
            $container->removeDefinition('twig.template_cache.runtime_cache');
            $container->removeDefinition('twig.template_cache.readonly_cache');
            $container->removeDefinition('twig.template_cache.warmup_cache');
            if (false === $config['cache']) {
                $container->removeDefinition('twig.template_cache_warmer');
            } else {
                $container->getDefinition('twig.template_cache_warmer')->replaceArgument(2, null);
            }
        }
        if (isset($config['autoescape_service'])) {
            $config['autoescape'] = [new Reference($config['autoescape_service']), $config['autoescape_service_method'] ?? '__invoke'];
        } else {
            $config['autoescape'] = 'name';
        }
        // Only pass through the options the Twig Environment understands.
        $container->getDefinition('twig')->replaceArgument(1, array_intersect_key($config, [
            'debug' => true,
            'charset' => true,
            'strict_variables' => true,
            'autoescape' => true,
            'cache' => true,
            'auto_reload' => true,
            'optimizations' => true,
        ]));
        $container->registerForAutoconfiguration(ExtensionInterface::class)->addTag('twig.extension');
        $container->registerForAutoconfiguration(LoaderInterface::class)->addTag('twig.loader');
        $container->registerForAutoconfiguration(RuntimeExtensionInterface::class)->addTag('twig.runtime');
        $container->registerAttributeForAutoconfiguration(AsTwigFilter::class, AttributeExtensionPass::autoconfigureFromAttribute(...));
        $container->registerAttributeForAutoconfiguration(AsTwigFunction::class, AttributeExtensionPass::autoconfigureFromAttribute(...));
        $container->registerAttributeForAutoconfiguration(AsTwigTest::class, AttributeExtensionPass::autoconfigureFromAttribute(...));
    }

    /**
     * Collects template directories per bundle: the app-level override
     * directory (default_path/bundles/<Name>) first, then the bundle's own
     * Resources/views or templates directory.
     *
     * @return array<string, string[]> bundle name => ordered template paths
     */
    private function getBundleTemplatePaths(ContainerBuilder $container, array $config): array
    {
        $bundleHierarchy = [];
        foreach ($container->getParameter('kernel.bundles_metadata') as $name => $bundle) {
            $defaultOverrideBundlePath = $container->getParameterBag()->resolveValue($config['default_path']).'/bundles/'.$name;
            if (file_exists($defaultOverrideBundlePath)) {
                $bundleHierarchy[$name][] = $defaultOverrideBundlePath;
            }
            $container->addResource(new FileExistenceResource($defaultOverrideBundlePath));
            if (file_exists($dir = $bundle['path'].'/Resources/views') || file_exists($dir = $bundle['path'].'/templates')) {
                $bundleHierarchy[$name][] = $dir;
            }
            $container->addResource(new FileExistenceResource($dir));
        }
        return $bundleHierarchy;
    }

    /**
     * Strips a trailing "Bundle" suffix to produce the Twig namespace
     * (e.g. "AcmeDemoBundle" => "AcmeDemo").
     */
    private function normalizeBundleName(string $name): string
    {
        if (str_ends_with($name, 'Bundle')) {
            $name = substr($name, 0, -6);
        }
        return $name;
    }
}
# -*- coding: utf-8 -*-
from openerp.tests import common
class test_inherits(common.TransactionCase):
    """Exercise ``_inherits`` delegation across a 3-level model chain
    (unit -> box -> pallet) provided by the ``test_inherits`` demo models."""

    def test_create_3_levels_inherits(self):
        """ Check that we can create an inherits on 3 levels """
        # Creating the bottom model must transparently create the delegated
        # parent records and route each field to the model that owns it.
        pallet = self.env['test.pallet'].create({
            'name': 'B',
            'field_in_box': 'box',
            'field_in_pallet': 'pallet',
        })
        self.assertTrue(pallet)
        self.assertEqual(pallet.name, 'B')
        self.assertEqual(pallet.field_in_box, 'box')
        self.assertEqual(pallet.field_in_pallet, 'pallet')

    def test_read_3_levels_inherits(self):
        """ Check that we can read an inherited field on 3 levels """
        pallet = self.env.ref('test_inherits.pallet_a')
        # read() must expose the inherited 'name' field as if it were local.
        self.assertEqual(pallet.read(['name']), [{'id': pallet.id, 'name': 'Unit A'}])

    def test_write_3_levels_inherits(self):
        """ Check that we can write an inherited field on 3 levels """
        pallet = self.env.ref('test_inherits.pallet_a')
        pallet.write({'name': 'C'})
        self.assertEqual(pallet.name, 'C')
""" submit failure or test session information to a pastebin service. """
import pytest
import sys
import tempfile
def pytest_addoption(parser):
    """Register the ``--pastebin`` option (mode: 'failed' or 'all')."""
    group = parser.getgroup("terminal reporting")
    group._addoption('--pastebin', metavar="mode",
        action='store', dest="pastebin", default=None,
        choices=['failed', 'all'],
        help="send failed|all info to bpaste.net pastebin service.")
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
    """In ``--pastebin=all`` mode, tee all terminal output into a temp file.

    Wraps the terminal reporter's write method so every byte written to the
    terminal is also captured; the captured log is uploaded later by
    ``pytest_unconfigure``.
    """
    import py
    if config.option.pastebin == "all":
        tr = config.pluginmanager.getplugin('terminalreporter')
        # if no terminal reporter plugin is present, nothing we can do here;
        # this can happen when this function executes in a slave node
        # when using pytest-xdist, for example
        if tr is not None:
            # pastebin file will be utf-8 encoded binary file
            config._pastebinfile = tempfile.TemporaryFile('w+b')
            oldwrite = tr._tw.write
            def tee_write(s, **kwargs):
                # Forward to the real writer, then persist a utf-8 copy.
                oldwrite(s, **kwargs)
                if py.builtin._istext(s):
                    s = s.encode('utf-8')
                config._pastebinfile.write(s)
            tr._tw.write = tee_write
def pytest_unconfigure(config):
    """Upload the session log captured by ``--pastebin=all`` and undo the
    terminal-writer patching installed in ``pytest_configure``."""
    if hasattr(config, '_pastebinfile'):
        # get terminal contents and delete file
        config._pastebinfile.seek(0)
        sessionlog = config._pastebinfile.read()
        config._pastebinfile.close()
        del config._pastebinfile
        # undo our patching in the terminal reporter
        tr = config.pluginmanager.getplugin('terminalreporter')
        # deleting the instance attribute re-exposes the class method
        del tr._tw.__dict__['write']
        # write summary
        tr.write_sep("=", "Sending information to Paste Service")
        pastebinurl = create_new_paste(sessionlog)
        tr.write_line("pastebin session-log: %s\n" % pastebinurl)
def create_new_paste(contents):
    """
    Creates a new paste using bpaste.net service.

    :contents: paste contents as utf-8 encoded bytes
    :returns: url to the pasted contents, or an error message string when
        the service response could not be parsed
    """
    import re
    # Python 2/3 compatible urllib imports.
    if sys.version_info < (3, 0):
        from urllib import urlopen, urlencode
    else:
        from urllib.request import urlopen
        from urllib.parse import urlencode
    params = {
        'code': contents,
        'lexer': 'python3' if sys.version_info[0] == 3 else 'python',
        'expiry': '1week',
    }
    url = 'https://bpaste.net'
    response = urlopen(url, data=urlencode(params).encode('ascii')).read()
    # Decode once: on Python 3 `response` is bytes, and the old bad-response
    # branch concatenated it with a str, raising TypeError instead of
    # returning the diagnostic message.
    text = response.decode('utf-8')
    m = re.search(r'href="/raw/(\w+)"', text)
    if m:
        return '%s/show/%s' % (url, m.group(1))
    else:
        return 'bad response: ' + text
def pytest_terminal_summary(terminalreporter):
    """In ``--pastebin=failed`` mode, paste each failure's report and print
    the resulting URL next to the failure's location/headline."""
    import _pytest.config
    if terminalreporter.config.option.pastebin != "failed":
        return
    tr = terminalreporter
    if 'failed' in tr.stats:
        terminalreporter.write_sep("=", "Sending information to Paste Service")
        for rep in terminalreporter.stats.get('failed'):
            try:
                # Prefer the "file:line: error" location of the last
                # traceback entry; fall back to the generic headline.
                msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
            except AttributeError:
                msg = tr._getfailureheadline(rep)
            # Render the full report into an in-memory terminal writer.
            tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
            rep.toterminal(tw)
            s = tw.stringio.getvalue()
            assert len(s)
            pastebinurl = create_new_paste(s)
            tr.write_line("%s --> %s" %(msg, pastebinurl))
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql_test
import (
"context"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
// TestOrderByRandom checks that `ORDER BY random()` actually randomizes row
// order: it repeatedly selects one of two values with a randomized ordering
// and loops until both values have been observed at least once.
func TestOrderByRandom(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())
	seenOne := false
	seenTwo := false
	for {
		// NOTE(review): no explicit iteration cap; if the ordering were ever
		// deterministic this relies on the test timeout to fail the test.
		row := sqlDB.QueryRow("SELECT * FROM (VALUES (1),(2)) ORDER BY random() LIMIT 1")
		var val int
		if err := row.Scan(&val); err != nil {
			t.Fatal(err)
		}
		switch val {
		case 1:
			seenOne = true
		case 2:
			seenTwo = true
		}
		if seenOne && seenTwo {
			break
		}
	}
}
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Routine for decoding the CIFAR-10 binary file format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24
# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_cifar10(filename_queue):
  """Reads and parses examples from CIFAR10 data files.

  Recommendation: if you want N-way read parallelism, call this function
  N times. This will give you N independent Readers reading different
  files & positions within those files, which will give better mixing of
  examples.

  Args:
    filename_queue: A queue of strings with the filenames to read from.

  Returns:
    An object representing a single example, with the following fields:
      height: number of rows in the result (32)
      width: number of columns in the result (32)
      depth: number of color channels in the result (3)
      key: a scalar string Tensor describing the filename & record number
        for this example.
      label: an int32 Tensor with the label in the range 0..9.
      uint8image: a [height, width, depth] uint8 Tensor with the image data
  """

  # Plain record holder; attributes are attached below.
  class CIFAR10Record(object):
    pass
  result = CIFAR10Record()

  # Dimensions of the images in the CIFAR-10 dataset.
  # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
  # input format.
  label_bytes = 1  # 2 for CIFAR-100
  result.height = 32
  result.width = 32
  result.depth = 3
  image_bytes = result.height * result.width * result.depth
  # Every record consists of a label followed by the image, with a
  # fixed number of bytes for each.
  record_bytes = label_bytes + image_bytes

  # Read a record, getting filenames from the filename_queue. No
  # header or footer in the CIFAR-10 format, so we leave header_bytes
  # and footer_bytes at their default of 0.
  # NOTE(review): this uses the TF1 queue-based input pipeline API.
  reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
  result.key, value = reader.read(filename_queue)

  # Convert from a string to a vector of uint8 that is record_bytes long.
  record_bytes = tf.decode_raw(value, tf.uint8)

  # The first bytes represent the label, which we convert from uint8->int32.
  result.label = tf.cast(
      tf.slice(record_bytes, [0], [label_bytes]), tf.int32)

  # The remaining bytes after the label represent the image, which we reshape
  # from [depth * height * width] to [depth, height, width].
  depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),
                           [result.depth, result.height, result.width])
  # Convert from [depth, height, width] to [height, width, depth].
  result.uint8image = tf.transpose(depth_major, [1, 2, 0])

  return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type.float32.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  # 16 reader threads feed the batching queue in parallel.
  num_preprocess_threads = 16
  if shuffle:
    # min_after_dequeue controls how well the shuffle mixes examples.
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(data_dir, batch_size):
  """Construct distorted input for CIFAR training using the Reader ops.
  Args:
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: if any of the five CIFAR-10 training batch files is missing.
  """
  # CIFAR-10 training data is shipped as five binary files, data_batch_1..5.
  filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
               for i in xrange(1, 6)]
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)
  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)
  # Read examples from files in the filename queue.
  read_input = read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)
  height = IMAGE_SIZE
  width = IMAGE_SIZE
  # Image processing for training the network. Note the many random
  # distortions applied to the image.
  # Randomly crop a [height, width] section of the image.
  distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
  # Randomly flip the image horizontally.
  distorted_image = tf.image.random_flip_left_right(distorted_image)
  # Because these operations are not commutative, consider randomizing
  # the order of their operation.
  distorted_image = tf.image.random_brightness(distorted_image,
                                               max_delta=63)
  distorted_image = tf.image.random_contrast(distorted_image,
                                             lower=0.2, upper=1.8)
  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(distorted_image)
  # Ensure that the random shuffling has good mixing properties by keeping
  # at least 40% of an epoch's worth of examples in the queue.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                           min_fraction_of_examples_in_queue)
  print ('Filling queue with %d CIFAR images before starting to train. '
         'This will take a few minutes.' % min_queue_examples)
  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=True)
def inputs(eval_data, data_dir, batch_size):
  """Construct input for CIFAR evaluation using the Reader ops.
  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: if any of the expected CIFAR-10 batch files is missing.
  """
  # Pick the training batches or the held-out test batch.
  if not eval_data:
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
                 for i in xrange(1, 6)]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
  else:
    filenames = [os.path.join(data_dir, 'test_batch.bin')]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)
  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)
  # Read examples from files in the filename queue.
  read_input = read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)
  height = IMAGE_SIZE
  width = IMAGE_SIZE
  # Image processing for evaluation: deterministic, unlike the training path.
  # Crop the central [height, width] of the image.
  resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                         width, height)
  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(resized_image)
  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(num_examples_per_epoch *
                           min_fraction_of_examples_in_queue)
  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=False)
"""Bot object for Dozer"""
import logging
import re
import sys
import traceback
import discord
from discord.ext import commands
from . import utils
# Module-level logger for the bot. Use logging.getLogger() rather than
# instantiating logging.Logger directly so the logger is registered with the
# logging manager and retrievable elsewhere via logging.getLogger('dozer').
# setLevel() is used instead of assigning .level so the manager's level cache
# is invalidated correctly.
DOZER_LOGGER = logging.getLogger('dozer')
DOZER_LOGGER.setLevel(logging.INFO)
DOZER_HANDLER = logging.StreamHandler(stream=sys.stdout)
DOZER_HANDLER.setLevel(logging.INFO)
DOZER_HANDLER.setFormatter(logging.Formatter('[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s'))
DOZER_LOGGER.addHandler(DOZER_HANDLER)

# Dozer requires the discord.py 1.x "rewrite"; refuse to start on 0.x.
if discord.version_info.major < 1:
    DOZER_LOGGER.error("Your installed discord.py version is too low "
                       "%d.%d.%d, please upgrade to at least 1.0.0a",
                       discord.version_info.major,
                       discord.version_info.minor,
                       discord.version_info.micro)
    sys.exit(1)
class InvalidContext(commands.CheckFailure):
    """
    Check failure raised by the global check for an invalid command context -
    executed by a bot, exceeding the global rate-limit, etc.

    Messages failing this check are silently ignored (the command-error
    handler treats it like CommandNotFound).
    """
class DozerContext(commands.Context):
    """Context subclass that scrubs mass mentions from every outgoing message."""

    async def send(self, content=None, **kwargs):  # pylint: disable=arguments-differ
        """Send a message, cleaning mass pings out of ``content`` first."""
        cleaned = content
        if cleaned is not None:
            cleaned = utils.clean(self, cleaned, mass=True, member=False,
                                  role=False, channel=False)
        return await super().send(cleaned, **kwargs)
class Dozer(commands.Bot):
    """Botty things that are critical to Dozer working."""
    _global_cooldown = commands.Cooldown(1, 1, commands.BucketType.user)  # One command per second per user

    def __init__(self, config):
        super().__init__(command_prefix=config['prefix'])
        self.config = config
        self._restarting = False
        self.check(self.global_checks)

    async def on_ready(self):
        """Things to run when the bot has initialized and signed in."""
        DOZER_LOGGER.info('Signed in as {}#{} ({})'.format(self.user.name, self.user.discriminator, self.user.id))
        if self.config['is_backup']:
            status = discord.Status.dnd
        else:
            status = discord.Status.online
        game = discord.Game(name='%shelp | %d guilds' % (self.config['prefix'], len(self.guilds)))
        try:
            await self.change_presence(activity=game, status=status)
        except TypeError:
            DOZER_LOGGER.warning("You are running an older version of the discord.py rewrite (with breaking changes)! "
                                 "To upgrade, run `pip install -r requirements.txt --upgrade`")

    async def get_context(self, message, *, cls=DozerContext):
        """Build a DozerContext (mention-scrubbing) instead of the default Context."""
        ctx = await super().get_context(message, cls=cls)
        return ctx

    async def on_command_error(self, context, exception):
        """Map command errors to user-facing replies; log unexpected ones."""
        if isinstance(exception, commands.NoPrivateMessage):
            await context.send('{}, This command cannot be used in DMs.'.format(context.author.mention))
        elif isinstance(exception, commands.UserInputError):
            await context.send('{}, {}'.format(context.author.mention, self.format_error(context, exception)))
        elif isinstance(exception, commands.NotOwner):
            await context.send('{}, {}'.format(context.author.mention, exception.args[0]))
        elif isinstance(exception, commands.MissingPermissions):
            permission_names = [name.replace('guild', 'server').replace('_', ' ').title() for name in exception.missing_perms]
            await context.send('{}, you need {} permissions to run this command!'.format(
                context.author.mention, utils.pretty_concat(permission_names)))
        elif isinstance(exception, commands.BotMissingPermissions):
            permission_names = [name.replace('guild', 'server').replace('_', ' ').title() for name in exception.missing_perms]
            await context.send('{}, I need {} permissions to run this command!'.format(
                context.author.mention, utils.pretty_concat(permission_names)))
        elif isinstance(exception, commands.CommandOnCooldown):
            await context.send(
                '{}, That command is on cooldown! Try again in {:.2f}s!'.format(context.author.mention, exception.retry_after))
        elif isinstance(exception, (commands.CommandNotFound, InvalidContext)):
            pass  # Silent ignore
        else:
            await context.send('```\n%s\n```' % ''.join(traceback.format_exception_only(type(exception), exception)).strip())
            # BUG FIX: the previous format strings used %d placeholders for
            # non-integer objects (command, guild, channel, author, message
            # content), which raised a formatting error and masked the real
            # exception. Use %s/%r and explicit .id attributes instead.
            if isinstance(context.channel, discord.TextChannel):
                DOZER_LOGGER.error('Error in command <%s> (guild %r(%s) channel %r(%s) author %r(%s) message %r)',
                                   context.command, context.guild, context.guild.id,
                                   context.channel, context.channel.id,
                                   context.author, context.author.id, context.message.content)
            else:
                DOZER_LOGGER.error('Error in command <%s> (DM %r(%s) message %r)', context.command,
                                   context.channel.recipient, context.channel.recipient.id,
                                   context.message.content)
            DOZER_LOGGER.error(''.join(traceback.format_exception(type(exception), exception, exception.__traceback__)))

    @staticmethod
    def format_error(ctx, err, *, word_re=re.compile('[A-Z][a-z]+')):
        """Turns an exception into a user-friendly (or -friendlier, at least) error message."""
        type_words = word_re.findall(type(err).__name__)
        type_msg = ' '.join(map(str.lower, type_words))
        if err.args:
            return '%s: %s' % (type_msg, utils.clean(ctx, err.args[0]))
        else:
            return type_msg

    def global_checks(self, ctx):
        """Checks that should be executed before passed to the command."""
        if ctx.author.bot:
            raise InvalidContext('Bots cannot run commands!')
        retry_after = self._global_cooldown.update_rate_limit()
        if retry_after:
            raise InvalidContext('Global rate-limit exceeded!')
        return True

    def run(self, *args, **kwargs):
        """Start the bot, scrubbing the token from config first to prevent dumping."""
        token = self.config['discord_token']
        del self.config['discord_token']  # Prevent token dumping
        # BUG FIX: forward *args/**kwargs instead of silently dropping them.
        super().run(token, *args, **kwargs)

    async def shutdown(self, restart=False):
        """Shuts down the bot; if restart is True the supervisor may relaunch it."""
        self._restarting = restart
        await self.logout()
        await self.close()
        self.loop.stop()
# Downloaded from http://code.activestate.com/recipes/576693/
# Licensed under the MIT License
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Already initialized (e.g. __init__ called twice); keep state.
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk forward through the linked list, yielding keys.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk backward through the linked list, yielding keys.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            # State was never initialized; nothing order-related to clear.
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent key).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest key).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v

        '''
        # 'self' is taken positionally from *args so that a keyword argument
        # literally named 'self' in **kwds does not collide with it.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # The shared mutable default is deliberate: it is a per-process table
        # of (object id, thread id) pairs used as a recursion guard.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip private bookkeeping attributes so the pickle only carries items.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
from tmapi.models import Association
from base_manager import BaseManager
from entity_type import EntityType
from property_assertion import PropertyAssertion
class EntityTypePropertyAssertionManager (BaseManager):

    """Manager restricting the queryset to entity-type property assertions."""

    def filter_by_authority_entity_type (self, authority, entity_type):
        """Return assertions scoped to `authority` whose property role is
        played by `entity_type`."""
        scoped = self.filter(scope=authority)
        return scoped.filter(
            roles__type=self.eats_topic_map.property_role_type,
            roles__player=entity_type)

    def filter_by_entity (self, entity):
        """Return assertions whose entity role is played by `entity`."""
        return self.filter(
            roles__type=self.eats_topic_map.entity_role_type,
            roles__player=entity)

    def get_queryset (self):
        base_qs = super(EntityTypePropertyAssertionManager, self).get_queryset()
        return base_qs.filter(
            type=self.eats_topic_map.entity_type_assertion_type)
class EntityTypePropertyAssertion (Association, PropertyAssertion):

    """Proxy association asserting that an entity has a given entity type."""

    objects = EntityTypePropertyAssertionManager()

    class Meta:
        proxy = True
        app_label = 'eats'

    @property
    def entity_type (self):
        """Returns the entity type being asserted.

        :rtype: `EntityType`

        """
        if not hasattr(self, '_entity_type'):
            # Resolve lazily from the property role and cache the result.
            role = self.get_roles(self.eats_topic_map.property_role_type)[0]
            self._entity_type = role.get_player(proxy=EntityType)
        return self._entity_type

    def set_players (self, entity, entity_type):
        """Sets the entity and entity type involved in this property
        assertion.

        :param entity: the entity
        :type entity: `Entity`
        :param entity_type: the entity type
        :type entity_type: `Topic`

        """
        already_set = hasattr(self, '_entity') or hasattr(self, '_entity_type')
        if already_set:
            raise Exception(
                'set_players may be called only once for a property assertion')
        self.create_role(self.eats_topic_map.property_role_type, entity_type)
        self._entity_type = entity_type
        self.create_role(self.eats_topic_map.entity_role_type, entity)
        self._entity = entity

    def update (self, entity_type):
        """Updates this property assertion.

        :param entity_type: entity type
        :type entity_type: `Topic`

        """
        if entity_type == self.entity_type:
            return
        self.authority.validate_components(entity_type=entity_type)
        role = self.get_roles(self.eats_topic_map.property_role_type)[0]
        role.set_player(entity_type)
        self._entity_type = entity_type
# -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d" # date format for converting filenames to dates
dateTimeFormat = "%Y%m%d %H:%M:%S"
def fileName2date(fName):
    '''convert filename to date'''
    # Filenames look like "SYMBOL_YYYYMMDD.ext"; the date is the second
    # underscore-separated token of the stem.
    stem = os.path.splitext(fName)[0]
    datePart = stem.split('_')[1]
    return dt.datetime.strptime(datePart, dateFormat).date()
def parseDateTime(dateTimeStr):
    '''parse a "YYYYMMDD HH:MM:SS" string (dateTimeFormat) to a datetime'''
    return dt.datetime.strptime(dateTimeStr,dateTimeFormat)
def loadCsv(fName):
    ''' load DataFrame from csv file '''
    # Hand-rolled parser: column 0 is a datetime index, the rest are floats.
    with open(fName, 'r') as fid:
        lines = fid.readlines()

    header = [col.strip() for col in lines[0].strip().split(',')[1:]]
    dates = []
    columns = [[] for _ in range(len(header))]

    for line in lines[1:]:
        fields = line.rstrip().split(',')
        dates.append(parseDateTime(fields[0]))
        for column, field in zip(columns, fields[1:]):
            column.append(float(field))

    return pd.DataFrame(data=dict(zip(header, columns)), index=pd.Index(dates))
class HistDataCsv(object):
    '''class for working with historic database in .csv format

    One directory per symbol; one csv file per trading day, named
    SYMBOL_YYYYMMDD.csv.
    '''
    def __init__(self,symbol,dbDir,autoCreateDir=False):
        # symbol: ticker string; dbDir: root of the csv database;
        # autoCreateDir: create the per-symbol directory if missing.
        self.symbol = symbol
        self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))

        if not os.path.exists(self.dbDir) and autoCreateDir:
            print 'Creating data directory ', self.dbDir
            os.mkdir(self.dbDir)

        # Available dates, derived from the filenames present on disk.
        self.dates = []
        for fName in os.listdir(self.dbDir):
            self.dates.append(fileName2date(fName))

    def saveData(self,date, df,lowerCaseColumns=True):
        ''' add data to database'''

        if lowerCaseColumns: # this should provide consistency to column names. All lowercase
            df.columns = [ c.lower() for c in df.columns]

        s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
        dest = os.path.join(self.dbDir,s) # full path destination
        print 'Saving data to: ', dest
        df.to_csv(dest)

    def loadDate(self,date):
        ''' load data for a single date, returning a DataFrame '''
        s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name

        df = pd.DataFrame.from_csv(os.path.join(self.dbDir,s))
        # Strip stray whitespace from column names read off disk.
        cols = [col.strip() for col in df.columns.tolist()]
        df.columns = cols
        #df = loadCsv(os.path.join(self.dbDir,s))
        return df

    def loadDates(self,dates):
        ''' load multiple dates, concantenating to one DataFrame '''
        tmp =[]
        print 'Loading multiple dates for ' , self.symbol
        p = ProgressBar(len(dates))

        for i,date in enumerate(dates):
            tmp.append(self.loadDate(date))
            p.animate(i+1)

        print ''
        return pd.concat(tmp)

    def createOHLC(self):
        ''' create ohlc from intraday data

        open comes from the 'open' column, high/low from the 'wap'
        (weighted average price) column, close from 'close'.
        '''
        ohlc = pd.DataFrame(index=self.dates, columns=['open','high','low','close'])

        for date in self.dates:
            print 'Processing', date
            try:
                df = self.loadDate(date)

                ohlc.set_value(date,'open',df['open'][0])
                ohlc.set_value(date,'high',df['wap'].max())
                ohlc.set_value(date,'low', df['wap'].min())
                ohlc.set_value(date,'close',df['close'][-1])

            except Exception as e:
                # Best-effort: skip days whose file is malformed or missing columns.
                print 'Could not convert:', e

        return ohlc

    def __repr__(self):
        return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
class HistDatabase(object):
    ''' class working with multiple symbols at once

    Wraps one HistDataCsv per subdirectory of dataDir.
    '''

    def __init__(self, dataDir):

        # get symbols from directory names
        symbols = []
        for l in os.listdir(dataDir):
            if os.path.isdir(os.path.join(dataDir,l)):
                symbols.append(l)

        #build dataset
        self.csv = {} # dict of HistDataCsv halndlers
        for symbol in symbols:
            self.csv[symbol] = HistDataCsv(symbol,dataDir)

    def loadDates(self,dates=None):
        '''
        get data for all symbols as wide panel
        provide a dates list. If no dates list is provided, common dates are used.
        '''
        if dates is None: dates=self.commonDates

        tmp = {}

        for k,v in self.csv.iteritems():
            tmp[k] = v.loadDates(dates)

        return pd.WidePanel(tmp)

    def toHDF(self,dataFile,dates=None):
        ''' write wide panel data to a hdfstore file '''

        if dates is None: dates=self.commonDates
        store = pd.HDFStore(dataFile)
        wp = self.loadDates(dates)

        store['data'] = wp
        store.close()

    @property
    def commonDates(self):
        ''' return dates common for all symbols '''
        t = [v.dates for v in self.csv.itervalues()] # get all dates in a list
        # Intersect every symbol's date set, then sort chronologically.
        d = list(set(t[0]).intersection(*t[1:]))
        return sorted(d)

    def __repr__(self):
        s = '-----Hist CSV Database-----\n'
        for k,v in self.csv.iteritems():
            s+= (str(v)+'\n')
        return s
#--------------------
if __name__=='__main__':
    # Ad-hoc smoke test: load one day of SPY/VXX closes from a local
    # 30-second-bar database and pair them up side by side.
    dbDir =os.path.normpath('D:/data/30sec')
    vxx = HistDataCsv('VXX',dbDir)
    spy = HistDataCsv('SPY',dbDir)
    #
    date = dt.date(2012,8,31)
    print date
    #
    pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})

    print pair.tail()
import {makeArray, print} from 'shared-runtime';
// Fixture: exercises codegen for an IIFE appearing as a call argument
// alongside a plain call in the same argument list.
function useTest() {
  return makeArray<number | void>(
    print(1),
    (function foo() {
      print(2);
      return 2;
    })(),
  );
}
// Standard fixture harness entry: the evaluator invokes `fn` with `params`.
export const FIXTURE_ENTRYPOINT = {
  fn: useTest,
  params: [],
};
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.page import page as page_module
from telemetry.page.actions import page_action
class ClickElementAction(page_action.PageAction):
  """Page action that clicks a DOM element found by CSS selector or by text.

  Recognized attributes (set on the action by the page set):
    selector: CSS selector of the element to click (takes precedence).
    text: visible text used to locate the element if no selector is given.
    wait_for_navigate: if present, wrap the click in a navigation wait.
    wait_for_href_change: if present, poll until document.location.href
      changes after the click.
  Raises page_action.PageActionFailed when the element cannot be found.
  """
  def __init__(self, attributes=None):
    super(ClickElementAction, self).__init__(attributes)

  def RunAction(self, page, tab, previous_action):
    def DoClick():
      # One of 'selector' or 'text' must have been provided on the action.
      assert hasattr(self, 'selector') or hasattr(self, 'text')
      if hasattr(self, 'selector'):
        code = 'document.querySelector(\'' + self.selector + '\').click();'
        try:
          tab.ExecuteJavaScript(code)
        except exceptions.EvaluateException:
          raise page_action.PageActionFailed(
              'Cannot find element with selector ' + self.selector)
      else:
        callback_code = 'function(element) { element.click(); }'
        try:
          util.FindElementAndPerformAction(tab, self.text, callback_code)
        except exceptions.EvaluateException:
          raise page_action.PageActionFailed(
              'Cannot find element with text ' + self.text)
    if hasattr(self, 'wait_for_navigate'):
      tab.PerformActionAndWaitForNavigate(DoClick)
    elif hasattr(self, 'wait_for_href_change'):
      old_url = tab.EvaluateJavaScript('document.location.href')
      DoClick()
      # Poll (up to 60s) until the click results in a new location.
      util.WaitFor(lambda: tab.EvaluateJavaScript(
          'document.location.href') != old_url, 60)
    else:
      DoClick()
      # NOTE(review): calls the unbound Page method with this action as
      # 'self' -- appears deliberate in this legacy code, but confirm.
      page_module.Page.WaitForPageToLoad(self, tab, 60)
    tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
# pylint: disable=missing-docstring
from django.core.cache import cache
from django.test.utils import override_settings
from lang_pref import LANGUAGE_KEY
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_TOY_MODULESTORE
from student.models import anonymous_id_for_user
from student.models import UserProfile
from student.roles import CourseStaffRole, CourseInstructorRole
from student.tests.factories import UserFactory, UserProfileFactory
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
# Will also run default tests for IDTokens and UserInfo
from oauth2_provider.tests import IDTokenTestCase, UserInfoTestCase
class BaseTestMixin(ModuleStoreTestCase):
    """Shared fixture for the ID-token and userinfo test cases below."""
    profile = None

    MODULESTORE = TEST_DATA_MIXED_TOY_MODULESTORE

    def setUp(self):
        super(BaseTestMixin, self).setUp()
        self.user_factory = UserFactory
        course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
        self.course_key = course_key
        self.course_id = unicode(course_key)
        self.set_user(self.make_user())

    def set_user(self, user):
        """Install `user` as the acting user and attach a profile to it."""
        super(BaseTestMixin, self).set_user(user)
        self.profile = UserProfileFactory(user=self.user)
class IDTokenTest(BaseTestMixin, IDTokenTestCase):
    """Tests for the claims carried in the OIDC ID token."""

    def setUp(self):
        super(IDTokenTest, self).setUp()
        # CourseAccessHandler uses the application cache.
        cache.clear()

    def test_sub_claim(self):
        # 'sub' must be the user's anonymous id, not a real identifier.
        scopes, claims = self.get_id_token_values('openid')
        self.assertIn('openid', scopes)
        sub = claims['sub']
        expected_sub = anonymous_id_for_user(self.user, None)
        self.assertEqual(sub, expected_sub)

    def test_user_name_claim(self):
        # 'name' must come from the user's profile, not the account username.
        _scopes, claims = self.get_id_token_values('openid profile')
        claim_name = claims['name']
        user_profile = UserProfile.objects.get(user=self.user)
        user_name = user_profile.name
        self.assertEqual(claim_name, user_name)

    @override_settings(LANGUAGE_CODE='en')
    def test_user_without_locale_claim(self):
        # With no explicit language preference, 'locale' falls back to the
        # platform LANGUAGE_CODE.
        scopes, claims = self.get_id_token_values('openid profile')
        self.assertIn('profile', scopes)
        self.assertEqual(claims['locale'], 'en')

    def test_user_with_locale_claim(self):
        language = 'en'
        set_user_preference(self.user, LANGUAGE_KEY, language)
        scopes, claims = self.get_id_token_values('openid profile')
        self.assertIn('profile', scopes)
        locale = claims['locale']
        self.assertEqual(language, locale)

    def test_no_special_course_access(self):
        # A user with no course roles gets neither scope nor claim.
        scopes, claims = self.get_id_token_values('openid course_instructor course_staff')
        self.assertNotIn('course_staff', scopes)
        self.assertNotIn('staff_courses', claims)
        self.assertNotIn('course_instructor', scopes)
        self.assertNotIn('instructor_courses', claims)

    def test_course_staff_courses(self):
        CourseStaffRole(self.course_key).add_users(self.user)
        scopes, claims = self.get_id_token_values('openid course_staff')
        self.assertIn('course_staff', scopes)
        self.assertNotIn('staff_courses', claims)  # should not return courses in id_token

    def test_course_instructor_courses(self):
        CourseInstructorRole(self.course_key).add_users(self.user)
        scopes, claims = self.get_id_token_values('openid course_instructor')
        self.assertIn('course_instructor', scopes)
        self.assertNotIn('instructor_courses', claims)  # should not return courses in id_token

    def test_course_staff_courses_with_claims(self):
        # Explicitly requested course claims are filtered to courses the
        # user actually has access to.
        CourseStaffRole(self.course_key).add_users(self.user)
        course_id = unicode(self.course_key)
        nonexistent_course_id = 'some/other/course'
        claims = {
            'staff_courses': {
                'values': [course_id, nonexistent_course_id],
                'essential': True,
            }
        }
        scopes, claims = self.get_id_token_values(scope='openid course_staff', claims=claims)
        self.assertIn('course_staff', scopes)
        self.assertIn('staff_courses', claims)
        self.assertEqual(len(claims['staff_courses']), 1)
        self.assertIn(course_id, claims['staff_courses'])
        self.assertNotIn(nonexistent_course_id, claims['staff_courses'])

    def test_permissions_scope(self):
        # 'administrator' mirrors the Django is_staff flag.
        scopes, claims = self.get_id_token_values('openid profile permissions')
        self.assertIn('permissions', scopes)
        self.assertFalse(claims['administrator'])
        self.user.is_staff = True
        self.user.save()
        _scopes, claims = self.get_id_token_values('openid profile permissions')
        self.assertTrue(claims['administrator'])
class UserInfoTest(BaseTestMixin, UserInfoTestCase):
    """Tests for the claims returned by the userinfo endpoint."""

    def token_for_scope(self, scope):
        """Return (full_scope, access_token) for 'openid' plus `scope`."""
        full_scope = 'openid %s' % scope
        self.set_access_token_scope(full_scope)
        token = self.access_token.token  # pylint: disable=no-member
        return full_scope, token

    def get_with_scope(self, scope):
        """Hit userinfo with the given scope; assert 200 and return claims."""
        scope, token = self.token_for_scope(scope)
        result, claims = self.get_userinfo(token, scope)
        self.assertEqual(result.status_code, 200)
        return claims

    def get_with_claim_value(self, scope, claim, values):
        """Hit userinfo requesting specific `values` for `claim`; return claims."""
        _full_scope, token = self.token_for_scope(scope)
        result, claims = self.get_userinfo(
            token,
            claims={claim: {'values': values}}
        )
        self.assertEqual(result.status_code, 200)
        return claims

    def test_request_staff_courses_using_scope(self):
        CourseStaffRole(self.course_key).add_users(self.user)
        claims = self.get_with_scope('course_staff')
        courses = claims['staff_courses']
        self.assertIn(self.course_id, courses)
        self.assertEqual(len(courses), 1)

    def test_request_instructor_courses_using_scope(self):
        CourseInstructorRole(self.course_key).add_users(self.user)
        claims = self.get_with_scope('course_instructor')
        courses = claims['instructor_courses']
        self.assertIn(self.course_id, courses)
        self.assertEqual(len(courses), 1)

    def test_request_staff_courses_with_claims(self):
        # Requested values are filtered down to courses the user can access.
        CourseStaffRole(self.course_key).add_users(self.user)
        values = [self.course_id, 'some_invalid_course']
        claims = self.get_with_claim_value('course_staff', 'staff_courses', values)
        self.assertEqual(len(claims), 2)
        courses = claims['staff_courses']
        self.assertIn(self.course_id, courses)
        self.assertEqual(len(courses), 1)

    def test_request_instructor_courses_with_claims(self):
        CourseInstructorRole(self.course_key).add_users(self.user)
        values = ['edX/toy/TT_2012_Fall', self.course_id, 'invalid_course_id']
        claims = self.get_with_claim_value('course_instructor', 'instructor_courses', values)
        self.assertEqual(len(claims), 2)
        courses = claims['instructor_courses']
        self.assertIn(self.course_id, courses)
        self.assertEqual(len(courses), 1)

    def test_permissions_scope(self):
        # 'administrator' mirrors the Django is_staff flag.
        claims = self.get_with_scope('permissions')
        self.assertIn('administrator', claims)
        self.assertFalse(claims['administrator'])
        self.user.is_staff = True
        self.user.save()
        claims = self.get_with_scope('permissions')
        self.assertTrue(claims['administrator'])
// RUN: rm -rf %t && mkdir -p %t
// RUN: clang-doc --extra-arg -std=c++20 --output=%t --format=json --executor=standalone %s
// RUN: FileCheck %s < %t/json/GlobalNamespace/index.json
// Minimal constrained-template surface for clang-doc's JSON emitter.
// NOTE(review): plain // comments are assumed to be ignored by clang-doc
// here since the RUN line does not pass -fparse-all-comments; confirm the
// CHECK lines still match after this change.
template<typename T>
concept Incrementable = requires(T x) {
  ++x;
  x++;
};
// Constrained via a trailing requires-clause.
template<typename T> void increment(T t) requires Incrementable<T>;
// Constrained via a type-constraint parameter and a constrained-auto return.
template<Incrementable T> Incrementable auto incrementTwo(T t);
// CHECK: "Functions": [
// CHECK-NEXT: {
// CHECK-NEXT: "InfoType": "function",
// CHECK-NEXT: "IsStatic": false,
// CHECK-NEXT: "Name": "increment",
// CHECK-NEXT: "Params": [
// CHECK-NEXT: {
// CHECK-NEXT: "End": true,
// CHECK-NEXT: "Name": "t",
// CHECK-NEXT: "Type": {
// CHECK-NEXT: "Name": "T",
// CHECK-NEXT: "QualName": "T",
// CHECK-NEXT: "USR": "0000000000000000000000000000000000000000"
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ],
// CHECK-NEXT: "ReturnType": {
// CHECK-NEXT: "IsBuiltIn": true,
// CHECK-NEXT: "IsTemplate": false,
// CHECK-NEXT: "Name": "void",
// CHECK-NEXT: "QualName": "void",
// CHECK-NEXT: "USR": "0000000000000000000000000000000000000000"
// CHECK-NEXT: },
// CHECK-NEXT: "Template": {
// CHECK-NEXT: "Constraints": [
// CHECK-NEXT: {
// CHECK-NEXT: "End": true,
// CHECK-NEXT: "Expression": "Incrementable<T>",
// CHECK-NEXT: "Name": "Incrementable",
// CHECK-NEXT: "QualName": "Incrementable",
// CHECK-NEXT: "USR": "{{[0-9A-F]*}}"
// CHECK-NEXT: }
// CHECK-NEXT: ],
// CHECK-NEXT: "Parameters": [
// CHECK-NEXT: {
// CHECK-NEXT: "End": true,
// CHECK-NEXT: "Param": "typename T"
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: },
// CHECK-NEXT: "USR": "{{[0-9A-F]*}}"
// CHECK-NEXT: },
// CHECK-NEXT: {
// CHECK-NEXT: "End": true,
// CHECK-NEXT: "InfoType": "function",
// CHECK-NEXT: "IsStatic": false,
// CHECK-NEXT: "Name": "incrementTwo",
// CHECK-NEXT: "Params": [
// CHECK-NEXT: {
// CHECK-NEXT: "End": true,
// CHECK-NEXT: "Name": "t",
// CHECK-NEXT: "Type": {
// CHECK-NEXT: "Name": "T",
// CHECK-NEXT: "QualName": "T",
// CHECK-NEXT: "USR": "0000000000000000000000000000000000000000"
// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: ],
// CHECK-NEXT: "ReturnType": {
// CHECK-NEXT: "IsBuiltIn": false,
// CHECK-NEXT: "IsTemplate": false,
// CHECK-NEXT: "Name": "Incrementable auto",
// CHECK-NEXT: "QualName": "Incrementable auto",
// CHECK-NEXT: "USR": "0000000000000000000000000000000000000000"
// CHECK-NEXT: },
// CHECK-NEXT: "Template": {
// CHECK-NEXT: "Constraints": [
// CHECK-NEXT: {
// CHECK-NEXT: "End": true,
// CHECK-NEXT: "Expression": "Incrementable<T>",
// CHECK-NEXT: "Name": "Incrementable",
// CHECK-NEXT: "QualName": "Incrementable",
// CHECK-NEXT: "USR": "{{[0-9A-F]*}}"
// CHECK-NEXT: }
// CHECK-NEXT: ],
// CHECK-NEXT: "Parameters": [
// CHECK-NEXT: {
// CHECK-NEXT: "End": true,
// CHECK-NEXT: "Param": "Incrementable T"
// CHECK-NEXT: }
// CHECK-NEXT: ]
// CHECK-NEXT: },
// CHECK-NEXT: "USR": "{{[0-9A-F]*}}"
// CHECK-NEXT: } | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/test/clang-doc/json/function-requires.cpp |
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_FMT_FORMATTER
#define LIEF_FMT_FORMATTER
#include <spdlog/fmt/fmt.h>
#include <spdlog/fmt/ranges.h>

// FMT_FORMATTER(T, F) declares a fmt::formatter specialization for type T:
// parse() is a no-op that ignores any format spec, and format() writes the
// value produced by calling F on the object being formatted.
// NOTE: comments must stay outside the macro body below -- a '//' inside a
// continued line would comment out the trailing backslash.
#define FMT_FORMATTER(T, F) \
template <typename Char> struct fmt::formatter<T, Char> { \
  template <typename ParseContext> \
  constexpr auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { \
    return ctx.begin(); \
  } \
  template <typename FormatContext> \
  auto format(const T& p, FormatContext& ctx) const \
      -> decltype(ctx.out()) { \
    auto out = ctx.out(); \
    out = detail::write<Char>(out, F(p)); \
    return out; \
  } \
}
#endif
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from units.compat import unittest
from units.compat.mock import MagicMock, patch
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import iteritems
from ansible.playbook.play import Play
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
from ansible.vars.manager import VariableManager
class TestVariableManager(unittest.TestCase):
    """Unit tests for VariableManager.get_vars() sourcing and precedence."""

    def test_basic_manager(self):
        # With no data at all, get_vars() must still supply the magic vars.
        fake_loader = DictDataLoader({})
        mock_inventory = MagicMock()
        v = VariableManager(loader=fake_loader, inventory=mock_inventory)
        variables = v.get_vars(use_cache=False)
        # Check var manager expected values, never check: ['omit', 'vars']
        # FIXME: add the following ['ansible_version', 'ansible_playbook_python', 'groups']
        for varname, value in (('playbook_dir', os.path.abspath('.')), ):
            self.assertEqual(variables[varname], value)

    def test_variable_manager_extra_vars(self):
        # Values injected into _extra_vars must come back verbatim.
        fake_loader = DictDataLoader({})
        extra_vars = dict(a=1, b=2, c=3)
        mock_inventory = MagicMock()
        v = VariableManager(loader=fake_loader, inventory=mock_inventory)
        # override internal extra_vars loading
        v._extra_vars = extra_vars
        myvars = v.get_vars(use_cache=False)
        for (key, val) in iteritems(extra_vars):
            self.assertEqual(myvars.get(key), val)

    def test_variable_manager_options_vars(self):
        # NOTE(review): this assigns _extra_vars (not an options-specific
        # attribute), making it a duplicate of the extra_vars test — confirm
        # whether an _options_vars override was intended.
        fake_loader = DictDataLoader({})
        options_vars = dict(a=1, b=2, c=3)
        mock_inventory = MagicMock()
        v = VariableManager(loader=fake_loader, inventory=mock_inventory)
        # override internal options_vars loading
        v._extra_vars = options_vars
        myvars = v.get_vars(use_cache=False)
        for (key, val) in iteritems(options_vars):
            self.assertEqual(myvars.get(key), val)

    def test_variable_manager_play_vars(self):
        # Play-level vars flow through get_vars(play=...).
        fake_loader = DictDataLoader({})
        mock_play = MagicMock()
        mock_play.get_vars.return_value = dict(foo="bar")
        mock_play.get_roles.return_value = []
        mock_play.get_vars_files.return_value = []
        mock_inventory = MagicMock()
        v = VariableManager(loader=fake_loader, inventory=mock_inventory)
        self.assertEqual(v.get_vars(play=mock_play, use_cache=False).get("foo"), "bar")

    def test_variable_manager_play_vars_files(self):
        # vars_files named by the play are resolved through the DataLoader.
        fake_loader = DictDataLoader({
            __file__: """
            foo: bar
            """
        })
        mock_play = MagicMock()
        mock_play.get_vars.return_value = dict()
        mock_play.get_roles.return_value = []
        mock_play.get_vars_files.return_value = [__file__]
        mock_inventory = MagicMock()
        v = VariableManager(inventory=mock_inventory, loader=fake_loader)
        self.assertEqual(v.get_vars(play=mock_play, use_cache=False).get("foo"), "bar")

    def test_variable_manager_task_vars(self):
        # FIXME: BCS make this work
        # Disabled: everything below the bare return is intentionally dead.
        return
        # pylint: disable=unreachable
        fake_loader = DictDataLoader({})
        mock_task = MagicMock()
        mock_task._role = None
        mock_task.loop = None
        mock_task.get_vars.return_value = dict(foo="bar")
        mock_task.get_include_params.return_value = dict()
        mock_all = MagicMock()
        mock_all.get_vars.return_value = {}
        mock_all.get_file_vars.return_value = {}
        mock_host = MagicMock()
        mock_host.get.name.return_value = 'test01'
        mock_host.get_vars.return_value = {}
        mock_host.get_host_vars.return_value = {}
        mock_inventory = MagicMock()
        mock_inventory.hosts.get.return_value = mock_host
        mock_inventory.hosts.get.name.return_value = 'test01'
        mock_inventory.get_host.return_value = mock_host
        mock_inventory.groups.__getitem__.return_value = mock_all
        v = VariableManager(loader=fake_loader, inventory=mock_inventory)
        self.assertEqual(v.get_vars(task=mock_task, use_cache=False).get("foo"), "bar")

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_variable_manager_precedence(self):
        # FIXME: this needs to be redone as dataloader is not the automatic source of data anymore
        # Disabled: everything below the bare return is intentionally dead.
        return
        # pylint: disable=unreachable
        '''
        Tests complex variations and combinations of get_vars() with different
        objects to modify the context under which variables are merged.
        '''
        # FIXME: BCS makethiswork
        # return True
        mock_inventory = MagicMock()
        inventory1_filedata = """
        [group2:children]
        group1
        [group1]
        host1 host_var=host_var_from_inventory_host1
        [group1:vars]
        group_var = group_var_from_inventory_group1
        [group2:vars]
        group_var = group_var_from_inventory_group2
        """
        fake_loader = DictDataLoader({
            # inventory1
            '/etc/ansible/inventory1': inventory1_filedata,
            # role defaults_only1
            '/etc/ansible/roles/defaults_only1/defaults/main.yml': """
            default_var: "default_var_from_defaults_only1"
            host_var: "host_var_from_defaults_only1"
            group_var: "group_var_from_defaults_only1"
            group_var_all: "group_var_all_from_defaults_only1"
            extra_var: "extra_var_from_defaults_only1"
            """,
            '/etc/ansible/roles/defaults_only1/tasks/main.yml': """
            - debug: msg="here i am"
            """,
            # role defaults_only2
            '/etc/ansible/roles/defaults_only2/defaults/main.yml': """
            default_var: "default_var_from_defaults_only2"
            host_var: "host_var_from_defaults_only2"
            group_var: "group_var_from_defaults_only2"
            group_var_all: "group_var_all_from_defaults_only2"
            extra_var: "extra_var_from_defaults_only2"
            """,
        })
        inv1 = InventoryManager(loader=fake_loader, sources=['/etc/ansible/inventory1'])
        v = VariableManager(inventory=mock_inventory, loader=fake_loader)
        play1 = Play.load(dict(
            hosts=['all'],
            roles=['defaults_only1', 'defaults_only2'],
        ), loader=fake_loader, variable_manager=v)
        # first we assert that the defaults as viewed as a whole are the merged results
        # of the defaults from each role, with the last role defined "winning" when
        # there is a variable naming conflict
        res = v.get_vars(play=play1)
        self.assertEqual(res['default_var'], 'default_var_from_defaults_only2')
        # next, we assert that when vars are viewed from the context of a task within a
        # role, that task will see its own role defaults before any other role's
        blocks = play1.compile()
        task = blocks[1].block[0]
        res = v.get_vars(play=play1, task=task)
        self.assertEqual(res['default_var'], 'default_var_from_defaults_only1')
        # next we assert the precedence of inventory variables
        v.set_inventory(inv1)
        h1 = inv1.get_host('host1')
        res = v.get_vars(play=play1, host=h1)
        self.assertEqual(res['group_var'], 'group_var_from_inventory_group1')
        self.assertEqual(res['host_var'], 'host_var_from_inventory_host1')
        # next we test with group_vars/ files loaded
        fake_loader.push("/etc/ansible/group_vars/all", """
        group_var_all: group_var_all_from_group_vars_all
        """)
        fake_loader.push("/etc/ansible/group_vars/group1", """
        group_var: group_var_from_group_vars_group1
        """)
        fake_loader.push("/etc/ansible/group_vars/group3", """
        # this is a dummy, which should not be used anywhere
        group_var: group_var_from_group_vars_group3
        """)
        fake_loader.push("/etc/ansible/host_vars/host1", """
        host_var: host_var_from_host_vars_host1
        """)
        fake_loader.push("group_vars/group1", """
        playbook_group_var: playbook_group_var
        """)
        fake_loader.push("host_vars/host1", """
        playbook_host_var: playbook_host_var
        """)
        res = v.get_vars(play=play1, host=h1)
        # self.assertEqual(res['group_var'], 'group_var_from_group_vars_group1')
        # self.assertEqual(res['group_var_all'], 'group_var_all_from_group_vars_all')
        # self.assertEqual(res['playbook_group_var'], 'playbook_group_var')
        # self.assertEqual(res['host_var'], 'host_var_from_host_vars_host1')
        # self.assertEqual(res['playbook_host_var'], 'playbook_host_var')
        # add in the fact cache
        v._fact_cache['host1'] = dict(fact_cache_var="fact_cache_var_from_fact_cache")
        res = v.get_vars(play=play1, host=h1)
        self.assertEqual(res['fact_cache_var'], 'fact_cache_var_from_fact_cache')

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_variable_manager_role_vars_dependencies(self):
        '''
        Tests vars from role dependencies with duplicate dependencies.
        '''
        mock_inventory = MagicMock()
        fake_loader = DictDataLoader({
            # role common-role
            '/etc/ansible/roles/common-role/tasks/main.yml': """
            - debug: msg="{{role_var}}"
            """,
            # We do not need allow_duplicates: yes for this role
            # because eliminating duplicates is done by the execution
            # strategy, which we do not test here.
            # role role1
            '/etc/ansible/roles/role1/vars/main.yml': """
            role_var: "role_var_from_role1"
            """,
            '/etc/ansible/roles/role1/meta/main.yml': """
            dependencies:
              - { role: common-role }
            """,
            # role role2
            '/etc/ansible/roles/role2/vars/main.yml': """
            role_var: "role_var_from_role2"
            """,
            '/etc/ansible/roles/role2/meta/main.yml': """
            dependencies:
              - { role: common-role }
            """,
        })
        v = VariableManager(loader=fake_loader, inventory=mock_inventory)
        play1 = Play.load(dict(
            hosts=['all'],
            roles=['role1', 'role2'],
        ), loader=fake_loader, variable_manager=v)
        # The task defined by common-role exists twice because role1
        # and role2 depend on common-role. Check that the tasks see
        # different values of role_var.
        blocks = play1.compile()
        task = blocks[1].block[0]
        res = v.get_vars(play=play1, task=task)
        self.assertEqual(res['role_var'], 'role_var_from_role1')
        task = blocks[2].block[0]
        res = v.get_vars(play=play1, task=task)
        self.assertEqual(res['role_var'], 'role_var_from_role2')
import imp
import logging
import os
# Cache of plugin instances whose class sets SHARED_OBJ = True, keyed by name.
_shared_objs = {}
def _get_one(name):
    """Load and instantiate the plugin class called *name*.

    The plugin module lives in ``plugins/<name>.py`` next to this file and
    must define a class whose name matches *name* case-insensitively.
    Instances whose class sets ``SHARED_OBJ = True`` are cached so repeated
    calls return the same object.

    Raises:
        ImportError: if the plugin module defines no matching class.
    """
    global _shared_objs
    # Cached obj?  Check BEFORE importing: the original re-imported the
    # plugin module on every call and only then consulted the cache, so the
    # cache never avoided the import/IO cost.
    if name in _shared_objs:
        return _shared_objs[name]
    # Build path to plugins/<name>.py, relative to this file.
    here = os.path.dirname(os.path.abspath(__file__))
    plugins_dir = os.path.join(here, "plugins")
    filepath = os.path.join(plugins_dir, '%s.py' % name)
    # Load plug-in module from source.
    with open(filepath, 'r') as f:
        p = imp.load_module(name, f, filepath, ('py', 'r', imp.PY_SOURCE))
    # Find and instantiate the plugin class (case-insensitive name match);
    # stop at the first hit instead of scanning the remaining attributes.
    obj = None
    for attr in dir(p):
        if attr.lower() == name.lower():
            obj = getattr(p, attr)()
            break
    if obj is None:
        # Explicit raise instead of `assert`: asserts vanish under -O.
        raise ImportError("Plugin class for %r not found in %s" % (name, filepath))
    # Cache obj?
    if getattr(obj, "SHARED_OBJ", False):
        _shared_objs[name] = obj
    return obj
def get(*args):
    """Return one plugin instance, or a tuple of instances for several names."""
    if len(args) == 1:
        return _get_one(args[0])
    return tuple(_get_one(name) for name in args)
def logger(script_name):
    """Return a DEBUG-level logger named *script_name* writing to ~/.autome/logs.

    Creates the log directory on first use and is safe to call repeatedly:
    a second call will not attach a duplicate handler.
    """
    # Logging file
    log_fname = "{}.log".format(script_name)
    dirfp = os.path.expanduser("~/.autome/logs")
    # logging.FileHandler raises IOError if the directory does not exist yet.
    if not os.path.isdir(dirfp):
        os.makedirs(dirfp)
    log_fp = os.path.join(dirfp, log_fname)
    # Logger
    log = logging.getLogger(script_name)
    log.setLevel(logging.DEBUG)
    # Guard against duplicated handlers (and duplicated log lines) when this
    # function is called more than once with the same script name.
    if not log.handlers:
        fstring = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        handler = logging.FileHandler(log_fp)
        handler.setFormatter(logging.Formatter(fstring))
        log.addHandler(handler)
    return log
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"

# Only the Windows push variant produces an MSI to sign.
if [ "${push_name}" != "windows" ]; then
  exit 0
fi

cd src

# Write garasign credentials to an env file so they reach the container
# without appearing on the podman command line.
echo "GRS_CONFIG_USER1_USERNAME=${garasign_jsign_username}" >>"signing-envfile"
echo "GRS_CONFIG_USER1_PASSWORD=${garasign_jsign_password}" >>"signing-envfile"

set -o errexit
set -o verbose

# Rename the built MSI to the distribution filename.
msi_filename=mongodb-${push_name}-${push_arch}-${suffix}.msi
cp bazel-bin/src/mongo/installer/msi/mongodb-win32-x86_64-windows-${version}.msi $msi_filename

if [ "${is_patch}" != "true" ]; then
  # signing windows artifacts with jsign
  # First heredoc is quoted ('EOF') so $1 stays literal for the sign()
  # function; the second is unquoted so $msi_filename expands now.
  cat <<'EOF' >jsign_signing_commands.sh
function sign(){
if [ -e $1 ]
then
jsign -a mongo-authenticode-2024 --replace --tsaurl http://timestamp.digicert.com -d SHA-256 $1
else
echo "$1 does not exist. Skipping signing"
fi
}
EOF
  cat <<EOF >>jsign_signing_commands.sh
sign $msi_filename
EOF
  # Run the generated commands inside the garasign container, with the
  # workspace bind-mounted at the same path.
  podman run \
    --env-file=signing-envfile \
    --rm \
    -v $(pwd):$(pwd) -w $(pwd) \
    ${garasign_jsign_image_ecr} \
    /bin/bash -c "$(cat ./jsign_signing_commands.sh)"
else
  echo "Not signing windows msi due to it being a patch build"
fi

# generating checksums
if [ -e $msi_filename ]; then
  shasum -a 1 $msi_filename | tee $msi_filename.sha1
  shasum -a 256 $msi_filename | tee $msi_filename.sha256
  md5sum $msi_filename | tee $msi_filename.md5
else
  echo "$msi_filename does not exist. Skipping checksum generation"
fi
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*- #
#####################################################################
# #
# Frets on Fire X #
# Copyright (C) 2009-2011 FoFiX Team #
# 2006 Sami Kyöstilä #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
# FoFiX fully unified setup script
import distutils.ccompiler
from distutils.dep_util import newer
from Cython.Distutils import build_ext as _build_ext
from distutils.command.install import install as _install
import sys, glob, os
import subprocess
import shlex
import numpy as np
from fofix.core import Version, SceneFactory
try:
from setuptools import setup, Extension, Command
except ImportError:
from distutils.core import setup, Extension
from distutils.cmd import Command
# Start setting up py2{exe,app} and building the argument set for setup().
# setup() arguments that are not specific to either are near the bottom of
# the script, just before the actual call to setup().
setup_args = {}
if os.name == 'nt':
    try:
        import py2exe
    except ImportError:
        # Only fatal when the user actually asked for a py2exe build.
        if 'py2exe' in sys.argv:
            sys.stderr.write('py2exe must be installed to create .exe files.\n')
            sys.exit(1)
    else:
        from py2exe.resources.VersionInfo import RT_VERSION
        from py2exe.resources.VersionInfo import Version as VersionResource
        #stump: sometimes py2.6 py2exe thinks parts of pygame are "system" DLLs...
        __orig_isSystemDLL = py2exe.build_exe.isSystemDLL
        def isSystemDLL(pathname):
            # Never classify pygame/libogg DLLs as "system" so they get bundled.
            exclude = ['pygame', 'libogg']
            for dll in exclude:
                if pathname.lower().find(dll) != -1:
                    return 0
            return __orig_isSystemDLL(pathname)
        py2exe.build_exe.isSystemDLL = isSystemDLL
        setup_args.update({
            'zipfile': "data/library.zip",
            'windows': [
                {
                    "script": "FoFiX.py",
                    "icon_resources": [(1, "./win32/fofix.ico")],
                    "other_resources": [(RT_VERSION, 1, VersionResource(
                        #stump: the parameter below must consist only of up to four numerical fields separated by dots
                        Version.versionNum(),
                        file_description="Frets on Fire X",
                        legal_copyright=r"© 2008-2013 FoFiX Team. GNU GPL v2 or later.",
                        company_name="FoFiX Team",
                        internal_name="FoFiX.exe",
                        original_filename="FoFiX.exe",
                        product_name=Version.PROGRAM_NAME,
                        #stump: when run from the exe, FoFiX will claim to be "FoFiX v" + product_version
                        product_version=Version.version()
                    ).resource_bytes())]
                }
            ]
        })
elif sys.platform == 'darwin':
    try:
        import py2app
    except ImportError:
        # Only fatal when the user actually asked for a py2app build.
        if 'py2app' in sys.argv:
            sys.stderr.write('py2app must be installed to create .app bundles.\n')
            sys.exit(1)
    setup_args.update({
        'app': ['FoFiX.py'],
        'data_files': [
            (".", ["./AUTHORS", "./COPYING", "./CREDITS", "./CHANGELOG", "./makefile", "../README.md"]),
            ("doc", glob.glob("./doc/*")),
            ("data", glob.glob("./data/*")),
        ],
        #stump: these arguments interfere with the py2exe version tagging code,
        # so only use them for py2app even though they are standard distutils
        # arguments. When they are present, py2exe synthesizes its own version
        # info resource, superseding the one we specify explicitly above.
        'version': Version.version(),
        'description': "Frets on Fire X",
        'name': "FoFiX",
        'url': "http://code.google.com/p/fofix/",
    })
# Forced includes needed for PIL.
extraIncludes = [
    "PIL.PngImagePlugin",
    "PIL.JpegImagePlugin",
]
# Forced includes needed for pyOpenGL 3 and the accelerator.
import OpenGL
if int(OpenGL.__version__[0]) > 2:
    extraIncludes += [
        "OpenGL.arrays.ctypesarrays",
        "OpenGL.arrays.numpymodule",
        "OpenGL.arrays.lists",
        "OpenGL.arrays.numbers",
        "OpenGL.arrays.strings", #stump: needed by shader code
        "OpenGL.arrays.ctypesparameters",
    ]
    if os.name == 'nt':
        extraIncludes.append("OpenGL.platform.win32")
    #stump: The pyopengl-accelerator format handlers import this
    # module using the Python/C API, so the packaging tools don't
    # know that it is needed.
    try:
        from OpenGL_accelerate import formathandler
        extraIncludes.append("OpenGL_accelerate.formathandler")
    except ImportError:
        # Accelerator not installed; nothing extra to force-include.
        pass
# DLLs to exclude in addition to the py2exe defaults (filled in below).
extraDllExcludes = []
# Command-specific options shared between py2exe and py2app.
common_options = {
    "dist_dir": "./dist",
    "includes": SceneFactory.scenes + extraIncludes,
    "excludes": [
        "ode",
        "_ssl",
        "bz2",
        "email",
        "calendar",
        "doctest",
        "ftplib",
        "getpass",
        "gopherlib",
        "macpath",
        "macurl2path",
        "PIL.GimpGradientFile",
        "PIL.GimpPaletteFile",
        "PIL.PaletteFile",
        "GimpGradientFile", #stump: we still need the non-PIL names for these
        "GimpPaletteFile",  # because they get included under these names when
        "PaletteFile",      # excluded above...
        "multiprocessing",
        "Tkinter",
        "Pyrex",
        "distutils",
        "pydoc",
        "py_compile",
        "compiler",
    ]
}
# Copy then specialize the command-specific options.
# NOTE: dict.copy() is shallow, so both copies initially share the same
# 'excludes' list object; any py2exe-only exclusion must REBIND a new list
# (as done below) rather than mutate it in place, or it would silently leak
# into the py2app configuration as well.
options = {
    'py2exe': common_options.copy(),
    'py2app': common_options.copy(),
}
options['py2exe'].update({
    "dll_excludes": [
        "msvcp90.dll",
        "mswsock.dll",
        "powrprof.dll",
        "w9xpopen.exe",
    ] + extraDllExcludes,
    "optimize": 2
})
# Rebind (don't extend) so the shared excludes list stays untouched for py2app.
options['py2exe']['excludes'] = options['py2exe']['excludes'] + [
    'macosx',
]
options['py2app'].update({
    'argv_emulation': True,
    'iconfile': './icon_mac_composed.icns',
    'plist': {
        'CFBundleIdentifier': 'org.pythonmac.FoFiX.FretsonFire',
        'CFBundleSignature': 'FoFX',
        'NSHumanReadableCopyright': u"\xa9 2008-2013 FoFiX Team. GNU GPL v2 or later.",
    }
})
def find_command(cmd):
    '''Find a program on the PATH, or, on win32, in the dependency pack.'''
    sys.stdout.write('checking for program %s... ' % cmd)
    if os.name == 'nt':
        # Only accept something from the dependency pack.
        path = os.path.join('.', 'win32', 'deps', 'bin', cmd+'.exe')
    else:
        # Search each directory on the PATH for an executable entry.
        path = None
        for directory in os.environ['PATH'].split(os.pathsep):
            candidate = os.path.join(directory, cmd)
            if os.access(candidate, os.X_OK):
                path = candidate
                break
    # A missing (or non-regular-file) result is fatal: abort the build.
    if path is None or not os.path.isfile(path):
        print('not found')
        sys.stderr.write('Could not find required program "%s".\n' % cmd)
        if os.name == 'nt':
            sys.stderr.write('(Check that you have the latest version of the dependency pack installed.)\n')
        sys.exit(1)
    print(path)
    return path
# Find pkg-config so we can find the libraries we need.
# (find_command() aborts the whole script if it is missing.)
pkg_config = find_command('pkg-config')
def pc_exists(pkg):
    '''Return True if pkg-config reports that library *pkg* is installed.'''
    # spawnl returns the child's exit status; pkg-config --exists exits 0 on
    # success.  Return the comparison directly instead of the original
    # if/else that returned literal True/False.
    return os.spawnl(os.P_WAIT, pkg_config, 'pkg-config', '--exists', pkg) == 0
# {py26hack} - Python 2.7 has subprocess.check_output for this purpose.
def grab_stdout(*args, **kw):
    '''Obtain standard output from a subprocess invocation, raising an exception
    if the subprocess fails.'''
    kw['stdout'] = subprocess.PIPE
    process = subprocess.Popen(*args, **kw)
    captured, _ = process.communicate(), None
    output = captured[0]
    if process.returncode != 0:
        raise RuntimeError('subprocess %r returned %d' % (args[0], process.returncode))
    return output.decode('utf-8')
# Blacklist MinGW-specific dependency libraries on Windows.
# (pc_info() below drops these from the '-l' flags pkg-config reports.)
if os.name == 'nt':
    lib_blacklist = ['m', 'mingw32']
else:
    lib_blacklist = []
def pc_info(pkg, altnames=[]):
    '''Obtain build options for a library from pkg-config and
    return a dict that can be expanded into the argument list for
    L{distutils.core.Extension}.'''
    # NOTE(review): the mutable default for altnames is only ever read here,
    # so it is harmless, but a tuple default would be the safer idiom.
    sys.stdout.write('checking for library %s... ' % pkg)
    if not pc_exists(pkg):
        # Try the alternative package names in order; for-else runs the abort
        # path only when no alternative matched either.
        for name in altnames:
            if pc_exists(name):
                pkg = name
                sys.stdout.write('(using alternative name %s) ' % pkg)
                break
        else:
            print('not found')
            sys.stderr.write('Could not find required library "%s".\n' % pkg)
            sys.stderr.write('(Also tried the following alternative names: %s)\n' % ', '.join(altnames))
            if os.name == 'nt':
                sys.stderr.write('(Check that you have the latest version of the dependency pack installed.)\n')
            else:
                sys.stderr.write('(Check that you have the appropriate development package installed.)\n')
            sys.exit(1)
    cflags = shlex.split(grab_stdout([pkg_config, '--cflags', pkg]))
    libs = shlex.split(grab_stdout([pkg_config, '--libs', pkg]))
    # Pick out anything interesting in the cflags and libs, and
    # silently drop the rest.
    def def_split(x):
        # '-DFOO=1' -> ('FOO', '1'); bare '-DFOO' -> ('FOO', None).
        pair = list(x.split('=', 1))
        if len(pair) == 1:
            pair.append(None)
        return tuple(pair)
    info = {
        'define_macros': [def_split(x[2:]) for x in cflags if x[:2] == '-D'],
        'include_dirs': [x[2:] for x in cflags if x[:2] == '-I'],
        'libraries': [x[2:] for x in libs if x[:2] == '-l' and x[2:] not in lib_blacklist],
        'library_dirs': [x[2:] for x in libs if x[:2] == '-L'],
    }
    print('ok')
    return info
# Probe every native library the extension modules below link against.
ogg_info = pc_info('ogg')
vorbisfile_info = pc_info('vorbisfile')
sdl_info = pc_info('sdl')
sdl_mixer_info = pc_info('SDL_mixer')
theoradec_info = pc_info('theoradec')
glib_info = pc_info('glib-2.0')
gthread_info = pc_info('gthread-2.0')
swscale_info = pc_info('libswscale')
soundtouch_info = pc_info('soundtouch', ['soundtouch-1.4', 'soundtouch-1.0'])
if os.name == 'nt':
    # Windows systems: we just know what the OpenGL library is.
    gl_info = {'libraries': ['opengl32']}
    # And glib needs a slight hack to work correctly.
    glib_info['define_macros'].append(('inline', '__inline'))
    # And we use the prebuilt soundtouch-c.
    soundtouch_info['libraries'].append('soundtouch-c')
    extra_soundtouch_src = []
else:
    # Other systems: we ask pkg-config.
    try:
        gl_info = pc_info('gl')
    except SystemExit:
        # Work around to include opengl.framwork during compilation on OSX.
        # (pc_info exits via sys.exit when 'gl' is unknown to pkg-config.)
        os.environ['LDFLAGS'] = '-framework opengl'
        os.environ['CFLAGS'] = '-framework opengl'
        gl_info = {
            'define_macros': [],
            'include_dirs': [],
            'libraries': [],
            'library_dirs': [],
        }
    # And build our own soundtouch-c.
    extra_soundtouch_src = ['fofix/core/MixStream/soundtouch-c.cpp']
# Build a similar info record for the numpy headers.
numpy_info = {'include_dirs': [np.get_include()]}
def combine_info(*args):
    '''Combine multiple result dicts from L{pc_info} into one.'''
    keys = ('define_macros', 'include_dirs', 'libraries', 'library_dirs')
    merged = {key: [] for key in keys}
    # Concatenate each field across the inputs, tolerating missing keys.
    for record in args:
        for key in keys:
            merged[key].extend(record.get(key, []))
    return merged
# Extend the build_ext command further to rebuild the import libraries on
# an MSVC build under Windows so they actually work.
class build_ext(_build_ext):
    def run(self, *args, **kw):
        # Resolve the default compiler name so the MSVC special case below
        # also triggers when no --compiler option was given.
        if self.compiler is None:
            self.compiler = distutils.ccompiler.get_default_compiler()
        if self.compiler == 'msvc':
            msvc = distutils.ccompiler.new_compiler(compiler='msvc', verbose=self.verbose, dry_run=self.dry_run, force=self.force)
            msvc.initialize()
            # Regenerate each .lib from its .def only when the .def is newer.
            for deffile in glob.glob(os.path.join('..', 'win32', 'deps', 'lib', '*.def')):
                libfile = os.path.splitext(deffile)[0] + '.lib'
                if newer(deffile, libfile):
                    msvc.spawn([msvc.lib, '/nologo', '/machine:x86', '/out:'+libfile, '/def:'+deffile])
            # Also add the directory containing the msinttypes headers to the include path.
            self.include_dirs.append(os.path.join('.', 'win32', 'deps', 'include', 'msinttypes'))
        return _build_ext.run(self, *args, **kw)

    def build_extension(self, ext):
        # If we're using MSVC, specify C++ exception handling behavior to avoid compiler warnings.
        if self.compiler.compiler_type == 'msvc':
            ext.extra_compile_args.append('/EHsc')
        return _build_ext.build_extension(self, ext)
# Make "setup.py install" do nothing until we configure something more sensible.
class install(_install):
    def run(self, *args, **kw):
        # Deliberate refusal: abort instead of performing a stock install.
        sys.stderr.write('This is not the correct way to install FoFiX.\n')
        sys.exit(1)
# Convert .po files into .mo files.
class msgfmt(Command):
    user_options = []
    description = 'convert .po files in data/po into .mo files in data/translations'

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        msgfmt_cmd = find_command('msgfmt')
        # Recompile each catalog only when its .po is newer than the .mo.
        for pofile in glob.glob(os.path.join('..', 'data', 'po', '*.po')):
            mofile = os.path.join('..', 'data', 'translations', os.path.basename(pofile)[:-3]+'.mo')
            if newer(pofile, mofile):
                self.mkpath(os.path.dirname(mofile))
                self.spawn([msgfmt_cmd, '-c', '-o', mofile, pofile])
# Extract translatable strings.
class xgettext(Command):
    user_options = []
    description = 'extract translatable strings from source code'

    # The function names that indicate a translatable string.
    FUNCNAMES = ['_', 'N_']

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        xgettext_cmd = find_command('xgettext')
        potfile = os.path.join('..', 'data', 'po', 'messages.pot')
        # Scan the top-level .py files and write the master template (.pot).
        self.spawn([xgettext_cmd,
                    '--package-name='+Version.PROGRAM_NAME,
                    '--package-version='+Version.version(),
                    '--copyright-holder=FoFiX Team',
                    '-o', potfile] +
                   ['-k' + funcname for funcname in self.FUNCNAMES] +
                   glob.glob('*.py'))
# Add the common arguments to setup().
# This includes arguments to cause FoFiX's extension modules to be built.
setup_args.update({
    'options': options,
    'ext_modules': [
        Extension('fofix.lib.cmgl', ['fofix/core/cmgl/cmgl.pyx'], **combine_info(numpy_info, gl_info)),
        Extension('fofix.lib._pypitch',
                  language='c++',
                  sources=['fofix/core/pypitch/_pypitch.pyx', 'fofix/core/pypitch/pitch.cpp']),
        Extension('fofix.lib._VideoPlayer',
                  ['fofix/core/VideoPlayer/_VideoPlayer.pyx', 'fofix/core/VideoPlayer/VideoPlayer.c'],
                  **combine_info(gl_info, ogg_info, theoradec_info, glib_info, swscale_info,
                                 {'include_dirs': ['.']})),
        Extension('fofix.lib._MixStream',
                  ['fofix/core/MixStream/_MixStream.pyx', 'fofix/core/MixStream/MixStream.c',
                   'fofix/core/MixStream/vorbis.c'] + extra_soundtouch_src,
                  **combine_info(vorbisfile_info, soundtouch_info, glib_info, gthread_info, sdl_info, sdl_mixer_info)),
    ],
    'cmdclass': {'build_ext': build_ext, 'install': install, 'msgfmt': msgfmt, 'xgettext': xgettext},
})
# If we're on Windows, add the dependency directory to the PATH so py2exe will
# pick up necessary DLLs from there.
if os.name == 'nt':
    os.environ['PATH'] = os.path.abspath(os.path.join('.', 'win32', 'deps', 'bin')) + os.pathsep + os.environ['PATH']
# And finally...
setup(**setup_args)
from datetime import date
import itertools
from django.conf import settings
from django.db.models import *
from apps.activity.base_activity import BaseActivity
from apps.canvas_auth.models import User
from canvas import util
from canvas.redis_models import RedisLastBumpedBuffer
from drawquest.apps.drawquest_auth.details_models import UserDetails
from services import Services
class StickerActivity(BaseActivity):
    """Activity entry emitted when a comment receives a sticker."""
    TYPE = 'sticker'
    # The stickerer's identity is always hidden for plain stickers.
    FORCE_ANONYMOUS = True

    @classmethod
    def _from_sticker(cls, comment_sticker):
        """Build the raw activity-data dict for *comment_sticker*."""
        from canvas.details_models import CommentDetails
        comment_details = CommentDetails.from_id(comment_sticker.comment_id)
        data = {
            'comment_sticker_type_id': comment_sticker.type_id,
            'details_url': comment_details.linked_url,
        }
        if comment_details.reply_content:
            # Thumbnail is best-effort: the 'small_square' rendition may be
            # missing, in which case the entry simply has no thumbnail.
            try:
                data['thumbnail_url'] = comment_details.reply_content.get_absolute_url_for_image_type('small_square')
            except KeyError:
                pass
        return data

    @classmethod
    def from_sticker(cls, actor, comment_sticker):
        """Create the activity for *actor* placing *comment_sticker*."""
        return cls(cls._from_sticker(comment_sticker), actor=actor)
class EpicStickerActivity(StickerActivity):
    """Like StickerActivity but for epic stickers: attributed, with cost/message."""
    TYPE = 'epic_sticker'
    FORCE_ANONYMOUS = False

    @classmethod
    def from_sticker(cls, actor, comment_sticker):
        """Create the activity, adding sticker cost and any epic message."""
        data = cls._from_sticker(comment_sticker)
        data['reward_stickers'] = comment_sticker.sticker.cost
        message = comment_sticker.epic_message
        if message:
            #TODO delete 'tooltip' once we switch to the new activity feed
            data['tooltip'] = u'"{0}" - from {1}'.format(message, comment_sticker.user.username)
            data['personal_message'] = message
        return cls(data, actor=actor)
class LevelUpActivity(BaseActivity):
    """Activity entry recorded when a user levels up."""
    TYPE = 'level_up'
    # Level-up entries always show the actor; never anonymized.
    FORCE_ANONYMOUS = False
class FollowedByUserActivity(BaseActivity):
    """Activity recorded when `actor` starts following another user."""

    TYPE = 'followed_by_user'
    FORCE_ANONYMOUS = False

    @classmethod
    def from_user(cls, actor, followee):
        """Create the activity for `actor` following `followee`."""
        payload = {
            'details_url': '/user/{0}'.format(actor.username),
            'is_actor_anonymous': False,
            'followee': UserDetails.from_id(followee.id),
        }
        return cls(payload, actor=actor)

    def followee_is_following_actor(self):
        """True if the followed user follows the actor back."""
        followee = User.objects.get(id=self._data['followee']['id'])
        return followee.is_following(self.actor['id'])
class _FromCommentMixin(object):
    """Mixin providing a from_comment() constructor for comment-based activities."""

    @classmethod
    def from_comment(cls, actor, comment):
        """Build the activity payload from `comment` and wrap it in `cls`."""
        details = comment.details()
        payload = {
            'details_url': details.linked_url,
            'is_actor_anonymous': comment.anonymous,
            'thread_title': comment.thread.op.title,
        }
        reply_content = details.reply_content
        if reply_content:
            try:
                thumb = reply_content.get_absolute_url_for_image_type('small_square')
            except KeyError:
                pass  # No small-square rendition; leave the thumbnail out.
            else:
                payload['thumbnail_url'] = thumb
        return cls(payload, actor=actor)
class RemixInviteActivity(BaseActivity, _FromCommentMixin):
    """Invitation to remix a comment."""
    TYPE = 'remix_invite'


class ThreadPromotedActivity(BaseActivity, _FromCommentMixin):
    """A thread was promoted; actor is never anonymized."""
    TYPE = 'thread_promoted'
    FORCE_ANONYMOUS = False


class PostPromotedActivity(BaseActivity, _FromCommentMixin):
    """A single post was promoted; actor is never anonymized."""
    TYPE = 'post_promoted'
    FORCE_ANONYMOUS = False
class MonsterRemixInviteActivity(BaseActivity):
    """Invitation to complete a 'monster' remix."""

    TYPE = 'remix_invite_monster'

    @classmethod
    def from_comment(cls, actor, comment):
        """Build the activity pointing at the monster-completion page."""
        details = comment.details()
        payload = {
            'details_url': "/monster/{0}/complete".format(details.short_id()),
            'is_actor_anonymous': comment.anonymous,
            'thumbnail_url': '/static/img/tiny_monster_mascot.png',
        }
        return cls(payload, actor=actor)
class ThreadReplyActivity(BaseActivity, _FromCommentMixin):
    """A reply was posted in a thread."""
    TYPE = 'thread_reply'


class RemixActivity(ThreadReplyActivity):
    # Thread-reply variant with its own TYPE tag.
    TYPE = 'remix'


class ReplyActivity(ThreadReplyActivity):
    # Thread-reply variant with its own TYPE tag.
    TYPE = 'reply'


class DailyFreeStickersActivity(BaseActivity):
    """Daily free-sticker grant notification."""
    TYPE = 'daily_free_stickers'
    FORCE_ANONYMOUS = False
def _load_activity_types():
    """Import and return the activity classes named by dotted path in
    settings.ACTIVITY_TYPE_CLASSES.

    Raises django ImproperlyConfigured for a path with no dot, a module
    that fails to import, or a module missing the named class.
    (Python 2 file: note the `except X, e` syntax below.)
    """
    from django.conf import settings
    from django.core import exceptions
    from django.utils.importlib import import_module
    types = []
    for type_path in settings.ACTIVITY_TYPE_CLASSES:
        # Split "pkg.module.ClassName" into module path and class name.
        try:
            dot = type_path.rindex('.')
        except ValueError:
            raise exceptions.ImproperlyConfigured("%s isn't a module" % type_path)
        type_module, type_classname = type_path[:dot], type_path[dot+1:]
        try:
            mod = import_module(type_module)
        except ImportError, e:
            raise exceptions.ImproperlyConfigured('Error importing activity type %s: "%s"' % (type_module, e))
        try:
            type_class = getattr(mod, type_classname)
        except AttributeError:
            raise exceptions.ImproperlyConfigured('Activity type module "%s" does not define a "%s" class'
                                                  % (type_module, type_classname))
        types.append(type_class)
    return types
class ActivityStream(object):
ACTIVITY_TYPES = {cls.TYPE: cls for cls in _load_activity_types()}
def __init__(self, user_id, stream_size=1000, activity_types=ACTIVITY_TYPES):
    """Bind the stream to `user_id`'s Redis-backed buffers.

    activity_types -- mapping of TYPE string to activity class; the
        default is the class-level registry (bound at definition time).
    """
    self._user_id = user_id
    self._activity_types = activity_types
    # Ring buffer of activity ids; entries hydrate lazily via _make_activity.
    self._buffer = RedisLastBumpedBuffer('user:{}:stream_v6'.format(user_id), stream_size,
                                         getter=self._make_activity)
    # Separate buffer tracking which activity ids have been read.
    self._read = RedisLastBumpedBuffer('user:{}:stream_read'.format(user_id), stream_size)
def _make_activity(self, activity_id):
    """Hydrate an activity instance from its id, or None for unknown types."""
    from apps.activity.models import Activity
    details = Activity.details_by_id(activity_id)()
    try:
        # Unknown activity types (or details lacking the type key) yield
        # None so the stream can silently skip them.
        return self._activity_types[details['activity_type']](details)
    except KeyError:
        return None
def __iter__(self):
    """Iterate buffered activities, skipping entries that failed to load."""
    return (entry for entry in self._buffer if entry is not None)
def valid_activity_type(self, activity_type):
    # True if `activity_type` names a registered activity class.
    return activity_type in self.ACTIVITY_TYPES
def iter_until(self, timestamp):
    """ Returns an iterator over the activities up until `timestamp`. """
    cutoff = float(timestamp)
    return itertools.dropwhile(lambda activity: activity.timestamp >= cutoff,
                               self._buffer)
def _invalidate_cache(self):
    # Drop the cached API feed for this user (drawquest deployments only).
    if settings.PROJECT == 'drawquest':
        from apps.activity import api
        api.activity_stream_items.delete_cache(None, None, user=self._user_id)
def push(self, activity_item):
    """Add an activity to the stream buffer and invalidate the cached feed."""
    from apps.activity.models import Activity
    if hasattr(activity_item, 'id'):
        activity_id = activity_item.id
    else:
        # Redis-only activity: persist it first to obtain a database id.
        activity_id = Activity.from_redis_activity(activity_item).id
    self._buffer.bump(activity_id, coerce=False)
    self._invalidate_cache()
def mark_read(self, activity_id):
    """Mark one activity as read, decrementing the unseen counter when
    the activity is unread, still buffered, and newer than the last
    whole-stream view.
    """
    from canvas.models import UserRedis
    user_redis = UserRedis(self._user_id)
    activity = self._make_activity(activity_id)
    try:
        if (activity_id not in self._read
                and activity_id in set(item.id for item in self._buffer)
                and float(user_redis.user_kv.hget('activity_stream_last_viewed')) < float(activity.timestamp)):
            user_redis.user_kv.hincrby('activity_stream_unseen', -1)
    except TypeError:
        # hget() returns None when the last-viewed marker was never set;
        # float(None) raises TypeError and we skip the decrement.
        # NOTE(review): if _make_activity() returned None,
        # activity.timestamp raises AttributeError, which is NOT caught
        # here -- confirm whether that is intended.
        pass
    # Record the read regardless of whether the counter changed.
    self._read.bump(activity_id)
def mark_all_read(self):
    """Reset the unseen counter and broadcast that the stream was viewed."""
    from canvas.models import UserRedis
    redis_user = UserRedis(self._user_id)
    kv = redis_user.user_kv
    kv.hset('activity_stream_last_viewed', Services.time.time())
    kv.hset('activity_stream_unseen', 0)
    redis_user.activity_stream_channel.publish('activity_stream_viewed')
def has_read(self, activity_id):
activity_id = int(activity_id)
from canvas.models import UserRedis
user_redis = UserRedis(self._user_id)
activity = self._make_activity(activity_id)
if activity_id in self._read:
return True
try:
return float(user_redis.user_kv.hget('activity_stream_last_viewed')) >= float(activity.timestamp)
except TypeError:
return False | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.components.symbolInfoProvider;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.symbolInfoProvider.AbstractCanBeOperatorTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/symbolInfoProvider/canBeOperator")
@TestDataPath("$PROJECT_ROOT")
public class FirIdeDependentAnalysisScriptSourceModuleCanBeOperatorTestGenerated extends AbstractCanBeOperatorTest {
/**
 * Supplies the configurator for this generated suite: FIR frontend,
 * script-source module kind, dependent analysis session, IDE analysis mode.
 * (File is generated -- regenerate rather than hand-edit; see class javadoc.)
 */
@NotNull
@Override
public AnalysisApiTestConfigurator getConfigurator() {
    return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator(
        new AnalysisApiTestConfiguratorFactoryData(
            FrontendKind.Fir,
            TestModuleKind.ScriptSource,
            AnalysisSessionMode.Dependent,
            AnalysisApiMode.Ide
        )
    );
}
@Test
public void testAllFilesPresentInCanBeOperator() {
    // Fails when a .kts test-data file exists without a corresponding
    // generated test method, signalling the suite must be regenerated.
    KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/symbolInfoProvider/canBeOperator"), Pattern.compile("^(.+)\\.kts$"), null, true);
}
} | java | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-fir/tests-gen/org/jetbrains/kotlin/analysis/api/fir/test/cases/generated/cases/components/symbolInfoProvider/FirIdeDependentAnalysisScriptSourceModuleCanBeOperatorTestGenerated.java |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from multiprocessing import Process
import os
import sys
import zmq
import subprocess
import fcntl
import json
import time
from django.conf import settings
from contextlib import contextmanager
from util import ReplicationMixin
from fs.btrfs import (get_oldest_snap, is_subvol)
from storageadmin.models import Appliance
from smart_manager.models import ReplicaTrail
from cli import APIWrapper
from django import db
import logging
logger = logging.getLogger(__name__)
BTRFS = '/sbin/btrfs'
class Sender(ReplicationMixin, Process):
def __init__(self, uuid, receiver_ip, replica, rt=None):
    """Prepare a sender process for one replication run.

    uuid        -- this appliance's uuid
    receiver_ip -- address of the receiving appliance
    replica     -- Replica describing the share/pool and data port
    rt          -- latest ReplicaTrail, or None for a first/full send
    """
    self.uuid = uuid
    self.receiver_ip = receiver_ip
    self.receiver_port = replica.data_port
    self.replica = replica
    # Snapshot names encode the replica id and (trail id + 1), yielding a
    # unique, ordered name per run.
    self.snap_name = '%s_%d_replication' % (replica.share, replica.id)
    self.snap_name += '_1' if (rt is None) else '_%d' % (rt.id + 1)
    self.snap_id = '%s_%s' % (self.uuid, self.snap_name)
    self.rt = rt
    self.rt2 = None  # ReplicaTrail created for this run
    self.rt2_id = None
    self.rid = replica.id
    self.identity = u'%s-%s' % (self.uuid, self.rid)
    self.sp = None  # btrfs-send subprocess handle
    self.rlatest_snap = None  # Latest snapshot per Receiver (comes along with receiver-ready)
    self.ctx = zmq.Context()
    self.msg = ''
    self.update_trail = False
    self.total_bytes_sent = 0
    self.ppid = os.getpid()
    self.max_snap_retain = settings.REPLICATION.get('max_snap_retain')
    # Close inherited DB connections so the forked process opens fresh ones.
    # NOTE(review): the loop ignores `alias`/`info` and calls the global
    # close_connection() once per configured database -- confirm intended.
    for alias, info in db.connections.databases.items():
        db.close_connection()
    super(Sender, self).__init__()
@contextmanager
def _clean_exit_handler(self):
    """Wrap top-level work: on any exception, log it, optionally record a
    'failed' status on the replica trail, then exit with code 3.
    (Python 2 file: note the `except X, e` syntax.)
    """
    try:
        yield
    except Exception, e:
        logger.error('Id: %s. %s. Exception: %s' % (self.identity, self.msg, e.__str__()))
        if (self.update_trail):
            try:
                data = {'status': 'failed',
                        'error': '%s. Exception: %s' % (self.msg, e.__str__())}
                self.update_replica_status(self.rt2_id, data)
            except Exception, e:
                # Status update is best-effort; log and continue to exit.
                logger.error('Id: %s. Exception occured while updating replica status: %s' %
                             (self.identity, e.__str__()))
        self._sys_exit(3)
def _sys_exit(self, code):
    """Terminate any live btrfs-send child, tear down ZMQ, and exit."""
    child = self.sp
    if child is not None and child.poll() is None:
        child.terminate()
    self.ctx.destroy(linger=0)
    sys.exit(code)
def _init_greeting(self):
    """Open a DEALER socket to the receiver and send the initial
    'sender-ready' greeting describing this replication request."""
    self.send_req = self.ctx.socket(zmq.DEALER)
    self.send_req.setsockopt_string(zmq.IDENTITY, self.identity)
    self.send_req.connect('tcp://%s:%d' % (self.receiver_ip, self.receiver_port))
    msg = {'pool': self.replica.dpool,
           'share': self.replica.share,
           'snap': self.snap_name,
           'incremental': self.rt is not None,
           'uuid': self.uuid, }
    msg_str = json.dumps(msg)
    # Python 2 semantics: b'%s' %% str interpolates into a byte string.
    self.send_req.send_multipart(['sender-ready', b'%s' % msg_str])
    logger.debug('Id: %s Initial greeting: %s' % (self.identity, msg))
    self.poll.register(self.send_req, zmq.POLLIN)
def _send_recv(self, command, msg=''):
    """Send (command, msg) to the receiver and wait up to 60 seconds for a
    single reply. Returns (rcommand, rmsg); both None on timeout."""
    self.msg = ('Failed while send-recv-ing command(%s)' % command)
    rcommand = rmsg = None
    self.send_req.send_multipart([command, b'%s' % msg])
    # There is no retry logic here because it's an overkill at the moment.
    # If the stream is interrupted, we can only start from the beginning again.
    # So we wait patiently, but only once. Perhaps we can implement a buffering
    # or temporary caching strategy to make this part robust.
    socks = dict(self.poll.poll(60000))  # 60 seconds.
    if (socks.get(self.send_req) == zmq.POLLIN):
        rcommand, rmsg = self.send_req.recv_multipart()
    # Debug-log every exchange except pure data frames answered 'send-more'.
    # NOTE(review): the second clause is subsumed by the first when
    # len(command) > 0 -- the condition looks more complex than needed.
    if ((len(command) > 0 or (rcommand is not None and rcommand != 'send-more')) or
            (len(command) > 0 and rcommand is None)):
        logger.debug('Id: %s Server: %s:%d scommand: %s rcommand: %s' %
                     (self.identity, self.receiver_ip, self.receiver_port, command, rcommand))
    return rcommand, rmsg
def _delete_old_snaps(self, share_path):
    """Prune replication snapshots beyond max_snap_retain, oldest first,
    stopping when none remain over the limit or a delete fails."""
    while True:
        victim = get_oldest_snap(share_path, self.max_snap_retain,
                                 regex='_replication_')
        if victim is None:
            return
        logger.debug('Id: %s. Deleting old snapshot: %s' % (self.identity, victim))
        self.msg = ('Failed to delete snapshot: %s. Aborting.' %
                    victim)
        if not self.delete_snapshot(self.replica.share, victim):
            return
def _refresh_rt(self):
    """Validate and return the ReplicaTrail whose snapshot should parent
    the next btrfs send, or None to force a full send.

    For incremental sends the receiver reports its latest successful
    snapshot (self.rlatest_snap). That usually matches self.rt, but may
    lag behind it; we must only send the incremental the receiver expects.

    Raises Exception when the receiver's expected snapshot cannot be
    matched to a succeeded trail with an existing local snapshot.
    """
    self.msg = ('Failed to validate/refresh ReplicaTrail.')
    if (self.rlatest_snap is None):
        # No hint from the receiver: pick the newest succeeded trail whose
        # snapshot still exists on this system.
        for rt in ReplicaTrail.objects.filter(replica=self.replica, status='succeeded').order_by('-id'):
            # BUG FIX: previously built the path from self.rt.snap_name,
            # testing the same snapshot on every iteration instead of the
            # candidate trail's own snapshot.
            snap_path = ('%s%s/.snapshots/%s/%s' % (settings.MNT_PT, self.replica.pool,
                                                    self.replica.share, rt.snap_name))
            if (is_subvol(snap_path)):
                return rt
        # Snapshots from previous succeeded ReplicaTrails don't actually
        # exist on the system. So we send a Full replication instead of
        # incremental.
        return None
    if (len(self.rlatest_snap) == 0):
        # Receiver sends empty string when it fails to reply back to an
        # incremental send request with an appropriate parent snapshot name.
        return None
    if (self.rt.snap_name != self.rlatest_snap):
        self.msg = ('Mismatch on starting snapshot for '
                    'btrfs-send. Sender picked %s but Receiver wants '
                    '%s, which takes precedence.' % (self.rt.snap_name, self.rlatest_snap))
        for rt in ReplicaTrail.objects.filter(replica=self.replica, status='succeeded').order_by('-id'):
            if (rt.snap_name == self.rlatest_snap):
                self.msg = ('%s. successful trail found for %s' % (self.msg, self.rlatest_snap))
                snap_path = ('%s%s/.snapshots/%s/%s' % (settings.MNT_PT, self.replica.pool,
                                                        self.replica.share, self.rlatest_snap))
                if (is_subvol(snap_path)):
                    self.msg = ('Snapshot(%s) exists in the system and will be '
                                'used as the parent' % snap_path)
                    logger.debug('Id: %s. %s' % (self.identity, self.msg))
                    return rt
                self.msg = ('Snapshot(%s) does not exist on the system. So cannot use it.' % snap_path)
                raise Exception(self.msg)
        raise Exception('%s. No succeeded trail found for %s.' % (self.msg, self.rlatest_snap))
    # Receiver agrees with self.rt; verify its snapshot still exists locally.
    snap_path = ('%s%s/.snapshots/%s/%s' % (settings.MNT_PT, self.replica.pool,
                                            self.replica.share, self.rlatest_snap))
    if (is_subvol(snap_path)):
        return self.rt
    raise Exception('Parent Snapshot(%s) to use in btrfs-send does not '
                    'exist in the system.' % snap_path)
def run(self):
self.msg = ('Top level exception in sender: %s' % self.identity)
with self._clean_exit_handler():
self.law = APIWrapper()
self.poll = zmq.Poller()
self._init_greeting()
# create a new replica trail if it's the very first time
# or if the last one succeeded
self.msg = ('Failed to create local replica trail for snap_name:'
' %s. Aborting.' % self.snap_name)
self.rt2 = self.create_replica_trail(self.replica.id,
self.snap_name)
self.rt2_id = self.rt2['id']
# prune old snapshots.
self.update_trail = True
self.msg = ('Failed to prune old snapshots')
share_path = ('%s%s/.snapshots/%s' % (settings.MNT_PT, self.replica.pool,
self.replica.share))
self._delete_old_snaps(share_path)
# Refresh replica trail.
if (self.rt is not None):
self.rt = self._refresh_rt()
# create a snapshot only if it's not already from a previous
# failed attempt.
self.msg = ('Failed to create snapshot: %s. Aborting.' % self.snap_name)
self.create_snapshot(self.replica.share, self.snap_name)
retries_left = 10
poll_interval = 6000 # 6 seconds
while (True):
socks = dict(self.poll.poll(poll_interval))
if (socks.get(self.send_req) == zmq.POLLIN):
retries_left = 10 # not really necessary because we just want one reply for now.
command, reply = self.send_req.recv_multipart()
if (command == 'receiver-ready'):
if (self.rt is not None):
self.rlatest_snap = reply
self.rt = self._refresh_rt()
logger.debug('Id: %s. command(%s) and message(%s) '
'received. Proceeding to send fsdata.'
% (self.identity, command, reply))
break
else:
if (command in 'receiver-init-error'):
self.msg = ('%s received for %s. extended reply: %s. Aborting.' %
(command, self.identity, reply))
elif (command == 'snap-exists'):
logger.debug('Id: %s. %s received. Not sending fsdata' % (self.identity, command))
data = {'status': 'succeeded',
'error': 'snapshot already exists on the receiver',}
self.msg = ('Failed to update replica status for %s' % self.snap_id)
self.update_replica_status(self.rt2_id, data)
self._sys_exit(0)
else:
self.msg = ('unexpected reply(%s) for %s. extended reply: %s. Aborting' %
(command, self.identity, reply))
raise Exception(self.msg)
else:
retries_left -= 1
logger.debug('Id: %s. No response from receiver. Number '
'of retry attempts left: %d' % (self.identity, retries_left))
if (retries_left == 0):
self.msg = ('Receiver(%s:%d) is unreachable. Aborting.' %
(self.receiver_ip, self.receiver_port))
raise Exception(self.msg)
self.send_req.setsockopt(zmq.LINGER, 0)
self.send_req.close()
self.poll.unregister(self.send_req)
self._init_greeting()
snap_path = ('%s%s/.snapshots/%s/%s' %
(settings.MNT_PT, self.replica.pool, self.replica.share,
self.snap_name))
cmd = [BTRFS, 'send', snap_path]
if (self.rt is not None):
prev_snap = ('%s%s/.snapshots/%s/%s' %
(settings.MNT_PT, self.replica.pool,
self.replica.share, self.rt.snap_name))
logger.info('Id: %s. Sending incremental replica between %s -- %s' %
(self.identity, prev_snap, snap_path))
cmd = [BTRFS, 'send', '-p', prev_snap, snap_path]
else:
logger.info('Id: %s. Sending full replica: %s' % (self.identity, snap_path))
try:
self.sp = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
fcntl.fcntl(self.sp.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
except Exception, e:
self.msg = ('Failed to start the low level btrfs send '
'command(%s). Aborting. Exception: ' % (cmd, e.__str__()))
logger.error('Id: %s. %s' % (self.identity, msg))
self._send_recv('btrfs-send-init-error')
self._sys_exit(3)
alive = True
num_msgs = 0
t0 = time.time()
while (alive):
try:
if (self.sp.poll() is not None):
logger.debug('Id: %s. send process finished for %s. rc: %d. '
'stderr: %s' % (self.identity, self.snap_id,
self.sp.returncode,
self.sp.stderr.read()))
alive = False
fs_data = self.sp.stdout.read()
except IOError:
continue
except Exception, e:
self.msg = ('Exception occured while reading low level btrfs '
'send data for %s. Aborting.' % self.snap_id)
if (alive):
self.sp.terminate()
self.update_trail = True
self._send_recv('btrfs-send-unexpected-termination-error')
self._sys_exit(3)
self.msg = ('Failed to send fsdata to the receiver for %s. Aborting.' %
(self.snap_id))
self.update_trail = True
command, message = self._send_recv('', fs_data)
self.total_bytes_sent += len(fs_data)
num_msgs += 1
if (num_msgs == 1000):
num_msgs = 0
dsize, drate = self.size_report(self.total_bytes_sent, t0)
logger.debug('Id: %s Sender alive. Data transferred: '
'%s. Rate: %s/sec.' % (self.identity, dsize, drate))
if (command is None or command == 'receiver-error'):
#command is None when the remote side vanishes.
self.msg = ('Got null or error command(%s) message(%s) from the Receiver while'
' transmitting fsdata. Aborting.' % (command, message))
raise Exception(message)
if (not alive):
if (self.sp.returncode != 0):
#do we mark failed?
command, message = self._send_recv('btrfs-send-nonzero-termination-error')
else:
command, message = self._send_recv('btrfs-send-stream-finished')
if (os.getppid() != self.ppid):
logger.error('Id: %s. Scheduler exited. Sender for %s cannot go on. '
'Aborting.' % (self.identity, self.snap_id))
self._sys_exit(3)
data = {'status': 'succeeded',
'kb_sent': self.total_bytes_sent / 1024, }
self.msg = ('Failed to update final replica status for %s'
'. Aborting.' % self.snap_id)
self.update_replica_status(self.rt2_id, data)
dsize, drate = self.size_report(self.total_bytes_sent, t0)
logger.debug('Id: %s. Send complete. Total data transferred: %s.'
' Rate: %s/sec.' % (self.identity, dsize, drate))
self._sys_exit(0) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"flag"
"fmt"
"net/internal/socktest"
"os"
"runtime"
"slices"
"strings"
"sync"
"testing"
"time"
)
var (
sw socktest.Switch
// uninstallTestHooks runs just before a run of benchmarks.
testHookUninstaller sync.Once
)
var (
testTCPBig = flag.Bool("tcpbig", false, "whether to test massive size of data per read or write call on TCP connection")
testDNSFlood = flag.Bool("dnsflood", false, "whether to test DNS query flooding")
// If external IPv4 connectivity exists, we can try dialing
// non-node/interface local scope IPv4 addresses.
// On Windows, Lookup APIs may not return IPv4-related
// resource records when a node has no external IPv4
// connectivity.
testIPv4 = flag.Bool("ipv4", true, "assume external IPv4 connectivity exists")
// If external IPv6 connectivity exists, we can try dialing
// non-node/interface local scope IPv6 addresses.
// On Windows, Lookup APIs may not return IPv6-related
// resource records when a node has no external IPv6
// connectivity.
testIPv6 = flag.Bool("ipv6", false, "assume external IPv6 connectivity exists")
)
// TestMain installs the socktest hooks, runs the tests, and on verbose
// runs reports leftover goroutines and sockets before force-closing them.
func TestMain(m *testing.M) {
	setupTestData()
	installTestHooks()

	st := m.Run()

	testHookUninstaller.Do(uninstallTestHooks)
	if testing.Verbose() {
		printRunningGoroutines()
		printInflightSockets()
		printSocketStats()
	}
	// Ensure no sockets leak past the test binary regardless of outcome.
	forceCloseSockets()
	os.Exit(st)
}
// mustSetDeadline calls the bound method m to set a deadline on a Conn.
// If the call fails, mustSetDeadline skips t if the current GOOS is believed
// not to support deadlines, or fails the test otherwise.
// mustSetDeadline calls the bound method m to set a deadline on a Conn.
// If the call fails, mustSetDeadline skips t if the current GOOS is believed
// not to support deadlines, or fails the test otherwise.
func mustSetDeadline(t testing.TB, m func(time.Time) error, d time.Duration) {
	if err := m(time.Now().Add(d)); err != nil {
		t.Helper()
		if runtime.GOOS == "plan9" {
			t.Skipf("skipping: %s does not support deadlines", runtime.GOOS)
		}
		t.Fatal(err)
	}
}
type ipv6LinkLocalUnicastTest struct {
network, address string
nameLookup bool
}
var (
ipv6LinkLocalUnicastTCPTests []ipv6LinkLocalUnicastTest
ipv6LinkLocalUnicastUDPTests []ipv6LinkLocalUnicastTest
)
// setupTestData extends the package-level resolver test tables and the
// IPv6 link-local unicast test tables based on the connectivity and
// interfaces available on the host running the tests.
func setupTestData() {
	if supportsIPv4() {
		resolveTCPAddrTests = append(resolveTCPAddrTests, []resolveTCPAddrTest{
			{"tcp", "localhost:1", &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 1}, nil},
			{"tcp4", "localhost:2", &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 2}, nil},
		}...)
		resolveUDPAddrTests = append(resolveUDPAddrTests, []resolveUDPAddrTest{
			{"udp", "localhost:1", &UDPAddr{IP: IPv4(127, 0, 0, 1), Port: 1}, nil},
			{"udp4", "localhost:2", &UDPAddr{IP: IPv4(127, 0, 0, 1), Port: 2}, nil},
		}...)
		resolveIPAddrTests = append(resolveIPAddrTests, []resolveIPAddrTest{
			{"ip", "localhost", &IPAddr{IP: IPv4(127, 0, 0, 1)}, nil},
			{"ip4", "localhost", &IPAddr{IP: IPv4(127, 0, 0, 1)}, nil},
		}...)
	}

	if supportsIPv6() {
		resolveTCPAddrTests = append(resolveTCPAddrTests, resolveTCPAddrTest{"tcp6", "localhost:3", &TCPAddr{IP: IPv6loopback, Port: 3}, nil})
		resolveUDPAddrTests = append(resolveUDPAddrTests, resolveUDPAddrTest{"udp6", "localhost:3", &UDPAddr{IP: IPv6loopback, Port: 3}, nil})
		resolveIPAddrTests = append(resolveIPAddrTests, resolveIPAddrTest{"ip6", "localhost", &IPAddr{IP: IPv6loopback}, nil})

		// Issue 20911: don't return IPv4 addresses for
		// Resolve*Addr calls of the IPv6 unspecified address.
		resolveTCPAddrTests = append(resolveTCPAddrTests, resolveTCPAddrTest{"tcp", "[::]:4", &TCPAddr{IP: IPv6unspecified, Port: 4}, nil})
		resolveUDPAddrTests = append(resolveUDPAddrTests, resolveUDPAddrTest{"udp", "[::]:4", &UDPAddr{IP: IPv6unspecified, Port: 4}, nil})
		resolveIPAddrTests = append(resolveIPAddrTests, resolveIPAddrTest{"ip", "::", &IPAddr{IP: IPv6unspecified}, nil})
	}

	// Zone-qualified (scoped) addresses on the loopback interface,
	// referenced both by name and by numeric index.
	ifi := loopbackInterface()
	if ifi != nil {
		index := fmt.Sprintf("%v", ifi.Index)
		resolveTCPAddrTests = append(resolveTCPAddrTests, []resolveTCPAddrTest{
			{"tcp6", "[fe80::1%" + ifi.Name + "]:1", &TCPAddr{IP: ParseIP("fe80::1"), Port: 1, Zone: zoneCache.name(ifi.Index)}, nil},
			{"tcp6", "[fe80::1%" + index + "]:2", &TCPAddr{IP: ParseIP("fe80::1"), Port: 2, Zone: index}, nil},
		}...)
		resolveUDPAddrTests = append(resolveUDPAddrTests, []resolveUDPAddrTest{
			{"udp6", "[fe80::1%" + ifi.Name + "]:1", &UDPAddr{IP: ParseIP("fe80::1"), Port: 1, Zone: zoneCache.name(ifi.Index)}, nil},
			{"udp6", "[fe80::1%" + index + "]:2", &UDPAddr{IP: ParseIP("fe80::1"), Port: 2, Zone: index}, nil},
		}...)
		resolveIPAddrTests = append(resolveIPAddrTests, []resolveIPAddrTest{
			{"ip6", "fe80::1%" + ifi.Name, &IPAddr{IP: ParseIP("fe80::1"), Zone: zoneCache.name(ifi.Index)}, nil},
			{"ip6", "fe80::1%" + index, &IPAddr{IP: ParseIP("fe80::1"), Zone: index}, nil},
		}...)
	}

	addr := ipv6LinkLocalUnicastAddr(ifi)
	if addr != "" {
		if runtime.GOOS != "dragonfly" {
			ipv6LinkLocalUnicastTCPTests = append(ipv6LinkLocalUnicastTCPTests, []ipv6LinkLocalUnicastTest{
				{"tcp", "[" + addr + "%" + ifi.Name + "]:0", false},
			}...)
			ipv6LinkLocalUnicastUDPTests = append(ipv6LinkLocalUnicastUDPTests, []ipv6LinkLocalUnicastTest{
				{"udp", "[" + addr + "%" + ifi.Name + "]:0", false},
			}...)
		}
		ipv6LinkLocalUnicastTCPTests = append(ipv6LinkLocalUnicastTCPTests, []ipv6LinkLocalUnicastTest{
			{"tcp6", "[" + addr + "%" + ifi.Name + "]:0", false},
		}...)
		ipv6LinkLocalUnicastUDPTests = append(ipv6LinkLocalUnicastUDPTests, []ipv6LinkLocalUnicastTest{
			{"udp6", "[" + addr + "%" + ifi.Name + "]:0", false},
		}...)
		// OS-specific scoped-literal name lookups for the loopback zone.
		switch runtime.GOOS {
		case "darwin", "ios", "dragonfly", "freebsd", "openbsd", "netbsd":
			ipv6LinkLocalUnicastTCPTests = append(ipv6LinkLocalUnicastTCPTests, []ipv6LinkLocalUnicastTest{
				{"tcp", "[localhost%" + ifi.Name + "]:0", true},
				{"tcp6", "[localhost%" + ifi.Name + "]:0", true},
			}...)
			ipv6LinkLocalUnicastUDPTests = append(ipv6LinkLocalUnicastUDPTests, []ipv6LinkLocalUnicastTest{
				{"udp", "[localhost%" + ifi.Name + "]:0", true},
				{"udp6", "[localhost%" + ifi.Name + "]:0", true},
			}...)
		case "linux":
			ipv6LinkLocalUnicastTCPTests = append(ipv6LinkLocalUnicastTCPTests, []ipv6LinkLocalUnicastTest{
				{"tcp", "[ip6-localhost%" + ifi.Name + "]:0", true},
				{"tcp6", "[ip6-localhost%" + ifi.Name + "]:0", true},
			}...)
			ipv6LinkLocalUnicastUDPTests = append(ipv6LinkLocalUnicastUDPTests, []ipv6LinkLocalUnicastTest{
				{"udp", "[ip6-localhost%" + ifi.Name + "]:0", true},
				{"udp6", "[ip6-localhost%" + ifi.Name + "]:0", true},
			}...)
		}
	}
}
// printRunningGoroutines dumps any goroutines created by the net package
// that are still running at the end of the test run.
func printRunningGoroutines() {
	stacks := runningGoroutines()
	if len(stacks) == 0 {
		return
	}
	fmt.Fprintf(os.Stderr, "Running goroutines:\n")
	for _, stack := range stacks {
		fmt.Fprintf(os.Stderr, "%v\n", stack)
	}
	fmt.Fprintf(os.Stderr, "\n")
}
// runningGoroutines returns a list of remaining goroutines.
func runningGoroutines() []string {
	buf := make([]byte, 2<<20)
	buf = buf[:runtime.Stack(buf, true)]
	var stacks []string
	for _, g := range strings.Split(string(buf), "\n\n") {
		// Drop the goroutine header line and keep only the stack body.
		_, stack, _ := strings.Cut(g, "\n")
		stack = strings.TrimSpace(stack)
		if strings.Contains(stack, "created by net") {
			stacks = append(stacks, stack)
		}
	}
	slices.Sort(stacks)
	return stacks
}
// printInflightSockets dumps sockets the socktest switch still tracks
// as open at the end of the test run.
func printInflightSockets() {
	inflight := sw.Sockets()
	if len(inflight) == 0 {
		return
	}
	fmt.Fprintf(os.Stderr, "Inflight sockets:\n")
	for s, so := range inflight {
		fmt.Fprintf(os.Stderr, "%v: %v\n", s, so)
	}
	fmt.Fprintf(os.Stderr, "\n")
}
func printSocketStats() {
sts := sw.Stats()
if len(sts) == 0 {
return
}
fmt.Fprintf(os.Stderr, "Socket statistical information:\n")
for _, st := range sts {
fmt.Fprintf(os.Stderr, "%v\n", st)
}
fmt.Fprintf(os.Stderr, "\n")
} | go | github | https://github.com/golang/go | src/net/main_test.go |
"""
Support for ISY994 lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/isy994/
"""
import logging
from homeassistant.components.isy994 import (
HIDDEN_STRING, ISY, SENSOR_STRING, ISYDeviceABC)
from homeassistant.components.light import ATTR_BRIGHTNESS
from homeassistant.const import STATE_OFF, STATE_ON
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the ISY994 platform."""
    logger = logging.getLogger(__name__)
    if ISY is None or not ISY.connected:
        logger.error('A connection has not been made to the ISY controller.')
        return False
    # Import dimmable nodes, skipping sensor nodes by name.
    lights = []
    for (path, node) in ISY.nodes:
        if not node.dimmable or SENSOR_STRING in node.name:
            continue
        if HIDDEN_STRING in path:
            # Propagate the hidden marker from the folder path to the name.
            node.name += HIDDEN_STRING
        lights.append(ISYLightDevice(node))
    add_devices(lights)
class ISYLightDevice(ISYDeviceABC):
"""Representation of a ISY light."""
_domain = 'light'
_dtype = 'analog'
_attrs = {ATTR_BRIGHTNESS: 'value'}
_onattrs = [ATTR_BRIGHTNESS]
_states = [STATE_ON, STATE_OFF]
def _attr_filter(self, attr):
"""Filter brightness out of entity while off."""
if ATTR_BRIGHTNESS in attr and not self.is_on:
del attr[ATTR_BRIGHTNESS]
return attr | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Adam Miller (maxamillion@fedoraproject.org)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- "Name of a service to add/remove to/from firewalld - service must be listed in output of firewall-cmd --get-services."
required: false
default: null
port:
description:
- "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges."
required: false
default: null
rich_rule:
description:
- "Rich rule to add/remove to/from firewalld."
required: false
default: null
source:
description:
- 'The source/network you would like to add/remove to/from firewalld'
required: false
default: null
version_added: "2.0"
interface:
description:
- 'The interface you would like to add/remove to/from a zone in firewalld'
required: false
default: null
version_added: "2.1"
zone:
description:
- >
The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices
can be extended based on per-system configs, listed here are "out of the box" defaults).
required: false
default: system-default(public)
choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
permanent:
description:
- >
Should this configuration be in the running firewalld configuration or persist across reboots. As of Ansible version 2.3, permanent operations can
operate on firewalld configs when it's not running (requires firewalld >= 3.0.9)
required: false
default: null
immediate:
description:
- "Should this configuration be applied immediately, if set as permanent"
required: false
default: false
version_added: "1.9"
state:
description:
- "Should this port accept(enabled) or reject(disabled) connections."
required: true
choices: [ "enabled", "disabled" ]
timeout:
description:
- "The amount of time the rule should be in effect for when non-permanent."
required: false
default: 0
masquerade:
description:
- 'The masquerade setting you would like to enable/disable to/from zones within firewalld'
required: false
default: null
version_added: "2.1"
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, which may not be installed by default if the distribution switched to python 3
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = '''
- firewalld:
service: https
permanent: true
state: enabled
- firewalld:
port: 8081/tcp
permanent: true
state: disabled
- firewalld:
port: 161-162/udp
permanent: true
state: enabled
- firewalld:
zone: dmz
service: http
permanent: true
state: enabled
- firewalld:
rich_rule: 'rule service name="ftp" audit limit value="1/m" accept'
permanent: true
state: enabled
- firewalld:
source: 192.0.2.0/24
zone: internal
state: enabled
- firewalld:
zone: trusted
interface: eth2
permanent: true
state: enabled
- firewalld:
masquerade: yes
state: enabled
permanent: true
zone: dmz
'''
from ansible.module_utils.basic import AnsibleModule
import sys
#####################
# Globals
#
fw = None  # FirewallClient (online) or Firewall_test (offline) instance, set in main()
module = None  # AnsibleModule instance, set in main() so helpers can fail_json()
fw_offline = False  # True when operating on on-disk configs without a running daemon
Rich_Rule = None  # imported from firewall.client at runtime in main()
FirewallClientZoneSettings = None  # imported from firewall.client at runtime in main()
#####################
# exception handling
#
def action_handler(action_func, action_func_args):
    """
    Call ``action_func(*action_func_args)``, converting any exception into
    a module failure with a (hopefully) useful error message.
    """
    try:
        return action_func(*action_func_args)
    except Exception:
        # sys.exc_info() instead of ``except ... as e`` keeps the
        # python 2.4 shippable ci tests happy
        err = sys.exc_info()[1]
        # Collect extra context for commonly known errors to help the
        # user diagnose what's wrong.
        hints = []
        if "INVALID_SERVICE" in "%s" % err:
            hints.append("Services are defined by port/tcp relationship and named as they are in /etc/services (on most systems)")
        if hints:
            module.fail_json(
                msg='ERROR: Exception caught: %s %s' % (err, ', '.join(hints))
            )
        else:
            module.fail_json(msg='ERROR: Exception caught: %s' % err)
#####################
# fw_offline helpers
#
def get_fw_zone_settings(zone):
    """Return ``(zone_object, zone_settings)`` for ``zone``.

    Uses the on-disk config API when the daemon is not running
    (``fw_offline``), the D-Bus client API otherwise.
    """
    if fw_offline:
        zone_obj = fw.config.get_zone(zone)
        settings = FirewallClientZoneSettings(
            list(fw.config.get_zone_config(zone_obj))
        )
    else:
        zone_obj = fw.config().getZoneByName(zone)
        settings = zone_obj.getSettings()
    return zone_obj, settings
def update_fw_settings(fw_zone, fw_settings):
    """Persist modified ``fw_settings`` back onto ``fw_zone``."""
    if not fw_offline:
        fw_zone.update(fw_settings)
    else:
        # Offline mode writes straight to the on-disk zone config.
        fw.config.set_zone_config(fw_zone, fw_settings.settings)
#####################
# masquerade handling
#
def get_masquerade_enabled(zone):
    """Return True if masquerading is active on ``zone`` in the runtime config."""
    # Idiom: return the comparison directly instead of if-True-else-False.
    return fw.queryMasquerade(zone) is True
def get_masquerade_enabled_permanent(zone):
    """Return True if masquerading is enabled in the permanent config of ``zone``."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Idiom: return the comparison directly instead of if-True-else-False.
    return fw_settings.getMasquerade() is True
def set_masquerade_enabled(zone):
    """Enable masquerading on ``zone`` in the runtime (non-permanent) config."""
    fw.addMasquerade(zone)
def set_masquerade_disabled(zone):
    """Disable masquerading on ``zone`` in the runtime (non-permanent) config."""
    fw.removeMasquerade(zone)
def set_masquerade_permanent(zone, masquerade):
    """Set the permanent masquerade flag on ``zone`` to boolean ``masquerade``."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.setMasquerade(masquerade)
    update_fw_settings(fw_zone, fw_settings)
################
# port handling
#
def get_port_enabled(zone, port_proto):
    """Return True if ``port_proto`` ([port, protocol]) is open on ``zone``.

    Handles both the offline (on-disk) and online (daemon) code paths.
    """
    if fw_offline:
        fw_zone, fw_settings = get_fw_zone_settings(zone)
        ports_list = fw_settings.getPorts()
    else:
        ports_list = fw.getPorts(zone)
    # Idiom: return the membership test directly.
    return port_proto in ports_list
def set_port_enabled(zone, port, protocol, timeout):
    """Open ``port``/``protocol`` on ``zone`` at runtime; ``timeout`` seconds if non-zero."""
    fw.addPort(zone, port, protocol, timeout)
def set_port_disabled(zone, port, protocol):
    """Close ``port``/``protocol`` on ``zone`` in the runtime config."""
    fw.removePort(zone, port, protocol)
def get_port_enabled_permanent(zone, port_proto):
    """Return True if ``port_proto`` ([port, protocol]) is in the permanent config of ``zone``."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Permanent settings store ports as tuples, so convert before comparing.
    return tuple(port_proto) in fw_settings.getPorts()
def set_port_enabled_permanent(zone, port, protocol):
    """Open ``port``/``protocol`` on ``zone`` in the permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addPort(port, protocol)
    update_fw_settings(fw_zone, fw_settings)
def set_port_disabled_permanent(zone, port, protocol):
    """Close ``port``/``protocol`` on ``zone`` in the permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removePort(port, protocol)
    update_fw_settings(fw_zone, fw_settings)
####################
# source handling
#
def get_source(zone, source):
    """Return True if ``source`` (address/CIDR) is bound to ``zone`` (permanent config)."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Idiom: return the membership test directly.
    return source in fw_settings.getSources()
def add_source(zone, source):
    """Bind ``source`` (address/CIDR) to ``zone``."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addSource(source)
    update_fw_settings(fw_zone, fw_settings)
def remove_source(zone, source):
    """Unbind ``source`` (address/CIDR) from ``zone``."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeSource(source)
    update_fw_settings(fw_zone, fw_settings)
####################
# interface handling
#
def get_interface(zone, interface):
    """Return True if ``interface`` is currently assigned to ``zone``.

    Handles both the offline (on-disk) and online (daemon) code paths.
    """
    if fw_offline:
        fw_zone, fw_settings = get_fw_zone_settings(zone)
        interface_list = fw_settings.getInterfaces()
    else:
        interface_list = fw.getInterfaces(zone)
    # BUG FIX: the membership test previously called fw.getInterfaces(zone)
    # directly, bypassing the offline branch that computed interface_list.
    return interface in interface_list
def change_zone_of_interface(zone, interface):
    """Move ``interface`` into ``zone`` in the runtime config."""
    fw.changeZoneOfInterface(zone, interface)
def remove_interface(zone, interface):
    """Remove ``interface`` from ``zone`` in the runtime config."""
    fw.removeInterface(zone, interface)
def get_interface_permanent(zone, interface):
    """Return True if ``interface`` is assigned to ``zone`` in the permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Idiom: return the membership test directly.
    return interface in fw_settings.getInterfaces()
def change_zone_of_interface_permanent(zone, interface):
    """Move ``interface`` into ``zone`` in the permanent configuration.

    If the interface is currently assigned to another zone it is removed
    from that zone first.
    """
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    if fw_offline:
        iface_zone_objs = []
        # BUG FIX: the loop previously reused the name ``zone`` as its loop
        # variable, clobbering the target-zone parameter before it was
        # compared against old_zone_obj.name below.
        for zone_name in fw.config.get_zones():
            candidate = fw.config.get_zone(zone_name)
            if interface in candidate.interfaces:
                iface_zone_objs.append(candidate)
        if len(iface_zone_objs) > 1:
            # Even if it shouldn't happen, it's actually possible that
            # the same interface is in several zone XML files
            module.fail_json(
                msg='ERROR: interface {0} is in {1} zone XML file, can only be in one'.format(
                    interface,
                    len(iface_zone_objs)
                )
            )
        if iface_zone_objs:
            old_zone_obj = iface_zone_objs[0]
            if old_zone_obj.name != zone:
                old_zone_settings = FirewallClientZoneSettings(
                    fw.config.get_zone_config(old_zone_obj)
                )
                old_zone_settings.removeInterface(interface)  # remove from old
                fw.config.set_zone_config(old_zone_obj, old_zone_settings.settings)
                fw_settings.addInterface(interface)  # add to new
                fw.config.set_zone_config(fw_zone, fw_settings.settings)
        else:
            # BUG FIX: previously iface_zone_objs[0] raised IndexError when the
            # interface was not present in any zone XML; just add it to the
            # target zone in that case.
            fw_settings.addInterface(interface)
            fw.config.set_zone_config(fw_zone, fw_settings.settings)
    else:
        old_zone_name = fw.config().getZoneOfInterface(interface)
        if old_zone_name != zone:
            if old_zone_name:
                old_zone_obj = fw.config().getZoneByName(old_zone_name)
                old_zone_settings = old_zone_obj.getSettings()
                old_zone_settings.removeInterface(interface)  # remove from old
                old_zone_obj.update(old_zone_settings)
            fw_settings.addInterface(interface)  # add to new
            fw_zone.update(fw_settings)
def remove_interface_permanent(zone, interface):
    """Remove ``interface`` from ``zone`` in the permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeInterface(interface)
    update_fw_settings(fw_zone, fw_settings)
####################
# service handling
#
def get_service_enabled(zone, service):
    """Return True if ``service`` is enabled on ``zone`` in the runtime config.

    NOTE(review): unlike get_port_enabled there is no fw_offline branch here,
    so this likely only works against a running daemon — confirm.
    """
    # Idiom: return the membership test directly.
    return service in fw.getServices(zone)
def set_service_enabled(zone, service, timeout):
    """Enable ``service`` on ``zone`` at runtime; ``timeout`` seconds if non-zero."""
    fw.addService(zone, service, timeout)
def set_service_disabled(zone, service):
    """Disable ``service`` on ``zone`` in the runtime config."""
    fw.removeService(zone, service)
def get_service_enabled_permanent(zone, service):
    """Return True if ``service`` is enabled in the permanent config of ``zone``."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Idiom: return the membership test directly.
    return service in fw_settings.getServices()
def set_service_enabled_permanent(zone, service):
    """Enable ``service`` on ``zone`` in the permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addService(service)
    update_fw_settings(fw_zone, fw_settings)
def set_service_disabled_permanent(zone, service):
    """Disable ``service`` on ``zone`` in the permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeService(service)
    update_fw_settings(fw_zone, fw_settings)
####################
# rich rule handling
#
def get_rich_rule_enabled(zone, rule):
    """Return True if rich rule ``rule`` is active on ``zone`` (runtime config)."""
    # Convert the rule string to firewalld's canonical format before
    # checking whether it is present.
    rule = str(Rich_Rule(rule_str=rule))
    # Idiom: return the membership test directly.
    return rule in fw.getRichRules(zone)
def set_rich_rule_enabled(zone, rule, timeout):
    """Add rich rule ``rule`` to ``zone`` at runtime; ``timeout`` seconds if non-zero."""
    fw.addRichRule(zone, rule, timeout)
def set_rich_rule_disabled(zone, rule):
    """Remove rich rule ``rule`` from ``zone`` in the runtime config."""
    fw.removeRichRule(zone, rule)
def get_rich_rule_enabled_permanent(zone, rule):
    """Return True if rich rule ``rule`` is in the permanent config of ``zone``."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    # Convert the rule string to firewalld's canonical format before
    # checking whether it is present.
    rule = str(Rich_Rule(rule_str=rule))
    # Idiom: return the membership test directly.
    return rule in fw_settings.getRichRules()
def set_rich_rule_enabled_permanent(zone, rule):
    """Add rich rule ``rule`` to ``zone`` in the permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.addRichRule(rule)
    update_fw_settings(fw_zone, fw_settings)
def set_rich_rule_disabled_permanent(zone, rule):
    """Remove rich rule ``rule`` from ``zone`` in the permanent config."""
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    fw_settings.removeRichRule(rule)
    update_fw_settings(fw_zone, fw_settings)
def main():
    """Entry point: parse module parameters and apply the requested firewalld change."""
    # make module global so we don't have to pass it to action_handler every
    # function call.
    # BUG FIX: ``global module`` was declared twice; once is enough.
    global module
    module = AnsibleModule(
        argument_spec=dict(
            service=dict(required=False, default=None),
            port=dict(required=False, default=None),
            rich_rule=dict(required=False, default=None),
            zone=dict(required=False, default=None),
            immediate=dict(type='bool', default=False),
            source=dict(required=False, default=None),
            permanent=dict(type='bool', required=False, default=None),
            state=dict(choices=['enabled', 'disabled'], required=True),
            timeout=dict(type='int', required=False, default=0),
            interface=dict(required=False, default=None),
            masquerade=dict(required=False, default=None),
            # NOTE(review): 'offline' is accepted but never read below; the
            # offline mode is auto-detected instead — confirm this is intended.
            offline=dict(type='bool', required=False, default=None),
        ),
        supports_check_mode=True
    )

    ## Handle running (online) daemon vs non-running (offline) daemon
    global fw
    global fw_offline
    global Rich_Rule
    global FirewallClientZoneSettings

    ## Imports
    try:
        import firewall.config
        FW_VERSION = firewall.config.VERSION

        from firewall.client import Rich_Rule
        from firewall.client import FirewallClient
        fw = None
        fw_offline = False

        try:
            fw = FirewallClient()
            fw.getDefaultZone()
        except AttributeError:
            ## Firewalld is not currently running, permanent-only operations
            ## Import other required parts of the firewalld API
            ##
            ## NOTE:
            ## online and offline operations do not share a common firewalld API
            from firewall.core.fw_test import Firewall_test
            from firewall.client import FirewallClientZoneSettings
            fw = Firewall_test()
            fw.start()
            fw_offline = True
    except ImportError:
        ## Make python 2.4 shippable ci tests happy
        e = sys.exc_info()[1]
        # BUG FIX: version numbers in messages were 2.0.11/3.0.9, but firewalld
        # versions are 0.x (see DOCUMENTATION: firewalld >= 0.2.11).
        module.fail_json(msg='firewalld and its python 2 module are required for this module, version 0.2.11 or newer required '
                             '(0.3.9 or newer for offline operations) \n %s' % e)

    if fw_offline:
        ## Pre-run version checking
        if FW_VERSION < "0.3.9":
            module.fail_json(msg='unsupported version of firewalld, offline operations require >= 0.3.9')
    else:
        ## Pre-run version checking
        if FW_VERSION < "0.2.11":
            module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11')

        ## Check for firewalld running
        try:
            if fw.connected is False:
                module.fail_json(msg='firewalld service must be running, or try with offline=true')
        except AttributeError:
            module.fail_json(msg="firewalld connection can't be established,\
                    installed version (%s) likely too old. Requires firewalld >= 0.2.11" % FW_VERSION)

    ## Verify required params are provided
    if module.params['source'] is None and module.params['permanent'] is None:
        module.fail_json(msg='permanent is a required parameter')

    if module.params['interface'] is not None and module.params['zone'] is None:
        # BUG FIX: was module.fail(), which does not exist on AnsibleModule.
        module.fail_json(msg='zone is a required parameter')

    if module.params['immediate'] and fw_offline:
        # BUG FIX: was module.fail(), which does not exist on AnsibleModule.
        module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon')

    ## Global Vars
    changed = False
    msgs = []
    service = module.params['service']
    rich_rule = module.params['rich_rule']
    source = module.params['source']

    if module.params['port'] is not None:
        # BUG FIX: str.split('/') never yields None, so the old
        # ``protocol is None`` check could not fire; a malformed port value
        # used to crash with an unhandled ValueError instead of failing
        # cleanly.
        try:
            port, protocol = module.params['port'].strip().split('/')
        except ValueError:
            module.fail_json(msg='improper port format (missing protocol?)')
        if not protocol:
            module.fail_json(msg='improper port format (missing protocol?)')
    else:
        port = None

    if module.params['zone'] is not None:
        zone = module.params['zone']
    else:
        if fw_offline:
            zone = fw.get_default_zone()
        else:
            zone = fw.getDefaultZone()

    permanent = module.params['permanent']
    desired_state = module.params['state']
    immediate = module.params['immediate']
    timeout = module.params['timeout']
    interface = module.params['interface']
    masquerade = module.params['masquerade']

    ## Only one content-modifying parameter may be supplied per invocation.
    modification_count = 0
    for param in (service, port, rich_rule, interface, masquerade):
        if param is not None:
            modification_count += 1
    if modification_count > 1:
        module.fail_json(msg='can only operate on port, service, rich_rule, masquerade or interface at once')

    if service is not None:
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_service_enabled_permanent,
                (zone, service)
            )
            is_enabled_immediate = action_handler(
                get_service_enabled,
                (zone, service)
            )
            msgs.append('Permanent and Non-Permanent(immediate) operation')

            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if not is_enabled_permanent:
                    action_handler(
                        set_service_enabled_permanent,
                        (zone, service)
                    )
                    changed = True
                if not is_enabled_immediate:
                    action_handler(
                        set_service_enabled,
                        (zone, service, timeout)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if is_enabled_permanent:
                    action_handler(
                        set_service_disabled_permanent,
                        (zone, service)
                    )
                    changed = True
                if is_enabled_immediate:
                    action_handler(
                        set_service_disabled,
                        (zone, service)
                    )
                    changed = True
        elif permanent and not immediate:
            is_enabled = action_handler(
                get_service_enabled_permanent,
                (zone, service)
            )
            msgs.append('Permanent operation')

            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_service_enabled_permanent,
                        (zone, service)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_service_disabled_permanent,
                        (zone, service)
                    )
                    changed = True
        elif immediate and not permanent:
            is_enabled = action_handler(
                get_service_enabled,
                (zone, service)
            )
            msgs.append('Non-permanent operation')

            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_service_enabled,
                        (zone, service, timeout)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_service_disabled,
                        (zone, service)
                    )
                    changed = True

        if changed is True:
            msgs.append("Changed service %s to %s" % (service, desired_state))

    # FIXME - source type does not handle non-permanent mode, this was an
    # oversight in the past.
    if source is not None:
        is_enabled = action_handler(get_source, (zone, source))
        if desired_state == "enabled":
            if is_enabled is False:
                if module.check_mode:
                    module.exit_json(changed=True)

                action_handler(add_source, (zone, source))
                changed = True
                msgs.append("Added %s to zone %s" % (source, zone))
        elif desired_state == "disabled":
            if is_enabled is True:
                if module.check_mode:
                    module.exit_json(changed=True)

                action_handler(remove_source, (zone, source))
                changed = True
                msgs.append("Removed %s from zone %s" % (source, zone))

    if port is not None:
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_port_enabled_permanent,
                (zone, [port, protocol])
            )
            is_enabled_immediate = action_handler(
                get_port_enabled,
                (zone, [port, protocol])
            )
            msgs.append('Permanent and Non-Permanent(immediate) operation')

            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if not is_enabled_permanent:
                    action_handler(
                        set_port_enabled_permanent,
                        (zone, port, protocol)
                    )
                    changed = True
                if not is_enabled_immediate:
                    action_handler(
                        set_port_enabled,
                        (zone, port, protocol, timeout)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if is_enabled_permanent:
                    action_handler(
                        set_port_disabled_permanent,
                        (zone, port, protocol)
                    )
                    changed = True
                if is_enabled_immediate:
                    action_handler(
                        set_port_disabled,
                        (zone, port, protocol)
                    )
                    changed = True
        elif permanent and not immediate:
            is_enabled = action_handler(
                get_port_enabled_permanent,
                (zone, [port, protocol])
            )
            msgs.append('Permanent operation')

            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_port_enabled_permanent,
                        (zone, port, protocol)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_port_disabled_permanent,
                        (zone, port, protocol)
                    )
                    changed = True
        # consistency fix: this was a separate ``if``; the three mode
        # conditions are mutually exclusive so behavior is unchanged.
        elif immediate and not permanent:
            is_enabled = action_handler(
                get_port_enabled,
                (zone, [port, protocol])
            )
            msgs.append('Non-permanent operation')

            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_port_enabled,
                        (zone, port, protocol, timeout)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_port_disabled,
                        (zone, port, protocol)
                    )
                    changed = True

        if changed is True:
            msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol),
                                                   desired_state))

    if rich_rule is not None:
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_rich_rule_enabled_permanent,
                (zone, rich_rule)
            )
            is_enabled_immediate = action_handler(
                get_rich_rule_enabled,
                (zone, rich_rule)
            )
            msgs.append('Permanent and Non-Permanent(immediate) operation')

            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if not is_enabled_permanent:
                    action_handler(
                        set_rich_rule_enabled_permanent,
                        (zone, rich_rule)
                    )
                    changed = True
                if not is_enabled_immediate:
                    action_handler(
                        set_rich_rule_enabled,
                        (zone, rich_rule, timeout)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if is_enabled_permanent:
                    action_handler(
                        set_rich_rule_disabled_permanent,
                        (zone, rich_rule)
                    )
                    changed = True
                if is_enabled_immediate:
                    action_handler(
                        set_rich_rule_disabled,
                        (zone, rich_rule)
                    )
                    changed = True
        # consistency fix: ``if`` -> ``elif`` (mutually exclusive conditions).
        elif permanent and not immediate:
            is_enabled = action_handler(
                get_rich_rule_enabled_permanent,
                (zone, rich_rule)
            )
            msgs.append('Permanent operation')

            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_rich_rule_enabled_permanent,
                        (zone, rich_rule)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_rich_rule_disabled_permanent,
                        (zone, rich_rule)
                    )
                    changed = True
        # consistency fix: ``if`` -> ``elif`` (mutually exclusive conditions).
        elif immediate and not permanent:
            is_enabled = action_handler(
                get_rich_rule_enabled,
                (zone, rich_rule)
            )
            msgs.append('Non-permanent operation')

            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_rich_rule_enabled,
                        (zone, rich_rule, timeout)
                    )
                    changed = True
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(
                        set_rich_rule_disabled,
                        (zone, rich_rule)
                    )
                    changed = True

        if changed is True:
            msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))

    if interface is not None:
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_interface_permanent,
                (zone, interface)
            )
            is_enabled_immediate = action_handler(
                get_interface,
                (zone, interface)
            )
            msgs.append('Permanent and Non-Permanent(immediate) operation')

            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if not is_enabled_permanent:
                    change_zone_of_interface_permanent(zone, interface)
                    changed = True
                if not is_enabled_immediate:
                    change_zone_of_interface(zone, interface)
                    changed = True
                if changed:
                    msgs.append("Changed %s to zone %s" % (interface, zone))
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if is_enabled_permanent:
                    remove_interface_permanent(zone, interface)
                    changed = True
                if is_enabled_immediate:
                    remove_interface(zone, interface)
                    changed = True
                if changed:
                    msgs.append("Removed %s from zone %s" % (interface, zone))
        elif permanent and not immediate:
            is_enabled = action_handler(
                get_interface_permanent,
                (zone, interface)
            )
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    change_zone_of_interface_permanent(zone, interface)
                    changed = True
                    msgs.append("Changed %s to zone %s" % (interface, zone))
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    remove_interface_permanent(zone, interface)
                    changed = True
                    msgs.append("Removed %s from zone %s" % (interface, zone))
        elif immediate and not permanent:
            is_enabled = action_handler(
                get_interface,
                (zone, interface)
            )
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    change_zone_of_interface(zone, interface)
                    changed = True
                    msgs.append("Changed %s to zone %s" % (interface, zone))
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    remove_interface(zone, interface)
                    changed = True
                    msgs.append("Removed %s from zone %s" % (interface, zone))

    if masquerade is not None:
        if immediate and permanent:
            is_enabled_permanent = action_handler(
                get_masquerade_enabled_permanent,
                (zone,)
            )
            is_enabled_immediate = action_handler(get_masquerade_enabled, (zone,))
            msgs.append('Permanent and Non-Permanent(immediate) operation')

            if desired_state == "enabled":
                if not is_enabled_permanent or not is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if not is_enabled_permanent:
                    action_handler(set_masquerade_permanent, (zone, True))
                    changed = True
                if not is_enabled_immediate:
                    action_handler(set_masquerade_enabled, (zone,))
                    changed = True
                if changed:
                    msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled_permanent or is_enabled_immediate:
                    if module.check_mode:
                        module.exit_json(changed=True)
                if is_enabled_permanent:
                    action_handler(set_masquerade_permanent, (zone, False))
                    changed = True
                if is_enabled_immediate:
                    action_handler(set_masquerade_disabled, (zone,))
                    changed = True
                if changed:
                    msgs.append("Removed masquerade from zone %s" % (zone))
        elif permanent and not immediate:
            is_enabled = action_handler(get_masquerade_enabled_permanent, (zone,))
            msgs.append('Permanent operation')

            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(set_masquerade_permanent, (zone, True))
                    changed = True
                    msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    action_handler(set_masquerade_permanent, (zone, False))
                    changed = True
                    msgs.append("Removed masquerade from zone %s" % (zone))
        elif immediate and not permanent:
            is_enabled = action_handler(get_masquerade_enabled, (zone,))
            msgs.append('Non-permanent operation')

            if desired_state == "enabled":
                if is_enabled is False:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    # BUG FIX: args must be a tuple -- ``(zone)`` is just the
                    # zone string, which action_handler would unpack
                    # character-by-character.
                    action_handler(set_masquerade_enabled, (zone,))
                    changed = True
                    msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled is True:
                    if module.check_mode:
                        module.exit_json(changed=True)

                    # BUG FIX: same tuple fix as above.
                    action_handler(set_masquerade_disabled, (zone,))
                    changed = True
                    msgs.append("Removed masquerade from zone %s" % (zone))

    if fw_offline:
        msgs.append("(offline operation: only on-disk configs were altered)")
    module.exit_json(changed=changed, msg=', '.join(msgs))


if __name__ == '__main__':
    main()
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GDB support for Chrome types.
Add this to your gdb by amending your ~/.gdbinit as follows:
python
import sys
sys.path.insert(0, "/path/to/tools/gdb/")
import gdb_chrome
end
This module relies on the WebKit gdb module already existing in
your Python path.
Use
(gdb) p /r any_variable
to print |any_variable| without using any printers.
"""
import datetime
import gdb
import webkit
# When debugging this module, set the below variable to True, and then use
#   (gdb) python del sys.modules['gdb_chrome']
#   (gdb) python import gdb_chrome
# to reload.
_DEBUGGING = False

# Collection all Chromium printers register themselves into; installed into
# gdb at the bottom of this file.
pp_set = gdb.printing.RegexpCollectionPrettyPrinter("chromium")
def typed_ptr(ptr):
    """Prints a pointer along with its exact type.

    By default, gdb would print just the address, which takes more
    steps to interpret.
    """
    # Format as a cast expression wrapped in parentheses so the output can
    # be cut+pasted straight back into gdb.
    return '(({0}){1})'.format(ptr.dynamic_type, ptr)
class Printer(object):
    """Base class for pretty printers: holds the gdb.Value being printed."""

    def __init__(self, val):
        self.val = val
class StringPrinter(Printer):
    """Base for printers whose value should be displayed like a string."""

    def display_hint(self):
        # Tells gdb to present the to_string() result as a quoted string.
        return 'string'
class String16Printer(StringPrinter):
    """Pretty printer for string16 / std::basic_string of 16-bit characters."""

    def to_string(self):
        # Delegate UTF-16 decoding to the WebKit gdb support module.
        return webkit.ustring_to_string(self.val['_M_dataplus']['_M_p'])
pp_set.add_printer(
    'string16',
    '^string16|std::basic_string<(unsigned short|char16|base::char16).*>$',
    String16Printer)  # fix: removed stray trailing ';'
class GURLPrinter(StringPrinter):
    """Pretty printer for GURL: shows the URL spec string."""

    def to_string(self):
        spec = self.val['spec_']
        return spec
pp_set.add_printer('GURL', '^GURL$', GURLPrinter)
class FilePathPrinter(StringPrinter):
    """Pretty printer for FilePath: shows the underlying libstdc++ string buffer."""

    def to_string(self):
        inner = self.val['path_']
        return inner['_M_dataplus']['_M_p']
pp_set.add_printer('FilePath', '^FilePath$', FilePathPrinter)
class SizePrinter(Printer):
    """Render gfx::Size and variants as 'WIDTHxHEIGHT'."""

    def to_string(self):
        return '{0}x{1}'.format(self.val['width_'], self.val['height_'])
pp_set.add_printer('gfx::Size', '^gfx::(Size|SizeF|SizeBase<.*>)$', SizePrinter)
class PointPrinter(Printer):
    """Render gfx::Point and variants as 'X,Y'."""

    def to_string(self):
        return '{0},{1}'.format(self.val['x_'], self.val['y_'])
pp_set.add_printer('gfx::Point', '^gfx::(Point|PointF|PointBase<.*>)$',
                   PointPrinter)
class RectPrinter(Printer):
    """Render gfx::Rect and variants as 'ORIGIN SIZE'."""

    def to_string(self):
        return '{0} {1}'.format(self.val['origin_'], self.val['size_'])
pp_set.add_printer('gfx::Rect', '^gfx::(Rect|RectF|RectBase<.*>)$',
                   RectPrinter)
class SmartPtrPrinter(Printer):
    """Base for smart-pointer printers.

    Subclasses must define ``typename`` and a ``ptr()`` accessor returning
    the wrapped pointer value.
    """

    def to_string(self):
        return '%s%s' % (self.typename, typed_ptr(self.ptr()))
class ScopedRefPtrPrinter(SmartPtrPrinter):
    """Pretty printer for scoped_refptr<T>."""
    typename = 'scoped_refptr'

    def ptr(self):
        # The referenced object lives behind the ptr_ member.
        return self.val['ptr_']
pp_set.add_printer('scoped_refptr', '^scoped_refptr<.*>$', ScopedRefPtrPrinter)
class LinkedPtrPrinter(SmartPtrPrinter):
    """Pretty printer for linked_ptr<T>."""
    typename = 'linked_ptr'

    def ptr(self):
        # The referenced object lives behind the value_ member.
        return self.val['value_']
pp_set.add_printer('linked_ptr', '^linked_ptr<.*>$', LinkedPtrPrinter)
class WeakPtrPrinter(SmartPtrPrinter):
    """Pretty printer for base::WeakPtr: shows a typed null when invalidated."""
    typename = 'base::WeakPtr'

    def ptr(self):
        # The pointer is only meaningful while the ref-counted validity flag
        # is set; reuse ScopedRefPtrPrinter to unwrap the flag's scoped_refptr.
        flag = ScopedRefPtrPrinter(self.val['ref_']['flag_']).ptr()
        if flag and flag['is_valid_']:
            return self.val['ptr_']
        # Invalidated: present a null pointer of the same type instead.
        return gdb.Value(0).cast(self.val['ptr_'].type)
pp_set.add_printer('base::WeakPtr', '^base::WeakPtr<.*>$', WeakPtrPrinter)
class CallbackPrinter(Printer):
    """Callbacks provide no usable information so reduce the space they take."""

    def to_string(self):
        return '...'
pp_set.add_printer('base::Callback', '^base::Callback<.*>$', CallbackPrinter)
class LocationPrinter(Printer):
    """Render tracked_objects::Location as 'function()@file:line'."""

    def to_string(self):
        func = self.val['function_name_'].string()
        file_name = self.val['file_name_'].string()
        line = self.val['line_number_']
        return '%s()@%s:%s' % (func, file_name, line)
pp_set.add_printer('tracked_objects::Location', '^tracked_objects::Location$',
                   LocationPrinter)
class LockPrinter(Printer):
    """Show whether a base::Lock is held and, if so, by which thread."""

    def to_string(self):
        try:
            if self.val['owned_by_thread_']:
                return 'Locked by thread %s' % self.val['owning_thread_id_']
            else:
                return 'Unlocked'
        except gdb.error:
            # Member lookup can fail (presumably these members are compiled
            # out in some build configurations) — fall back gracefully.
            return 'Unknown state'
pp_set.add_printer('base::Lock', '^base::Lock$', LockPrinter)
class TimeDeltaPrinter(object):
    """Render base::TimeDelta via a Python datetime.timedelta."""

    def __init__(self, val):
        micros = int(val['delta_'])
        self._timedelta = datetime.timedelta(microseconds=micros)

    def timedelta(self):
        # Expose the converted value for use by other printers/scripts.
        return self._timedelta

    def to_string(self):
        return str(self._timedelta)
pp_set.add_printer('base::TimeDelta', '^base::TimeDelta$', TimeDeltaPrinter)
class TimeTicksPrinter(TimeDeltaPrinter):
    """Like TimeDeltaPrinter, but reads base::TimeTicks' ticks_ member."""

    def __init__(self, val):
        self._timedelta = datetime.timedelta(microseconds=int(val['ticks_']))
pp_set.add_printer('base::TimeTicks', '^base::TimeTicks$', TimeTicksPrinter)
class TimePrinter(object):
    """Render base::Time as a human-readable datetime."""

    def __init__(self, val):
        # kTimeTToMicrosecondsOffset converts base::Time's internal epoch to
        # the time_t epoch; it is fetched from the inferior process.
        timet_offset = gdb.parse_and_eval(
            'base::Time::kTimeTToMicrosecondsOffset')
        self._datetime = (datetime.datetime.fromtimestamp(0) +
                          datetime.timedelta(microseconds=
                                             int(val['us_'] - timet_offset)))

    def datetime(self):
        # Expose the converted value for use by other printers/scripts.
        return self._datetime

    def to_string(self):
        return str(self._datetime)
pp_set.add_printer('base::Time', '^base::Time$', TimePrinter)
class IpcMessagePrinter(Printer):
    """Summarize an IPC::Message: dynamic type, message class, and line."""

    def header(self):
        # Reinterpret the raw header_ pointer as IPC::Message::Header*.
        return self.val['header_'].cast(
            gdb.lookup_type('IPC::Message::Header').pointer())

    def to_string(self):
        message_type = self.header()['type']
        # High 16 bits select the message class (IPCMessageStart enum);
        # low 16 bits carry the line value.
        return '%s of kind %s line %s' % (
            self.val.dynamic_type,
            (message_type >> 16).cast(gdb.lookup_type('IPCMessageStart')),
            message_type & 0xffff)

    def children(self):
        yield ('header_', self.header().dereference())
        yield ('capacity_', self.val['capacity_'])
        yield ('variable_buffer_offset_', self.val['variable_buffer_offset_'])
        # Also emit the value's own (non-inherited) fields.
        for field in self.val.type.fields():
            if field.is_base_class:
                continue
            yield (field.name, self.val[field.name])
pp_set.add_printer('IPC::Message', '^IPC::Message$', IpcMessagePrinter)
class NotificationRegistrarPrinter(Printer):
    """Summarize how many notifications a content::NotificationRegistrar watches."""

    def to_string(self):
        try:
            registrations = self.val['registered_']
            # Walk the libstdc++ vector internals to count registrations.
            vector_finish = registrations['_M_impl']['_M_finish']
            vector_start = registrations['_M_impl']['_M_start']
            if vector_start == vector_finish:
                return 'Not watching notifications'
            if vector_start.dereference().type.sizeof == 0:
                # Incomplete type: b/8242773
                return 'Watching some notifications'
            return ('Watching %s notifications; '
                    'print %s->registered_ for details') % (
                        int(vector_finish - vector_start),
                        typed_ptr(self.val.address))
        except gdb.error:
            # Fall back to the bare type name when internals are unavailable.
            return 'NotificationRegistrar'
pp_set.add_printer('content::NotificationRegistrar',
                   '^content::NotificationRegistrar$',
                   NotificationRegistrarPrinter)
class SiteInstanceImplPrinter(object):
    """Summarize a content::SiteInstanceImpl (address, site, key members)."""

    def __init__(self, val):
        # Cast to the dynamic type so fields of the concrete class resolve.
        self.val = val.cast(val.dynamic_type)

    def to_string(self):
        return 'SiteInstanceImpl@%s for %s' % (
            self.val.address, self.val['site_'])

    def children(self):
        yield ('id_', self.val['id_'])
        yield ('has_site_', self.val['has_site_'])
        # Pointer-like members are only shown when non-null.
        if self.val['browsing_instance_']['ptr_']:
            yield ('browsing_instance_', self.val['browsing_instance_']['ptr_'])
        if self.val['process_']:
            yield ('process_', typed_ptr(self.val['process_']))
        if self.val['render_process_host_factory_']:
            yield ('render_process_host_factory_',
                   self.val['render_process_host_factory_'])
pp_set.add_printer('content::SiteInstanceImpl', '^content::SiteInstanceImpl$',
                   SiteInstanceImplPrinter)
class RenderProcessHostImplPrinter(object):
    """Summarize a content::RenderProcessHostImpl (address, PID, key members)."""

    def __init__(self, val):
        # Cast to the dynamic type so fields of the concrete class resolve.
        self.val = val.cast(val.dynamic_type)

    def to_string(self):
        pid = ''
        try:
            # Dig through child_process_launcher_ to find the OS process id,
            # when the launcher and its context are populated.
            child_process_launcher_ptr = (
                self.val['child_process_launcher_']['impl_']['data_']['ptr'])
            if child_process_launcher_ptr:
                context = (child_process_launcher_ptr['context_']['ptr_'])
                if context:
                    pid = ' PID %s' % str(context['process_']['process_'])
        except gdb.error:
            # The definition of the Context type may not be available.
            # b/8242773
            pass
        return 'RenderProcessHostImpl@%s%s' % (self.val.address, pid)

    def children(self):
        yield ('id_', self.val['id_'])
        yield ('render_widget_hosts_',
               self.val['render_widget_hosts_']['data_'])
        yield ('fast_shutdown_started_', self.val['fast_shutdown_started_'])
        yield ('deleting_soon_', self.val['deleting_soon_'])
        yield ('pending_views_', self.val['pending_views_'])
        yield ('visible_widgets_', self.val['visible_widgets_'])
        yield ('backgrounded_', self.val['backgrounded_'])
        yield ('widget_helper_', self.val['widget_helper_'])
        yield ('is_initialized_', self.val['is_initialized_'])
        yield ('browser_context_', typed_ptr(self.val['browser_context_']))
        yield ('sudden_termination_allowed_',
               self.val['sudden_termination_allowed_'])
        yield ('ignore_input_events_', self.val['ignore_input_events_'])
        yield ('is_guest_', self.val['is_guest_'])
pp_set.add_printer('content::RenderProcessHostImpl',
                   '^content::RenderProcessHostImpl$',
                   RenderProcessHostImplPrinter)

# Install the whole collection into gdb; replace allows hot-reloading while
# developing this module (_DEBUGGING = True).
gdb.printing.register_pretty_printer(gdb, pp_set, replace=_DEBUGGING)
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bridge\Doctrine\Tests\Fixtures;
use Doctrine\ORM\Mapping\Column;
use Doctrine\ORM\Mapping\Entity;
use Doctrine\ORM\Mapping\Id;
/**
 * Test fixture entity whose identifier column is mapped through the custom
 * Doctrine type CustomUuidIdType (wrapping a CustomUuidId value object).
 */
#[Entity]
class CustomUuidIdEntity
{
    public function __construct(
        #[Id, Column(type: CustomUuidIdType::class)]
        protected CustomUuidId $id,
    ) {
    }
} | php | github | https://github.com/symfony/symfony | src/Symfony/Bridge/Doctrine/Tests/Fixtures/CustomUuidIdEntity.php |
import inspect
import os
import copy
from contextlib import contextmanager
from typing import TypeVar
from django.conf.urls import url
from django.utils.log import DEFAULT_LOGGING
from cratis.settings import CratisApplication
from cratis.settings import CratisConfig
from cratis.settings import FeatureNotLoadedYet
from cratis.utils import Collectable
T = TypeVar('T')
class FeatureDependencyFailure(Exception):
    """Raised when a feature's dependencies can not be satisfied."""
#
# @contextmanager
# def feature(feature_cls: T, calee: Feature, optional=False) -> T:
# """
# :param feature_cls:
# :type feature_cls: str | class
# :param calee:
# :param optional:
# :return:
# """
#
# try:
# _feature = CratisApplication.get(feature_cls)
# _feature.calee = calee
# yield _feature
# _feature.calee = None
# except (FeatureNotLoadedYet, ImportError) as e:
# if not optional:
# raise Exception('by %s' % calee, e)
class BaseFeature(object):
    # Set by use(): reference to the feature that is currently consuming
    # this one through the ``use()`` context manager; cleared on exit.
    calee = None
    """
    When future is used by other features using `with feature()` syntax,
    this variable get filled with reference to calee.
    """
    """
    Basement for all the features.
    """
    # Both populated by set_application() just before configuration starts.
    settings = None # type: CratisConfig
    app = None # type: CratisApplication
    # NOTE(review): _static_requirements is never read in this file --
    # possibly consumed elsewhere, or dead; confirm before removing.
    _static_requirements = ()
    def __init__(self):
        super().__init__()
        # Instance-level requirement list returned by get_required().
        self.__requirements = []
    def set_application(self, app):
        """
        Initialized just before configuration process start
        """
        self.app = app
        self.settings = app.scope
    def get_required(self):
        """
        Returns list of classes of features that must be loaded before this feature
        can be loaded
        """
        return self.__requirements
    def require_if_installed(self):
        # NOTE(review): BaseFeature defines no ``require`` method (the
        # module-level ``require`` is a class decorator), so this call
        # raises AttributeError -- looks like dead or broken code.
        return self.require(optional=True)
    @contextmanager
    def use(self, feature_cls: T, require=True) -> T:
        """
        Yield another loaded feature for use inside a ``with`` block.

        :param feature_cls:
        :type feature_cls: str | class
        :param require: when False, a missing feature is silently skipped
                        (the ``with`` body does not run)
        :return:
        """
        try:
            _feature = self.app.get(feature_cls)
            _feature.calee = self
            yield _feature
            # Reset happens only on the success path; an exception raised
            # inside the consumer's ``with`` body leaves ``calee`` set and,
            # if it is FeatureNotLoadedYet/ImportError, is caught below.
            _feature.calee = None
        except (FeatureNotLoadedYet, ImportError) as e:
            if require:
                raise Exception('by %s' % self, e)
    def get_deps(self):
        """
        Return list of python dependencies that are required by the feature
        :return:
        """
        return ()
    def on_load(self, loaded_features):
        """
        Loads feature. Used by settings class.
        By default, checks requirements and executes configure_settings() method.
        As an argument accepts list of features loaded before current one.
        """
        self.configure_settings()
    def on_after_load(self):
        """
        Called when setting is configured
        """
        pass
    def on_startup(self):
        """
        Last chance to do something after settings are configured, called even later than on_after_load.
        """
        pass
    def configure_settings(self):
        """
        API method.
        Meant to be overridden by subsequent Features.
        Called inside on_load callback.
        DEPRECATED!!!
        """
        self.init()
    def init(self):
        """
        API method.
        Meant to be overridden by subsequent Features.
        Called inside on_load callback.
        DEPRECATED!!!
        """
    def setup(self):
        """
        API method.
        Meant to be overridden by subsequent Features.
        Called inside on_load callback.
        """
        self.init()
    def configure_urls(self, urls):
        """
        API method.
        Meant to be overridden by subsequent Features.
        Called when django imports cratis.url from cratis.urls module.
        As a parameter accepts urlpatterns variable from cratis.urls
        """
def require(*features):
    """Class decorator declaring *features* as load-time requirements.

    Replaces ``get_required`` on the decorated class so that it returns
    the given features instead of the instance-level requirement list.
    """
    def decorate(cls):
        def get_required(self):
            return features
        cls.get_required = get_required
        return cls
    return decorate
class Feature(BaseFeature):
    """
    Feature add some concreate functionality to the BaseFeature class.
    """
    # INSTALLED_APPS = Collectable()
    # MIDDLEWARE_CLASSES = Collectable()
    # TEMPLATE_CONTEXT_PROCESSORS = Collectable()
    # TEMPLATE_DIRS = Collectable()
    # STATICFILES_DIRS = Collectable()
    def get_dir(self):
        # Directory containing the module that defines the concrete
        # feature subclass (not necessarily this file).
        return os.path.dirname(inspect.getfile(self.__class__))
    def set_default(self, name, value):
        # Assign a settings attribute only when it is not already defined.
        if not hasattr(self.settings, name):
            setattr(self.settings, name, value)
    def append_apps(self, apps):
        """
        Use this in configure_settings, to append new INSTALLED_APPS.
        """
        # Accepts a single app label or an iterable; duplicates are
        # skipped so features can be combined freely.
        if isinstance(apps, str):
            apps = (apps,)
        if not hasattr(self.settings, 'INSTALLED_APPS'):
            self.settings.INSTALLED_APPS = ()
        for app in apps:
            if app not in self.settings.INSTALLED_APPS:
                self.settings.INSTALLED_APPS += (app,)
    def append_middleware(self, classes):
        """
        Use this in configure_settings, to append new middleware classes.
        """
        if isinstance(classes, str):
            classes = (classes,)
        if not hasattr(self.settings, 'MIDDLEWARE_CLASSES'):
            self.settings.MIDDLEWARE_CLASSES = ()
        for classname in classes:
            if classname not in self.settings.MIDDLEWARE_CLASSES:
                self.settings.MIDDLEWARE_CLASSES += (classname,)
    def append_template_processor(self, processors):
        """
        Use this in configure_settings, to append new template processors.
        """
        if isinstance(processors, str):
            processors = (processors,)
        if not hasattr(self.settings, 'TEMPLATE_CONTEXT_PROCESSORS'):
            # Start from Django's defaults the first time one is added.
            from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
            self.settings.TEMPLATE_CONTEXT_PROCESSORS = TEMPLATE_CONTEXT_PROCESSORS
        for classname in processors:
            if classname not in self.settings.TEMPLATE_CONTEXT_PROCESSORS:
                self.settings.TEMPLATE_CONTEXT_PROCESSORS += (classname,)
    def append_template_dir(self, dirs):
        # NOTE(review): unlike append_apps/append_middleware this does not
        # de-duplicate entries -- confirm whether that is intentional.
        if isinstance(dirs, str):
            dirs = (dirs,)
        if not hasattr(self.settings, 'TEMPLATE_DIRS'):
            from django.conf.global_settings import TEMPLATE_DIRS
            self.settings.TEMPLATE_DIRS = TEMPLATE_DIRS
        self.settings.TEMPLATE_DIRS += tuple(dirs)
    def append_asset_dir(self, dirs):
        # Same non-deduplicating behaviour as append_template_dir.
        if isinstance(dirs, str):
            dirs = (dirs,)
        if not hasattr(self.settings, 'STATICFILES_DIRS'):
            from django.conf.global_settings import STATICFILES_DIRS
            self.settings.STATICFILES_DIRS = STATICFILES_DIRS
        self.settings.STATICFILES_DIRS += tuple(dirs)
class Common(Feature):
    """
    This feature is used by most of the django-applications
    """
    def __init__(self, sites_framework=True):
        # sites_framework: when True, also enable django.contrib.sites
        # and pin SITE_ID to 1.
        super().__init__()
        self.sites_framework = sites_framework
    def configure_settings(self):
        # Wires up the stock Django contrib apps, middleware stack,
        # static/media paths and default logging.
        s = self.settings
        self.append_apps([
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.sessions',
            'django.contrib.messages',
            'django.contrib.staticfiles',
        ])
        self.append_middleware([
            'django.contrib.sessions.middleware.SessionMiddleware',
            'django.middleware.locale.LocaleMiddleware',
            'django.middleware.common.CommonMiddleware',
            'django.middleware.csrf.CsrfViewMiddleware',
            'django.contrib.auth.middleware.AuthenticationMiddleware',
            'django.contrib.messages.middleware.MessageMiddleware',
            'django.middleware.clickjacking.XFrameOptionsMiddleware',
        ])
        s.STATIC_URL = '/static/'
        s.STATIC_ROOT = s.BASE_DIR + '/var/static'
        s.MEDIA_URL = '/media/'
        s.MEDIA_ROOT = s.BASE_DIR + '/var/media'
        s.USE_TZ = True
        self.append_asset_dir(
            s.BASE_DIR + '/assets',
        )
        self.append_template_dir(
            s.BASE_DIR + '/templates/',
        )
        if self.sites_framework:
            self.append_apps([
                'django.contrib.sites',
            ])
            s.SITE_ID = 1
        # Deep copy so later per-feature tweaks don't mutate Django's
        # module-level DEFAULT_LOGGING dict.
        s.LOGGING = copy.deepcopy(DEFAULT_LOGGING)
    def configure_urls(self, urls):
        # Serves media/static directly with a permissive CORS header --
        # typically a development-only setup (the DEBUG guard below is
        # commented out).
        # if self.settings.DEBUG:
        from django.views.static import serve
        def serve_cors(*args, **kwargs):
            response = serve(*args, **kwargs)
            response['Access-Control-Allow-Origin'] = "*"
            return response
        urls += [
            url(r'^media/(?P<path>.*)$', serve_cors, {'document_root': self.settings.MEDIA_ROOT}),
            url(r'^static/(?P<path>.*)$', serve_cors, {'document_root': self.settings.STATIC_ROOT}),
] | unknown | codeparrot/codeparrot-clean | ||
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <cstring>
#include <optional>
#include <ostream>
#include <string>
#include <string_view>
#include <utility>
namespace c10 {
// TODO: consider storing namespace separately too
// Fully qualified operator name ("ns::op") plus its overload name.
struct OperatorName final {
  std::string name;
  std::string overload_name;
  OperatorName(std::string name, std::string overload_name)
      : name(std::move(name)), overload_name(std::move(overload_name)) {}
  // TODO: These two functions below are slow! Fix internal data structures so
  // I don't have to manually reconstruct the namespaces!
  // Return the namespace of this OperatorName, if it exists. The
  // returned string_view is only live as long as the OperatorName
  // exists and name is not mutated
  std::optional<std::string_view> getNamespace() const {
    auto pos = name.find("::");
    if (pos == std::string::npos) {
      return std::nullopt;
    } else {
      return std::string_view(name.data(), pos);
    }
  }
  // Returns true if we successfully set the namespace
  bool setNamespaceIfNotSet(const char* ns) {
    if (!getNamespace().has_value()) {
      const auto ns_len = strlen(ns);
      const auto old_name_size = name.size();
      // Single-allocation prepend: grow the buffer, shift the old value
      // to the tail, then write "ns::" into the freed prefix.
      name.resize(ns_len + 2 + old_name_size);
      // Shift current value of name to the end of the new space.
      // NOTE(review): this replace() reads from `name` itself and the
      // source range [0, old_name_size) can overlap the destination when
      // the original name is longer than "ns::" -- relies on
      // std::string::replace handling a self-referential source; confirm
      // against the supported standard-library implementations.
      name.replace(
          name.size() - old_name_size, old_name_size, name, 0, old_name_size);
      name.replace(0, ns_len, ns, ns_len);
      name[ns_len] = ':';
      name[ns_len + 1] = ':';
      return true;
    } else {
      return false;
    }
  }
};
// Non-owning view of an OperatorName. Unlike OperatorName, most of
// its functions are constexpr, so it can be used for compile time
// computations
struct OperatorNameView final {
  std::string_view name;
  std::string_view overload_name;
  constexpr OperatorNameView(
      std::string_view name,
      std::string_view overload_name)
      : name(name), overload_name(overload_name) {}
  // Parses strings like "foo.overload" and also "foo"; everything after
  // the first '.' (if any) becomes the overload name.
  constexpr static OperatorNameView parse(std::string_view full_name) {
    const auto dot = full_name.find('.');
    return dot == std::string_view::npos
        ? OperatorNameView(full_name, std::string_view())
        : OperatorNameView(
              full_name.substr(0, dot), full_name.substr(dot + 1));
  }
};
// Two operator names are equal when both the base name and the overload
// name match.
inline bool operator==(const OperatorName& lhs, const OperatorName& rhs) {
  if (lhs.name != rhs.name) {
    return false;
  }
  return lhs.overload_name == rhs.overload_name;
}
// Defined as the negation of operator== so the two always agree.
inline bool operator!=(const OperatorName& lhs, const OperatorName& rhs) {
  return !(lhs == rhs);
}
TORCH_API std::string toString(const OperatorName& opName);
TORCH_API std::ostream& operator<<(std::ostream& /*os*/, const OperatorName& /*opName*/);
} // namespace c10
namespace std {
// Hash support so OperatorName can be used as a key in unordered
// containers.
template <>
struct hash<::c10::OperatorName> {
  size_t operator()(const ::c10::OperatorName& x) const noexcept {
    // Combine the two field hashes with XOR; the bitwise NOT on the
    // overload hash keeps (a, b) and (b, a) from hashing identically.
    return std::hash<std::string>()(x.name) ^
        (~std::hash<std::string>()(x.overload_name));
  }
};
} // namespace std | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/core/operator_name.h |
# Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from datetime import datetime
from google.appengine.ext import testbed
from model.activeworkitems import ActiveWorkItems
class ActiveWorkItemsTest(unittest.TestCase):
    """Exercises ActiveWorkItems against App Engine's in-memory stubs."""
    def setUp(self):
        # Activate isolated datastore/memcache stubs for each test run.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
    def tearDown(self):
        self.testbed.deactivate()
    def test_basic(self):
        items = ActiveWorkItems.lookup_by_queue("test-queue")
        queued_items = [1, 2]
        # db.Model only stores dates to second resolution, so we use an explicit datetime without milliseconds.
        time = datetime(2011, 4, 18, 18, 50, 44)
        # next_item hands out a queued item that is not yet active and
        # records the activation time.
        self.assertEqual(items.next_item(queued_items, time), 1)
        self.assertEqual(items.next_item([1], time), None)
        self.assertEqual(items.next_item([], time), None)
        self.assertEqual(items.time_for_item(1), time)
        self.assertEqual(items.time_for_item(2), None)
        items.expire_item(1)
        # expire_item uses a transaction so it doesn't take effect on the current object.
        self.assertEqual(items.time_for_item(1), time)
        # If we look up the saved object, we see it's been updated.
        items = ActiveWorkItems.lookup_by_queue("test-queue")
self.assertEqual(items.time_for_item(1), None) | unknown | codeparrot/codeparrot-clean | ||
import datetime
from django.db import models
# (stored value, human-readable label) pairs for Dog.breed.
BREED_CHOICES = [
    ('collie', 'Collie'),
    ('labrador', 'Labrador'),
    ('pembroke', 'Pembroke Corgi'),
    ('shetland', 'Shetland Sheepdog'),
    ('border', 'Border Collie'),
]
class Dog(models.Model):
    """A dog profile (legacy Python 2 / old-Django model: __unicode__,
    @models.permalink)."""
    breed = models.CharField(max_length=255, choices=BREED_CHOICES)
    name = models.CharField(max_length=255)
    owner_last_name = models.CharField(max_length=255, blank=True)
    birth_date = models.DateField(default=datetime.date.today)
    bio = models.TextField(blank=True)
    public = models.BooleanField(default=True)
    # NOTE(review): `updated` only gets a creation-time default; nothing
    # here refreshes it on save -- confirm that is handled elsewhere.
    created = models.DateTimeField(default=datetime.datetime.now)
    updated = models.DateTimeField(default=datetime.datetime.now)
    def __unicode__(self):
        return self.full_name()
    @models.permalink
    def get_absolute_url(self):
        # Reverses the 'dog_detail' URL pattern with this dog's id.
        return ('dog_detail', [], {'id': self.id})
    def full_name(self):
        # "Name Ownerlastname" when an owner is recorded, else just name.
        if self.owner_last_name:
            return u"%s %s" % (self.name, self.owner_last_name)
        return self.name
class Toy(models.Model):
    """A toy owned by a single dog; reachable via dog.toys."""
    dog = models.ForeignKey(Dog, related_name='toys')
    name = models.CharField(max_length=60)
    def __unicode__(self):
return u"%s's %s" % (self.dog.name, self.name) | unknown | codeparrot/codeparrot-clean | ||
from yelp.client import Client
from YelpAPIv3 import Client3
from GoogleMapAPI import GoogleMap
from yelp.oauth1_authenticator import Oauth1Authenticator
from math import radians, cos, sin, asin, sqrt
from datetime import datetime
import config
from bs4 import BeautifulSoup
import requests
# Yelp Auth API v2
# OAuth1 credentials for the legacy v2 API come from config.py; the v3
# (Fusion) client only needs a bearer token.
auth = Oauth1Authenticator(
    consumer_key=config.CONSUMER_KEY,
    consumer_secret=config.CONSUMER_SECRET,
    token=config.TOKEN,
    token_secret=config.TOKEN_SECRET
)
yelpClient = Client(auth)
yelpClient3 = Client3(config.YELP_V3_TOKEN)
def yelp_search_v2(searchTerm, location, coordinates=None, limit=None, offset=0):
    """Search businesses via the legacy Yelp v2 API.

    searchTerm  -- query string
    location    -- free-form location; used when no coordinates are given
    coordinates -- optional [latitude, longitude] pair
    limit       -- max results (defaults to 10)
    offset      -- pagination offset

    Returns {'status': 0 or 1, 'businesses': [summary dicts]}.
    """
    if limit is None:
        limit = 10
    params = {
        'term': searchTerm,
        'lang': 'en',
        'limit': limit,
        'offset': offset
        # 'category_filter':''
    }
    returnData = {}
    returnData['businesses'] = []
    returnData['status'] = 0
    try:
        if coordinates is not None:
            response = yelpClient.search_by_coordinates(coordinates[0], coordinates[1], **params)
        elif location != '':
            response = yelpClient.search(location, **params)
    except Exception, e:
        print e
        return returnData
    # v2
    # NOTE(review): if neither branch above ran (coordinates None and
    # location ''), `response` is unbound and this raises NameError.
    if len(response.businesses):
        returnData['status'] = 1
        for biz in response.businesses:
            business = {}
            business['name'] = biz.name
            # NOTE(review): assumes the address has at least one line.
            business['address'] = biz.location.address[0]
            if coordinates is not None:
                business['distance'] = calculate_distance(coordinates, [biz.location.coordinate.latitude, biz.location.coordinate.longitude])
            business['rating'] = str(biz.rating) +u"\u2605 (" + str(biz.review_count) + " reviews)"
            business['url'] = biz.url
            business['image_url'] = biz.image_url
            business['categories'] = ', '.join([b.name for b in biz.categories])
            returnData['businesses'].append(business)
    else:
        returnData['status'] = 0
    return returnData
def yelp_search_v3(searchTerm, location, coordinates=None, limit=None, offset=0):
if limit is None:
limit = 10
params = {
'term': searchTerm,
'lang': 'en',
'limit': limit,
'offset': offset,
'location': location
# 'category_filter':''
}
returnData = {}
returnData['businesses'] = []
returnData['status'] = 0
print coordinates
try:
if coordinates is not None:
params['latitude'] = coordinates[0]
params['longitude'] = coordinates[1]
response = yelpClient3.search_by_coordinates(**params)
# print response
elif location != '':
response = yelpClient3.search(**params)
# print response
except Exception, e:
print e
return returnData
if len(response['businesses']):
returnData['status'] = 1
for biz in response['businesses']:
details = yelpClient3.get_details(biz['id'])
business = {}
business['id'] = biz['id']
business['name'] = biz['name']
business['price'] = biz['price'] if 'prize' in biz else ""
# business['hours'] = details['hours'][0]['open']
if 'hours' in details and len(details['hours']) > 0:
business['is_open_now'] = details['hours'][0]['is_open_now']
if len(details['hours'][0]['open']) > 0:
business['hours_today'] = hours_today(details['hours'][0]['open'])
business['address'] = biz['location']['address1']
if coordinates is not None:
business['distance'] = calculate_distance(coordinates, [biz['coordinates']['latitude'], biz['coordinates']['longitude']])
business['rating'] = str(biz['rating']) +u"\u2605 (" + str(biz['review_count']) + " reviews)"
business['url'] = biz['url']
business['image_url'] = biz['image_url']
business['categories'] = ', '.join([b['title'] for b in biz['categories']])
returnData['businesses'].append(business)
return returnData
def hours_today(hours, api='yelp'):
    """Format today's opening hours as "HH:MM - HH:MM".

    hours -- per-weekday list; Yelp entries look like
             {'start': '0900', 'end': '1730'}, Google entries like
             {'open': {'time': '0900'}, 'close': {'time': '1730'}}
    api   -- 'yelp' (default) or 'google'

    Returns "" when there is no entry for today's weekday.
    """
    todayWkday = datetime.weekday(datetime.now())
    if todayWkday >= len(hours):
        return ""
    if api == 'yelp':
        start = hours[todayWkday]['start']
        end = hours[todayWkday]['end']
    elif api == 'google':
        start = hours[todayWkday]['open']['time']
        # Fixed: the second assignment previously overwrote `start` again,
        # leaving `end` unbound (NameError) for the google branch.
        end = hours[todayWkday]['close']['time']
    else:
        raise ValueError("unknown api: %r" % api)
    return "%s:%s - %s:%s" % (start[:2], start[2:], end[:2], end[2:])
def get_reviews(business_id, limit=3):
    # Return the review list for a business from the Yelp v3 API.
    # NOTE(review): `limit` is accepted but never forwarded to the API
    # call -- confirm whether Client3.get_reviews supports it.
    return yelpClient3.get_reviews(business_id)['reviews']
def calculate_distance(coord1, coord2):
    """Great-circle (haversine) distance in miles between two points.

    coord1, coord2 -- [latitude, longitude] pairs in decimal degrees,
    matching how every caller in this module passes coordinates.

    Returns the distance in miles, rounded to 2 decimals.
    """
    # Fixed: the previous code unpacked the [lat, lng] inputs as
    # lon1, lat1, lon2, lat2, silently swapping the roles of latitude and
    # longitude inside the haversine formula.
    lat1, lon1, lat2, lon2 = map(radians, coord1 + coord2)
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
    c = 2 * asin(sqrt(a))
    km = 6371 * c  # mean Earth radius (km) times the central angle
    mi = km / 1.609344
    return round(mi, 2)
def filtered_search(returnData, filter_list):
    """Filter search results by scraping each business's Yelp page.

    returnData  -- dict from one of the *_search helpers ('businesses' key)
    filter_list -- iterable of filter names; understands 'wifi' and
                   'open_now'

    Returns the subset of businesses that pass the filters.
    """
    businesses = returnData['businesses']
    result = []
    for business in businesses:
        url = business['url']
        r = requests.get(url)
        # Fixed: parse the response markup (r.text), not the Response
        # object itself, and name the stdlib parser explicitly.
        soup = BeautifulSoup(r.text, "html.parser")
        attr_list = soup.select('.short-def-list > dl')
        open_div = soup.select(".open")
        if 'wifi' in filter_list:
            for attr in attr_list:
                if "Wi-Fi" in str(attr) and "Free" in str(attr):
                    if 'open_now' in filter_list:
                        if len(open_div) != 0:
                            result.append(business)
                    else:
                        result.append(business)
                    # Stop after the first match so a business is not
                    # appended once per matching attribute row.
                    break
        # NOTE(review): when 'wifi' is absent nothing is ever appended, so
        # an 'open_now'-only filter always returns [] -- confirm intent.
    return result
# Client for the Google Places web service (see GoogleMapAPI.GoogleMap).
googlePlaceClient = GoogleMap()
# Not using Google Place Search since it doesn't provide a image URL
# but rather an image blob
def google_place_search(searchTerm, location, coordinates=None):
    """Search businesses via Google Places text search.

    Returns {'status': 0 or 1, 'businesses': [...]} like the Yelp helpers.
    """
    query = "%s %s"%(searchTerm, location) if location is not None else searchTerm
    params = {
        'query': query,
    }
    if coordinates is not None:
        params['location'] = ",".join([str(coord) for coord in coordinates])
        params['radius'] = 10000 # meters
    returnData = {}
    returnData['businesses'] = []
    returnData['status'] = 0
    try:
        results = googlePlaceClient.search_place(**params)
    except Exception, e:
        print e
        return returnData
    # NOTE(review): the guard indexes results['businesses'] but the loop
    # below iterates `results` directly -- one of the two must be wrong;
    # verify against GoogleMap.search_place's return shape.
    if len(results['businesses']):
        returnData['status'] = 1
        for biz in results:
            details = googlePlaceClient.get_details(biz['place_id'])
            business = {}
            business['id'] = biz['id']
            business['name'] = biz['name']
            # NOTE(review): 'review_count' is a Yelp field name; Google
            # Places uses 'user_ratings_total' -- likely KeyError here.
            business['rating'] = str(biz['rating']) +u"\u2605 (" + str(biz['review_count']) + " reviews)"
            business['price'] = ""
            if 'opening_hours' in details:
                business['opening_hours'] = details['opening_hours']['open_now']
                if len(details['opening_hours']['periods']) > 0:
                    business['hours_today'] = hours_today(details['opening_hours']['periods'], 'google')
            business['address'] = biz['formatted_address']
            if coordinates is not None:
                business['distance'] = calculate_distance(coordinates, [biz['geometry']['location']['lat'], biz['geometry']['location']['lng']])
            business['url'] = details['url']
            business['image_url'] = ""
            business['categories'] = ""
            returnData['businesses'].append(business)
return returnData | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
import http_lazy_headers as hlh
from . import utils
class ContentLanguageTest(utils.FieldTestCase):
field = hlh.ContentLanguage
def test_raw_values(self):
self.assertFieldRawEqual(
['mi, da', 'en'],
(('mi', (), None, None, (), (), (), None),
('da', (), None, None, (), (), (), None),
('en', (), None, None, (), (), (), None)))
self.assertFieldRawEqual(
['en'],
(('en', (), None, None, (), (), (), None),))
self.assertFieldRawEqual(
['EN'],
(('en', (), None, None, (), (), (), None),))
def test_str(self):
self.assertFieldStrEqual(
(('mi', (), None, None, (), (), (), None),
('da', (), None, None, (), (), (), None),
('en', (), None, None, (), (), (), None)),
'content-language: mi, da, en')
def test_raw_empty(self):
"""
Should NOT allow empty raw value
"""
self.assertRaisesHeaderError([''])
def test_empty(self):
"""
Should NOT allow empty value
"""
self.assertRaisesInternalError(())
def test_raw_bad_values(self):
"""
Should not allow bad raw values
"""
self.assertRawOK(['en-US'])
self.assertRaisesHeaderError([';'])
self.assertRaisesHeaderError([';=;;q=0'])
self.assertRaisesHeaderError(['en-US;'])
def test_bad_values(self):
"""
Should not allow bad values
"""
self.assertOK([
('en', (), None, 'us', (), (), (), None)])
self.assertOK([
(None, (), None, None, (), (), (), 'i-klingon')])
self.assertRaisesInternalError([''])
self.assertRaisesInternalError([None])
self.assertRaisesInternalError([
(None, (), None, None, (), (), (), None)])
self.assertRaisesInternalError([
('', (), None, None, (), (), (), None)])
self.assertRaisesInternalError([
('es', (), None, None, (), (), (), 'i-klingon')]) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/local/bin/python3
import os
import argparse
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
# Convolution method?
# This was slow at best, and crashed on the high-res Moon pic for an
# authentically sized "spider FWHM."
# import scipy.ndimage as spnd
# self.imgSpdr[:,:,channel] = spnd.convolve(imgChan[:,:,channel],spiderPSF,
# mode='nearest')
# This method is much faster, with no crashing for a realistic kernel size
import scipy.signal as spsg
# All data files and generated images live beside this script.
thisCode = os.path.realpath(__file__)
projRoot = os.path.dirname(thisCode)
class arachnivision():
    """spidercam, spidercam, sees the sky like a spider can.

    Loads a night-sky image and renders approximations of what a human
    (foveal naked eye) and a jumping spider would perceive, by convolving
    with Gaussian PSFs sized from each critter's angular resolution and
    (for the spider) tweaking the colour balance.
    """
    def __init__(self):
        self.imgOrig = None     # source image array (set by loadSourceImage)
        self.imgDimX = 0        # image width, pixels
        self.imgDimY = 0        # image height, pixels
        self.numChan = 0        # number of colour channels
        self.numFWHM = 5.       # Size of 2-D kernel in FWHM (+/- numFWHM/2)
        self.peopleAngRes = 0.  # human PSF FWHM, degrees
        self.spiderAngRes = 0.  # spider PSF FWHM, degrees
        self.sourceScale = 0.   # plate scale, degrees/pixel
        self.spiderVisRespFile = \
            "Habronattus_pyrrithrix_Photoreceptor_absorbance.csv"
    def _setupFigure(self, figID):
        """Create a borderless, tickless figure sized to the loaded image."""
        # Is this scaling because of a matplotlib convention or did I just
        # happen to use a 100 DPI image for testing? TBD - todo
        self.figwid = 0.01*self.imgDimX
        self.figrat = self.imgDimY/self.imgDimX
        plt.figure(figID, figsize=(self.figwid, self.figwid*self.figrat))
        plt.subplots_adjust(left=0.000, right=1.000, top=1.000, bottom=0.000)
        plt.axes().set_xticks([])
        plt.axes().set_yticks([])
    def _makeGaussian(self, size, fwhm=3, center=None):
        """Make a square, normalized gaussian kernel.

        - size is the length of a side of the square.
        - fwhm is full-width-half-maximum, which can be thought of as an
          effective radius.
        - center defaults to the middle of the array.
        """
        x = np.arange(0, size, 1, dtype=np.longdouble)
        y = x[:, np.newaxis]
        if center is None:
            x0 = y0 = size // 2
        else:
            x0 = center[0]
            y0 = center[1]
        kernel = np.exp(-4.*np.log(2.) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)
        # Normalize so convolution preserves total flux.
        kernel /= np.sum(kernel)
        return kernel
    def setSourcePlateScale(self, degreesPerPixel):
        """Set value for degrees per pixel in the input/source image"""
        self.sourceScale = degreesPerPixel
    def setPeopleAngularResolution(self, fwhmInDegrees):
        """Set value for FWHM of PSF in degrees, assumed to be Gaussian"""
        self.peopleAngRes = fwhmInDegrees
    def setSpiderAngularResolution(self, fwhmInDegrees):
        """Set value for FWHM of PSF in degrees, assumed to be Gaussian"""
        self.spiderAngRes = fwhmInDegrees
    def loadSpiderData(self):
        """Read the photoreceptor absorbance CSV and plot it to a PNG."""
        csvFile = os.path.join(projRoot, self.spiderVisRespFile)
        respData = np.genfromtxt(csvFile, dtype=float, delimiter=',',
                                 names=True)
        colmName = respData.dtype.names
        print("Read file: %s" % self.spiderVisRespFile)
        print("Extracted columns:")
        for header in colmName:
            print(header)
        plt.figure('spiderVisResp')
        plt.axes().set_title(self.spiderVisRespFile)
        plt.axes().set_xlabel('Wavelength (nm)')
        plt.axes().set_ylabel('Normalized Photoreceptor Absorbance')
        plt.grid(True)
        # Columns: [0] wavelength, [1]-[3] the three photoreceptor curves.
        plt.plot(respData[colmName[0]][:], respData[colmName[1]][:], color='b',
                 label=colmName[1])
        plt.plot(respData[colmName[0]][:], respData[colmName[2]][:], color='g',
                 label=colmName[2])
        plt.plot(respData[colmName[0]][:], respData[colmName[3]][:], color='r',
                 label=colmName[3])
        plt.legend(loc='lower center', fontsize=6)
        plt.savefig(os.path.join(projRoot, "photoreceptor-absorbance.png"))
    def loadSourceImage(self, srcImg):
        """Load source image and set dimensions. Assuming color channels
        are in last dimension at the moment."""
        self.srcImg = srcImg  # File basename, without full path
        self.imgOrig = mpimg.imread(os.path.join(projRoot, srcImg))
        imgDims = np.shape(self.imgOrig)
        self.imgDimX = imgDims[1]
        self.imgDimY = imgDims[0]
        self.numChan = imgDims[2]
        print("Loaded source image: %s" % self.srcImg)
    def sourceToEyeball(self):
        """Convolve the source with people- and spider-eye PSFs.

        Produces self.imgPepl (naked-eye rendering, original colours) and
        self.imgSpdr (spider rendering, tweaked colours).
        """
        imgChan = self.imgOrig.astype('float64')/255.  # 0-255 -> 0.-1.
        self.imgPepl = np.empty_like(imgChan)
        self.imgSpdr = np.empty_like(imgChan)
        # Kernel FWHM in pixels = angular resolution / plate scale; the
        # kernel extends to numFWHM full-widths.
        peopleFWHM = self.peopleAngRes/self.sourceScale
        # int() rather than np.int(): the np.int alias was removed in
        # NumPy 1.24.
        peopleSize = int(self.numFWHM*peopleFWHM)
        peoplePSF = self._makeGaussian(peopleSize, fwhm=peopleFWHM)
        spiderFWHM = self.spiderAngRes/self.sourceScale
        spiderSize = int(self.numFWHM*spiderFWHM)
        spiderPSF = self._makeGaussian(spiderSize, fwhm=spiderFWHM)
        # People version: original colour balance.
        for channel in range(self.numChan):
            self.imgPepl[:, :, channel] = spsg.fftconvolve(
                imgChan[:, :, channel], peoplePSF, mode='same')
        # Spider version: ad-hoc colour tweak (an utter SWAG right now);
        # should eventually be derived from the absorbance curves.
        imgChan[:, :, 0] *= 0.85  # Red
        imgChan[:, :, 1] *= 1.00  # Green
        imgChan[:, :, 2] *= 0.85  # Blue
        for channel in range(self.numChan):
            self.imgSpdr[:, :, channel] = spsg.fftconvolve(
                imgChan[:, :, channel], spiderPSF, mode='same')
    def saveSourceImage(self):
        """Write the unaltered image back out (round-trip sanity check)."""
        self._setupFigure('source')
        # Fixed: was plt.imshow(jumper.imgOrig), reaching for the script's
        # global instance instead of self.
        plt.imshow(self.imgOrig)
        print("Saving unaltered version.")
        plt.savefig(os.path.join(projRoot, "source-"+self.srcImg))
    def savePeopleImage(self):
        """Write the naked-eye (people) rendering."""
        self._setupFigure('people')
        plt.imshow(self.imgPepl)  # fixed: was jumper.imgPepl
        print("Saving people/naked eye version.")
        plt.savefig(os.path.join(projRoot, "people-"+self.srcImg))
    def saveSpiderImage(self):
        """Write the spider-eye rendering."""
        self._setupFigure('spider')
        plt.imshow(self.imgSpdr)  # fixed: was jumper.imgSpdr
        print("Saving spider-eyes-ed version.")
        plt.savefig(os.path.join(projRoot, "spider-"+self.srcImg))
if __name__ == "__main__":
    # Command-line driver: parse options, then run the full pipeline
    # (load absorbance curves, load image, convolve, save renderings).
    # Use argparse to... parse args
    parser = argparse.ArgumentParser(description="Simulate what a jumping "
                                     "spider might see when they look at an "
                                     "object in the night sky.")
    parser.add_argument("-i","--image",required=False,
                        default="20141008tleBaldridge001.jpg",
                        help="Source image")
    # default="beletskYairglow_pano.jpg",
    # 2250 pixels for moon diameter of ~0.5 degrees.
    parser.add_argument("-d","--plate-scale",required=False,type=float,
                        default=2.222e-04,help="Plate scale of source image - "
                        "For default image is 2.222e-04 degrees/pixel")
    parser.add_argument("-p","--people-resolution",required=False,type=float,
                        default=0.007,help="Resolution to use for human eye - "
                        "default is foveal resolution of 0.007 degrees")
    parser.add_argument("-s","--spider-resolution",required=False,type=float,
                        default=0.070,help="Resolution to use for spider eye - "
                        "default is resolution of 0.07 degrees")
    # Process arguments - no screening for valid inputs done here beyond what
    # argparse does internally right now.
    args = parser.parse_args()
    srcImg = args.image # relative path from directory containing this file
    # Create instance of class to load and manipulate image.
    jumper = arachnivision()
    # Set plate scale (degrees/pixel) of the source image.
    jumper.setSourcePlateScale(args.plate_scale)
    # Set the visual angular resolution of the two critters in question -
    # "People" and "Spider" ony at the moment. Perhaps make more general later?
    jumper.setPeopleAngularResolution(args.people_resolution)
    jumper.setSpiderAngularResolution(args.spider_resolution)
    # Load spider photoreceptor absorbance curves
    jumper.loadSpiderData()
    # Load source image
    jumper.loadSourceImage(srcImg)
    # Save copy of original with "source" stuck at front of name - have we done
    # any violence to it unintentionally in loading and saving? Sanity check...
    jumper.saveSourceImage()
    # Modify it to something resembling what spider would see?
    jumper.sourceToEyeball()
    # Save convolved version of original with "people" stuck at front of name.
    # This is identical to the original in terms of color balance, but uses a
    # people-vision-specific angular resolution.
    jumper.savePeopleImage()
    # Save "spider-eyes-ed" version with "spider" stuck at front of name. This
    # is different from the original in terms of both color balance and the fact
    # that it uses a spider-vision-specific angular resolution.
    jumper.saveSpiderImage()
# Miscellaneous discussion notes and source Tweet references:
#
# Jumping spider vision angular resolution quoted to be ~0.07 degrees. Wikipedia
# quotes value for typical human eye to be 0.02 degrees, so only about 3.5 times
# better! But *foveal* resolution is closer to 0.007º or 10 times better.
# Perhaps Wikipedia value is for rods rather than cones? The foveal area of the
# retina sees a swath ~2º wide, located at center of retina.
# "So did some back of envelope calcs. The jumping spider in our avatar
# (Habronattus pyrrithrix) can def see the moon, maybe even craters…"
# https://twitter.com/MorehouseLab/status/872081983819612161
# "Moon diameter is 9.22x10^-3 radians, or ~0.53 deg visual angle. H.
# pyrrithrix can resolve objects up to 0.07 deg visual angle."
# https://twitter.com/MorehouseLab/status/872082579217887232 | unknown | codeparrot/codeparrot-clean | ||
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import Ice, Test, math, sys, threading
def test(b):
if not b:
raise RuntimeError('test assertion failed')
class CallbackBase:
    """Simple one-shot latch: a waiter blocks in check() until another
    thread signals completion through called(), after which the latch
    automatically re-arms for the next use."""

    def __init__(self):
        self._cond = threading.Condition()
        self._called = False

    def check(self):
        """Block until called() fires, then reset the flag for reuse."""
        with self._cond:
            while not self._called:
                self._cond.wait()
            self._called = False

    def called(self):
        """Mark completion and wake any thread blocked in check()."""
        with self._cond:
            self._called = True
            self._cond.notify()
class Callback(CallbackBase):
    '''Done-callbacks for asynchronous (Future-based) invocations.

    Each opX method is attached via Future.add_done_callback, validates the
    operation's result, and then signals completion with self.called() so
    the driver can wait on self.check(). Any unexpected outcome is reported
    through test(False).
    '''

    def __init__(self, communicator=None):
        CallbackBase.__init__(self)
        self._communicator = communicator

    def opByte(self, f):
        try:
            (r, b) = f.result()
            test(b == 0xf0)
            test(r == 0xff)
            self.called()
        except:
            test(False)

    def opBool(self, f):
        try:
            (r, b) = f.result()
            test(b)
            test(not r)
            self.called()
        except:
            test(False)

    def opShortIntLong(self, f):
        try:
            (r, s, i, l) = f.result()
            test(s == 10)
            test(i == 11)
            test(l == 12)
            test(r == 12)
            self.called()
        except:
            test(False)

    def opFloatDouble(self, fut):
        try:
            (r, f, d) = fut.result()
            test(f - 3.14 < 0.001)
            test(d == 1.1E10)
            test(r == 1.1E10)
            self.called()
        except:
            test(False)

    def opString(self, f):
        try:
            (r, s) = f.result()
            test(s == "world hello")
            test(r == "hello world")
            self.called()
        except:
            test(False)

    def opMyEnum(self, f):
        try:
            (r, e) = f.result()
            test(e == Test.MyEnum.enum2)
            test(r == Test.MyEnum.enum3)
            self.called()
        except:
            test(False)

    def opMyClass(self, f):
        try:
            (r, c1, c2) = f.result()
            test(c1.ice_getIdentity() == Ice.stringToIdentity("test"))
            test(c2.ice_getIdentity() == Ice.stringToIdentity("noSuchIdentity"))
            test(r.ice_getIdentity() == Ice.stringToIdentity("test"))
            # We can't do the callbacks below in serialize mode.
            # NOTE: property name fixed to "Ice.ThreadPool.Client.Serialize"
            # (the actual Ice property, matching opStruct below); the previous
            # "Ice.Client.ThreadPool.Serialize" never exists and always read 0.
            if self._communicator.getProperties().getPropertyAsInt("Ice.ThreadPool.Client.Serialize") == 0:
                r.opVoid()
                c1.opVoid()
                try:
                    c2.opVoid()
                    test(False)
                except Ice.ObjectNotExistException:
                    pass
            self.called()
        except:
            test(False)

    def opStruct(self, f):
        try:
            (rso, so) = f.result()
            test(rso.p == None)
            test(rso.e == Test.MyEnum.enum2)
            test(rso.s.s == "def")
            test(so.e == Test.MyEnum.enum3)
            test(so.s.s == "a new string")
            # We can't do the callbacks below in serialize mode.
            if self._communicator.getProperties().getPropertyAsInt("Ice.ThreadPool.Client.Serialize") == 0:
                so.p.opVoid()
            self.called()
        except:
            test(False)

    def opByteS(self, f):
        try:
            (rso, bso) = f.result()
            test(len(bso) == 4)
            test(len(rso) == 8)
            if sys.version_info[0] == 2:
                test(bso[0] == '\x22')
                test(bso[1] == '\x12')
                test(bso[2] == '\x11')
                test(bso[3] == '\x01')
                test(rso[0] == '\x01')
                test(rso[1] == '\x11')
                test(rso[2] == '\x12')
                test(rso[3] == '\x22')
                test(rso[4] == '\xf1')
                test(rso[5] == '\xf2')
                test(rso[6] == '\xf3')
                test(rso[7] == '\xf4')
            else:
                test(bso[0] == 0x22)
                test(bso[1] == 0x12)
                test(bso[2] == 0x11)
                test(bso[3] == 0x01)
                test(rso[0] == 0x01)
                test(rso[1] == 0x11)
                test(rso[2] == 0x12)
                test(rso[3] == 0x22)
                test(rso[4] == 0xf1)
                test(rso[5] == 0xf2)
                test(rso[6] == 0xf3)
                test(rso[7] == 0xf4)
            self.called()
        except:
            test(False)

    def opBoolS(self, f):
        try:
            (rso, bso) = f.result()
            test(len(bso) == 4)
            test(bso[0])
            test(bso[1])
            test(not bso[2])
            test(not bso[3])
            test(len(rso) == 3)
            test(not rso[0])
            test(rso[1])
            test(rso[2])
            self.called()
        except:
            test(False)

    def opShortIntLongS(self, f):
        try:
            (rso, sso, iso, lso) = f.result()
            test(len(sso) == 3)
            test(sso[0] == 1)
            test(sso[1] == 2)
            test(sso[2] == 3)
            test(len(iso) == 4)
            test(iso[0] == 8)
            test(iso[1] == 7)
            test(iso[2] == 6)
            test(iso[3] == 5)
            test(len(lso) == 6)
            test(lso[0] == 10)
            test(lso[1] == 30)
            test(lso[2] == 20)
            test(lso[3] == 10)
            test(lso[4] == 30)
            test(lso[5] == 20)
            test(len(rso) == 3)
            test(rso[0] == 10)
            test(rso[1] == 30)
            test(rso[2] == 20)
            self.called()
        except:
            test(False)

    def opFloatDoubleS(self, f):
        try:
            (rso, fso, dso) = f.result()
            test(len(fso) == 2)
            test(fso[0] - 3.14 < 0.001)
            test(fso[1] - 1.11 < 0.001)
            test(len(dso) == 3)
            test(dso[0] == 1.3E10)
            test(dso[1] == 1.2E10)
            test(dso[2] == 1.1E10)
            test(len(rso) == 5)
            test(rso[0] == 1.1E10)
            test(rso[1] == 1.2E10)
            test(rso[2] == 1.3E10)
            test(rso[3] - 3.14 < 0.001)
            test(rso[4] - 1.11 < 0.001)
            self.called()
        except:
            test(False)

    def opStringS(self, f):
        try:
            (rso, sso) = f.result()
            test(len(sso) == 4)
            test(sso[0] == "abc")
            test(sso[1] == "de")
            test(sso[2] == "fghi")
            test(sso[3] == "xyz")
            test(len(rso) == 3)
            test(rso[0] == "fghi")
            test(rso[1] == "de")
            test(rso[2] == "abc")
            self.called()
        except:
            test(False)

    def opByteSS(self, f):
        try:
            (rso, bso) = f.result()
            test(len(bso) == 2)
            test(len(bso[0]) == 1)
            test(len(bso[1]) == 3)
            test(len(rso) == 4)
            test(len(rso[0]) == 3)
            test(len(rso[1]) == 1)
            test(len(rso[2]) == 1)
            test(len(rso[3]) == 2)
            if sys.version_info[0] == 2:
                test(bso[0][0] == '\xff')
                test(bso[1][0] == '\x01')
                test(bso[1][1] == '\x11')
                test(bso[1][2] == '\x12')
                test(rso[0][0] == '\x01')
                test(rso[0][1] == '\x11')
                test(rso[0][2] == '\x12')
                test(rso[1][0] == '\xff')
                test(rso[2][0] == '\x0e')
                test(rso[3][0] == '\xf2')
                test(rso[3][1] == '\xf1')
            else:
                test(bso[0][0] == 0xff)
                test(bso[1][0] == 0x01)
                test(bso[1][1] == 0x11)
                test(bso[1][2] == 0x12)
                test(rso[0][0] == 0x01)
                test(rso[0][1] == 0x11)
                test(rso[0][2] == 0x12)
                test(rso[1][0] == 0xff)
                test(rso[2][0] == 0x0e)
                test(rso[3][0] == 0xf2)
                test(rso[3][1] == 0xf1)
            self.called()
        except:
            test(False)

    def opBoolSS(self, f):
        try:
            (rso, bso) = f.result()
            test(len(bso) == 4)
            test(len(bso[0]) == 1)
            test(bso[0][0])
            test(len(bso[1]) == 1)
            test(not bso[1][0])
            test(len(bso[2]) == 2)
            test(bso[2][0])
            test(bso[2][1])
            test(len(bso[3]) == 3)
            test(not bso[3][0])
            test(not bso[3][1])
            test(bso[3][2])
            test(len(rso) == 3)
            test(len(rso[0]) == 2)
            test(rso[0][0])
            test(rso[0][1])
            test(len(rso[1]) == 1)
            test(not rso[1][0])
            test(len(rso[2]) == 1)
            test(rso[2][0])
            self.called()
        except:
            test(False)

    def opShortIntLongSS(self, f):
        try:
            (rso, sso, iso, lso) = f.result()
            test(len(rso) == 1)
            test(len(rso[0]) == 2)
            test(rso[0][0] == 496)
            test(rso[0][1] == 1729)
            test(len(sso) == 3)
            test(len(sso[0]) == 3)
            test(sso[0][0] == 1)
            test(sso[0][1] == 2)
            test(sso[0][2] == 5)
            test(len(sso[1]) == 1)
            test(sso[1][0] == 13)
            test(len(sso[2]) == 0)
            test(len(iso) == 2)
            test(len(iso[0]) == 1)
            test(iso[0][0] == 42)
            test(len(iso[1]) == 2)
            test(iso[1][0] == 24)
            test(iso[1][1] == 98)
            test(len(lso) == 2)
            test(len(lso[0]) == 2)
            test(lso[0][0] == 496)
            test(lso[0][1] == 1729)
            test(len(lso[1]) == 2)
            test(lso[1][0] == 496)
            test(lso[1][1] == 1729)
            self.called()
        except:
            test(False)

    def opFloatDoubleSS(self, f):
        try:
            (rso, fso, dso) = f.result()
            test(len(fso) == 3)
            test(len(fso[0]) == 1)
            test(fso[0][0] - 3.14 < 0.001)
            test(len(fso[1]) == 1)
            test(fso[1][0] - 1.11 < 0.001)
            test(len(fso[2]) == 0)
            test(len(dso) == 1)
            test(len(dso[0]) == 3)
            test(dso[0][0] == 1.1E10)
            test(dso[0][1] == 1.2E10)
            test(dso[0][2] == 1.3E10)
            test(len(rso) == 2)
            test(len(rso[0]) == 3)
            test(rso[0][0] == 1.1E10)
            test(rso[0][1] == 1.2E10)
            test(rso[0][2] == 1.3E10)
            test(len(rso[1]) == 3)
            test(rso[1][0] == 1.1E10)
            test(rso[1][1] == 1.2E10)
            test(rso[1][2] == 1.3E10)
            self.called()
        except:
            test(False)

    def opStringSS(self, f):
        try:
            (rso, sso) = f.result()
            test(len(sso) == 5)
            test(len(sso[0]) == 1)
            test(sso[0][0] == "abc")
            test(len(sso[1]) == 2)
            test(sso[1][0] == "de")
            test(sso[1][1] == "fghi")
            test(len(sso[2]) == 0)
            test(len(sso[3]) == 0)
            test(len(sso[4]) == 1)
            test(sso[4][0] == "xyz")
            test(len(rso) == 3)
            test(len(rso[0]) == 1)
            test(rso[0][0] == "xyz")
            test(len(rso[1]) == 0)
            test(len(rso[2]) == 0)
            self.called()
        except:
            test(False)

    def opByteBoolD(self, f):
        try:
            (ro, do) = f.result()
            di1 = {10: True, 100: False}
            test(do == di1)
            test(len(ro) == 4)
            test(ro[10])
            test(not ro[11])
            test(not ro[100])
            test(ro[101])
            self.called()
        except:
            test(False)

    def opShortIntD(self, f):
        try:
            (ro, do) = f.result()
            di1 = {110: -1, 1100: 123123}
            test(do == di1)
            test(len(ro) == 4)
            test(ro[110] == -1)
            test(ro[111] == -100)
            test(ro[1100] == 123123)
            test(ro[1101] == 0)
            self.called()
        except:
            test(False)

    def opLongFloatD(self, f):
        try:
            (ro, do) = f.result()
            di1 = {999999110: -1.1, 999999111: 123123.2}
            for k in do:
                test(math.fabs(do[k] - di1[k]) < 0.01)
            test(len(ro) == 4)
            test(ro[999999110] - -1.1 < 0.01)
            test(ro[999999120] - -100.4 < 0.01)
            test(ro[999999111] - 123123.2 < 0.01)
            test(ro[999999130] - 0.5 < 0.01)
            self.called()
        except:
            test(False)

    def opStringStringD(self, f):
        try:
            (ro, do) = f.result()
            di1 = {'foo': 'abc -1.1', 'bar': 'abc 123123.2'}
            test(do == di1)
            test(len(ro) == 4)
            test(ro["foo"] == "abc -1.1")
            test(ro["FOO"] == "abc -100.4")
            test(ro["bar"] == "abc 123123.2")
            test(ro["BAR"] == "abc 0.5")
            self.called()
        except:
            test(False)

    def opStringMyEnumD(self, f):
        try:
            (ro, do) = f.result()
            di1 = {'abc': Test.MyEnum.enum1, '': Test.MyEnum.enum2}
            test(do == di1)
            test(len(ro) == 4)
            test(ro["abc"] == Test.MyEnum.enum1)
            test(ro["qwerty"] == Test.MyEnum.enum3)
            test(ro[""] == Test.MyEnum.enum2)
            test(ro["Hello!!"] == Test.MyEnum.enum2)
            self.called()
        except:
            test(False)

    def opMyEnumStringD(self, f):
        try:
            (ro, do) = f.result()
            di1 = {Test.MyEnum.enum1: 'abc'}
            test(do == di1)
            test(len(ro) == 3)
            test(ro[Test.MyEnum.enum1] == "abc")
            test(ro[Test.MyEnum.enum2] == "Hello!!")
            test(ro[Test.MyEnum.enum3] == "qwerty")
            self.called()
        except:
            test(False)

    def opMyStructMyEnumD(self, f):
        try:
            (ro, do) = f.result()
            s11 = Test.MyStruct()
            s11.i = 1
            s11.j = 1
            s12 = Test.MyStruct()
            s12.i = 1
            s12.j = 2
            s22 = Test.MyStruct()
            s22.i = 2
            s22.j = 2
            s23 = Test.MyStruct()
            s23.i = 2
            s23.j = 3
            di1 = {s11: Test.MyEnum.enum1, s12: Test.MyEnum.enum2}
            test(do == di1)
            test(len(ro) == 4)
            test(ro[s11] == Test.MyEnum.enum1)
            test(ro[s12] == Test.MyEnum.enum2)
            test(ro[s22] == Test.MyEnum.enum3)
            test(ro[s23] == Test.MyEnum.enum2)
            self.called()
        except:
            test(False)

    def opByteBoolDS(self, f):
        try:
            (ro, do) = f.result()
            test(len(ro) == 2)
            test(len(ro[0]) == 3)
            test(ro[0][10])
            test(not ro[0][11])
            test(ro[0][101])
            test(len(ro[1]) == 2)
            test(ro[1][10])
            test(not ro[1][100])
            test(len(do) == 3)
            test(len(do[0]) == 2)
            test(not do[0][100])
            test(not do[0][101])
            test(len(do[1]) == 2)
            test(do[1][10])
            test(not do[1][100])
            test(len(do[2]) == 3)
            test(do[2][10])
            test(not do[2][11])
            test(do[2][101])
            self.called()
        except:
            test(False)

    def opShortIntDS(self, f):
        try:
            (ro, do) = f.result()
            test(len(ro) == 2)
            test(len(ro[0]) == 3)
            test(ro[0][110] == -1)
            test(ro[0][111] == -100)
            test(ro[0][1101] == 0)
            test(len(ro[1]) == 2)
            test(ro[1][110] == -1)
            test(ro[1][1100] == 123123)
            test(len(do) == 3)
            test(len(do[0]) == 1)
            test(do[0][100] == -1001)
            test(len(do[1]) == 2)
            test(do[1][110] == -1)
            test(do[1][1100] == 123123)
            test(len(do[2]) == 3)
            test(do[2][110] == -1)
            test(do[2][111] == -100)
            test(do[2][1101] == 0)
            self.called()
        except:
            test(False)

    def opLongFloatDS(self, f):
        try:
            (ro, do) = f.result()
            test(len(ro) == 2)
            test(len(ro[0]) == 3)
            test(ro[0][999999110] - -1.1 < 0.01)
            test(ro[0][999999120] - -100.4 < 0.01)
            test(ro[0][999999130] - 0.5 < 0.01)
            test(len(ro[1]) == 2)
            test(ro[1][999999110] - -1.1 < 0.01)
            test(ro[1][999999111] - 123123.2 < 0.01)
            test(len(do) == 3)
            test(len(do[0]) == 1)
            test(do[0][999999140] - 3.14 < 0.01)
            test(len(do[1]) == 2)
            test(do[1][999999110] - -1.1 < 0.01)
            test(do[1][999999111] - 123123.2 < 0.01)
            test(len(do[2]) == 3)
            test(do[2][999999110] - -1.1 < 0.01)
            test(do[2][999999120] - -100.4 < 0.01)
            test(do[2][999999130] - 0.5 < 0.01)
            self.called()
        except:
            test(False)

    def opStringStringDS(self, f):
        try:
            (ro, do) = f.result()
            test(len(ro) == 2)
            test(len(ro[0]) == 3)
            test(ro[0]["foo"] == "abc -1.1")
            test(ro[0]["FOO"] == "abc -100.4")
            test(ro[0]["BAR"] == "abc 0.5")
            test(len(ro[1]) == 2)
            test(ro[1]["foo"] == "abc -1.1")
            test(ro[1]["bar"] == "abc 123123.2")
            test(len(do) == 3)
            test(len(do[0]) == 1)
            test(do[0]["f00"] == "ABC -3.14")
            test(len(do[1]) == 2)
            test(do[1]["foo"] == "abc -1.1")
            test(do[1]["bar"] == "abc 123123.2")
            test(len(do[2]) == 3)
            test(do[2]["foo"] == "abc -1.1")
            test(do[2]["FOO"] == "abc -100.4")
            test(do[2]["BAR"] == "abc 0.5")
            self.called()
        except:
            test(False)

    def opStringMyEnumDS(self, f):
        try:
            (ro, do) = f.result()
            test(len(ro) == 2)
            test(len(ro[0]) == 3)
            test(ro[0]["abc"] == Test.MyEnum.enum1)
            test(ro[0]["qwerty"] == Test.MyEnum.enum3)
            test(ro[0]["Hello!!"] == Test.MyEnum.enum2)
            test(len(ro[1]) == 2)
            test(ro[1]["abc"] == Test.MyEnum.enum1)
            test(ro[1][""] == Test.MyEnum.enum2)
            test(len(do) == 3)
            test(len(do[0]) == 1)
            test(do[0]["Goodbye"] == Test.MyEnum.enum1)
            test(len(do[1]) == 2)
            test(do[1]["abc"] == Test.MyEnum.enum1)
            test(do[1][""] == Test.MyEnum.enum2)
            test(len(do[2]) == 3)
            test(do[2]["abc"] == Test.MyEnum.enum1)
            test(do[2]["qwerty"] == Test.MyEnum.enum3)
            test(do[2]["Hello!!"] == Test.MyEnum.enum2)
            self.called()
        except:
            test(False)

    def opMyEnumStringDS(self, f):
        try:
            (ro, do) = f.result()
            test(len(ro) == 2)
            test(len(ro[0]) == 2)
            test(ro[0][Test.MyEnum.enum2] == "Hello!!")
            test(ro[0][Test.MyEnum.enum3] == "qwerty")
            test(len(ro[1]) == 1)
            test(ro[1][Test.MyEnum.enum1] == "abc")
            test(len(do) == 3)
            test(len(do[0]) == 1)
            test(do[0][Test.MyEnum.enum1] == "Goodbye")
            test(len(do[1]) == 1)
            test(do[1][Test.MyEnum.enum1] == "abc")
            test(len(do[2]) == 2)
            test(do[2][Test.MyEnum.enum2] == "Hello!!")
            test(do[2][Test.MyEnum.enum3] == "qwerty")
            self.called()
        except:
            test(False)

    def opMyStructMyEnumDS(self, f):
        try:
            (ro, do) = f.result()
            s11 = Test.MyStruct(1, 1)
            s12 = Test.MyStruct(1, 2)
            s22 = Test.MyStruct(2, 2)
            s23 = Test.MyStruct(2, 3)
            test(len(ro) == 2)
            test(len(ro[0]) == 3)
            test(ro[0][s11] == Test.MyEnum.enum1)
            test(ro[0][s22] == Test.MyEnum.enum3)
            test(ro[0][s23] == Test.MyEnum.enum2)
            test(len(ro[1]) == 2)
            test(ro[1][s11] == Test.MyEnum.enum1)
            test(ro[1][s12] == Test.MyEnum.enum2)
            test(len(do) == 3)
            test(len(do[0]) == 1)
            test(do[0][s23] == Test.MyEnum.enum3)
            test(len(do[1]) == 2)
            test(do[1][s11] == Test.MyEnum.enum1)
            test(do[1][s12] == Test.MyEnum.enum2)
            test(len(do[2]) == 3)
            test(do[2][s11] == Test.MyEnum.enum1)
            test(do[2][s22] == Test.MyEnum.enum3)
            test(do[2][s23] == Test.MyEnum.enum2)
            self.called()
        except:
            test(False)

    def opByteByteSD(self, f):
        try:
            (ro, do) = f.result()
            if sys.version_info[0] == 2:
                test(len(do) == 1)
                test(len(do[0xf1]) == 2)
                test(do[0xf1][0] == '\xf2')
                test(do[0xf1][1] == '\xf3')
                test(len(ro) == 3)
                test(len(ro[0x01]) == 2)
                test(ro[0x01][0] == '\x01')
                test(ro[0x01][1] == '\x11')
                test(len(ro[0x22]) == 1)
                test(ro[0x22][0] == '\x12')
                test(len(ro[0xf1]) == 2)
                test(ro[0xf1][0] == '\xf2')
                test(ro[0xf1][1] == '\xf3')
            else:
                test(len(do) == 1)
                test(len(do[0xf1]) == 2)
                test(do[0xf1][0] == 0xf2)
                test(do[0xf1][1] == 0xf3)
                test(len(ro) == 3)
                test(len(ro[0x01]) == 2)
                test(ro[0x01][0] == 0x01)
                test(ro[0x01][1] == 0x11)
                test(len(ro[0x22]) == 1)
                test(ro[0x22][0] == 0x12)
                test(len(ro[0xf1]) == 2)
                test(ro[0xf1][0] == 0xf2)
                test(ro[0xf1][1] == 0xf3)
            self.called()
        except:
            test(False)

    def opBoolBoolSD(self, f):
        try:
            (ro, do) = f.result()
            test(len(do) == 1)
            test(len(do[False]) == 2)
            test(do[False][0])
            test(not do[False][1])
            test(len(ro) == 2)
            test(len(ro[False]) == 2)
            test(ro[False][0])
            test(not ro[False][1])
            test(len(ro[True]) == 3)
            test(not ro[True][0])
            test(ro[True][1])
            test(ro[True][2])
            self.called()
        except:
            test(False)

    def opShortShortSD(self, f):
        try:
            (ro, do) = f.result()
            test(len(do) == 1)
            test(len(do[4]) == 2)
            test(do[4][0] == 6)
            test(do[4][1] == 7)
            test(len(ro) == 3)
            test(len(ro[1]) == 3)
            test(ro[1][0] == 1)
            test(ro[1][1] == 2)
            test(ro[1][2] == 3)
            test(len(ro[2]) == 2)
            test(ro[2][0] == 4)
            test(ro[2][1] == 5)
            test(len(ro[4]) == 2)
            test(ro[4][0] == 6)
            test(ro[4][1] == 7)
            self.called()
        except:
            test(False)

    def opIntIntSD(self, f):
        try:
            (ro, do) = f.result()
            test(len(do) == 1)
            test(len(do[400]) == 2)
            test(do[400][0] == 600)
            test(do[400][1] == 700)
            test(len(ro) == 3)
            test(len(ro[100]) == 3)
            test(ro[100][0] == 100)
            test(ro[100][1] == 200)
            test(ro[100][2] == 300)
            test(len(ro[200]) == 2)
            test(ro[200][0] == 400)
            test(ro[200][1] == 500)
            test(len(ro[400]) == 2)
            test(ro[400][0] == 600)
            test(ro[400][1] == 700)
            self.called()
        except:
            test(False)

    def opLongLongSD(self, f):
        try:
            (ro, do) = f.result()
            test(len(do) == 1)
            test(len(do[999999992]) == 2)
            test(do[999999992][0] == 999999110)
            test(do[999999992][1] == 999999120)
            test(len(ro) == 3)
            test(len(ro[999999990]) == 3)
            test(ro[999999990][0] == 999999110)
            test(ro[999999990][1] == 999999111)
            test(ro[999999990][2] == 999999110)
            test(len(ro[999999991]) == 2)
            test(ro[999999991][0] == 999999120)
            test(ro[999999991][1] == 999999130)
            test(len(ro[999999992]) == 2)
            test(ro[999999992][0] == 999999110)
            test(ro[999999992][1] == 999999120)
            self.called()
        except:
            test(False)

    def opStringFloatSD(self, f):
        try:
            (ro, do) = f.result()
            test(len(do) == 1)
            test(len(do["aBc"]) == 2)
            test(do["aBc"][0] - -3.14 < 0.10)
            test(do["aBc"][1] - 3.14 < 0.10)
            test(len(ro) == 3)
            test(len(ro["abc"]) == 3)
            test(ro["abc"][0] - -1.1 < 0.10)
            test(ro["abc"][1] - 123123.2 < 0.10)
            test(ro["abc"][2] - 100.0 < 0.10)
            test(len(ro["ABC"]) == 2)
            test(ro["ABC"][0] - 42.24 < 0.10)
            test(ro["ABC"][1] - -1.61 < 0.10)
            test(len(ro["aBc"]) == 2)
            test(ro["aBc"][0] - -3.14 < 0.10)
            test(ro["aBc"][1] - 3.14 < 0.10)
            self.called()
        except:
            test(False)

    def opStringDoubleSD(self, f):
        try:
            (ro, do) = f.result()
            test(len(do) == 1)
            test(len(do[""]) == 2)
            test(do[""][0] == 1.6E10)
            test(do[""][1] == 1.7E10)
            test(len(ro) == 3)
            test(len(ro["Hello!!"]) == 3)
            test(ro["Hello!!"][0] == 1.1E10)
            test(ro["Hello!!"][1] == 1.2E10)
            test(ro["Hello!!"][2] == 1.3E10)
            test(len(ro["Goodbye"]) == 2)
            test(ro["Goodbye"][0] == 1.4E10)
            test(ro["Goodbye"][1] == 1.5E10)
            test(len(ro[""]) == 2)
            test(ro[""][0] == 1.6E10)
            test(ro[""][1] == 1.7E10)
            self.called()
        except:
            test(False)

    def opStringStringSD(self, f):
        try:
            (ro, do) = f.result()
            test(len(do) == 1)
            test(len(do["ghi"]) == 2)
            test(do["ghi"][0] == "and")
            test(do["ghi"][1] == "xor")
            test(len(ro) == 3)
            test(len(ro["abc"]) == 3)
            test(ro["abc"][0] == "abc")
            test(ro["abc"][1] == "de")
            test(ro["abc"][2] == "fghi")
            test(len(ro["def"]) == 2)
            test(ro["def"][0] == "xyz")
            test(ro["def"][1] == "or")
            test(len(ro["ghi"]) == 2)
            test(ro["ghi"][0] == "and")
            test(ro["ghi"][1] == "xor")
            self.called()
        except:
            test(False)

    def opMyEnumMyEnumSD(self, f):
        try:
            (ro, do) = f.result()
            test(len(do) == 1)
            test(len(do[Test.MyEnum.enum1]) == 2)
            test(do[Test.MyEnum.enum1][0] == Test.MyEnum.enum3)
            test(do[Test.MyEnum.enum1][1] == Test.MyEnum.enum3)
            test(len(ro) == 3)
            test(len(ro[Test.MyEnum.enum3]) == 3)
            test(ro[Test.MyEnum.enum3][0] == Test.MyEnum.enum1)
            test(ro[Test.MyEnum.enum3][1] == Test.MyEnum.enum1)
            test(ro[Test.MyEnum.enum3][2] == Test.MyEnum.enum2)
            test(len(ro[Test.MyEnum.enum2]) == 2)
            test(ro[Test.MyEnum.enum2][0] == Test.MyEnum.enum1)
            test(ro[Test.MyEnum.enum2][1] == Test.MyEnum.enum2)
            test(len(ro[Test.MyEnum.enum1]) == 2)
            test(ro[Test.MyEnum.enum1][0] == Test.MyEnum.enum3)
            test(ro[Test.MyEnum.enum1][1] == Test.MyEnum.enum3)
            self.called()
        except:
            test(False)

    def opIntS(self, f):
        try:
            r = f.result()
            for j in range(0, len(r)):
                test(r[j] == -j)
            self.called()
        except:
            test(False)

    def opIdempotent(self, f):
        self.called()

    def opNonmutating(self, f):
        self.called()

    def opDerived(self, f):
        self.called()
def twowaysFuture(communicator, p):
    """Exercise every twoway operation of the Test.MyClass proxy *p* via the
    Future-based asynchronous mapping, verifying results both synchronously
    (f.result()) and through done-callbacks on a Callback instance.

    Bug fix: several sites below previously invoked cb.called() after
    attaching the done-callback. called() *signals* the latch instead of
    waiting on it, so the asynchronous result was never awaited or verified.
    All such sites now use cb.check(), matching the rest of the function.
    """
    f = p.ice_pingAsync()
    test(f.result() is None)
    f = p.ice_isAAsync(Test.MyClass.ice_staticId())
    test(f.result())
    f = p.ice_idAsync()
    test(f.result() == "::Test::MyDerivedClass")
    f = p.ice_idsAsync()
    test(len(f.result()) == 3)
    f = p.opVoidAsync()
    test(f.result() is None)
    cb = Callback()
    p.opVoidAsync().add_done_callback(lambda f: cb.called())
    cb.check()
    f = p.opByteAsync(0xff, 0x0f)
    (ret, p3) = f.result()
    test(p3 == 0xf0)
    test(ret == 0xff)
    cb = Callback()
    p.opByteAsync(0xff, 0x0f).add_done_callback(cb.opByte)
    cb.check()
    cb = Callback()
    p.opBoolAsync(True, False).add_done_callback(cb.opBool)
    cb.check()
    cb = Callback()
    p.opShortIntLongAsync(10, 11, 12).add_done_callback(cb.opShortIntLong)
    cb.check()
    cb = Callback()
    p.opFloatDoubleAsync(3.14, 1.1E10).add_done_callback(cb.opFloatDouble)
    cb.check()
    cb = Callback()
    p.opStringAsync("hello", "world").add_done_callback(cb.opString)
    cb.check()
    cb = Callback()
    p.opMyEnumAsync(Test.MyEnum.enum2).add_done_callback(cb.opMyEnum)
    cb.check()
    cb = Callback(communicator)
    p.opMyClassAsync(p).add_done_callback(cb.opMyClass)
    cb.check()
    si1 = Test.Structure()
    si1.p = p
    si1.e = Test.MyEnum.enum3
    si1.s = Test.AnotherStruct()
    si1.s.s = "abc"
    si2 = Test.Structure()
    si2.p = None
    si2.e = Test.MyEnum.enum2
    si2.s = Test.AnotherStruct()
    si2.s.s = "def"
    cb = Callback(communicator)
    p.opStructAsync(si1, si2).add_done_callback(cb.opStruct)
    cb.check()
    bsi1 = (0x01, 0x11, 0x12, 0x22)
    bsi2 = (0xf1, 0xf2, 0xf3, 0xf4)
    cb = Callback()
    p.opByteSAsync(bsi1, bsi2).add_done_callback(cb.opByteS)
    cb.check()
    bsi1 = (True, True, False)
    bsi2 = (False,)
    cb = Callback()
    p.opBoolSAsync(bsi1, bsi2).add_done_callback(cb.opBoolS)
    cb.check()
    ssi = (1, 2, 3)
    isi = (5, 6, 7, 8)
    lsi = (10, 30, 20)
    cb = Callback()
    p.opShortIntLongSAsync(ssi, isi, lsi).add_done_callback(cb.opShortIntLongS)
    cb.check()
    fsi = (3.14, 1.11)
    dsi = (1.1E10, 1.2E10, 1.3E10)
    cb = Callback()
    p.opFloatDoubleSAsync(fsi, dsi).add_done_callback(cb.opFloatDoubleS)
    cb.check()
    ssi1 = ('abc', 'de', 'fghi')
    ssi2 = ('xyz',)
    cb = Callback()
    p.opStringSAsync(ssi1, ssi2).add_done_callback(cb.opStringS)
    cb.check()
    bsi1 = ((0x01, 0x11, 0x12), (0xff,))
    bsi2 = ((0x0e,), (0xf2, 0xf1))
    cb = Callback()
    p.opByteSSAsync(bsi1, bsi2).add_done_callback(cb.opByteSS)
    cb.check()
    bsi1 = ((True,), (False,), (True, True),)
    bsi2 = ((False, False, True),)
    cb = Callback()
    p.opBoolSSAsync(bsi1, bsi2).add_done_callback(cb.opBoolSS)
    cb.check()
    ssi = ((1, 2, 5), (13,), ())
    isi = ((24, 98), (42,))
    lsi = ((496, 1729),)
    cb = Callback()
    p.opShortIntLongSSAsync(ssi, isi, lsi).add_done_callback(cb.opShortIntLongSS)
    cb.check()
    fsi = ((3.14,), (1.11,), ())
    dsi = ((1.1E10, 1.2E10, 1.3E10),)
    cb = Callback()
    p.opFloatDoubleSSAsync(fsi, dsi).add_done_callback(cb.opFloatDoubleSS)
    cb.check()
    ssi1 = (('abc',), ('de', 'fghi'))
    ssi2 = ((), (), ('xyz',))
    cb = Callback()
    p.opStringSSAsync(ssi1, ssi2).add_done_callback(cb.opStringSS)
    cb.check()
    di1 = {10: True, 100: False}
    di2 = {10: True, 11: False, 101: True}
    cb = Callback()
    p.opByteBoolDAsync(di1, di2).add_done_callback(cb.opByteBoolD)
    cb.check()
    di1 = {110: -1, 1100: 123123}
    di2 = {110: -1, 111: -100, 1101: 0}
    cb = Callback()
    p.opShortIntDAsync(di1, di2).add_done_callback(cb.opShortIntD)
    cb.check()
    di1 = {999999110: -1.1, 999999111: 123123.2}
    di2 = {999999110: -1.1, 999999120: -100.4, 999999130: 0.5}
    cb = Callback()
    p.opLongFloatDAsync(di1, di2).add_done_callback(cb.opLongFloatD)
    cb.check()
    di1 = {'foo': 'abc -1.1', 'bar': 'abc 123123.2'}
    di2 = {'foo': 'abc -1.1', 'FOO': 'abc -100.4', 'BAR': 'abc 0.5'}
    cb = Callback()
    p.opStringStringDAsync(di1, di2).add_done_callback(cb.opStringStringD)
    cb.check()
    di1 = {'abc': Test.MyEnum.enum1, '': Test.MyEnum.enum2}
    di2 = {'abc': Test.MyEnum.enum1, 'qwerty': Test.MyEnum.enum3, 'Hello!!': Test.MyEnum.enum2}
    cb = Callback()
    p.opStringMyEnumDAsync(di1, di2).add_done_callback(cb.opStringMyEnumD)
    cb.check()
    di1 = {Test.MyEnum.enum1: 'abc'}
    di2 = {Test.MyEnum.enum2: 'Hello!!', Test.MyEnum.enum3: 'qwerty'}
    cb = Callback()
    p.opMyEnumStringDAsync(di1, di2).add_done_callback(cb.opMyEnumStringD)
    cb.check()
    s11 = Test.MyStruct()
    s11.i = 1
    s11.j = 1
    s12 = Test.MyStruct()
    s12.i = 1
    s12.j = 2
    s22 = Test.MyStruct()
    s22.i = 2
    s22.j = 2
    s23 = Test.MyStruct()
    s23.i = 2
    s23.j = 3
    di1 = {s11: Test.MyEnum.enum1, s12: Test.MyEnum.enum2}
    di2 = {s11: Test.MyEnum.enum1, s22: Test.MyEnum.enum3, s23: Test.MyEnum.enum2}
    cb = Callback()
    p.opMyStructMyEnumDAsync(di1, di2).add_done_callback(cb.opMyStructMyEnumD)
    cb.check()
    dsi1 = ({ 10: True, 100: False }, { 10: True, 11: False, 101: True })
    dsi2 = ({ 100: False, 101: False },)
    cb = Callback()
    p.opByteBoolDSAsync(dsi1, dsi2).add_done_callback(cb.opByteBoolDS)
    cb.check()
    dsi1 = ({ 110: -1, 1100: 123123 }, { 110: -1, 111: -100, 1101: 0 })
    dsi2 = ({ 100: -1001 },)
    cb = Callback()
    p.opShortIntDSAsync(dsi1, dsi2).add_done_callback(cb.opShortIntDS)
    cb.check()
    dsi1 = ({ 999999110: -1.1, 999999111: 123123.2 }, { 999999110: -1.1, 999999120: -100.4, 999999130: 0.5 })
    dsi2 = ({ 999999140: 3.14 },)
    cb = Callback()
    p.opLongFloatDSAsync(dsi1, dsi2).add_done_callback(cb.opLongFloatDS)
    cb.check()
    dsi1 = ({ "foo": "abc -1.1", "bar": "abc 123123.2" }, { "foo": "abc -1.1", "FOO": "abc -100.4", "BAR": "abc 0.5" })
    dsi2 = ({ "f00": "ABC -3.14" },)
    cb = Callback()
    p.opStringStringDSAsync(dsi1, dsi2).add_done_callback(cb.opStringStringDS)
    cb.check()
    dsi1 = (
        { "abc": Test.MyEnum.enum1, "": Test.MyEnum.enum2 },
        { "abc": Test.MyEnum.enum1, "qwerty": Test.MyEnum.enum3, "Hello!!": Test.MyEnum.enum2 }
    )
    dsi2 = ({ "Goodbye": Test.MyEnum.enum1 },)
    cb = Callback()
    p.opStringMyEnumDSAsync(dsi1, dsi2).add_done_callback(cb.opStringMyEnumDS)
    cb.check()
    dsi1 = ({ Test.MyEnum.enum1: 'abc' }, { Test.MyEnum.enum2: 'Hello!!', Test.MyEnum.enum3: 'qwerty'})
    dsi2 = ({ Test.MyEnum.enum1: 'Goodbye' },)
    cb = Callback()
    p.opMyEnumStringDSAsync(dsi1, dsi2).add_done_callback(cb.opMyEnumStringDS)
    cb.check()
    s11 = Test.MyStruct(1, 1)
    s12 = Test.MyStruct(1, 2)
    s22 = Test.MyStruct(2, 2)
    s23 = Test.MyStruct(2, 3)
    dsi1 = (
        { s11: Test.MyEnum.enum1, s12: Test.MyEnum.enum2 },
        { s11: Test.MyEnum.enum1, s22: Test.MyEnum.enum3, s23: Test.MyEnum.enum2 }
    )
    dsi2 = ({ s23: Test.MyEnum.enum3 },)
    cb = Callback()
    p.opMyStructMyEnumDSAsync(dsi1, dsi2).add_done_callback(cb.opMyStructMyEnumDS)
    cb.check()
    sdi1 = { 0x01: (0x01, 0x11), 0x22: (0x12,) }
    sdi2 = { 0xf1: (0xf2, 0xf3) }
    cb = Callback()
    p.opByteByteSDAsync(sdi1, sdi2).add_done_callback(cb.opByteByteSD)
    cb.check()
    sdi1 = { False: (True, False), True: (False, True, True) }
    sdi2 = { False: (True, False) }
    cb = Callback()
    p.opBoolBoolSDAsync(sdi1, sdi2).add_done_callback(cb.opBoolBoolSD)
    cb.check()
    sdi1 = { 1: (1, 2, 3), 2: (4, 5) }
    sdi2 = { 4: (6, 7) }
    cb = Callback()
    p.opShortShortSDAsync(sdi1, sdi2).add_done_callback(cb.opShortShortSD)
    cb.check()
    sdi1 = { 100: (100, 200, 300), 200: (400, 500) }
    sdi2 = { 400: (600, 700) }
    cb = Callback()
    p.opIntIntSDAsync(sdi1, sdi2).add_done_callback(cb.opIntIntSD)
    cb.check()
    sdi1 = { 999999990: (999999110, 999999111, 999999110), 999999991: (999999120, 999999130) }
    sdi2 = { 999999992: (999999110, 999999120) }
    cb = Callback()
    p.opLongLongSDAsync(sdi1, sdi2).add_done_callback(cb.opLongLongSD)
    cb.check()
    sdi1 = { "abc": (-1.1, 123123.2, 100.0), "ABC": (42.24, -1.61) }
    sdi2 = { "aBc": (-3.14, 3.14) }
    cb = Callback()
    p.opStringFloatSDAsync(sdi1, sdi2).add_done_callback(cb.opStringFloatSD)
    cb.check()
    sdi1 = { "Hello!!": (1.1E10, 1.2E10, 1.3E10), "Goodbye": (1.4E10, 1.5E10) }
    sdi2 = { "": (1.6E10, 1.7E10) }
    cb = Callback()
    p.opStringDoubleSDAsync(sdi1, sdi2).add_done_callback(cb.opStringDoubleSD)
    cb.check()
    sdi1 = { "abc": ("abc", "de", "fghi") , "def": ("xyz", "or") }
    sdi2 = { "ghi": ("and", "xor") }
    cb = Callback()
    p.opStringStringSDAsync(sdi1, sdi2).add_done_callback(cb.opStringStringSD)
    cb.check()
    sdi1 = {
        Test.MyEnum.enum3: (Test.MyEnum.enum1, Test.MyEnum.enum1, Test.MyEnum.enum2),
        Test.MyEnum.enum2: (Test.MyEnum.enum1, Test.MyEnum.enum2)
    }
    sdi2 = { Test.MyEnum.enum1: (Test.MyEnum.enum3, Test.MyEnum.enum3) }
    cb = Callback()
    p.opMyEnumMyEnumSDAsync(sdi1, sdi2).add_done_callback(cb.opMyEnumMyEnumSD)
    cb.check()
    # Sequence lengths chosen around the 1/2/4-byte size-encoding boundaries.
    lengths = ( 0, 1, 2, 126, 127, 128, 129, 253, 254, 255, 256, 257, 1000 )
    for l in lengths:
        s = []
        for i in range(l):
            s.append(i)
        # opIntS never touches the communicator, so no argument is needed
        # (previously the sequence length was passed as the communicator).
        cb = Callback()
        p.opIntSAsync(s).add_done_callback(cb.opIntS)
        cb.check()
    ctx = {'one': 'ONE', 'two': 'TWO', 'three': 'THREE'}
    test(len(p.ice_getContext()) == 0)
    f = p.opContextAsync()
    c = f.result()
    test(c != ctx)
    test(len(p.ice_getContext()) == 0)
    f = p.opContextAsync(context=ctx)
    c = f.result()
    test(c == ctx)
    p2 = Test.MyClassPrx.checkedCast(p.ice_context(ctx))
    test(p2.ice_getContext() == ctx)
    f = p2.opContextAsync()
    c = f.result()
    test(c == ctx)
    f = p2.opContextAsync(context=ctx)
    c = f.result()
    test(c == ctx)
    #
    # Test implicit context propagation
    #
    if p.ice_getConnection():
        impls = ( 'Shared', 'PerThread' )
        for i in impls:
            initData = Ice.InitializationData()
            initData.properties = communicator.getProperties().clone()
            initData.properties.setProperty('Ice.ImplicitContext', i)
            ic = Ice.initialize(data=initData)
            ctx = {'one': 'ONE', 'two': 'TWO', 'three': 'THREE'}
            p3 = Test.MyClassPrx.uncheckedCast(ic.stringToProxy("test:default -p 12010"))
            ic.getImplicitContext().setContext(ctx)
            test(ic.getImplicitContext().getContext() == ctx)
            f = p3.opContextAsync()
            c = f.result()
            test(c == ctx)
            ic.getImplicitContext().put('zero', 'ZERO')
            ctx = ic.getImplicitContext().getContext()
            f = p3.opContextAsync()
            c = f.result()
            test(c == ctx)
            prxContext = {'one': 'UN', 'four': 'QUATRE'}
            # Proxy context entries override implicit-context entries.
            combined = {}
            combined.update(ctx)
            combined.update(prxContext)
            test(combined['one'] == 'UN')
            p3 = Test.MyClassPrx.uncheckedCast(p3.ice_context(prxContext))
            ic.getImplicitContext().setContext({})
            f = p3.opContextAsync()
            c = f.result()
            test(c == prxContext)
            ic.getImplicitContext().setContext(ctx)
            f = p3.opContextAsync()
            c = f.result()
            test(c == combined)
            ic.destroy()
    cb = Callback()
    p.opIdempotentAsync().add_done_callback(cb.opIdempotent)
    cb.check()
    cb = Callback()
    p.opNonmutatingAsync().add_done_callback(cb.opNonmutating)
    cb.check()
    derived = Test.MyDerivedClassPrx.checkedCast(p)
    test(derived)
    cb = Callback()
    derived.opDerivedAsync().add_done_callback(cb.opDerived)
    cb.check()
    f = p.opByte1Async(0xFF)
    test(f.result() == 0xFF)
    f = p.opShort1Async(0x7FFF)
    test(f.result() == 0x7FFF)
    f = p.opInt1Async(0x7FFFFFFF)
    test(f.result() == 0x7FFFFFFF)
    f = p.opLong1Async(0x7FFFFFFFFFFFFFFF)
    test(f.result() == 0x7FFFFFFFFFFFFFFF)
    f = p.opFloat1Async(1.0)
    test(f.result() == 1.0)
    f = p.opDouble1Async(1.0)
    test(f.result() == 1.0)
    f = p.opString1Async("opString1")
    test(f.result() == "opString1")
    f = p.opStringS1Async(None)
    test(len(f.result()) == 0)
    f = p.opByteBoolD1Async(None)
    test(len(f.result()) == 0)
    f = p.opStringS2Async(None)
    test(len(f.result()) == 0)
    f = p.opByteBoolD2Async(None)
    test(len(f.result()) == 0)
'''
Bubble
======
.. versionadded:: 1.1.0
.. image:: images/bubble.jpg
:align: right
The Bubble widget is a form of menu or a small popup where the menu options
are stacked either vertically or horizontally.
The :class:`Bubble` contains an arrow pointing in the direction you
choose.
Simple example
--------------
.. include:: ../../examples/widgets/bubble_test.py
:literal:
Customize the Bubble
--------------------
You can choose the direction in which the arrow points::
Bubble(arrow_pos='top_mid')
The widgets added to the Bubble are ordered horizontally by default, like a
Boxlayout. You can change that by::
orientation = 'vertical'
To add items to the bubble::
bubble = Bubble(orientation = 'vertical')
bubble.add_widget(your_widget_instance)
To remove items::
bubble.remove_widget(widget)
or
bubble.clear_widgets()
To access the list of children, use content.children::
bubble.content.children
.. warning::
This is important! Do not use bubble.children
To change the appearance of the bubble::
bubble.background_color = (1, 0, 0, .5) #50% translucent red
bubble.border = [0, 0, 0, 0]
background_image = 'path/to/background/image'
arrow_image = 'path/to/arrow/image'
'''
__all__ = ('Bubble', 'BubbleButton', 'BubbleContent')
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.properties import ObjectProperty, StringProperty, OptionProperty, \
ListProperty, BooleanProperty
from kivy.clock import Clock
from kivy.base import EventLoop
from kivy.metrics import dp
class BubbleButton(Button):
    '''A button intended for use in a :class:`Bubble` widget.

    A plain :class:`~kivy.uix.button.Button` can be used instead, but it will
    not look good unless its background is changed to suit the bubble.
    BubbleButton already provides a suitable background, so prefer it when
    adding buttons to a Bubble.
    '''
    pass
class BubbleContent(GridLayout):
    '''Layout that holds the widgets added to a :class:`Bubble` (exposed as
    ``bubble.content``; styling comes from kv rules, not this class).'''
    pass
class Bubble(GridLayout):
    '''Bubble class. See module documentation for more information.
    '''

    background_color = ListProperty([1, 1, 1, 1])
    '''Background color, in the format (r, g, b, a).

    :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
    defaults to [1, 1, 1, 1].
    '''

    border = ListProperty([16, 16, 16, 16])
    '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
    graphics instruction. Used with the :attr:`background_image`.
    It should be used when using custom backgrounds.

    It must be a list of 4 values: (top, right, bottom, left). Read the
    BorderImage instructions for more information about how to use it.

    :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
    (16, 16, 16, 16)
    '''

    background_image = StringProperty(
        'atlas://data/images/defaulttheme/bubble')
    '''Background image of the bubble.

    :attr:`background_image` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'atlas://data/images/defaulttheme/bubble'.
    '''

    arrow_image = StringProperty(
        'atlas://data/images/defaulttheme/bubble_arrow')
    ''' Image of the arrow pointing to the bubble.

    :attr:`arrow_image` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'atlas://data/images/defaulttheme/bubble_arrow'.
    '''

    show_arrow = BooleanProperty(True)
    ''' Indicates whether to show arrow.

    .. versionadded:: 1.8.0

    :attr:`show_arrow` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to `True`.
    '''

    arrow_pos = OptionProperty('bottom_mid', options=(
        'left_top', 'left_mid', 'left_bottom', 'top_left', 'top_mid',
        'top_right', 'right_top', 'right_mid', 'right_bottom',
        'bottom_left', 'bottom_mid', 'bottom_right'))
    '''Specifies the position of the arrow relative to the bubble.
    Can be one of: left_top, left_mid, left_bottom, top_left, top_mid,
    top_right, right_top, right_mid, right_bottom, bottom_left, bottom_mid,
    bottom_right.

    :attr:`arrow_pos` is a :class:`~kivy.properties.OptionProperty` and
    defaults to 'bottom_mid'.
    '''

    content = ObjectProperty(None)
    '''This is the object where the main content of the bubble is held.

    :attr:`content` is a :class:`~kivy.properties.ObjectProperty` and
    defaults to 'None'.
    '''

    orientation = OptionProperty('horizontal',
                                 options=('horizontal', 'vertical'))
    '''This specifies the manner in which the children inside bubble
    are arranged. Can be one of 'vertical' or 'horizontal'.

    :attr:`orientation` is a :class:`~kivy.properties.OptionProperty` and
    defaults to 'horizontal'.
    '''

    limit_to = ObjectProperty(None, allownone=True)
    '''Specifies the widget to which the bubbles position is restricted.

    .. versionadded:: 1.6.0

    :attr:`limit_to` is a :class:`~kivy.properties.ObjectProperty` and
    defaults to 'None'.
    '''

    def __init__(self, **kwargs):
        self._prev_arrow_pos = None
        self._arrow_layout = BoxLayout()
        # Background: stretched, tinted image; its texture is also exposed
        # as self.background_texture.
        self._bk_img = Image(
            source=self.background_image, allow_stretch=True,
            keep_ratio=False, color=self.background_color)
        self.background_texture = self._bk_img.texture
        self._arrow_img = Image(source=self.arrow_image,
                                allow_stretch=True,
                                color=self.background_color)
        # NOTE(review): content is created with parent=self before super(),
        # then detached and re-added below -- presumably so the add_widget()
        # override routes it into the grid itself; confirm before changing.
        self.content = content = BubbleContent(parent=self)
        super(Bubble, self).__init__(**kwargs)
        content.parent = None
        self.add_widget(content)
        self.on_arrow_pos()

    def add_widget(self, *l):
        # Internal widgets (content, arrow, arrow layout) go into the grid
        # itself; everything else is forwarded into the content container.
        content = self.content
        if content is None:
            return
        if l[0] == content or l[0] == self._arrow_img\
                or l[0] == self._arrow_layout:
            super(Bubble, self).add_widget(*l)
        else:
            content.add_widget(*l)

    def remove_widget(self, *l):
        # Mirror of add_widget: internal widgets are removed from the grid,
        # user widgets from the content container.
        content = self.content
        if not content:
            return
        if l[0] == content or l[0] == self._arrow_img\
                or l[0] == self._arrow_layout:
            super(Bubble, self).remove_widget(*l)
        else:
            content.remove_widget(l[0])

    def clear_widgets(self, **kwargs):
        # do_super=True clears the grid itself (used internally by
        # on_arrow_pos); the default clears only the user content.
        content = self.content
        if not content:
            return
        if kwargs.get('do_super', False):
            super(Bubble, self).clear_widgets()
        else:
            content.clear_widgets()

    def on_show_arrow(self, instance, value):
        # The arrow is hidden via opacity rather than being removed.
        self._arrow_img.opacity = int(value)

    def on_parent(self, instance, value):
        Clock.schedule_once(self._update_arrow)

    def on_pos(self, instance, pos):
        # Clamp the bubble inside the limit_to widget (or window) bounds.
        lt = self.limit_to
        if lt:
            # limit_to is cleared first so the position writes below do not
            # re-enter this clamping logic.
            self.limit_to = None
            if lt is EventLoop.window:
                x = y = 0
                top = lt.height
                right = lt.width
            else:
                x, y = lt.x, lt.y
                top, right = lt.top, lt.right
            self.x = max(self.x, x)
            self.right = min(self.right, right)
            self.top = min(self.top, top)
            self.y = max(self.y, y)
            self.limit_to = lt

    def on_background_image(self, *l):
        self._bk_img.source = self.background_image

    def on_background_color(self, *l):
        if self.content is None:
            return
        # Arrow and background share the same tint.
        self._arrow_img.color = self._bk_img.color = self.background_color

    def on_orientation(self, *l):
        content = self.content
        if not content:
            return
        # The content GridLayout emulates a Box layout by capping one axis.
        if self.orientation[0] == 'v':
            content.cols = 1
            content.rows = 99
        else:
            content.cols = 99
            content.rows = 1

    def on_arrow_image(self, *l):
        self._arrow_img.source = self.arrow_image

    def on_arrow_pos(self, *l):
        # Rebuilds the internal grid (arrow strip + content) whenever the
        # arrow position changes.  Reschedules itself until content exists
        # and has been attached as a child.
        self_content = self.content
        if not self_content:
            Clock.schedule_once(self.on_arrow_pos)
            return
        if self_content not in self.children:
            Clock.schedule_once(self.on_arrow_pos)
            return
        self_arrow_pos = self.arrow_pos
        if self._prev_arrow_pos == self_arrow_pos:
            return
        self._prev_arrow_pos = self_arrow_pos

        self_arrow_layout = self._arrow_layout
        self_arrow_layout.clear_widgets()
        self_arrow_img = self._arrow_img
        self._sctr = self._arrow_img
        self.clear_widgets(do_super=True)
        self_content.parent = None

        self_arrow_img.size_hint = (1, None)
        self_arrow_img.height = dp(self_arrow_img.texture_size[1])
        self_arrow_img.pos = 0, 0
        widget_list = []
        arrow_list = []
        parent = self_arrow_img.parent
        if parent:
            parent.remove_widget(self_arrow_img)

        if self_arrow_pos[0] == 'b' or self_arrow_pos[0] == 't':
            # Arrow above or below: 1 column, 3 rows; the arrow strip is a
            # horizontal box.  Top arrows are the bottom image rotated 180
            # degrees inside a Scatter.
            self.cols = 1
            self.rows = 3
            self_arrow_layout.orientation = 'horizontal'
            self_arrow_img.width = self.width / 3
            self_arrow_layout.size_hint = (1, None)
            self_arrow_layout.height = self_arrow_img.height
            if self_arrow_pos[0] == 'b':
                if self_arrow_pos == 'bottom_mid':
                    widget_list = (self_content, self_arrow_img)
                else:
                    if self_arrow_pos == 'bottom_left':
                        arrow_list = (self_arrow_img, Widget(), Widget())
                    elif self_arrow_pos == 'bottom_right':
                        # add two dummy widgets
                        arrow_list = (Widget(), Widget(), self_arrow_img)
                    widget_list = (self_content, self_arrow_layout)
            else:
                sctr = Scatter(do_translation=False,
                               rotation=180,
                               do_rotation=False,
                               do_scale=False,
                               size_hint=(None, None),
                               size=self_arrow_img.size)
                sctr.add_widget(self_arrow_img)
                if self_arrow_pos == 'top_mid':
                    # add two dummy widgets
                    arrow_list = (Widget(), sctr, Widget())
                elif self_arrow_pos == 'top_left':
                    arrow_list = (sctr, Widget(), Widget())
                elif self_arrow_pos == 'top_right':
                    arrow_list = (Widget(), Widget(), sctr)
                widget_list = (self_arrow_layout, self_content)
        elif self_arrow_pos[0] == 'l' or self_arrow_pos[0] == 'r':
            # Arrow on the left or right: 3 columns, 1 row; the arrow image
            # is rotated +/-90 degrees inside a Scatter.
            self.cols = 3
            self.rows = 1
            self_arrow_img.width = self.height / 3
            self_arrow_layout.orientation = 'vertical'
            self_arrow_layout.cols = 1
            self_arrow_layout.size_hint = (None, 1)
            self_arrow_layout.width = self_arrow_img.height
            rotation = -90 if self_arrow_pos[0] == 'l' else 90
            self._sctr = sctr = Scatter(do_translation=False,
                                        rotation=rotation,
                                        do_rotation=False,
                                        do_scale=False,
                                        size_hint=(None, None),
                                        size=(self_arrow_img.size))
            sctr.add_widget(self_arrow_img)
            if self_arrow_pos[-4:] == '_top':
                arrow_list = (Widget(size_hint=(1, .07)),
                              sctr, Widget(size_hint=(1, .3)))
            elif self_arrow_pos[-4:] == '_mid':
                arrow_list = (Widget(), sctr, Widget())
                # Re-centre the arrow once the layout has been computed.
                Clock.schedule_once(self._update_arrow)
            elif self_arrow_pos[-7:] == '_bottom':
                arrow_list = (Widget(), Widget(), sctr)
            if self_arrow_pos[0] == 'l':
                widget_list = (self_arrow_layout, self_content)
            else:
                widget_list = (self_content, self_arrow_layout)

        # add widgets to arrow_layout
        add = self_arrow_layout.add_widget
        for widg in arrow_list:
            add(widg)

        # add widgets to self
        add = self.add_widget
        for widg in widget_list:
            add(widg)

    def _update_arrow(self, *dt):
        # Keep the side arrow vertically centred for the *_mid positions.
        if self.arrow_pos in ('left_mid', 'right_mid'):
            self._sctr.center_y = self._arrow_layout.center_y
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_configuration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
import {bootstrapApplication} from '@angular/platform-browser';
import {CdkDragDropHandleExample} from './app/app';

// Entry point: bootstrap the standalone example component as the
// application root.
bootstrapApplication(CdkDragDropHandleExample);
import { test, expect } from "@playwright/test";
import {
createAppFixture,
createFixture,
js,
} from "./helpers/create-fixture.js";
import type { Fixture, AppFixture } from "./helpers/create-fixture.js";
import { PlaywrightFixture } from "./helpers/playwright-fixture.js";
// Loader data plumbing without a browser: both the root and the index route
// export a loader, and the single-fetch endpoint ("/_root.data") must return
// both values keyed by route id.
test.describe("loader", () => {
  let fixture: Fixture;

  let ROOT_DATA = "ROOT_DATA";
  let INDEX_DATA = "INDEX_DATA";

  test.beforeAll(async () => {
    fixture = await createFixture({
      files: {
        "app/root.tsx": js`
          import { Links, Meta, Outlet, Scripts } from "react-router";

          export const loader = () => "${ROOT_DATA}";

          export default function Root() {
            return (
              <html lang="en">
                <head>
                  <Meta />
                  <Links />
                </head>
                <body>
                  <Outlet />
                  <Scripts />
                </body>
              </html>
            );
          }
        `,

        "app/routes/_index.tsx": js`
          export function loader() {
            return "${INDEX_DATA}"
          }

          export default function Index() {
            return <div/>
          }
        `,
      },
    });
  });

  test("returns responses for single fetch routes", async () => {
    let { data } = await fixture.requestSingleFetchData("/_root.data");
    expect(data).toEqual({
      root: { data: ROOT_DATA },
      "routes/_index": { data: INDEX_DATA },
    });
  });
});
// Loader behavior in a running app: a loader redirect is followed through to
// the target route, and a loader may return a raw fetch() Response that is
// passed straight to the client.
test.describe("loader in an app", () => {
  let appFixture: AppFixture;

  let HOME_PAGE_TEXT = "hello world";
  let REDIRECT_TARGET_TEXT = "redirect target";
  let FETCH_TARGET_TEXT = "fetch target";

  test.beforeAll(async () => {
    appFixture = await createAppFixture(
      await createFixture({
        files: {
          "app/root.tsx": js`
            import { Outlet } from "react-router"

            export default function Root() {
              return (
                <html>
                  <body>
                    ${HOME_PAGE_TEXT}
                    <Outlet />
                  </body>
                </html>
              );
            }
          `,

          "app/routes/redirect.tsx": js`
            import { redirect } from "react-router";
            export const loader = () => redirect("/redirect-target");
            export default () => <div>Yo</div>
          `,

          "app/routes/redirect-target.tsx": js`
            export default () => <div>${REDIRECT_TARGET_TEXT}</div>
          `,

          "app/routes/fetch.tsx": js`
            export function loader({ request }) {
              return fetch(new URL(request.url).origin + '/fetch-target');
            }
          `,

          "app/routes/fetch-target.tsx": js`
            export function loader() {
              return Response.json({ message: "${FETCH_TARGET_TEXT}" })
            }
          `,
        },
      }),
    );
  });

  test.afterAll(() => {
    appFixture.close();
  });

  test("sends a redirect", async ({ page }) => {
    let app = new PlaywrightFixture(appFixture, page);

    await app.goto("/redirect");

    // Redirect lands back on a page rendered through root, so both the root
    // shell text and the target route text must be present.
    expect(await app.getHtml()).toMatch(HOME_PAGE_TEXT);
    expect(await app.getHtml()).toMatch(REDIRECT_TARGET_TEXT);
  });

  test("handles raw fetch responses", async ({ page }) => {
    let app = new PlaywrightFixture(appFixture, page);
    let res = await app.goto(`/fetch`);
    expect((await res.json()).message).toBe(FETCH_TARGET_TEXT);
  });
});
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fe10.test.cases.generated.cases.components.readWriteAccess;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fe10.test.configurator.AnalysisApiFe10TestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.readWriteAccess.AbstractReadWriteAccessTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/expressionInfoProvider/readWriteAccess")
@TestDataPath("$PROJECT_ROOT")
public class Fe10IdeNormalAnalysisSourceModuleReadWriteAccessTestGenerated extends AbstractReadWriteAccessTest {
    // NOTE(review): generated file ("DO NOT MODIFY MANUALLY" per the header
    // javadoc) -- change the test generator, not this class, to alter tests.

    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        // FE1.0 frontend, source module, normal analysis session, IDE mode.
        return AnalysisApiFe10TestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fe10,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Ide
            )
        );
    }

    @Test
    public void testAllFilesPresentInReadWriteAccess() {
        // Fails when a .kt test data file exists on disk without a matching
        // generated test method below.
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/expressionInfoProvider/readWriteAccess"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }

    @Test
    @TestMetadata("assignment.kt")
    public void testAssignment() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/readWriteAccess/assignment.kt");
    }

    @Test
    @TestMetadata("assignmentPlusEq.kt")
    public void testAssignmentPlusEq() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/readWriteAccess/assignmentPlusEq.kt");
    }

    @Test
    @TestMetadata("namedArguments.kt")
    public void testNamedArguments() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/readWriteAccess/namedArguments.kt");
    }

    @Test
    @TestMetadata("readReference.kt")
    public void testReadReference() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/readWriteAccess/readReference.kt");
    }

    @Test
    @TestMetadata("readReferenceInParenthesis.kt")
    public void testReadReferenceInParenthesis() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/readWriteAccess/readReferenceInParenthesis.kt");
    }

    @Test
    @TestMetadata("unary.kt")
    public void testUnary() {
        runTest("analysis/analysis-api/testData/components/expressionInfoProvider/readWriteAccess/unary.kt");
    }
}
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
  """Builds the $(ARCHS_STANDARD) expansion dictionary.

  Maps '$(ARCHS_STANDARD)' to |archs|; when |archs_including_64_bit| is
  truthy, also maps '$(ARCHS_STANDARD_INCLUDING_64_BIT)' to it.
  """
  if archs_including_64_bit:
    return {
        '$(ARCHS_STANDARD)': archs,
        '$(ARCHS_STANDARD_INCLUDING_64_BIT)': archs_including_64_bit,
    }
  return {'$(ARCHS_STANDARD)': archs}
class XcodeArchsDefault(object):
"""A class to resolve ARCHS variable from xcode_settings, resolving Xcode
macros and implementing filtering by VALID_ARCHS. The expansion of macros
depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
on the version of Xcode.
"""
# Match variable like $(ARCHS_STANDARD).
variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
def __init__(self, default, mac, iphonesimulator, iphoneos):
self._default = (default,)
self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
def _VariableMapping(self, sdkroot):
"""Returns the dictionary of variable mapping depending on the SDKROOT."""
sdkroot = sdkroot.lower()
if 'iphoneos' in sdkroot:
return self._archs['ios']
elif 'iphonesimulator' in sdkroot:
return self._archs['iossim']
else:
return self._archs['mac']
def _ExpandArchs(self, archs, sdkroot):
"""Expands variables references in ARCHS, and remove duplicates."""
variable_mapping = self._VariableMapping(sdkroot)
expanded_archs = []
for arch in archs:
if self.variable_pattern.match(arch):
variable = arch
try:
variable_expansion = variable_mapping[variable]
for arch in variable_expansion:
if arch not in expanded_archs:
expanded_archs.append(arch)
except KeyError as e:
print 'Warning: Ignoring unsupported variable "%s".' % variable
elif arch not in expanded_archs:
expanded_archs.append(arch)
return expanded_archs
def ActiveArchs(self, archs, valid_archs, sdkroot):
"""Expands variables references in ARCHS, and filter by VALID_ARCHS if it
is defined (if not set, Xcode accept any value in ARCHS, otherwise, only
values present in VALID_ARCHS are kept)."""
expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
if valid_archs:
filtered_archs = []
for arch in expanded_archs:
if arch in valid_archs:
filtered_archs.append(arch)
expanded_archs = filtered_archs
return expanded_archs
def GetXcodeArchsDefault():
  """Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
  installed version of Xcode.

  Versions before Xcode 5.0 and from 5.1 on default ARCHS to
  $(ARCHS_STANDARD); Xcode 5.0-5.0.2 defaults to
  $(ARCHS_STANDARD_INCLUDING_64_BIT) (added in 5.0, deprecated in 5.1).
  For "macosx", $(ARCHS_STANDARD) includes 64-bit from Xcode 5.0; for
  "iphoneos"/"iphonesimulator", 64-bit architectures join
  $(ARCHS_STANDARD_INCLUDING_64_BIT) in 5.0 and $(ARCHS_STANDARD) in 5.1.
  The result is cached for performance.
  """
  global XCODE_ARCHS_DEFAULT_CACHE
  if XCODE_ARCHS_DEFAULT_CACHE:
    return XCODE_ARCHS_DEFAULT_CACHE

  version, _ = XcodeVersion()
  if version < '0500':
    # Pre-Xcode 5.0: 32-bit only defaults.
    defaults = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['armv7']))
  elif version < '0510':
    # Xcode 5.0-5.0.2: 64-bit lives in $(ARCHS_STANDARD_INCLUDING_64_BIT).
    defaults = XcodeArchsDefault(
        '$(ARCHS_STANDARD_INCLUDING_64_BIT)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(['armv7', 'armv7s'],
                                  ['armv7', 'armv7s', 'arm64']))
  else:
    # Xcode 5.1+: 64-bit architectures are part of $(ARCHS_STANDARD).
    defaults = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(['armv7', 'armv7s', 'arm64'],
                                  ['armv7', 'armv7s', 'arm64']))
  XCODE_ARCHS_DEFAULT_CACHE = defaults
  return defaults
class XcodeSettings(object):
  """A class that understands the gyp 'xcode_settings' object."""

  # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
  # at class-level for efficiency.  Keyed by SDKROOT value (e.g. 'macosx');
  # _sdk_root_cache is the reverse mapping (path -> root).
  _sdk_path_cache = {}
  _platform_path_cache = {}
  _sdk_root_cache = {}

  # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _plist_cache = {}

  # Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _codesigning_key_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
  def _Settings(self):
    """Returns the xcode_settings dict for the currently selected
    configuration (self.configname must have been set by the caller)."""
    assert self.configname
    return self.xcode_settings[self.configname]
  def _Test(self, test_key, cond_key, default):
    """Returns True if setting |test_key| (falling back to |default| when
    absent) equals |cond_key| in the active configuration."""
    return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
  def _IsBundle(self):
    """Returns True if this target builds a bundle ('mac_bundle' flag set,
    or it is an XCTest bundle)."""
    return int(self.spec.get('mac_bundle', 0)) != 0 or self._IsXCTest()
  def _IsXCTest(self):
    """Returns True if the 'mac_xctest_bundle' gyp flag is set."""
    return int(self.spec.get('mac_xctest_bundle', 0)) != 0
  def _IsIosAppExtension(self):
    """Returns True if the 'ios_app_extension' gyp flag is set."""
    return int(self.spec.get('ios_app_extension', 0)) != 0
  def _IsIosWatchKitExtension(self):
    """Returns True if the 'ios_watchkit_extension' gyp flag is set."""
    return int(self.spec.get('ios_watchkit_extension', 0)) != 0
  def _IsIosWatchApp(self):
    """Returns True if the 'ios_watch_app' gyp flag is set."""
    return int(self.spec.get('ios_watch_app', 0)) != 0
  def GetFrameworkVersion(self):
    """Returns the framework version of the current target. Only valid for
    bundles."""
    assert self._IsBundle()
    # 'A' is the conventional default framework version directory name.
    return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
return '.' + self.spec.get('product_extension', 'appex')
else:
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
  def GetProductName(self):
    """Returns PRODUCT_NAME: the explicit 'product_name' from the gyp spec,
    falling back to the target name."""
    return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
  def GetWrapperName(self):
    """Returns the directory name of the bundle represented by this target,
    e.g. 'Chromium.app'. Only valid for bundles."""
    assert self._IsBundle()
    return self.GetProductName() + self.GetWrapperExtension()
  def GetBundleContentsFolderPath(self):
    """Returns the qualified path to the bundle's contents folder. E.g.
    Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
    # On iOS the wrapper directory itself is used as the contents folder
    # (note: this returns before the bundle assert below).
    if self.isIOS:
      return self.GetWrapperName()
    assert self._IsBundle()
    if self.spec['type'] == 'shared_library':
      return os.path.join(
          self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
    else:
      # loadable_modules have a 'Contents' folder like executables.
      return os.path.join(self.GetWrapperName(), 'Contents')
  def GetBundleResourceFolder(self):
    """Returns the qualified path to the bundle's resource folder. E.g.
    Chromium.app/Contents/Resources. Only valid for bundles."""
    assert self._IsBundle()
    # iOS bundles keep resources directly in the contents folder.
    if self.isIOS:
      return self.GetBundleContentsFolderPath()
    return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
  def GetProductType(self):
    """Returns the PRODUCT_TYPE of this target (an Xcode product-type
    identifier string)."""
    # The three iOS extension/app flavors each require mac_bundle and take
    # precedence over the plain type mapping below.
    if self._IsIosAppExtension():
      assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
          '(target %s)' % self.spec['target_name'])
      return 'com.apple.product-type.app-extension'
    if self._IsIosWatchKitExtension():
      assert self._IsBundle(), ('ios_watchkit_extension flag requires '
          'mac_bundle (target %s)' % self.spec['target_name'])
      return 'com.apple.product-type.watchkit-extension'
    if self._IsIosWatchApp():
      assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
          '(target %s)' % self.spec['target_name'])
      return 'com.apple.product-type.application.watchapp'
    if self._IsBundle():
      return {
        'executable': 'com.apple.product-type.application',
        'loadable_module': 'com.apple.product-type.bundle',
        'shared_library': 'com.apple.product-type.framework',
      }[self.spec['type']]
    else:
      return {
        'executable': 'com.apple.product-type.tool',
        'loadable_module': 'com.apple.product-type.library.dynamic',
        'shared_library': 'com.apple.product-type.library.dynamic',
        'static_library': 'com.apple.product-type.library.static',
      }[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
  def GetActiveArchs(self, configname):
    """Returns the architectures this target should be built for in
    |configname|: ARCHS with $(...)-style variables expanded and filtered
    by VALID_ARCHS."""
    config_settings = self.xcode_settings[configname]
    xcode_archs_default = GetXcodeArchsDefault()
    return xcode_archs_default.ActiveArchs(
        config_settings.get('ARCHS'),
        config_settings.get('VALID_ARCHS'),
        config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcrun', '--sdk', sdk, infoitem])
except:
pass
  def _SdkRoot(self, configname):
    """Returns the SDKROOT setting for |configname| (the active config when
    |configname| is None); '' when unset."""
    if configname is None:
      configname = self.configname
    return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _XcodePlatformPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root not in XcodeSettings._platform_path_cache:
platform_path = self._GetSdkVersionInfoItem(sdk_root,
'--show-sdk-platform-path')
XcodeSettings._platform_path_cache[sdk_root] = platform_path
return XcodeSettings._platform_path_cache[sdk_root]
  def _SdkPath(self, configname=None):
    """Returns the SDK path: SDKROOT verbatim when it is already an absolute
    path, otherwise resolved through xcrun."""
    sdk_root = self._SdkRoot(configname)
    if sdk_root.startswith('/'):
      return sdk_root
    return self._XcodeSdkPath(sdk_root)
  def _XcodeSdkPath(self, sdk_root):
    """Resolves |sdk_root| (e.g. 'macosx') to an on-disk SDK path via xcrun,
    memoizing the result in the class-level cache."""
    if sdk_root not in XcodeSettings._sdk_path_cache:
      sdk_path = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-path')
      XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
      if sdk_root:
        # Also remember the reverse mapping (path -> root) for later lookups.
        XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
    return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
  def GetCflags(self, configname, arch=None):
    """Returns flags that need to be added to .c, .cc, .m, and .mm
    compilations.

    Args:
      configname: Name of the configuration whose settings are read.
      arch: Optional single-architecture override; when None, the
        configuration's active archs are used (with a warning if there is
        more than one, since fat binaries are unimplemented here).
    """
    # This functions (and the similar ones below) do not offer complete
    # emulation of all xcode_settings keys. They're implemented on demand.
    self.configname = configname
    cflags = []
    sdk_root = self._SdkPath()
    if 'SDKROOT' in self._Settings() and sdk_root:
      cflags.append('-isysroot %s' % sdk_root)
    if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
      cflags.append('-Wconstant-conversion')
    if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
      cflags.append('-funsigned-char')
    if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
      cflags.append('-fasm-blocks')
    if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
      if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
        cflags.append('-mdynamic-no-pic')
    else:
      pass
      # TODO: In this case, it depends on the target. xcode passes
      # mdynamic-no-pic by default for executable and possibly static lib
      # according to mento
    if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
      cflags.append('-mpascal-strings')
    self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
    if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
      dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
      if dbg_format == 'dwarf':
        cflags.append('-gdwarf-2')
      elif dbg_format == 'stabs':
        raise NotImplementedError('stabs debug format is not supported yet.')
      elif dbg_format == 'dwarf-with-dsym':
        # The dSYM generation itself happens in a postbuild; the compile
        # step only needs plain dwarf output.
        cflags.append('-gdwarf-2')
      else:
        raise NotImplementedError('Unknown debug format %s' % dbg_format)
    if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
      cflags.append('-fstrict-aliasing')
    elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
      cflags.append('-fno-strict-aliasing')
    if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
      cflags.append('-fvisibility=hidden')
    if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
      cflags.append('-Werror')
    if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
      cflags.append('-Wnewline-eof')
    # In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
    # llvm-gcc. It also requires a fairly recent libtool, and
    # if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
    # path to the libLTO.dylib that matches the used clang.
    if self._Test('LLVM_LTO', 'YES', default='NO'):
      cflags.append('-flto')
    self._AppendPlatformVersionMinFlags(cflags)
    # TODO:
    if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
      self._WarnUnimplemented('COPY_PHASE_STRIP')
    self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
    self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
    # TODO: This is exported correctly, but assigning to it is not supported.
    self._WarnUnimplemented('MACH_O_TYPE')
    self._WarnUnimplemented('PRODUCT_TYPE')
    if arch is not None:
      archs = [arch]
    else:
      assert self.configname
      archs = self.GetActiveArchs(self.configname)
    if len(archs) != 1:
      # TODO: Supporting fat binaries will be annoying.
      self._WarnUnimplemented('ARCHS')
      archs = ['i386']
    cflags.append('-arch ' + archs[0])
    # SSE flags only make sense for Intel architectures.
    if archs[0] in ('i386', 'x86_64'):
      if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse3')
      if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
                    default='NO'):
        cflags.append('-mssse3')  # Note 3rd 's'.
      if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.1')
      if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.2')
    cflags += self._Settings().get('WARNING_CFLAGS', [])
    platform_root = self._XcodePlatformPath(configname)
    if platform_root and self._IsXCTest():
      # XCTest needs the platform's framework search path.
      cflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
    if sdk_root:
      framework_root = sdk_root
    else:
      framework_root = ''
    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
      cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
    self.configname = None
    return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
# Note: Don't make c++0x to c++11 so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
  def GetInstallName(self):
    """Return LD_DYLIB_INSTALL_NAME for this target.

    Returns None for target types that get no install name (everything other
    than shared libraries and non-bundled loadable modules).
    """
    # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
    if (self.spec['type'] != 'shared_library' and
        (self.spec['type'] != 'loadable_module' or self._IsBundle())):
      return None
    default_install_name = \
        '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
    install_name = self.GetPerTargetSetting(
        'LD_DYLIB_INSTALL_NAME', default=default_install_name)
    # Hardcode support for the variables used in chromium for now, to
    # unblock people using the make build.
    # Only the two whitelisted variable combinations below are expanded;
    # anything else with a '$' trips the assert.
    if '$' in install_name:
      assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
          '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
          'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
          'yet in target \'%s\' (got \'%s\')' %
              (self.spec['target_name'], install_name))
      install_name = install_name.replace(
          '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
          self._StandardizePath(self.GetInstallNameBase()))
      if self._IsBundle():
        # These are only valid for bundles, hence the |if|.
        install_name = install_name.replace(
            '$(WRAPPER_NAME)', self.GetWrapperName())
        install_name = install_name.replace(
            '$(PRODUCT_NAME)', self.GetProductName())
      else:
        assert '$(WRAPPER_NAME)' not in install_name
        assert '$(PRODUCT_NAME)' not in install_name
      install_name = install_name.replace(
          '$(EXECUTABLE_PATH)', self.GetExecutablePath())
    return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build direcotry.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings() and self._SdkPath():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
sdk_root = self._SdkPath()
if not sdk_root:
sdk_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
platform_root = self._XcodePlatformPath(configname)
if sdk_root and platform_root and self._IsXCTest():
ldflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
ldflags.append('-framework XCTest')
is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
if sdk_root and is_extension:
# Adds the link flags for extensions. These flags are common for all
# extensions and provide loader and main function.
# These flags reflect the compilation options used by xcode to compile
# extensions.
if XcodeVersion() < '0900':
ldflags.append('-lpkstart')
ldflags.append(sdk_root +
'/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
else:
ldflags.append('-e _NSExtensionMain')
ldflags.append('-fapplication-extension')
self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
  def _GetStripPostbuilds(self, configname, output_binary, quiet):
    """Returns a list of shell commands necessary to strip this target's
    binary. These should be run as postbuilds before the actual postbuilds
    run."""
    self.configname = configname
    result = []
    if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
        self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
      # Pick a default strip style based on the target type; an explicit
      # STRIP_STYLE setting overrides it below.
      default_strip_style = 'debugging'
      if ((self.spec['type'] == 'loadable_module' or self._IsIosAppExtension())
          and self._IsBundle()):
        default_strip_style = 'non-global'
      elif self.spec['type'] == 'executable':
        default_strip_style = 'all'
      strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
      # Map the symbolic style to the corresponding strip(1) flag.
      strip_flags = {
        'all': '',
        'non-global': '-x',
        'debugging': '-S',
      }[strip_style]
      explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
      if explicit_strip_flags:
        strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
      if not quiet:
        result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
      result.append('strip %s %s' % (strip_flags, output_binary))
    self.configname = None
    return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and
(self.spec['type'] == 'executable' or self._IsXCTest())):
return []
settings = self.xcode_settings[configname]
key = self._GetIOSCodeSignIdentityKey(settings)
if not key:
return []
# Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS']
unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
', '.join(sorted(unimpl)))
return ['%s code-sign-bundle "%s" "%s" "%s"' % (
os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
settings.get('CODE_SIGN_ENTITLEMENTS', ''),
settings.get('PROVISIONING_PROFILE', ''))
]
  def _GetIOSCodeSignIdentityKey(self, settings):
    """Resolves CODE_SIGN_IDENTITY to a signing-certificate fingerprint.

    Runs `security find-identity` once per identity and caches the result.
    Returns None when no identity is configured, and '' when the configured
    identity is not found in the keychain output.
    """
    identity = settings.get('CODE_SIGN_IDENTITY')
    if not identity:
      return None
    if identity not in XcodeSettings._codesigning_key_cache:
      output = subprocess.check_output(
          ['security', 'find-identity', '-p', 'codesigning', '-v'])
      for line in output.splitlines():
        if identity in line:
          # Assumes the fingerprint is the second whitespace-separated token
          # on a matching line — TODO(review): confirm against the exact
          # `security find-identity -v` output format.
          fingerprint = line.split()[1]
          cache = XcodeSettings._codesigning_key_cache
          assert identity not in cache or fingerprint == cache[identity], (
              "Multiple codesigning fingerprints for identity: %s" % identity)
          XcodeSettings._codesigning_key_cache[identity] = fingerprint
    return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
  def _AdjustLibrary(self, library, config_name=None):
    """Maps one 'libraries' entry to a linker argument.

    '*.framework' entries become '-framework Name', entries matching
    self.library_re become '-l<name>', '$(SDKROOT)' is expanded to the SDK
    path, and '$(SDKROOT)/**/*.dylib' references fall back to '.tbd' stubs
    when only those exist on disk. Anything else passes through unchanged.
    """
    if library.endswith('.framework'):
      l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
    else:
      m = self.library_re.match(library)
      if m:
        l = '-l' + m.group(1)
      else:
        l = library
    sdk_root = self._SdkPath(config_name)
    if not sdk_root:
      sdk_root = ''
    # Xcode 7 started shipping with ".tbd" (text based stubs) files instead of
    # ".dylib" without providing a real support for them. What it does, for
    # "/usr/lib" libraries, is do "-L/usr/lib -lname" which is dependent on the
    # library order and causes collisions when building Chrome.
    #
    # Instead substitute ".tbd" for ".dylib" in the generated project when the
    # following conditions are both true:
    # - library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
    # - the ".dylib" file does not exist but a ".tbd" file does.
    library = l.replace('$(SDKROOT)', sdk_root)
    # Note: deliberately tests the *unexpanded* value |l|, so only entries the
    # gyp file spelled with $(SDKROOT) get the .tbd fallback.
    if l.startswith('$(SDKROOT)'):
      basename, ext = os.path.splitext(library)
      if ext == '.dylib' and not os.path.exists(library):
        tbd_library = basename + '.tbd'
        if os.path.exists(tbd_library):
          library = tbd_library
    return library
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
  def _BuildMachineOSBuild(self):
    """Returns the build machine's OS build number via `sw_vers`."""
    return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
  def GetExtraPlistItems(self, configname=None):
    """Returns a dictionary with extra items to insert into Info.plist.

    The expensive tool invocations are computed once per configname and
    cached in the class-level XcodeSettings._plist_cache.
    """
    if configname not in XcodeSettings._plist_cache:
      cache = {}
      cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
      xcode, xcode_build = XcodeVersion()
      cache['DTXcode'] = xcode
      cache['DTXcodeBuild'] = xcode_build
      compiler = self.xcode_settings[configname].get('GCC_VERSION')
      if compiler is not None:
        cache['DTCompiler'] = compiler
      sdk_root = self._SdkRoot(configname)
      if not sdk_root:
        sdk_root = self._DefaultSdkRoot()
      sdk_version = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-version')
      cache['DTSDKName'] = sdk_root + (sdk_version or '')
      # Version strings are zero-padded ('0720'), so plain string comparison
      # orders them correctly.
      if xcode >= '0720':
        cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
            sdk_root, '--show-sdk-build-version')
      elif xcode >= '0430':
        cache['DTSDKBuild'] = sdk_version
      else:
        cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
      if self.isIOS:
        cache['MinimumOSVersion'] = self.xcode_settings[configname].get(
            'IPHONEOS_DEPLOYMENT_TARGET')
        cache['DTPlatformName'] = sdk_root
        cache['DTPlatformVersion'] = sdk_version
        if configname.endswith("iphoneos"):
          cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
          cache['DTPlatformBuild'] = cache['DTSDKBuild']
        else:
          cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
          # This is weird, but Xcode sets DTPlatformBuild to an empty field
          # for simulator builds.
          cache['DTPlatformBuild'] = ""
      XcodeSettings._plist_cache[configname] = cache
    # Include extra plist items that are per-target, not per global
    # XcodeSettings.
    items = dict(XcodeSettings._plist_cache[configname])
    if self.isIOS:
      items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
    return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
project, then the environment variable was empty. Starting with this
version, Xcode uses the name of the newest SDK installed.
"""
xcode_version, xcode_build = XcodeVersion()
if xcode_version < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
class MacPrefixHeader(object):
  """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.

  This feature consists of several pieces:
  * If GCC_PREFIX_HEADER is present, all compilations in that project get an
    additional |-include path_to_prefix_header| cflag.
  * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
    instead compiled, and all other compilations in the project get an
    additional |-include path_to_compiled_header| instead.
    + Compiled prefix headers have the extension gch. There is one gch file for
      every language used in the project (c, cc, m, mm), since gch files for
      different languages aren't compatible.
    + gch files themselves are built with the target's normal cflags, but they
      obviously don't get the |-include| flag. Instead, they need a -x flag
      that describes their language.
    + All o files in the target need to depend on the gch file, to make sure
      it's built before any o file is built.
  This class helps with some of these tasks, but it needs help from the build
  system for writing dependencies to the gch files, for writing build commands
  for the gch files, and for figuring out the location of the gch files.
  """
  def __init__(self, xcode_settings,
               gyp_path_to_build_path, gyp_path_to_build_output):
    """If xcode_settings is None, all methods on this class are no-ops.

    Args:
        gyp_path_to_build_path: A function that takes a gyp-relative path,
            and returns a path relative to the build directory.
        gyp_path_to_build_output: A function that takes a gyp-relative path
            and a language code ('c', 'cc', 'm', or 'mm'), and that returns
            a path to where the output of precompiling that path for that
            language should be placed (without the trailing '.gch').
    """
    # This doesn't support per-configuration prefix headers. Good enough
    # for now.
    self.header = None
    self.compile_headers = False
    if xcode_settings:
      self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
      precompile = xcode_settings.GetPerTargetSetting(
          'GCC_PRECOMPILE_PREFIX_HEADER', default='NO')
      self.compile_headers = precompile != 'NO'
    self.compiled_headers = {}
    if self.header:
      if self.compile_headers:
        # One precompiled-header output path per language.
        for language in ('c', 'cc', 'm', 'mm'):
          self.compiled_headers[language] = gyp_path_to_build_output(
              self.header, language)
      self.header = gyp_path_to_build_path(self.header)

  def _CompiledHeader(self, lang, arch):
    """Returns the per-language (and optionally per-arch) gch base path."""
    assert self.compile_headers
    compiled = self.compiled_headers[lang]
    if arch:
      compiled = '%s.%s' % (compiled, arch)
    return compiled

  def GetInclude(self, lang, arch=None):
    """Gets the cflags to include the prefix header for language |lang|."""
    if self.compile_headers and lang in self.compiled_headers:
      return '-include %s' % self._CompiledHeader(lang, arch)
    if self.header:
      return '-include %s' % self.header
    return ''

  def _Gch(self, lang, arch):
    """Returns the actual file name of the prefix header for language |lang|."""
    assert self.compile_headers
    return '%s.gch' % self._CompiledHeader(lang, arch)

  def GetObjDependencies(self, sources, objs, arch=None):
    """Given a list of source files and the corresponding object files, returns
    a list of (source, object, gch) tuples, where |gch| is the build-directory
    relative path to the gch file each object file depends on. |compilable[i]|
    has to be the source file belonging to |objs[i]|."""
    if not self.header or not self.compile_headers:
      return []
    ext_to_lang = {
        '.c': 'c',
        '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
        '.m': 'm',
        '.mm': 'mm',
    }
    dependencies = []
    for source, obj in zip(sources, objs):
      lang = ext_to_lang.get(os.path.splitext(source)[1])
      if lang:
        dependencies.append((source, obj, self._Gch(lang, arch)))
    return dependencies

  def GetPchBuildCommands(self, arch=None):
    """Returns [(path_to_gch, language_flag, language, header)].
    |path_to_gch| and |header| are relative to the build directory.
    """
    if not self.header or not self.compile_headers:
      return []
    lang_flags = (('c', 'c-header'), ('cc', 'c++-header'),
                  ('m', 'objective-c-header'), ('mm', 'objective-c++-header'))
    return [(self._Gch(lang, arch), '-x %s' % flag, lang, self.header)
            for lang, flag in lang_flags]
def XcodeVersion():
  """Returns a tuple of version and build version of installed Xcode."""
  # `xcodebuild -version` output looks like
  #    Xcode 4.6.3
  #    Build version 4H1503
  # or like
  #    Xcode 3.2.6
  #    Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
  #    BuildVersion: 10M2518
  # Convert that to '0463', '4H1503'.
  global XCODE_VERSION_CACHE
  if XCODE_VERSION_CACHE:
    return XCODE_VERSION_CACHE
  try:
    version_list = GetStdout(['xcodebuild', '-version']).splitlines()
    # In some circumstances xcodebuild exits 0 but doesn't return
    # the right results; for example, a user on 10.7 or 10.8 with
    # a bogus path set via xcode-select
    # In that case this may be a CLT-only install so fall back to
    # checking that version.
    if len(version_list) < 2:
      raise GypError("xcodebuild returned unexpected results")
  except Exception:
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit still
    # propagate. Any xcodebuild failure falls back to the CLT version.
    version = CLTVersion()
    if version:
      version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
    else:
      raise GypError("No Xcode or CLT version detected!")
    # The CLT has no build information, so we return an empty string.
    version_list = [version, '']
  version = version_list[0]
  build = version_list[-1]
  # Be careful to convert "4.2" to "0420":
  version = version.split()[-1].replace('.', '')
  version = (version + '0' * (3 - len(version))).zfill(4)
  if build:
    build = build.split()[-1]
  XCODE_VERSION_CACHE = (version, build)
  return XCODE_VERSION_CACHE
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
  """Returns the version of command-line tools from pkgutil.

  Returns None if no known CLT package can be queried.
  """
  # pkgutil output looks like
  #   package-id: com.apple.pkg.CLTools_Executables
  #   version: 5.0.1.0.1.1382131676
  #   volume: /
  #   location: /
  #   install-time: 1382544035
  #   groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
  STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
  FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
  MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
  regex = re.compile('version: (?P<version>.+)')
  for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
    try:
      output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
      return re.search(regex, output).groupdict()['version']
    except Exception:
      # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit still
      # propagate; a lookup/parse failure just tries the next package id.
      continue
  # Explicitly return None (was implicit) when no package matched.
  return None
def GetStdout(cmdlist):
  """Returns the content of standard output returned by invoking |cmdlist|.

  Raises |GypError| if the command return with a non-zero return code.
  """
  # universal_newlines=True makes communicate() return text on both
  # Python 2 and 3; without it, Python 3 returns bytes and the string
  # operations below (rstrip('\n'), out + '\n') raise TypeError.
  job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE,
                         universal_newlines=True)
  out = job.communicate()[0]
  if job.returncode != 0:
    sys.stderr.write(out + '\n')
    raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
  return out.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
  """Merges the global xcode_settings dictionary into each configuration of the
  target represented by spec. For keys that are both in the global and the
  local xcode_settings dict, the local key gets precedence.
  """
  # The xcode generator special-cases global xcode_settings and does something
  # that amounts to merging in the global xcode_settings into each local
  # xcode_settings dict.
  global_settings = global_dict.get('xcode_settings', {})
  for config in spec['configurations'].values():
    if 'xcode_settings' not in config:
      continue
    merged = dict(global_settings)
    merged.update(config['xcode_settings'])
    config['xcode_settings'] = merged
def IsMacBundle(flavor, spec):
  """Returns whether |spec| should be treated as a bundle.

  Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules do not produce a binary but also package
  resources into that directory."""
  wants_xctest = int(spec.get('mac_xctest_bundle', 0)) != 0
  wants_mac_bundle = flavor == 'mac' and int(spec.get('mac_bundle', 0)) != 0
  bundle = wants_xctest or wants_mac_bundle
  if bundle:
    # A bundle must produce something to wrap, so 'none' targets are invalid.
    assert spec['type'] != 'none', (
        'mac_bundle targets cannot have type none (target "%s")' %
        spec['target_name'])
  return bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
  """Yields (output, resource) pairs for every resource in |resources|.
  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      resources: A list of bundle resources, relative to the build directory.
  """
  dest = os.path.join(product_dir,
                      xcode_settings.GetBundleResourceFolder())
  for res in resources:
    # The make generator can't cope with spaces in filenames, so forbid them
    # everywhere to keep the generators interchangeable.
    assert ' ' not in res, (
        "Spaces in resource filenames not supported (%s)" % res)

    res_dir, res_file = os.path.split(res)
    # Resources living inside a .lproj bundle keep that directory level in
    # the destination path.
    maybe_lproj = os.path.split(res_dir)[1]
    output = dest
    if maybe_lproj.endswith('.lproj'):
      output = os.path.join(output, maybe_lproj)
    output = os.path.join(output, res_file)

    # Compiled XIB files are referred to by .nib.
    if output.endswith('.xib'):
      output = os.path.splitext(output)[0] + '.nib'
    # Compiled storyboard files are referred to by .storyboardc.
    if output.endswith('.storyboard'):
      output = os.path.splitext(output)[0] + '.storyboardc'

    yield output, res
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
  """Returns (info_plist, dest_plist, defines, extra_env), where:
  * |info_plist| is the source plist path, relative to the
    build directory,
  * |dest_plist| is the destination plist path, relative to the
    build directory,
  * |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed,
  * |extra_env| is a dict of env variables that should be exported when
    invoking |mac_tool copy-info-plist|.

  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      gyp_path_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
  """
  info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
  if not info_plist:
    return None, None, [], {}

  # The make generator can't cope with spaces in filenames, so forbid them
  # everywhere to keep the generators interchangeable.
  assert ' ' not in info_plist, (
      "Spaces in Info.plist filenames not supported (%s)" % info_plist)

  info_plist = gyp_path_to_build_path(info_plist)

  # Preprocessor defines only apply when the plist is explicitly marked for
  # preprocessing; they are passed to the C preprocessor as -D flags.
  defines = []
  if xcode_settings.GetPerTargetSetting(
      'INFOPLIST_PREPROCESS', default='NO') == 'YES':
    defines = shlex.split(xcode_settings.GetPerTargetSetting(
        'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))

  dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
  extra_env = xcode_settings.GetPerTargetSettings()
  return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                 additional_settings=None):
  """Return the environment variables that Xcode would set. See
  http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
  for a full list.

  Args:
    xcode_settings: An XcodeSettings object. If this is None, this function
        returns an empty dict.
    built_products_dir: Absolute path to the built products dir.
    srcroot: Absolute path to the source root.
    configuration: The build configuration name.
    additional_settings: An optional dict with more values to add to the
        result. NOTE: mutated in place and returned; list values are
        flattened to space-joined strings.

  Returns:
    A dict of environment variable name -> value, with every variable
    reference normalized to the ${FOO} form.
  """
  if not xcode_settings: return {}

  # This function is considered a friend of XcodeSettings, so let it reach into
  # its implementation details.
  spec = xcode_settings.spec

  # These are filled in on a as-needed basis.
  env = {
    'BUILT_FRAMEWORKS_DIR' : built_products_dir,
    'BUILT_PRODUCTS_DIR' : built_products_dir,
    'CONFIGURATION' : configuration,
    'PRODUCT_NAME' : xcode_settings.GetProductName(),
    # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
    'SRCROOT' : srcroot,
    'SOURCE_ROOT': '${SRCROOT}',
    # This is not true for static libraries, but currently the env is only
    # written for bundles:
    'TARGET_BUILD_DIR' : built_products_dir,
    'TEMP_DIR' : '${TMPDIR}',
    'XCODE_VERSION_ACTUAL' : XcodeVersion()[0],
  }
  # SDKROOT: only resolve to a real SDK path when the config asks for one.
  if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
    env['SDKROOT'] = xcode_settings._SdkPath(configuration)
  else:
    env['SDKROOT'] = ''

  # Executable-producing target types get the EXECUTABLE_* / product vars.
  if spec['type'] in (
      'executable', 'static_library', 'shared_library', 'loadable_module'):
    env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
    env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
    env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
    mach_o_type = xcode_settings.GetMachOType()
    if mach_o_type:
      env['MACH_O_TYPE'] = mach_o_type
    env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
  # Bundle targets additionally get the bundle layout variables.
  if xcode_settings._IsBundle():
    env['CONTENTS_FOLDER_PATH'] = \
      xcode_settings.GetBundleContentsFolderPath()
    env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
        xcode_settings.GetBundleResourceFolder()
    env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
    env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()

  install_name = xcode_settings.GetInstallName()
  if install_name:
    env['LD_DYLIB_INSTALL_NAME'] = install_name
  install_name_base = xcode_settings.GetInstallNameBase()
  if install_name_base:
    env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
  # Xcode 5.0+ requires a non-empty SDKROOT; fall back to the default SDK.
  # (Version strings compare lexically here, e.g. '0500' — see XcodeVersion.)
  if XcodeVersion() >= '0500' and not env.get('SDKROOT'):
    sdk_root = xcode_settings._SdkRoot(configuration)
    if not sdk_root:
      sdk_root = xcode_settings._XcodeSdkPath('')
    env['SDKROOT'] = sdk_root

  if not additional_settings:
    additional_settings = {}
  else:
    # Flatten lists to strings.
    for k in additional_settings:
      if not isinstance(additional_settings[k], str):
        additional_settings[k] = ' '.join(additional_settings[k])
  additional_settings.update(env)

  for k in additional_settings:
    additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])

  return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
  """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
  expansions list. If the variable expands to something that references
  another variable, this variable is expanded as well if it's in env --
  until no variables present in env are left."""
  # Walk the (topologically sorted) expansions back-to-front so that a value
  # introducing another variable reference gets that reference expanded too.
  for name, value in reversed(expansions):
    for pattern in ('${' + name + '}', '$(' + name + ')', '$' + name):
      string = string.replace(pattern, value)
  return string
def _TopologicallySortedEnvVarKeys(env):
  """Takes a dict |env| whose values are strings that can refer to other keys,
  for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
  env such that key2 is after key1 in L if env[key2] refers to env[key1].

  Raises GypError in case of dependency cycles.
  """
  # Since environment variables can refer to other variables, the evaluation
  # order is important. Below is the logic to compute the dependency graph
  # and sort it.
  regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')

  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    # We can then reverse the result of the topological sort at the end.
    # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
    matches = set([v for v in regex.findall(env[node]) if v in env])
    for dependee in matches:
      assert '${' not in dependee, 'Nested variables not supported: ' + dependee
    return matches

  try:
    # Topologically sort, and then reverse, because we used an edge definition
    # that's inverted from the expected result of this function (see comment
    # above).
    order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
    order.reverse()
    return order
  except gyp.common.CycleError as e:
    # 'except ... as e' is valid on Python 2.6+ and 3.x; the previous
    # 'except CycleError, e' spelling is a syntax error on Python 3.
    raise GypError(
        'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
                      configuration, additional_settings=None):
  """Returns the Xcode environment as (key, value) pairs, ordered so that
  every variable appears after any variable it references."""
  env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                     additional_settings)
  sorted_keys = _TopologicallySortedEnvVarKeys(env)
  return [(key, env[key]) for key in sorted_keys]
def GetSpecPostbuildCommands(spec, quiet=False):
  """Returns the list of postbuilds explicitly defined on |spec|, in a form
  executable by a shell.

  When |quiet| is false, each command is preceded by an echo announcing the
  postbuild's name."""
  commands = []
  for postbuild in spec.get('postbuilds', []):
    if not quiet:
      announcement = 'echo POSTBUILD\\(%s\\) %s' % (
          spec['target_name'], postbuild['postbuild_name'])
      commands.append(announcement)
    commands.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
  return commands
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
configs[config_name + '-iphonesimulator'] = config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
  """If |target_dicts| contains any iOS targets, automatically create -iphoneos
  targets for iOS device builds."""
  if not _HasIOSTarget(target_dicts):
    # Nothing iOS-specific; leave the targets untouched.
    return target_dicts
  return _AddIOSDeviceConfigurations(target_dicts)
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: docker_container
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
- Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken.
version_added: "2.1.0"
options:
blkio_weight:
description:
- Block IO (relative weight), between 10 and 1000.
default: null
required: false
capabilities:
description:
- List of capabilities to add to the container.
default: null
required: false
cleanup:
description:
- Use with I(detach) to remove the container after successful execution.
default: false
required: false
version_added: "2.2"
command:
description:
- Command to execute when the container starts.
default: null
required: false
cpu_period:
description:
- Limit CPU CFS (Completely Fair Scheduler) period
default: 0
required: false
cpu_quota:
description:
- Limit CPU CFS (Completely Fair Scheduler) quota
default: 0
required: false
cpuset_cpus:
description:
- CPUs in which to allow execution C(1,3) or C(1-3).
default: null
required: false
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1)
default: null
required: false
cpu_shares:
description:
- CPU shares (relative weight).
default: null
required: false
detach:
description:
- Enable detached mode to leave the container running in background.
If disabled, the task will reflect the status of the container run (failed if the command failed).
default: true
required: false
devices:
description:
- "List of host device bindings to add to the container. Each binding is a mapping expressed
in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>"
default: null
required: false
dns_servers:
description:
- List of custom DNS servers.
default: null
required: false
dns_search_domains:
description:
- List of custom DNS search domains.
default: null
required: false
env:
description:
- Dictionary of key,value pairs.
default: null
required: false
env_file:
version_added: "2.2"
description:
- Path to a file containing environment variables I(FOO=BAR).
- If variable also present in C(env), then C(env) value will override.
- Requires docker-py >= 1.4.0.
default: null
required: false
entrypoint:
description:
- Command that overwrites the default ENTRYPOINT of the image.
default: null
required: false
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the dictionary.
Each host name will be added to the container's /etc/hosts file.
default: null
required: false
exposed_ports:
description:
- List of additional container ports to expose for port mappings or links.
If the port is already exposed using EXPOSE in a Dockerfile, it does not
need to be exposed again.
default: null
required: false
aliases:
- exposed
force_kill:
description:
- Use the kill command when stopping a running container.
default: false
required: false
groups:
description:
- List of additional group names and/or IDs that the container process will run as.
default: null
required: false
hostname:
description:
- Container hostname.
default: null
required: false
ignore_image:
description:
- When C(state) is I(present) or I(started) the module compares the configuration of an existing
container to requested configuration. The evaluation includes the image version. If
the image version in the registry does not match the container, the container will be
recreated. Stop this behavior by setting C(ignore_image) to I(True).
default: false
required: false
version_added: "2.2"
image:
description:
- Repository path and tag used to create the container. If an image is not found or pull is true, the image
will be pulled from the registry. If no tag is included, 'latest' will be used.
default: null
required: false
interactive:
description:
- Keep stdin open after a container is launched, even if not attached.
default: false
required: false
ipc_mode:
description:
- Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another
container's IPC namespace or 'host' to use the host's IPC namespace within the container.
default: null
required: false
keep_volumes:
description:
- Retain volumes associated with a removed container.
default: true
required: false
kill_signal:
description:
- Override default signal used to kill a running container.
default: null
required: false
kernel_memory:
description:
- "Kernel memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g. Minimum is 4M."
default: 0
required: false
labels:
description:
- Dictionary of key value pairs.
default: null
required: false
links:
description:
- List of name aliases for linked containers in the format C(container_name:alias)
default: null
required: false
log_driver:
description:
- Specify the logging driver. Docker uses json-file by default.
choices:
- json-file
- syslog
- journald
- gelf
- fluentd
- awslogs
- splunk
default: null
required: false
log_options:
description:
- Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/
for details.
required: false
default: null
mac_address:
description:
- Container MAC address (e.g. 92:d0:c6:0a:29:33)
default: null
required: false
memory:
description:
- "Memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_reservation:
description:
- "Memory soft limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_swap:
description:
- Total memory limit (memory + swap, format:<number>[<unit>]).
Number is a positive integer. Unit can be one of b, k, m, or g.
default: 0
required: false
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
default: 0
required: false
name:
description:
- Assign a name to a new container or match an existing container.
- When identifying an existing container name may be a name or a long or short container ID.
required: true
network_mode:
description:
- Connect the container to a network.
choices:
- bridge
- container:<name|id>
- host
- none
default: null
required: false
networks:
description:
- List of networks the container belongs to.
- Each network is a dict with keys C(name), C(ipv4_address), C(ipv6_address), C(links), C(aliases).
- For each network C(name) is required, all other keys are optional.
- If included, C(links) or C(aliases) are lists.
- For examples of the data structure and usage see EXAMPLES below.
- To remove a container from one or more networks, use the C(purge_networks) option.
default: null
required: false
version_added: "2.2"
oom_killer:
description:
- Whether or not to disable OOM Killer for the container.
default: false
required: false
paused:
description:
- Use with the started state to pause running processes inside the container.
default: false
required: false
pid_mode:
description:
- Set the PID namespace mode for the container. Currently only supports 'host'.
default: null
required: false
privileged:
description:
- Give extended privileges to the container.
default: false
required: false
published_ports:
description:
- List of ports to publish from the container to the host.
- "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
container port, 9000 is a host port, and 0.0.0.0 is a host interface."
- Container ports must be exposed either in the Dockerfile or via the C(expose) option.
- A value of ALL will publish all exposed container ports to random host ports, ignoring
any other mappings.
- If C(networks) parameter is provided, will inspect each network to see if there exists
a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4.
If such a network is found, then published ports where no host IP address is specified
will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4.
Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4
value encountered in the list of C(networks) is the one that will be used.
aliases:
- ports
required: false
default: null
pull:
description:
- If true, always pull the latest version of an image. Otherwise, will only pull an image when missing.
default: false
required: false
purge_networks:
description:
- Remove the container from ALL networks not included in C(networks) parameter.
- Any default networks such as I(bridge), if not found in C(networks), will be removed as well.
default: false
required: false
version_added: "2.2"
read_only:
description:
- Mount the container's root file system as read-only.
default: false
required: false
recreate:
description:
- Use with present and started states to force the re-creation of an existing container.
default: false
required: false
restart:
description:
- Use with started state to force a matching container to be stopped and restarted.
default: false
required: false
restart_policy:
description:
- Container restart policy. Place quotes around I(no) option.
choices:
- always
- no
- on-failure
- unless-stopped
default: on-failure
required: false
restart_retries:
description:
- Use with restart policy to control maximum number of restart attempts.
default: 0
required: false
shm_size:
description:
- Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
- Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`.
default: null
required: false
security_opts:
description:
- List of security options in the form of C("label:user:User")
default: null
required: false
state:
description:
- 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container
rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.'
- 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
container matches the name, a container will be created. If a container matches the name but the provided configuration
does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
with the requested config. Image version will be taken into account when comparing configuration. To ignore image
version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use
force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed
container.'
- 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container
matches the name, a container will be created and started. If a container matching the name is found but the
configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed
and a new container will be created with the requested configuration and started. Image version will be taken into
account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always
re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and
restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated
with a removed container.'
- 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped
state. Use force_kill to kill a container rather than stopping it.'
required: false
default: started
choices:
- absent
- present
- stopped
- started
stop_signal:
description:
- Override default signal used to stop the container.
default: null
required: false
stop_timeout:
description:
- Number of seconds to wait for the container to stop before sending SIGKILL.
required: false
default: null
trust_image_content:
description:
- If true, skip image verification.
default: false
required: false
tty:
description:
- Allocate a pseudo-TTY.
default: false
required: false
ulimits:
description:
- "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)"
default: null
required: false
user:
description:
- Sets the username or UID used and optionally the groupname or GID for the specified command.
- "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
default: null
required: false
uts:
description:
- Set the UTS namespace mode for the container.
default: null
required: false
volumes:
description:
- List of volumes to mount within the container.
- "Use docker CLI-style syntax: C(/host:/container[:mode])"
- You can specify a read mode for the mount with either C(ro) or C(rw).
- SELinux hosts can additionally use C(z) or C(Z) to use a shared or
private label for the volume.
default: null
required: false
volume_driver:
description:
- The container volume driver.
default: none
required: false
volumes_from:
description:
- List of container names or Ids to get volumes from.
default: null
required: false
extends_documentation_fragment:
- docker
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
- "Daan Oosterveld (@dusdanig)"
- "James Tanner (@jctanner)"
- "Chris Houseknecht (@chouseknecht)"
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
'''
EXAMPLES = '''
- name: Create a data container
docker_container:
name: mydata
image: busybox
volumes:
- /data
- name: Re-create a redis container
docker_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
exposed_ports:
- 6379
volumes_from:
- mydata
- name: Restart a container
docker_container:
name: myapplication
image: someuser/appimage
state: started
restart: yes
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: ssssh
- name: Container present
docker_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: sleep infinity
- name: Stop a container
docker_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
docker_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
docker_container:
name: ohno
state: absent
- name: Syslogging output
docker_container:
name: myservice
image: busybox
log_driver: syslog
log_options:
syslog-address: tcp://my-syslog-server:514
syslog-facility: daemon
syslog-tag: myservice
- name: Create db container and connect to network
docker_container:
name: db_test
image: "postgres:latest"
networks:
- name: "{{ docker_network_name }}"
- name: Start container, connect to network and link
docker_container:
name: sleeper
image: ubuntu:14.04
networks:
- name: TestingNet
ipv4_address: "172.1.1.100"
aliases:
- sleepyzz
links:
- db_test:db
- name: TestingNet2
- name: Start a container with a command
docker_container:
name: sleepy
image: ubuntu:14.04
command: sleep infinity
- name: Add container to networks
  docker_container:
name: sleepy
networks:
- name: TestingNet
ipv4_address: 172.1.1.18
links:
- sleeper
- name: TestingNet2
ipv4_address: 172.1.10.20
- name: Update network with aliases
docker_container:
name: sleepy
networks:
- name: TestingNet
aliases:
- sleepyz
- zzzz
- name: Remove container from one network
docker_container:
name: sleepy
networks:
- name: TestingNet2
purge_networks: yes
- name: Remove container from all networks
docker_container:
name: sleepy
purge_networks: yes
'''
RETURN = '''
ansible_docker_container:
description:
- Facts representing the current state of the container. Matches the docker inspection output.
- Note that facts are not part of registered vars but accessible directly.
- Empty if C(state) is I(absent)
- If detached is I(False), will include Output attribute containing any output from container run.
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [],
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/usr/bin/supervisord"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"443/tcp": {},
"80/tcp": {}
},
"Hostname": "8e47bf643eb9",
"Image": "lnmp_nginx:v1",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/tmp/lnmp/nginx-sites/logs/": {}
},
...
}'
'''
import re
from ansible.module_utils.docker_common import *
try:
from docker import utils
from docker.utils.types import Ulimit
except:
# missing docker-py handled in ansible.module_utils.docker
pass
# Module parameters whose values are human-readable sizes (e.g. '1G') and are
# converted to integer byte counts in TaskParameters.__init__ via
# human_to_bytes() before being handed to docker-py.
REQUIRES_CONVERSION_TO_BYTES = [
    'memory',
    'memory_reservation',
    'memory_swap',
    'shm_size'
]

# Recognized access-mode suffixes for a host:container:mode volume spec.
VOLUME_PERMISSIONS = ('rw', 'ro', 'z', 'Z')
class TaskParameters(DockerBaseClass):
'''
Access and parse module parameters
'''
    def __init__(self, client):
        """Copy, convert and validate the module parameters held by |client|.

        Every module parameter is mirrored onto an attribute of the same
        name; size strings are converted to byte counts; port, volume, link,
        ulimit, log and network specs are parsed into the structures
        docker-py expects. Calls self.fail() (which exits the module) on
        invalid input.
        """
        super(TaskParameters, self).__init__()
        self.client = client

        # Declare every supported parameter up front; the real values are
        # copied in from client.module.params below.
        self.blkio_weight = None
        self.capabilities = None
        self.cleanup = None
        self.command = None
        self.cpu_period = None
        self.cpu_quota = None
        self.cpuset_cpus = None
        self.cpuset_mems = None
        self.cpu_shares = None
        self.detach = None
        self.debug = None
        self.devices = None
        self.dns_servers = None
        self.dns_opts = None
        self.dns_search_domains = None
        self.env = None
        self.env_file = None
        self.entrypoint = None
        self.etc_hosts = None
        self.exposed_ports = None
        self.force_kill = None
        self.groups = None
        self.hostname = None
        self.ignore_image = None
        self.image = None
        self.interactive = None
        self.ipc_mode = None
        self.keep_volumes = None
        self.kernel_memory = None
        self.kill_signal = None
        self.labels = None
        self.links = None
        self.log_driver = None
        self.log_options = None
        self.mac_address = None
        self.memory = None
        self.memory_reservation = None
        self.memory_swap = None
        self.memory_swappiness = None
        self.name = None
        self.network_mode = None
        self.networks = None
        self.oom_killer = None
        self.paused = None
        self.pid_mode = None
        self.privileged = None
        self.purge_networks = None
        self.pull = None
        self.read_only = None
        self.recreate = None
        self.restart = None
        self.restart_retries = None
        self.restart_policy = None
        self.shm_size = None
        self.security_opts = None
        self.state = None
        self.stop_signal = None
        self.stop_timeout = None
        self.trust_image_content = None
        self.tty = None
        self.user = None
        self.uts = None
        self.volumes = None
        self.volume_binds = dict()
        self.volumes_from = None
        self.volume_driver = None

        # Mirror every supplied module parameter onto self.
        for key, value in client.module.params.items():
            setattr(self, key, value)

        # Convert human-readable sizes ('1G', '512m', ...) to byte counts.
        for param_name in REQUIRES_CONVERSION_TO_BYTES:
            if client.module.params.get(param_name):
                try:
                    setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
                except ValueError as exc:
                    self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))

        self.publish_all_ports = False
        self.published_ports = self._parse_publish_ports()
        # The sentinel 'all' means: publish every exposed port to a random
        # host port rather than using an explicit mapping.
        if self.published_ports == 'all':
            self.publish_all_ports = True
            self.published_ports = None

        self.ports = self._parse_exposed_ports(self.published_ports)
        self.log("expose ports:")
        self.log(self.ports, pretty_print=True)

        self.links = self._parse_links(self.links)

        # Expand relative/home-relative host paths in volume specs.
        if self.volumes:
            self.volumes = self._expand_host_paths()

        self.env = self._get_environment()
        self.ulimits = self._parse_ulimits()
        self.log_config = self._parse_log_config()
        self.exp_links = None
        self.volume_binds = self._get_volume_binds(self.volumes)

        self.log("volumes:")
        self.log(self.volumes, pretty_print=True)
        self.log("volume binds:")
        self.log(self.volume_binds, pretty_print=True)

        # Validate each requested network: it must have a name, and the
        # named network must already exist on the docker host.
        if self.networks:
            for network in self.networks:
                if not network.get('name'):
                    self.fail("Parameter error: network must have a name attribute.")
                network['id'] = self._get_network_id(network['name'])
                if not network['id']:
                    self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
                if network.get('links'):
                    network['links'] = self._parse_links(network['links'])
def fail(self, msg):
self.client.module.fail_json(msg=msg)
@property
def update_parameters(self):
'''
Returns parameters used to update a container
'''
update_parameters = dict(
blkio_weight='blkio_weight',
cpu_period='cpu_period',
cpu_quota='cpu_quota',
cpu_shares='cpu_shares',
cpuset_cpus='cpuset_cpus',
mem_limit='memory',
mem_reservation='mem_reservation',
memswap_limit='memory_swap',
kernel_memory='kernel_memory'
)
result = dict()
for key, value in update_parameters.iteritems():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
@property
def create_parameters(self):
'''
Returns parameters used to create a container
'''
create_params = dict(
command='command',
hostname='hostname',
user='user',
detach='detach',
stdin_open='interactive',
tty='tty',
ports='ports',
environment='env',
name='name',
entrypoint='entrypoint',
cpu_shares='cpu_shares',
mac_address='mac_address',
labels='labels',
stop_signal='stop_signal',
volume_driver='volume_driver',
)
result = dict(
host_config=self._host_config(),
volumes=self._get_mounts(),
)
for key, value in create_params.items():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
def _expand_host_paths(self):
new_vols = []
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if re.match(r'[\.~]', host):
host = os.path.abspath(host)
new_vols.append("%s:%s:%s" % (host, container, mode))
continue
elif len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS and re.match(r'[\.~]', parts[0]):
host = os.path.abspath(parts[0])
new_vols.append("%s:%s:rw" % (host, parts[1]))
continue
new_vols.append(vol)
return new_vols
def _get_mounts(self):
'''
Return a list of container mounts.
:return:
'''
result = []
if self.volumes:
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, _ = vol.split(':')
result.append(container)
continue
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
result.append(parts[1])
continue
result.append(vol)
self.log("mounts:")
self.log(result, pretty_print=True)
return result
def _host_config(self):
'''
Returns parameters used to create a HostConfig object
'''
host_config_params=dict(
port_bindings='published_ports',
publish_all_ports='publish_all_ports',
links='links',
privileged='privileged',
dns='dns_servers',
dns_search='dns_search_domains',
binds='volume_binds',
volumes_from='volumes_from',
network_mode='network_mode',
cap_add='capabilities',
extra_hosts='etc_hosts',
read_only='read_only',
ipc_mode='ipc_mode',
security_opt='security_opts',
ulimits='ulimits',
log_config='log_config',
mem_limit='memory',
memswap_limit='memory_swap',
mem_swappiness='memory_swappiness',
shm_size='shm_size',
group_add='groups',
devices='devices',
pid_mode='pid_mode'
)
params = dict()
for key, value in host_config_params.iteritems():
if getattr(self, value, None) is not None:
params[key] = getattr(self, value)
if self.restart_policy:
params['restart_policy'] = dict(Name=self.restart_policy,
MaximumRetryCount=self.restart_retries)
return self.client.create_host_config(**params)
@property
def default_host_ip(self):
ip = '0.0.0.0'
if not self.networks:
return ip
for net in self.networks:
if net.get('name'):
network = self.client.inspect_network(net['name'])
if network.get('Driver') == 'bridge' and \
network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
break
return ip
def _parse_publish_ports(self):
'''
Parse ports from docker CLI syntax
'''
if self.published_ports is None:
return None
if 'all' in self.published_ports:
return 'all'
default_ip = self.default_host_ip
binds = {}
for port in self.published_ports:
parts = str(port).split(':')
container_port = parts[-1]
if '/' not in container_port:
container_port = int(parts[-1])
p_len = len(parts)
if p_len == 1:
bind = (default_ip,)
elif p_len == 2:
bind = (default_ip, int(parts[0]))
elif p_len == 3:
bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
if container_port in binds:
old_bind = binds[container_port]
if isinstance(old_bind, list):
old_bind.append(bind)
else:
binds[container_port] = [binds[container_port], bind]
else:
binds[container_port] = bind
return binds
@staticmethod
def _get_volume_binds(volumes):
'''
Extract host bindings, if any, from list of volume mapping strings.
:return: dictionary of bind mappings
'''
result = dict()
if volumes:
for vol in volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = (vol.split(':') + ['rw'])
if host is not None:
result[host] = dict(
bind=container,
mode=mode
)
return result
def _parse_exposed_ports(self, published_ports):
'''
Parse exposed ports from docker CLI-style ports syntax.
'''
exposed = []
if self.exposed_ports:
for port in self.exposed_ports:
port = str(port).strip()
protocol = 'tcp'
match = re.search(r'(/.+$)', port)
if match:
protocol = match.group(1).replace('/', '')
port = re.sub(r'/.+$', '', port)
exposed.append((port, protocol))
if published_ports:
# Any published port should also be exposed
for publish_port in published_ports:
match = False
if isinstance(publish_port, basestring) and '/' in publish_port:
port, protocol = publish_port.split('/')
port = int(port)
else:
protocol = 'tcp'
port = int(publish_port)
for exposed_port in exposed:
if isinstance(exposed_port[0], basestring) and '-' in exposed_port[0]:
start_port, end_port = exposed_port[0].split('-')
if int(start_port) <= port <= int(end_port):
match = True
elif exposed_port[0] == port:
match = True
if not match:
exposed.append((port, protocol))
return exposed
@staticmethod
def _parse_links(links):
'''
Turn links into a dictionary
'''
if links is None:
return None
result = {}
for link in links:
parsed_link = link.split(':', 1)
if len(parsed_link) == 2:
result[parsed_link[0]] = parsed_link[1]
else:
result[parsed_link[0]] = parsed_link[0]
return result
def _parse_ulimits(self):
'''
Turn ulimits into an array of Ulimit objects
'''
if self.ulimits is None:
return None
results = []
for limit in self.ulimits:
limits = dict()
pieces = limit.split(':')
if len(pieces) >= 2:
limits['name'] = pieces[0]
limits['soft'] = int(pieces[1])
limits['hard'] = int(pieces[1])
if len(pieces) == 3:
limits['hard'] = int(pieces[2])
try:
results.append(Ulimit(**limits))
except ValueError as exc:
self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
return results
def _parse_log_config(self):
'''
Create a LogConfig object
'''
if self.log_driver is None:
return None
options = dict(
Type=self.log_driver,
Config = dict()
)
if self.log_options is not None:
options['Config'] = self.log_options
try:
return LogConfig(**options)
except ValueError as exc:
self.fail('Error parsing logging options - %s' % (exc))
def _get_environment(self):
"""
If environment file is combined with explicit environment variables, the explicit environment variables
take precedence.
"""
final_env = {}
if self.env_file:
parsed_env_file = utils.parse_env_file(self.env_file)
for name, value in parsed_env_file.iteritems():
final_env[name] = str(value)
if self.env:
for name, value in self.env.iteritems():
final_env[name] = str(value)
return final_env
def _get_network_id(self, network_name):
network_id = None
try:
for network in self.client.networks(names=[network_name]):
if network['Name'] == network_name:
network_id = network['Id']
break
except Exception as exc:
self.fail("Error getting network id for %s - %s" % (network_name, str(exc)))
return network_id
class Container(DockerBaseClass):
    '''
    Wrap the docker inspect results of a (possibly absent) container and
    compare them with the requested task parameters.
    '''

    def __init__(self, container, parameters):
        super(Container, self).__init__()
        self.raw = container
        self.Id = None
        self.container = container
        if container:
            self.Id = container['Id']
            self.Image = container['Image']
        self.log(self.container, pretty_print=True)
        self.parameters = parameters
        # expected_* values are computed by has_different_configuration().
        self.parameters.expected_links = None
        self.parameters.expected_ports = None
        self.parameters.expected_exposed = None
        self.parameters.expected_volumes = None
        self.parameters.expected_ulimits = None
        self.parameters.expected_etc_hosts = None
        self.parameters.expected_env = None

    def fail(self, msg):
        self.parameters.client.module.fail_json(msg=msg)

    @property
    def exists(self):
        return True if self.container else False

    @property
    def running(self):
        # Running only when State.Running is set and the container is not a
        # ghost (stale state from an old daemon).
        if self.container and self.container.get('State'):
            if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
                return True
        return False

    def has_different_configuration(self, image):
        '''
        Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
        '''
        self.log('Starting has_different_configuration')
        self.parameters.expected_entrypoint = self._get_expected_entrypoint()
        self.parameters.expected_links = self._get_expected_links()
        self.parameters.expected_ports = self._get_expected_ports()
        self.parameters.expected_exposed = self._get_expected_exposed(image)
        self.parameters.expected_volumes = self._get_expected_volumes(image)
        self.parameters.expected_binds = self._get_expected_binds(image)
        self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
        self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
        self.parameters.expected_env = self._get_expected_env(image)
        self.parameters.expected_cmd = self._get_expected_cmd()

        if not self.container.get('HostConfig'):
            self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
        if not self.container.get('Config'):
            self.fail("has_config_diff: Error parsing container properties. Config missing.")
        if not self.container.get('NetworkSettings'):
            self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")

        host_config = self.container['HostConfig']
        log_config = host_config.get('LogConfig', dict())
        restart_policy = host_config.get('RestartPolicy', dict())
        config = self.container['Config']
        network = self.container['NetworkSettings']

        # The previous version of the docker module ignored the detach state by
        # assuming if the container was running, it must have been detached.
        detach = not (config.get('AttachStderr') and config.get('AttachStdout'))

        # Map parameters to container inspect results
        config_mapping = dict(
            image=config.get('Image'),
            expected_cmd=config.get('Cmd'),
            hostname=config.get('Hostname'),
            user=config.get('User'),
            detach=detach,
            interactive=config.get('OpenStdin'),
            capabilities=host_config.get('CapAdd'),
            devices=host_config.get('Devices'),
            dns_servers=host_config.get('Dns'),
            dns_opts=host_config.get('DnsOptions'),
            dns_search_domains=host_config.get('DnsSearch'),
            expected_env=(config.get('Env') or []),
            expected_entrypoint=config.get('Entrypoint'),
            # .get(): ExtraHosts may be absent from inspect output; direct
            # indexing raised KeyError in that case.
            expected_etc_hosts=host_config.get('ExtraHosts'),
            expected_exposed=[re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()],
            groups=host_config.get('GroupAdd'),
            ipc_mode=host_config.get("IpcMode"),
            labels=config.get('Labels'),
            expected_links=host_config.get('Links'),
            log_driver=log_config.get('Type'),
            log_options=log_config.get('Config'),
            mac_address=network.get('MacAddress'),
            memory_swappiness=host_config.get('MemorySwappiness'),
            network_mode=host_config.get('NetworkMode'),
            oom_killer=host_config.get('OomKillDisable'),
            pid_mode=host_config.get('PidMode'),
            privileged=host_config.get('Privileged'),
            expected_ports=host_config.get('PortBindings'),
            read_only=host_config.get('ReadonlyRootfs'),
            restart_policy=restart_policy.get('Name'),
            restart_retries=restart_policy.get('MaximumRetryCount'),
            # Cannot test shm_size, as shm_size is not included in container inspection results.
            # shm_size=host_config.get('ShmSize'),
            # Fixed typo (was "SecuriytOpt"): the misspelled key always
            # returned None, so security_opts changes were never detected.
            security_opts=host_config.get("SecurityOpt"),
            stop_signal=config.get("StopSignal"),
            tty=config.get('Tty'),
            expected_ulimits=host_config.get('Ulimits'),
            uts=host_config.get('UTSMode'),
            expected_volumes=config.get('Volumes'),
            expected_binds=host_config.get('Binds'),
            volumes_from=host_config.get('VolumesFrom'),
            volume_driver=host_config.get('VolumeDriver')
        )

        differences = []
        # .items() instead of Python 2-only .iteritems() throughout this class.
        for key, value in config_mapping.items():
            self.log('check differences %s %s vs %s' % (key, getattr(self.parameters, key), str(value)))
            if getattr(self.parameters, key, None) is not None:
                if isinstance(getattr(self.parameters, key), list) and isinstance(value, list):
                    if len(getattr(self.parameters, key)) > 0 and isinstance(getattr(self.parameters, key)[0], dict):
                        # compare list of dictionaries
                        self.log("comparing list of dict: %s" % key)
                        match = self._compare_dictionary_lists(getattr(self.parameters, key), value)
                    else:
                        # compare two lists. Is list_a in list_b?
                        self.log("comparing lists: %s" % key)
                        set_a = set(getattr(self.parameters, key))
                        set_b = set(value)
                        match = (set_a <= set_b)
                elif isinstance(getattr(self.parameters, key), dict) and isinstance(value, dict):
                    # compare two dicts
                    self.log("comparing two dicts: %s" % key)
                    match = self._compare_dicts(getattr(self.parameters, key), value)
                else:
                    # primitive compare
                    self.log("primitive compare: %s" % key)
                    match = (getattr(self.parameters, key) == value)

                if not match:
                    # no match. record the differences
                    item = dict()
                    item[key] = dict(
                        parameter=getattr(self.parameters, key),
                        container=value
                    )
                    differences.append(item)

        has_differences = True if len(differences) > 0 else False
        return has_differences, differences

    def _compare_dictionary_lists(self, list_a, list_b):
        '''
        If all of list_a exists in list_b, return True
        '''
        if not isinstance(list_a, list) or not isinstance(list_b, list):
            return False
        matches = 0
        for dict_a in list_a:
            for dict_b in list_b:
                if self._compare_dicts(dict_a, dict_b):
                    matches += 1
                    break
        result = (matches == len(list_a))
        return result

    def _compare_dicts(self, dict_a, dict_b):
        '''
        If dict_a in dict_b, return True
        '''
        if not isinstance(dict_a, dict) or not isinstance(dict_b, dict):
            return False
        for key, value in dict_a.items():
            if isinstance(value, dict):
                match = self._compare_dicts(value, dict_b.get(key))
            elif isinstance(value, list):
                if len(value) > 0 and isinstance(value[0], dict):
                    match = self._compare_dictionary_lists(value, dict_b.get(key))
                else:
                    # NOTE(review): raises TypeError when dict_b lacks the key;
                    # current callers only pass overlapping keys -- confirm.
                    set_a = set(value)
                    set_b = set(dict_b.get(key))
                    match = (set_a == set_b)
            else:
                match = (value == dict_b.get(key))
            if not match:
                return False
        return True

    def has_different_resource_limits(self):
        '''
        Diff parameters and container resource limits
        '''
        if not self.container.get('HostConfig'):
            self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")

        host_config = self.container['HostConfig']

        config_mapping = dict(
            cpu_period=host_config.get('CpuPeriod'),
            cpu_quota=host_config.get('CpuQuota'),
            cpuset_cpus=host_config.get('CpusetCpus'),
            cpuset_mems=host_config.get('CpusetMems'),
            cpu_shares=host_config.get('CpuShares'),
            kernel_memory=host_config.get("KernelMemory"),
            memory=host_config.get('Memory'),
            memory_reservation=host_config.get('MemoryReservation'),
            memory_swap=host_config.get('MemorySwap'),
        )

        differences = []
        for key, value in config_mapping.items():
            if getattr(self.parameters, key, None) and getattr(self.parameters, key) != value:
                # no match. record the differences
                item = dict()
                item[key] = dict(
                    parameter=getattr(self.parameters, key),
                    container=value
                )
                differences.append(item)
        different = (len(differences) > 0)
        return different, differences

    def has_network_differences(self):
        '''
        Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
        '''
        different = False
        differences = []

        if not self.parameters.networks:
            return different, differences

        if not self.container.get('NetworkSettings'):
            self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")

        connected_networks = self.container['NetworkSettings']['Networks']
        for network in self.parameters.networks:
            if connected_networks.get(network['name'], None) is None:
                different = True
                differences.append(dict(
                    parameter=network,
                    container=None
                ))
            else:
                diff = False
                if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'):
                    diff = True
                if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'):
                    diff = True
                if network.get('aliases') and not connected_networks[network['name']].get('Aliases'):
                    diff = True
                if network.get('aliases') and connected_networks[network['name']].get('Aliases'):
                    for alias in network.get('aliases'):
                        if alias not in connected_networks[network['name']].get('Aliases', []):
                            diff = True
                if network.get('links') and not connected_networks[network['name']].get('Links'):
                    diff = True
                if network.get('links') and connected_networks[network['name']].get('Links'):
                    expected_links = []
                    for link, alias in network['links'].items():
                        expected_links.append("%s:%s" % (link, alias))
                    for link in expected_links:
                        if link not in connected_networks[network['name']].get('Links', []):
                            diff = True
                if diff:
                    different = True
                    differences.append(dict(
                        parameter=network,
                        container=dict(
                            name=network['name'],
                            ipv4_address=connected_networks[network['name']].get('IPAddress'),
                            ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'),
                            aliases=connected_networks[network['name']].get('Aliases'),
                            links=connected_networks[network['name']].get('Links')
                        )
                    ))
        return different, differences

    def has_extra_networks(self):
        '''
        Check if the container is connected to non-requested networks
        '''
        extra_networks = []
        extra = False

        if not self.container.get('NetworkSettings'):
            self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")

        connected_networks = self.container['NetworkSettings'].get('Networks')
        if connected_networks:
            for network, network_config in connected_networks.items():
                keep = False
                if self.parameters.networks:
                    for expected_network in self.parameters.networks:
                        if expected_network['name'] == network:
                            keep = True
                if not keep:
                    extra = True
                    extra_networks.append(dict(name=network, id=network_config['NetworkID']))
        return extra, extra_networks

    def _get_expected_entrypoint(self):
        self.log('_get_expected_entrypoint')
        if not self.parameters.entrypoint:
            return None
        # Split the CLI-style string the same way docker would.
        return shlex.split(self.parameters.entrypoint)

    def _get_expected_ports(self):
        if not self.parameters.published_ports:
            return None
        expected_bound_ports = {}
        for container_port, config in self.parameters.published_ports.items():
            if isinstance(container_port, int):
                container_port = "%s/tcp" % container_port
            if len(config) == 1:
                expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}]
            elif isinstance(config[0], tuple):
                expected_bound_ports[container_port] = []
                for host_ip, host_port in config:
                    expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)})
            else:
                expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
        return expected_bound_ports

    def _get_expected_links(self):
        if self.parameters.links is None:
            return None
        self.log('parameter links:')
        self.log(self.parameters.links, pretty_print=True)
        exp_links = []
        # Inspect reports links as "/linked:/container_name/alias".
        for link, alias in self.parameters.links.items():
            exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
        return exp_links

    def _get_expected_binds(self, image):
        self.log('_get_expected_binds')
        image_vols = []
        if image:
            image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes'))
        param_vols = []
        if self.parameters.volumes:
            for vol in self.parameters.volumes:
                host = None
                if ':' in vol:
                    if len(vol.split(':')) == 3:
                        host, container, mode = vol.split(':')
                    if len(vol.split(':')) == 2:
                        parts = vol.split(':')
                        if parts[1] not in VOLUME_PERMISSIONS:
                            host, container, mode = vol.split(':') + ['rw']
                if host:
                    param_vols.append("%s:%s:%s" % (host, container, mode))
        result = list(set(image_vols + param_vols))
        self.log("expected_binds:")
        self.log(result, pretty_print=True)
        return result

    def _get_image_binds(self, volumes):
        '''
        Convert array of binds to array of strings with format host_path:container_path:mode
        :param volumes: array of bind dicts
        :return: array of strings
        '''
        results = []
        if isinstance(volumes, dict):
            results += self._get_bind_from_dict(volumes)
        elif isinstance(volumes, list):
            for vol in volumes:
                results += self._get_bind_from_dict(vol)
        return results

    @staticmethod
    def _get_bind_from_dict(volume_dict):
        results = []
        if volume_dict:
            for host_path, config in volume_dict.items():
                if isinstance(config, dict) and config.get('bind'):
                    container_path = config.get('bind')
                    mode = config.get('mode', 'rw')
                    results.append("%s:%s:%s" % (host_path, container_path, mode))
        return results

    def _get_expected_volumes(self, image):
        self.log('_get_expected_volumes')
        expected_vols = dict()
        if image and image['ContainerConfig'].get('Volumes'):
            expected_vols.update(image['ContainerConfig'].get('Volumes'))

        if self.parameters.volumes:
            for vol in self.parameters.volumes:
                container = None
                if ':' in vol:
                    if len(vol.split(':')) == 3:
                        host, container, mode = vol.split(':')
                    if len(vol.split(':')) == 2:
                        parts = vol.split(':')
                        if parts[1] not in VOLUME_PERMISSIONS:
                            host, container, mode = vol.split(':') + ['rw']
                new_vol = dict()
                if container:
                    new_vol[container] = dict()
                else:
                    new_vol[vol] = dict()
                expected_vols.update(new_vol)

        if not expected_vols:
            expected_vols = None
        self.log("expected_volumes:")
        self.log(expected_vols, pretty_print=True)
        return expected_vols

    def _get_expected_env(self, image):
        self.log('_get_expected_env')
        expected_env = dict()
        if image and image['ContainerConfig'].get('Env'):
            for env_var in image['ContainerConfig']['Env']:
                parts = env_var.split('=', 1)
                expected_env[parts[0]] = parts[1]
        if self.parameters.env:
            # Task-supplied variables override the image's defaults.
            expected_env.update(self.parameters.env)
        param_env = []
        for key, value in expected_env.items():
            param_env.append("%s=%s" % (key, value))
        return param_env

    def _get_expected_exposed(self, image):
        self.log('_get_expected_exposed')
        image_ports = []
        if image:
            image_ports = [re.sub(r'/.+$', '', p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()]
        param_ports = []
        if self.parameters.ports:
            param_ports = [str(p[0]) for p in self.parameters.ports]
        result = list(set(image_ports + param_ports))
        self.log(result, pretty_print=True)
        return result

    def _get_expected_ulimits(self, config_ulimits):
        self.log('_get_expected_ulimits')
        if config_ulimits is None:
            return None
        results = []
        for limit in config_ulimits:
            results.append(dict(
                Name=limit.name,
                Soft=limit.soft,
                Hard=limit.hard
            ))
        return results

    def _get_expected_cmd(self):
        self.log('_get_expected_cmd')
        if not self.parameters.command:
            return None
        return shlex.split(self.parameters.command)

    def _convert_simple_dict_to_list(self, param_name, join_with=':'):
        if getattr(self.parameters, param_name, None) is None:
            return None
        results = []
        for key, value in getattr(self.parameters, param_name).items():
            results.append("%s%s%s" % (key, join_with, value))
        return results
class ContainerManager(DockerBaseClass):
    '''
    Perform container management tasks

    The constructor runs the whole task (reconcile the requested state against
    the actual container) and leaves the outcome in self.results.
    '''

    def __init__(self, client):
        super(ContainerManager, self).__init__()
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {'changed': False, 'actions': []}
        self.diff = {}
        self.facts = {}

        state = self.parameters.state
        if state in ('stopped', 'started', 'present'):
            self.present(state)
        elif state == 'absent':
            self.absent()

        # Actions are only reported in check mode or when debugging.
        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')

        if self.client.module._diff or self.parameters.debug:
            self.results['diff'] = self.diff

        if self.facts:
            self.results['ansible_facts'] = {'ansible_docker_container': self.facts}

    def present(self, state):
        '''
        Ensure the container exists (recreating it when config or image
        differ) and is in the requested run state.
        '''
        container = self._get_container(self.parameters.name)
        image = self._get_image()

        if not container.exists:
            # New container
            self.log('No container found')
            new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
            if new_container:
                container = new_container
        else:
            # Existing container
            different, differences = container.has_different_configuration(image)
            image_different = False
            if not self.parameters.ignore_image:
                image_different = self._image_is_different(image, container)
            if image_different or different or self.parameters.recreate:
                self.diff['differences'] = differences
                if image_different:
                    self.diff['image_different'] = True
                self.log("differences")
                self.log(differences, pretty_print=True)
                if container.running:
                    self.container_stop(container.Id)
                self.container_remove(container.Id)
                new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
                if new_container:
                    container = new_container

        if container and container.exists:
            container = self.update_limits(container)
            container = self.update_networks(container)

            if state == 'started' and not container.running:
                container = self.container_start(container.Id)
            elif state == 'started' and self.parameters.restart:
                self.container_stop(container.Id)
                container = self.container_start(container.Id)
            elif state == 'stopped' and container.running:
                self.container_stop(container.Id)
                container = self._get_container(container.Id)

        self.facts = container.raw

    def absent(self):
        '''Remove the container, stopping it first when it is running.'''
        container = self._get_container(self.parameters.name)
        if container.exists:
            if container.running:
                self.container_stop(container.Id)
            self.container_remove(container.Id)

    def fail(self, msg, **kwargs):
        self.client.module.fail_json(msg=msg, **kwargs)

    def _get_container(self, container):
        '''
        Expects container ID or Name. Returns a container object
        '''
        return Container(self.client.get_container(container), self.parameters)

    def _get_image(self):
        '''
        Find (and, outside check mode, pull when missing or pull requested)
        the requested image. Returns the inspect dict or None.
        '''
        if not self.parameters.image:
            self.log('No image specified')
            return None
        repository, tag = utils.parse_repository_tag(self.parameters.image)
        if not tag:
            tag = "latest"
        image = self.client.find_image(repository, tag)
        if not self.check_mode:
            if not image or self.parameters.pull:
                self.log("Pull the image.")
                image = self.client.pull_image(repository, tag)
                self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
                self.results['changed'] = True
        self.log("image")
        self.log(image, pretty_print=True)
        return image

    def _image_is_different(self, image, container):
        # Compare image Ids, not names -- the container records the Id it runs.
        if image and image.get('Id'):
            if container and container.Image:
                if image.get('Id') != container.Image:
                    return True
        return False

    def update_limits(self, container):
        limits_differ, different_limits = container.has_different_resource_limits()
        if limits_differ:
            self.log("limit differences:")
            self.log(different_limits, pretty_print=True)
        if limits_differ and not self.check_mode:
            self.container_update(container.Id, self.parameters.update_parameters)
            return self._get_container(container.Id)
        return container

    def update_networks(self, container):
        has_network_differences, network_differences = container.has_network_differences()
        updated_container = container
        if has_network_differences:
            if self.diff.get('differences'):
                self.diff['differences'].append(dict(network_differences=network_differences))
            else:
                self.diff['differences'] = [dict(network_differences=network_differences)]
            self.results['changed'] = True
            updated_container = self._add_networks(container, network_differences)

        if self.parameters.purge_networks:
            has_extra_networks, extra_networks = container.has_extra_networks()
            if has_extra_networks:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(purge_networks=extra_networks))
                else:
                    self.diff['differences'] = [dict(purge_networks=extra_networks)]
                self.results['changed'] = True
                updated_container = self._purge_networks(container, extra_networks)
        return updated_container

    def _add_networks(self, container, differences):
        for diff in differences:
            # remove the container from the network, if connected
            if diff.get('container'):
                self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
                if not self.check_mode:
                    try:
                        self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
                    except Exception as exc:
                        self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
                                                                                          str(exc)))
            # connect to the network
            params = dict(
                ipv4_address=diff['parameter'].get('ipv4_address', None),
                ipv6_address=diff['parameter'].get('ipv6_address', None),
                links=diff['parameter'].get('links', None),
                aliases=diff['parameter'].get('aliases', None)
            )
            self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
            if not self.check_mode:
                try:
                    # (fixed debug-message typo: was "conainer")
                    self.log("Connecting container to network %s" % diff['parameter']['id'])
                    self.log(params, pretty_print=True)
                    self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
                except Exception as exc:
                    self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc)))
        return self._get_container(container.Id)

    def _purge_networks(self, container, networks):
        for network in networks:
            self.results['actions'].append(dict(removed_from_network=network['name']))
            if not self.check_mode:
                try:
                    self.client.disconnect_container_from_network(container.Id, network['name'])
                except Exception as exc:
                    self.fail("Error disconnecting container from network %s - %s" % (network['name'],
                                                                                      str(exc)))
        return self._get_container(container.Id)

    def container_create(self, image, create_parameters):
        self.log("create container")
        self.log("image: %s parameters:" % image)
        self.log(create_parameters, pretty_print=True)
        self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
        self.results['changed'] = True
        new_container = None
        if not self.check_mode:
            try:
                new_container = self.client.create_container(image, **create_parameters)
            except Exception as exc:
                self.fail("Error creating container: %s" % str(exc))
            return self._get_container(new_container['Id'])
        # In check mode nothing was created; callers must handle None.
        return new_container

    def container_start(self, container_id):
        self.log("start container %s" % (container_id))
        self.results['actions'].append(dict(started=container_id))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.client.start(container=container_id)
            except Exception as exc:
                self.fail("Error starting container %s: %s" % (container_id, str(exc)))

            if not self.parameters.detach:
                # Foreground run: wait for exit, capture logs, optionally clean up.
                status = self.client.wait(container_id)
                output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
                if status != 0:
                    self.fail(output, status=status)
                if self.parameters.cleanup:
                    self.container_remove(container_id, force=True)
                insp = self._get_container(container_id)
                if insp.raw:
                    insp.raw['Output'] = output
                else:
                    insp.raw = dict(Output=output)
                return insp
        return self._get_container(container_id)

    def container_remove(self, container_id, link=False, force=False):
        # Volumes are removed with the container unless keep_volumes is set.
        volume_state = (not self.parameters.keep_volumes)
        self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
        self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            try:
                response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
            except Exception as exc:
                self.fail("Error removing container %s: %s" % (container_id, str(exc)))
        return response

    def container_update(self, container_id, update_parameters):
        if update_parameters:
            self.log("update container %s" % (container_id))
            self.log(update_parameters, pretty_print=True)
            self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
            self.results['changed'] = True
            # getattr default added: without it, clients lacking
            # update_container raised AttributeError instead of being skipped,
            # which is clearly what the callable() guard intended.
            if not self.check_mode and callable(getattr(self.client, 'update_container', None)):
                try:
                    self.client.update_container(container_id, **update_parameters)
                except Exception as exc:
                    self.fail("Error updating container %s: %s" % (container_id, str(exc)))
        return self._get_container(container_id)

    def container_kill(self, container_id):
        self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            try:
                if self.parameters.kill_signal:
                    response = self.client.kill(container_id, signal=self.parameters.kill_signal)
                else:
                    response = self.client.kill(container_id)
            except Exception as exc:
                self.fail("Error killing container %s: %s" % (container_id, exc))
        return response

    def container_stop(self, container_id):
        if self.parameters.force_kill:
            # force_kill short-circuits a graceful stop.
            self.container_kill(container_id)
            return
        self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            try:
                if self.parameters.stop_timeout:
                    response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
                else:
                    response = self.client.stop(container_id)
            except Exception as exc:
                self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
        return response
def main():
    """Module entry point: declare the argument spec, build the docker
    client, run the container manager, and exit with its results."""
    argument_spec = dict(
        blkio_weight=dict(type='int'),
        capabilities=dict(type='list'),
        cleanup=dict(type='bool', default=False),
        command=dict(type='str'),
        cpu_period=dict(type='int'),
        cpu_quota=dict(type='int'),
        cpuset_cpus=dict(type='str'),
        cpuset_mems=dict(type='str'),
        cpu_shares=dict(type='int'),
        detach=dict(type='bool', default=True),
        devices=dict(type='list'),
        dns_servers=dict(type='list'),
        dns_opts=dict(type='list'),
        dns_search_domains=dict(type='list'),
        env=dict(type='dict'),
        env_file=dict(type='path'),
        entrypoint=dict(type='str'),
        etc_hosts=dict(type='dict'),
        exposed_ports=dict(type='list', aliases=['exposed', 'expose']),
        force_kill=dict(type='bool', default=False, aliases=['forcekill']),
        groups=dict(type='list'),
        hostname=dict(type='str'),
        ignore_image=dict(type='bool', default=False),
        image=dict(type='str'),
        interactive=dict(type='bool', default=False),
        ipc_mode=dict(type='str'),
        keep_volumes=dict(type='bool', default=True),
        kernel_memory=dict(type='str'),
        kill_signal=dict(type='str'),
        labels=dict(type='dict'),
        links=dict(type='list'),
        log_driver=dict(type='str', choices=['json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'], default=None),
        log_options=dict(type='dict', aliases=['log_opt']),
        mac_address=dict(type='str'),
        memory=dict(type='str', default='0'),
        memory_reservation=dict(type='str'),
        memory_swap=dict(type='str'),
        memory_swappiness=dict(type='int'),
        name=dict(type='str', required=True),
        network_mode=dict(type='str'),
        networks=dict(type='list'),
        oom_killer=dict(type='bool'),
        paused=dict(type='bool', default=False),
        pid_mode=dict(type='str'),
        privileged=dict(type='bool', default=False),
        published_ports=dict(type='list', aliases=['ports']),
        pull=dict(type='bool', default=False),
        # BUGFIX: was "deault=False" -- the misspelled key was passed through
        # as an unknown option instead of setting the parameter's default.
        purge_networks=dict(type='bool', default=False),
        read_only=dict(type='bool', default=False),
        recreate=dict(type='bool', default=False),
        restart=dict(type='bool', default=False),
        restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
        restart_retries=dict(type='int', default=None),
        shm_size=dict(type='str'),
        security_opts=dict(type='list'),
        state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'),
        stop_signal=dict(type='str'),
        stop_timeout=dict(type='int'),
        trust_image_content=dict(type='bool', default=False),
        tty=dict(type='bool', default=False),
        ulimits=dict(type='list'),
        user=dict(type='str'),
        uts=dict(type='str'),
        volumes=dict(type='list'),
        volumes_from=dict(type='list'),
        volume_driver=dict(type='str'),
    )
    # An image is only mandatory when the container must exist.
    required_if = [
        ('state', 'present', ['image'])
    ]
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True
    )
    cm = ContainerManager(client)
    client.module.exit_json(**cm.results)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
ngx_uint_t ngx_pagesize;
ngx_uint_t ngx_pagesize_shift;
ngx_uint_t ngx_cacheline_size;
/*
 * Allocate size bytes with malloc().  Logs an emergency-level message on
 * failure and a debug trace for every allocation.  Returns NULL on failure;
 * callers must check.
 */
void *ngx_alloc(size_t size, ngx_log_t *log)
{
    void *p;
    p = malloc(size);
    if (p == NULL) {
        ngx_log_error(NGX_LOG_EMERG, log, ngx_errno,
                      "malloc(%uz) failed", size);
    }
    ngx_log_debug2(NGX_LOG_DEBUG_ALLOC, log, 0, "malloc: %p:%uz", p, size);
    return p;
}
/*
 * Allocate size bytes via ngx_alloc() and zero the block on success.
 * Returns NULL if the underlying allocation failed (already logged there).
 */
void *ngx_calloc(size_t size, ngx_log_t *log)
{
    void *p;
    p = ngx_alloc(size, log);
    if (p) {
        ngx_memzero(p, size);
    }
    return p;
} | c | github | https://github.com/nginx/nginx | src/os/win32/ngx_alloc.c |
// React Compiler test fixture: exercises an object pattern ({z}) in the rest
// position of an array destructuring.  The unusual syntax is the point of the
// fixture -- do not "simplify" this destructuring.
function Component(props) {
  const [y, ...{z}] = props.value;
  return [y, z];
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{value: ['y', {z: 'z!'}]}],
}; | javascript | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/destructuring-object-pattern-within-rest.js |
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# from python and deps
from cStringIO import StringIO
import inspect
import os
import shlex
# from Ansible
from ansible import errors
from ansible import utils
from ansible import constants as C
REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
class ModuleReplacer(object):
    """
    The Replacer is used to insert chunks of code into modules before
    transfer. Rather than doing classical python imports, this allows for more
    efficient transfer in a no-bootstrapping scenario by not moving extra files
    over the wire, and also takes care of embedding arguments in the transferred
    modules.
    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.
    Example:
    from ansible.module_utils.basic import *
    will result in a template evaluation of
    {{ include 'basic.py' }}
    from the module_utils/ directory in the source tree.
    All modules are required to import at least basic, though there will also
    be other snippets.
    """
    # ******************************************************************************
    def __init__(self, strip_comments=False):
        # Snippets live next to this file, under module_utils/.
        this_file = inspect.getfile(inspect.currentframe())
        self.snippet_path = os.path.join(os.path.dirname(this_file), 'module_utils')
        self.strip_comments = strip_comments # TODO: implement
    # ******************************************************************************
    def slurp(self, path):
        """Return the full contents of *path*; raise AnsibleError if missing."""
        if not os.path.exists(path):
            raise errors.AnsibleError("imported module support code does not exist at %s" % path)
        fd = open(path)
        data = fd.read()
        fd.close()
        return data
    def _find_snippet_imports(self, module_data, module_path):
        """
        Given the source of the module, convert it to a Jinja2 template to insert
        module code and return whether it's a new or old style module.
        """
        # Classify the module by marker: the REPLACER comment or a
        # module_utils import means "new" style; WANT_JSON means a
        # non-native module that wants JSON args on the command line.
        module_style = 'old'
        if REPLACER in module_data:
            module_style = 'new'
        elif 'from ansible.module_utils.' in module_data:
            module_style = 'new'
        elif 'WANT_JSON' in module_data:
            module_style = 'non_native_want_json'
        output = StringIO()
        lines = module_data.split('\n')
        snippet_names = []
        for line in lines:
            if REPLACER in line:
                # Legacy marker always pulls in the 'basic' snippet.
                output.write(self.slurp(os.path.join(self.snippet_path, "basic.py")))
                snippet_names.append('basic')
            elif line.startswith('from ansible.module_utils.'):
                # Only the exact form 'from ansible.module_utils.<name> import *'
                # is accepted: three dot-separated tokens plus a star import.
                tokens=line.split(".")
                import_error = False
                if len(tokens) != 3:
                    import_error = True
                if " import *" not in line:
                    import_error = True
                if import_error:
                    raise errors.AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
                snippet_name = tokens[2].split()[0]
                snippet_names.append(snippet_name)
                output.write(self.slurp(os.path.join(self.snippet_path, snippet_name + ".py")))
            else:
                # NOTE(review): this condition has no effect -- the body is
                # `pass` and the line is always written below, so comment
                # stripping is unimplemented (matches the TODO in __init__).
                # Precedence also binds it as
                # (strip_comments and startswith("#")) or (line == '').
                if self.strip_comments and line.startswith("#") or line == '':
                    pass
                output.write(line)
                output.write("\n")
        if len(snippet_names) > 0 and not 'basic' in snippet_names:
            raise errors.AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
        return (output.getvalue(), module_style)
    # ******************************************************************************
    def modify_module(self, module_path, complex_args, module_args, inject):
        """Prepare a module for transfer.

        Expands snippet imports, embeds the (repr-encoded) arguments in
        place of the REPLACER_* markers, applies the syslog facility and
        interpreter overrides from *inject*, and returns the tuple
        (module_data, module_style, shebang).
        """
        with open(module_path) as f:
            # read in the module source
            module_data = f.read()
        (module_data, module_style) = self._find_snippet_imports(module_data, module_path)
        complex_args_json = utils.jsonify(complex_args)
        # We force conversion of module_args to str because module_common calls shlex.split,
        # a standard library function that incorrectly handles Unicode input before Python 2.7.3.
        try:
            encoded_args = repr(module_args.encode('utf-8'))
        except UnicodeDecodeError:
            encoded_args = repr(module_args)
        encoded_complex = repr(complex_args_json)
        # these strings should be part of the 'basic' snippet which is required to be included
        module_data = module_data.replace(REPLACER_ARGS, encoded_args)
        module_data = module_data.replace(REPLACER_COMPLEX, encoded_complex)
        if module_style == 'new':
            # Honor a per-host syslog facility override from the inventory.
            facility = C.DEFAULT_SYSLOG_FACILITY
            if 'ansible_syslog_facility' in inject:
                facility = inject['ansible_syslog_facility']
            module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
        lines = module_data.split("\n")
        shebang = None
        if lines[0].startswith("#!"):
            # Rewrite the shebang when an ansible_<interpreter>_interpreter
            # variable is set for this host.
            shebang = lines[0].strip()
            args = shlex.split(str(shebang[2:]))
            interpreter = args[0]
            interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
            if interpreter_config in inject:
                lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
        module_data = "\n".join(lines)
return (module_data, module_style, shebang) | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
require "cases/helper"
require "models/author"
require "models/post"
require "models/book"
# Tests for ActiveRecord::QueryMethods#in_order_of: ordering rows by an
# explicit list of values, across symbol/string column names, enum values and
# keys, SQL expressions, NULLs, joined associations, and filter: false.
class FieldOrderedValuesTest < ActiveRecord::TestCase
  fixtures :posts
  def test_in_order_of
    order = [3, 4, 1]
    posts = Post.in_order_of(:id, order)
    assert_equal(order, posts.map(&:id))
  end
  def test_in_order_of_empty
    posts = Post.in_order_of(:id, [])
    assert_empty(posts)
  end
  def test_in_order_of_with_enums_values
    Book.destroy_all
    Book.create!(status: :proposed)
    Book.create!(status: :written)
    Book.create!(status: :published)
    order = %w[written published proposed]
    books = Book.in_order_of(:status, order)
    assert_equal(order, books.map(&:status))
    # String column name must behave like the symbol form.
    books = Book.in_order_of("status", order)
    assert_equal(order, books.map(&:status))
  end
  def test_in_order_of_with_enums_keys
    Book.destroy_all
    Book.create!(status: :proposed)
    Book.create!(status: :written)
    Book.create!(status: :published)
    order = [Book.statuses[:written], Book.statuses[:published], Book.statuses[:proposed]]
    books = Book.in_order_of(:status, order)
    assert_equal(order, books.map { |book| Book.statuses[book.status] })
  end
  def test_in_order_of_expression
    order = [3, 4, 1]
    # Arbitrary SQL expressions are accepted via Arel.sql.
    posts = Post.in_order_of(Arel.sql("id * 2"), order.map { |id| id * 2 })
    assert_equal(order, posts.map(&:id))
  end
  def test_in_order_of_with_string_column
    Book.destroy_all
    Book.create!(format: "paperback")
    Book.create!(format: "ebook")
    Book.create!(format: "hardcover")
    order = %w[hardcover paperback ebook]
    books = Book.in_order_of(:format, order)
    assert_equal(order, books.map(&:format))
    books = Book.in_order_of("format", order)
    assert_equal(order, books.map(&:format))
  end
  def test_in_order_of_after_regular_order
    order = [3, 4, 1]
    posts = Post.where(type: "Post").order(:type).in_order_of(:id, order)
    assert_equal(order, posts.map(&:id))
    posts = Post.where(type: "Post").order(:type).in_order_of("id", order)
    assert_equal(order, posts.map(&:id))
  end
  def test_in_order_of_with_nil
    Book.destroy_all
    Book.create!(format: "paperback")
    Book.create!(format: "ebook")
    Book.create!(format: nil)
    # A nil entry in the value list must sort the NULL row into place.
    order = ["ebook", nil, "paperback"]
    books = Book.in_order_of(:format, order)
    assert_equal(order, books.map(&:format))
    books = Book.in_order_of("format", order)
    assert_equal(order, books.map(&:format))
  end
  def test_in_order_of_with_associations
    Author.destroy_all
    Book.destroy_all
    john = Author.create(name: "John")
    bob = Author.create(name: "Bob")
    anna = Author.create(name: "Anna")
    john.books.create
    bob.books.create
    anna.books.create
    order = ["Bob", "Anna", "John"]
    books = Book.joins(:author).in_order_of("authors.name", order)
    assert_equal(order, books.map { |book| book.author.name })
    books = Book.joins(:author).in_order_of(:"authors.name", order)
    assert_equal(order, books.map { |book| book.author.name })
  end
  def test_in_order_of_with_filter_false
    order = [3, 4, 1]
    # filter: false keeps rows whose value is absent from the list, after
    # the explicitly ordered ones.
    posts = Post.in_order_of(:id, order, filter: false)
    assert_equal(order, posts.limit(3).map(&:id))
    assert_equal(11, posts.count)
  end
end | ruby | github | https://github.com/rails/rails | activerecord/test/cases/relation/field_ordered_values_test.rb |
# -*- coding: utf-8 -*-
############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
############################################################################
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class VeohCom(SimpleHoster):
    """Veoh.com hoster plugin: downloads the preview MP4 for a watch page."""
    __name__ = "VeohCom"
    __type__ = "hoster"
    __pattern__ = r'http://(?:www\.)?veoh\.com/(tv/)?(watch|videos)/(?P<ID>v\w+)'
    __version__ = "0.2"
    __config__ = [("quality", "Low;High;Auto", "Quality", "Auto")]
    __description__ = """Veoh.com hoster plugin"""
    __author_name__ = "Walter Purcaro"
    __author_mail__ = "vuolter@gmail.com"
    FILE_NAME_PATTERN = r'<meta name="title" content="(?P<N>.*?)"'
    OFFLINE_PATTERN = r'>Sorry, we couldn\'t find the video you were looking for'
    FILE_URL_REPLACEMENTS = [(__pattern__, r'http://www.veoh.com/watch/\g<ID>')]
    SH_COOKIES = [(".veoh.com", "lassieLocale", "en")]

    def setup(self):
        """Allow resume and parallel downloads, with unlimited chunks."""
        self.resumeDownload = self.multiDL = True
        self.chunkLimit = -1

    def handleFree(self):
        """Find the download link for the configured quality and fetch it.

        "Auto" tries High first and falls back to Low; otherwise only the
        configured quality is tried.  Fails the download when no quality
        yields a link.
        """
        quality = self.getConfig("quality")
        if quality == "Auto":
            qualities = ("High", "Low")
        else:
            # BUGFIX: the original iterated the bare string, so "High" was
            # tried as 'H', 'i', 'g', 'h' and a non-Auto setting could never
            # match a real quality name.  Wrap the single value in a tuple.
            qualities = (quality,)
        for q in qualities:
            pattern = r'"fullPreviewHash%sPath":"(.+?)"' % q
            m = re.search(pattern, self.html)
            if m:
                self.pyfile.name += ".mp4"
                # The JSON-embedded URL escapes slashes; strip backslashes.
                link = m.group(1).replace("\\", "")
                self.logDebug("Download link: " + link)
                self.download(link)
                return
            else:
                self.logInfo("No %s quality video found" % q.upper())
        else:
            # for/else: reached only when no quality produced a download.
            self.fail("No video found!")
getInfo = create_getInfo(VeohCom) | unknown | codeparrot/codeparrot-clean | ||
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* TEA, XTEA, and XETA crypto alogrithms
*
* The TEA and Xtended TEA algorithms were developed by David Wheeler
* and Roger Needham at the Computer Laboratory of Cambridge University.
*
* Due to the order of evaluation in XTEA many people have incorrectly
* implemented it. XETA (XTEA in the wrong order), exists for
* compatibility with these implementations.
*
* Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com
*/
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/unaligned.h>
#include <linux/types.h>
#define TEA_KEY_SIZE 16
#define TEA_BLOCK_SIZE 8
#define TEA_ROUNDS 32
#define TEA_DELTA 0x9e3779b9
#define XTEA_KEY_SIZE 16
#define XTEA_BLOCK_SIZE 8
#define XTEA_ROUNDS 32
#define XTEA_DELTA 0x9e3779b9
struct tea_ctx {
u32 KEY[4];
};
struct xtea_ctx {
u32 KEY[4];
};
/* Load the 128-bit TEA key as four consecutive little-endian 32-bit words. */
static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
	int i;

	for (i = 0; i < 4; i++)
		ctx->KEY[i] = get_unaligned_le32(in_key + 4 * i);

	return 0;
}
/*
 * Encrypt one 64-bit block with TEA: 32 rounds of shift/add/xor mixing,
 * with sum advancing by TEA_DELTA each round.  The block is treated as
 * two little-endian 32-bit halves.
 */
static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, n, sum = 0;
	u32 k0, k1, k2, k3;
	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
	y = get_unaligned_le32(&src[0]);
	z = get_unaligned_le32(&src[4]);
	/* Hoist the key words into locals for the round loop. */
	k0 = ctx->KEY[0];
	k1 = ctx->KEY[1];
	k2 = ctx->KEY[2];
	k3 = ctx->KEY[3];
	n = TEA_ROUNDS;
	while (n-- > 0) {
		sum += TEA_DELTA;
		y += ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
		z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
	}
	put_unaligned_le32(y, &dst[0]);
	put_unaligned_le32(z, &dst[4]);
}
/*
 * Decrypt one 64-bit TEA block: the 32 encryption rounds run in reverse,
 * starting from the final sum value (TEA_DELTA << 5 == TEA_DELTA * 32).
 */
static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, n, sum;
	u32 k0, k1, k2, k3;
	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
	y = get_unaligned_le32(&src[0]);
	z = get_unaligned_le32(&src[4]);
	k0 = ctx->KEY[0];
	k1 = ctx->KEY[1];
	k2 = ctx->KEY[2];
	k3 = ctx->KEY[3];
	sum = TEA_DELTA << 5;
	n = TEA_ROUNDS;
	while (n-- > 0) {
		/* Inverse round: subtract in the reverse order of encryption. */
		z -= ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
		y -= ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
		sum -= TEA_DELTA;
	}
	put_unaligned_le32(y, &dst[0]);
	put_unaligned_le32(z, &dst[4]);
}
/* Load the 128-bit XTEA key as four consecutive little-endian 32-bit words. */
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
	int i;

	for (i = 0; i < 4; i++)
		ctx->KEY[i] = get_unaligned_le32(in_key + 4 * i);

	return 0;
}
/*
 * Encrypt one 64-bit block with XTEA.  Note the operator grouping
 * ((mix) + word) ^ (sum + KEY): the XETA variant below differs only in
 * grouping the same terms as (mix) + (word ^ sum) + KEY (see file header
 * on the widespread mis-implementation).
 */
static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, sum = 0;
	u32 limit = XTEA_DELTA * XTEA_ROUNDS;
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
	y = get_unaligned_le32(&src[0]);
	z = get_unaligned_le32(&src[4]);
	while (sum != limit) {
		y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
		sum += XTEA_DELTA;
		z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
	}
	put_unaligned_le32(y, &dst[0]);
	put_unaligned_le32(z, &dst[4]);
}
/*
 * Decrypt one 64-bit XTEA block: walk sum back down from
 * XTEA_DELTA * XTEA_ROUNDS, undoing each encryption round.
 */
static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, sum;
	/*
	 * Consistency fix: use struct xtea_ctx like the rest of the XTEA
	 * code (the original declared struct tea_ctx; both are u32 KEY[4],
	 * so behaviour is unchanged).
	 */
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
	y = get_unaligned_le32(&src[0]);
	z = get_unaligned_le32(&src[4]);
	sum = XTEA_DELTA * XTEA_ROUNDS;
	while (sum) {
		z -= ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 & 3]);
		sum -= XTEA_DELTA;
		y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
	}
	put_unaligned_le32(y, &dst[0]);
	put_unaligned_le32(z, &dst[4]);
}
/*
 * XETA: the widespread mis-implementation of XTEA kept for compatibility
 * (see file header).  Differs from xtea_encrypt only in operator grouping:
 * (mix) + (word ^ sum) + KEY instead of ((mix) + word) ^ (sum + KEY).
 */
static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, sum = 0;
	u32 limit = XTEA_DELTA * XTEA_ROUNDS;
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
	y = get_unaligned_le32(&src[0]);
	z = get_unaligned_le32(&src[4]);
	while (sum != limit) {
		y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
		sum += XTEA_DELTA;
		z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
	}
	put_unaligned_le32(y, &dst[0]);
	put_unaligned_le32(z, &dst[4]);
}
/*
 * Decrypt one 64-bit XETA block (inverse of xeta_encrypt, rounds in
 * reverse with sum counting down from XTEA_DELTA * XTEA_ROUNDS).
 */
static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	u32 y, z, sum;
	/*
	 * Consistency fix: use struct xtea_ctx like xtea_setkey/xeta_encrypt
	 * (the original declared struct tea_ctx; both are u32 KEY[4], so
	 * behaviour is unchanged).
	 */
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
	y = get_unaligned_le32(&src[0]);
	z = get_unaligned_le32(&src[4]);
	sum = XTEA_DELTA * XTEA_ROUNDS;
	while (sum) {
		z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3];
		sum -= XTEA_DELTA;
		y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
	}
	put_unaligned_le32(y, &dst[0]);
	put_unaligned_le32(z, &dst[4]);
}
/*
 * Registration table for the three single-block ciphers.  XETA shares the
 * XTEA key schedule (xtea_setkey) and context; only its round function
 * differs.
 */
static struct crypto_alg tea_algs[3] = { {
	.cra_name		=	"tea",
	.cra_driver_name	=	"tea-generic",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	TEA_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof (struct tea_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	TEA_KEY_SIZE,
	.cia_max_keysize	=	TEA_KEY_SIZE,
	.cia_setkey		= 	tea_setkey,
	.cia_encrypt		=	tea_encrypt,
	.cia_decrypt		=	tea_decrypt } }
}, {
	.cra_name		=	"xtea",
	.cra_driver_name	=	"xtea-generic",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	XTEA_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof (struct xtea_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	XTEA_KEY_SIZE,
	.cia_max_keysize	=	XTEA_KEY_SIZE,
	.cia_setkey		= 	xtea_setkey,
	.cia_encrypt		=	xtea_encrypt,
	.cia_decrypt		=	xtea_decrypt } }
}, {
	.cra_name		=	"xeta",
	.cra_driver_name	=	"xeta-generic",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	XTEA_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof (struct xtea_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	XTEA_KEY_SIZE,
	.cia_max_keysize	=	XTEA_KEY_SIZE,
	.cia_setkey		= 	xtea_setkey,
	.cia_encrypt		=	xeta_encrypt,
	.cia_decrypt		=	xeta_decrypt } }
} };
/* Register all three cipher algorithms with the crypto API. */
static int __init tea_mod_init(void)
{
	return crypto_register_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
/* Unregister the ciphers on module unload. */
static void __exit tea_mod_fini(void)
{
	crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
module_init(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TEA, XTEA & XETA Cryptographic Algorithms"); | c | github | https://github.com/torvalds/linux | crypto/tea.c |
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { HStack } from "@chakra-ui/react";
import type { ApiError } from "openapi-gen/requests/core/ApiError";
import type { HTTPExceptionResponse, HTTPValidationError } from "openapi-gen/requests/types.gen";
import { Alert } from "./Alert";
type ExpandedApiError = {
body: HTTPExceptionResponse | HTTPValidationError | undefined;
} & ApiError;
type Props = {
readonly error?: unknown;
};
// Render an API error (status + message + optional detail) inside an Alert.
// Returns undefined when no error is present, so callers can render
// <ErrorAlert error={...} /> unconditionally.
export const ErrorAlert = ({ error: err }: Props) => {
  const error = err as ExpandedApiError;
  if (!Boolean(error)) {
    return undefined;
  }
  const details = error.body?.detail;
  let detailMessage;
  if (details !== undefined) {
    if (typeof details === "string") {
      detailMessage = details;
    } else if (Array.isArray(details)) {
      // HTTPValidationError: one entry per validation item, "loc.path msg".
      // NOTE: the template literal's leading newline/indent is intentional
      // and part of the rendered string.
      detailMessage = details.map(
        (detail) => `
            ${detail.loc.join(".")} ${detail.msg}`,
      );
    } else {
      // Plain object detail: render "key: value" lines.
      detailMessage = Object.keys(details).map((key) => `${key}: ${details[key] as string}`);
    }
  }
  return (
    <Alert status="error">
      <HStack align="start" flexDirection="column" gap={2} mt={-1}>
        {error.status} {error.message}
        {detailMessage === error.message ? undefined : <span>{detailMessage}</span>}
      </HStack>
    </Alert>
  );
}; | typescript | github | https://github.com/apache/airflow | airflow-core/src/airflow/api_fastapi/auth/managers/simple/ui/src/alert/ErrorAlert.tsx |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsColorButton.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '25/05/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.gui import QgsColorButton
from qgis.core import QgsApplication, QgsProjectColorScheme
from qgis.testing import start_app, unittest
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtTest import QSignalSpy
start_app()
class TestQgsColorButton(unittest.TestCase):
    """Unit tests for QgsColorButton: clearing to transparent, nulling,
    and linking the button to a named project color."""
    def testClearingColors(self):
        """
        Test setting colors to transparent
        """
        # start with a valid color
        button = QgsColorButton()
        button.setAllowOpacity(True)
        button.setColor(QColor(255, 100, 200, 255))
        self.assertEqual(button.color(), QColor(255, 100, 200, 255))
        # now set to no color
        button.setToNoColor()
        # ensure that only the alpha channel has changed - not the other color components
        self.assertEqual(button.color(), QColor(255, 100, 200, 0))
    def testNulling(self):
        """
        Test clearing colors to null
        """
        # start with a valid color
        button = QgsColorButton()
        button.setAllowOpacity(True)
        button.setColor(QColor(255, 100, 200, 255))
        self.assertEqual(button.color(), QColor(255, 100, 200, 255))
        # watch both the change and clear signals
        spy_changed = QSignalSpy(button.colorChanged)
        spy_cleared = QSignalSpy(button.cleared)
        button.setColor(QColor(50, 100, 200, 255))
        self.assertEqual(button.color(), QColor(50, 100, 200, 255))
        self.assertEqual(len(spy_changed), 1)
        self.assertEqual(len(spy_cleared), 0)
        # now set to null
        button.setToNull()
        self.assertEqual(button.color(), QColor())
        self.assertEqual(len(spy_changed), 2)
        self.assertEqual(len(spy_cleared), 1)
        button.setToNull()
        self.assertEqual(button.color(), QColor())
        # should not be refired, the color wasn't changed
        self.assertEqual(len(spy_changed), 2)
        # SHOULD be refired
        self.assertEqual(len(spy_cleared), 2)
    def testLinkProjectColor(self):
        """
        Test linking to a project color
        """
        project_scheme = [s for s in QgsApplication.colorSchemeRegistry().schemes() if isinstance(s, QgsProjectColorScheme)][0]
        project_scheme.setColors([[QColor(255, 0, 0), 'col1'], [QColor(0, 255, 0), 'col2']])
        button = QgsColorButton()
        spy = QSignalSpy(button.unlinked)
        button.setColor(QColor(0, 0, 255))
        self.assertFalse(button.linkedProjectColorName())
        # linking replaces the button's color with the project color
        button.linkToProjectColor('col1')
        self.assertEqual(button.linkedProjectColorName(), 'col1')
        self.assertEqual(button.color().name(), '#ff0000')
        self.assertEqual(len(spy), 0)
        # unlinking restores the previously set color and fires `unlinked`
        button.unlink()
        self.assertFalse(button.linkedProjectColorName())
        self.assertEqual(button.color().name(), '#0000ff')
        self.assertEqual(len(spy), 1)
        button.linkToProjectColor('col2')
        self.assertEqual(button.linkedProjectColorName(), 'col2')
        self.assertEqual(button.color().name(), '#00ff00')
        self.assertEqual(len(spy), 1)
        project_scheme.setColors([[QColor(255, 0, 0), 'xcol1'], [QColor(0, 255, 0), 'xcol2']])
        # linked color no longer exists
        self.assertFalse(button.linkedProjectColorName())
        self.assertEqual(button.color().name(), '#0000ff')
        self.assertEqual(len(spy), 2)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
// run
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test len constants and non-constants, https://golang.org/issue/3244.
package main
var b struct {
a [10]int
}
var m map[string][20]int
var s [][30]int
const (
n1 = len(b.a)
n2 = len(m[""])
n3 = len(s[10])
)
// Non-constants (see also const5.go).
var (
n4 = len(f())
n5 = len(<-c)
n6 = cap(g())
n7 = cap(<-c1)
)
var calledF = false
// f flips calledF so main can verify that len(f()) actually evaluated the
// call; returning nil is fine here because the length comes from the
// *[40]int type, not from dereferencing the pointer.
func f() *[40]int {
	calledF = true
	return nil
}
var c = func() chan *[50]int {
c := make(chan *[50]int, 2)
c <- nil
c <- new([50]int)
return c
}()
var calledG = false
// g mirrors f for the cap() case: it records the call so main can confirm
// cap(g()) evaluated g rather than being constant-folded.
func g() *[60]int {
	calledG = true
	return nil
}
var c1 = func() chan *[70]int {
c := make(chan *[70]int, 2)
c <- nil
c <- new([70]int)
return c
}()
// main checks every len/cap value computed above and verifies the
// non-constant initializers really ran (issue 3244: len/cap of function
// results and channel receives must not be treated as constants).
func main() {
	if n1 != 10 || n2 != 20 || n3 != 30 || n4 != 40 || n5 != 50 || n6 != 60 || n7 != 70 {
		println("BUG:", n1, n2, n3, n4, n5, n6, n7)
		panic("fail")
	}
	if !calledF {
		println("BUG: did not call f")
		panic("fail")
	}
	// Each channel was seeded with nil then a non-nil pointer; the len/cap
	// initializers above should have consumed the nil, so a non-nil receive
	// here proves exactly one receive happened during initialization.
	if <-c == nil {
		println("BUG: did not receive from c")
		panic("fail")
	}
	if !calledG {
		println("BUG: did not call g")
		panic("fail")
	}
	if <-c1 == nil {
		println("BUG: did not receive from c1")
		panic("fail")
	}
} | go | github | https://github.com/golang/go | test/const4.go |
# frozen_string_literal: true
require "cases/helper"
require "models/post"
require "models/author"
require "models/developer"
require "models/computer"
require "models/project"
require "models/comment"
require "models/category"
require "models/person"
require "models/reference"
class RelationScopingTest < ActiveRecord::TestCase
fixtures :authors, :author_addresses, :developers, :projects, :comments, :posts, :developers_projects
setup do
developers(:david)
end
def test_unscoped_breaks_caching
author = authors :mary
assert_nil author.first_post
post = FirstPost.unscoped do
author.reload.first_post
end
assert post
end
def test_scope_breaks_caching_on_collections
author = authors :david
ids = author.reload.special_posts_with_default_scope.map(&:id)
assert_equal [1, 5, 6], ids.sort
scoped_posts = SpecialPostWithDefaultScope.unscoped do
author = authors :david
author.reload.special_posts_with_default_scope.to_a
end
assert_equal author.posts.map(&:id).sort, scoped_posts.map(&:id).sort
end
def test_reverse_order
assert_equal Developer.order("id DESC").to_a.reverse, Developer.order("id DESC").reverse_order
end
def test_reverse_order_with_arel_attribute
assert_equal Developer.order("id DESC").to_a.reverse, Developer.order(Developer.arel_table[:id].desc).reverse_order
end
def test_reverse_order_with_arel_attribute_as_hash
assert_equal Developer.order("id DESC").to_a.reverse, Developer.order(Developer.arel_table[:id] => :desc).reverse_order
end
def test_reverse_order_with_arel_node_as_hash
node = Developer.arel_table[:id] + 0 # converts to Arel::Nodes::Grouping
assert_equal Developer.order("id DESC").to_a.reverse, Developer.order(node => :desc).reverse_order
end
def test_reverse_order_with_multiple_arel_attributes
assert_equal Developer.order("id DESC").order("name DESC").to_a.reverse, Developer.order(Developer.arel_table[:id].desc).order(Developer.arel_table[:name].desc).reverse_order
end
def test_reverse_order_with_arel_attributes_and_strings
assert_equal Developer.order("id DESC").order("name DESC").to_a.reverse, Developer.order("id DESC").order(Developer.arel_table[:name].desc).reverse_order
end
def test_double_reverse_order_produces_original_order
assert_equal Developer.order("name DESC"), Developer.order("name DESC").reverse_order.reverse_order
end
def test_scoped_find
Developer.where("name = 'David'").scoping do
assert_nothing_raised { Developer.find(1) }
end
end
def test_scoped_find_first
developer = Developer.find(10)
Developer.where("salary = 100000").scoping do
assert_equal developer, Developer.order("name").first
end
end
def test_scoped_find_last
highest_salary = Developer.order("salary DESC").first
Developer.order("salary").scoping do
assert_equal highest_salary, Developer.last
end
end
def test_scoped_find_last_preserves_scope
lowest_salary = Developer.order("salary ASC").first
highest_salary = Developer.order("salary DESC").first
Developer.order("salary").scoping do
assert_equal highest_salary, Developer.last
assert_equal lowest_salary, Developer.first
end
end
def test_scoped_find_combines_and_sanitizes_conditions
Developer.where("salary = 9000").scoping do
assert_equal developers(:poor_jamis), Developer.where("name = 'Jamis'").first
end
end
def test_scoped_unscoped
DeveloperOrderedBySalary.where("salary = 9000").scoping do
assert_equal 11, DeveloperOrderedBySalary.first.id
assert_equal 1, DeveloperOrderedBySalary.unscoped.first.id
end
end
def test_scoped_default_scoped
DeveloperOrderedBySalary.where("salary = 9000").scoping do
assert_equal 11, DeveloperOrderedBySalary.first.id
assert_equal 2, DeveloperOrderedBySalary.default_scoped.first.id
end
end
# `all` inherits the surrounding scope's conditions.
def test_scoped_find_all
  Developer.where("name = 'David'").scoping do
    # Only the David fixture can match inside the block.
    found = Developer.all
    assert_equal [developers(:david)], found
  end
end
# A select-list scope restricts the loaded attributes: salary was not
# selected, so the record must not expose it.
def test_scoped_find_select
  Developer.select("id, name").scoping do
    developer = Developer.where("name = 'David'").first
    assert_equal "David", developer.name
    assert_not developer.has_attribute?(:salary)
  end
end
# An inner `select` adds to (rather than replaces) the scoped select list,
# so all three columns end up loaded.
def test_scope_select_concatenates
  Developer.select("id, name").scoping do
    developer = Developer.select("salary").where("name = 'David'").first
    assert_equal 80000, developer.salary
    assert developer.has_attribute?(:id)
    assert developer.has_attribute?(:name)
    assert developer.has_attribute?(:salary)
  end
end
# `count` honors the surrounding scope, and inner conditions AND with it.
def test_scoped_count
  Developer.where("name = 'David'").scoping do
    assert_equal 1, Developer.count
  end
  Developer.where("salary = 100000").scoping do
    assert_equal 8, Developer.count
    assert_equal 1, Developer.where("name LIKE 'fixture_1%'").count
  end
end
# An `annotate` scope injects its SQL comment into queries run in the block.
def test_scoped_find_with_annotation
  Developer.annotate("scoped").scoping do
    developer = nil
    assert_queries_match(%r{/\* scoped \*/}) do
      developer = Developer.where("name = 'David'").first
    end
    assert_equal "David", developer.name
  end
end
# `unscoped` strips a scoped annotation: no captured query inside the
# block may carry the /* scoped */ comment.
def test_find_with_annotation_unscoped
  Developer.annotate("scoped").unscoped do
    developer = nil
    log = capture_sql do
      developer = Developer.where("name = 'David'").first
    end
    assert_not_predicate log, :empty?
    assert_predicate log.select { |query| query.match?(%r{/\* scoped \*/}) }, :empty?
    assert_equal "David", developer.name
  end
end
# `unscope(:annotate)` removes an annotation added earlier in the chain.
def test_find_with_annotation_unscope
  developer = nil
  log = capture_sql do
    developer = Developer.annotate("unscope").
      where("name = 'David'").
      unscope(:annotate).first
  end
  assert_not_predicate log, :empty?
  assert_predicate log.select { |query| query.match?(%r{/\* unscope \*/}) }, :empty?
  assert_equal "David", developer.name
end
# An `includes` scope lets inner conditions reference the eager-loaded
# table ("projects.id").
def test_scoped_find_include
  # with the include, will retrieve only developers for the given project
  scoped_developers = Developer.includes(:projects).scoping do
    Developer.where("projects.id" => 2).to_a
  end
  assert_includes scoped_developers, developers(:david)
  assert_not_includes scoped_developers, developers(:jamis)
  assert_equal 1, scoped_developers.size
end
# A raw SQL `joins` scope makes the joined table's columns available to
# inner conditions run within the block.
def test_scoped_find_joins
  scoped_developers = Developer.joins("JOIN developers_projects ON id = developer_id").scoping do
    Developer.where("developers_projects.project_id = 2").to_a
  end
  assert_includes scoped_developers, developers(:david)
  assert_not_includes scoped_developers, developers(:jamis)
  assert_equal 1, scoped_developers.size
  # The join must not leak extra columns into the loaded attributes.
  assert_equal developers(:david).attributes, scoped_developers.first.attributes
end
# Equality `where` values double as attributes for records created
# inside the scoping block.
def test_scoped_create_with_where
  new_comment = VerySpecialComment.where(post_id: 1).scoping { VerySpecialComment.create(body: "Wonderful world") }
  assert_equal 1, new_comment.post_id
  assert_includes Post.find(1).comments, new_comment
end
# An array-valued `where` clause cannot pick a single attribute value,
# so the created record falls back to the column default for `label`.
def test_scoped_create_with_where_with_array
  new_comment = VerySpecialComment.where(label: [0, 1], post_id: 1).scoping do
    VerySpecialComment.create body: "Wonderful world"
  end
  assert_equal 1, new_comment.post_id
  assert_equal "default", new_comment.label
  assert_includes Post.find(1).comments, new_comment
end
# A range-valued `where` clause likewise doesn't define a single value,
# so `label` falls back to its column default on create.
def test_scoped_create_with_where_with_range
  new_comment = VerySpecialComment.where(label: 0..1, post_id: 1).scoping do
    VerySpecialComment.create body: "Wonderful world"
  end
  assert_equal 1, new_comment.post_id
  assert_equal "default", new_comment.label
  assert_includes Post.find(1).comments, new_comment
end
# `create_with` attributes flow into records created inside the block.
def test_scoped_create_with_create_with
  new_comment = VerySpecialComment.create_with(post_id: 1).scoping { VerySpecialComment.create(body: "Wonderful world") }
  assert_equal 1, new_comment.post_id
  assert_includes Post.find(1).comments, new_comment
end
# When `where` and `create_with` disagree, `create_with` wins for creates.
def test_scoped_create_with_create_with_has_higher_priority
  new_comment = VerySpecialComment.where(post_id: 2).create_with(post_id: 1).scoping do
    VerySpecialComment.create body: "Wonderful world"
  end
  assert_equal 1, new_comment.post_id
  assert_includes Post.find(1).comments, new_comment
end
# An exception raised inside the scoping block must still pop the scope.
def test_ensure_that_method_scoping_is_correctly_restored
  begin
    Developer.where("name = 'Jamis'").scoping { raise "an exception" }
  rescue
    # Swallow the exception: we only care that the scope stack unwinds.
  end
  assert_not Developer.all.to_sql.include?("name = 'Jamis'"), "scope was not restored"
end
# A default scope defined with joins filters reads through the join:
# only the david row matches.
def test_default_scope_filters_on_joins
  assert_equal 1, DeveloperFilteredOnJoins.all.count
  assert_equal DeveloperFilteredOnJoins.all.first, developers(:david).becomes(DeveloperFilteredOnJoins)
end
# update_all must respect a join-based default scope and leave rows
# outside the scope unmodified.
def test_update_all_default_scope_filters_on_joins
  DeveloperFilteredOnJoins.update_all(salary: 65000)
  assert_equal 65000, Developer.find(developers(:david).id).salary
  # has not changed jamis
  assert_not_equal 65000, Developer.find(developers(:jamis).id).salary
end
# delete_all must also respect the join-based default scope: scoped rows
# disappear while the rest of the developers table survives.
def test_delete_all_default_scope_filters_on_joins
  assert_not_equal [], DeveloperFilteredOnJoins.all
  DeveloperFilteredOnJoins.delete_all()
  assert_equal [], DeveloperFilteredOnJoins.all
  assert_not_equal [], Developer.all
end
# A scope set on one STI class must affect only that class and its
# descendants (SubSpecialComment < SpecialComment here) — never its
# ancestors or sibling branches.
def test_current_scope_does_not_pollute_sibling_subclasses
  Comment.none.scoping do
    assert_not_predicate SpecialComment.all, :any?
    assert_not_predicate VerySpecialComment.all, :any?
    assert_not_predicate SubSpecialComment.all, :any?
  end

  SpecialComment.none.scoping do
    assert_predicate Comment.all, :any?
    assert_predicate VerySpecialComment.all, :any?
    assert_not_predicate SubSpecialComment.all, :any?
  end

  SubSpecialComment.none.scoping do
    assert_predicate Comment.all, :any?
    assert_predicate VerySpecialComment.all, :any?
    assert_predicate SpecialComment.all, :any?
  end
end
# After nested unscoped blocks across two STI classes, the per-class
# current_scope registries must be cleared again.
def test_scoping_is_correctly_restored
  Comment.unscoped do
    SpecialComment.unscoped.created
  end
  assert_nil Comment.current_scope
  assert_nil SpecialComment.current_scope
end
# Inside `unscoped`, relation class methods still dispatch on the
# receiver class: each class reports its own `what_are_you`.
def test_scoping_respects_current_class
  Comment.unscoped do
    assert_equal "a comment...", Comment.all.what_are_you
    assert_equal "a special comment...", SpecialComment.all.what_are_you
  end
end
# `unscoped` removes user scopes but never the STI type condition:
# comment 1 is a plain Comment, so SpecialComment.find(1) must fail.
def test_scoping_respects_sti_constraint
  Comment.unscoped do
    assert_equal comments(:greetings), Comment.find(1)
    assert_raises(ActiveRecord::RecordNotFound) { SpecialComment.find(1) }
  end
end
# A class-level helper defined on the model sees the unscoped relation.
def test_scoping_with_klass_method_works_in_the_scope_block
  assert_equal SpecialPostWithDefaultScope.unscoped.to_a, SpecialPostWithDefaultScope.unscoped_all
end
# A class method may build on `unscoped` and add conditions of its own.
def test_scoping_with_query_method_works_in_the_scope_block
  assert_equal SpecialPostWithDefaultScope.unscoped.where(author_id: 0).to_a, SpecialPostWithDefaultScope.authorless
end
# Post -> comments -> post forms a join cycle; scoping must handle it.
def test_circular_joins_with_scoping_does_not_crash
  circular = Post.joins(comments: :post)
  scoped_posts = circular.scoping { Post.first(10) }
  assert_equal scoped_posts, circular.first(10)
end
# Same cycle as above but with LEFT OUTER joins.
def test_circular_left_joins_with_scoping_does_not_crash
  circular = Post.left_joins(comments: :post)
  scoped_posts = circular.scoping { Post.first(10) }
  assert_equal scoped_posts, circular.first(10)
end
# With plain `scoping`, instance-level UPDATEs are unaffected; with
# `all_queries: true` the scope condition is added to the UPDATE as well.
def test_scoping_applies_to_update_with_all_queries
  Author.all.limit(5).update_all(organization_id: 1)
  dev = Author.where(organization_id: 1).first
  Author.where(organization_id: 1).scoping do
    update_sql = capture_sql { dev.update(name: "Eileen") }.first
    assert_no_match(/organization_id/, update_sql)
  end
  Author.where(organization_id: 1).scoping(all_queries: true) do
    # `.second` picks the second captured statement as the UPDATE —
    # presumably another query precedes it here; confirm against capture_sql.
    update_scoped_sql = capture_sql { dev.update(name: "Not Eileen") }.second
    assert_match(/organization_id/, update_scoped_sql)
  end
end
# Instance deletes ignore plain scoping but honor all_queries scoping,
# which appends the scope condition to the DELETE statement.
def test_scoping_applies_to_delete_with_all_queries
  Author.all.limit(5).update_all(organization_id: 1)
  dev1 = Author.where(organization_id: 1).first
  dev2 = Author.where(organization_id: 1).last
  Author.where(organization_id: 1).scoping do
    delete_sql = capture_sql { dev1.delete }.first
    assert_no_match(/organization_id/, delete_sql)
  end
  Author.where(organization_id: 1).scoping(all_queries: true) do
    delete_scoped_sql = capture_sql { dev2.delete }.first
    assert_match(/organization_id/, delete_scoped_sql)
  end
end
# `reload` likewise ignores plain scoping but picks up the condition
# when the scope is created with all_queries: true.
def test_scoping_applies_to_reload_with_all_queries
  Author.all.limit(5).update_all(organization_id: 1)
  dev1 = Author.where(organization_id: 1).first
  Author.where(organization_id: 1).scoping do
    reload_sql = capture_sql { dev1.reload }.first
    assert_no_match(/organization_id/, reload_sql)
  end
  Author.where(organization_id: 1).scoping(all_queries: true) do
    scoped_reload_sql = capture_sql { dev1.reload }.first
    assert_match(/organization_id/, scoped_reload_sql)
  end
end
# Inside an all-queries scope, a nested plain scoping block adds its own
# condition for its duration; the outer condition applies throughout.
def test_nested_scoping_applies_with_all_queries_set
  Author.all.limit(5).update_all(organization_id: 1)
  Author.where(organization_id: 1).scoping(all_queries: true) do
    select_sql = capture_sql { Author.first }.first
    assert_match(/organization_id/, select_sql)
    Author.where(owned_essay_id: nil).scoping do
      second_select_sql = capture_sql { Author.first }.first
      assert_match(/organization_id/, second_select_sql)
      assert_match(/owned_essay_id/, second_select_sql)
    end
    # The nested condition is gone once its block exits.
    third_select_sql = capture_sql { Author.first }.first
    assert_match(/organization_id/, third_select_sql)
    assert_no_match(/owned_essay_id/, third_select_sql)
  end
end
# Once all_queries scoping is active it cannot be turned off by a nested
# block; attempting to raises ArgumentError with a fixed message.
def test_raises_error_if_all_queries_is_set_to_false_while_nested
  Author.all.limit(5).update_all(organization_id: 1)
  Author.where(organization_id: 1).scoping(all_queries: true) do
    select_sql = capture_sql { Author.first }.first
    assert_match(/organization_id/, select_sql)
    error = assert_raises ArgumentError do
      Author.where(organization_id: 1).scoping(all_queries: false) { }
    end
    assert_equal "Scoping is set to apply to all queries and cannot be " \
      "unset in a nested block.", error.message
  end
end
end
# Exercises nested `scoping` / `unscoped` blocks: option merging, innermost
# precedence, and restoration of the outer scope when a block exits.
class NestedRelationScopingTest < ActiveRecord::TestCase
  fixtures :authors, :author_addresses, :developers, :projects, :comments, :posts

  # Conditions and limit from nested scoping blocks merge into one query.
  def test_merge_options
    Developer.where("salary = 80000").scoping do
      Developer.limit(10).scoping do
        devs = Developer.all
        sql = devs.to_sql
        assert_match "(salary = 80000)", sql
        # LIMIT spelling differs across adapters (Oracle, SQL Server, ...).
        assert_match(/LIMIT 10|ROWNUM <= 10|FETCH FIRST 10 ROWS ONLY/, sql)
      end
    end
  end

  # When both levels set the same option, the innermost value wins.
  def test_merge_inner_scope_has_priority
    Developer.limit(5).scoping do
      Developer.limit(10).scoping do
        assert_equal 10, Developer.all.size
      end
    end
  end

  # `unscoped` suspends the outer scope for its block only; the outer
  # scope is back in force afterwards.
  def test_replace_options
    Developer.where(name: "David").scoping do
      Developer.unscoped do
        assert_equal "Jamis", Developer.where(name: "Jamis").first[:name]
      end
      assert_equal "David", Developer.first[:name]
    end
  end

  # Each nesting level fully replaces the previous scope via `unscoped`,
  # and each is restored as its block exits.
  def test_three_level_nested_exclusive_scoped_find
    Developer.where("name = 'Jamis'").scoping do
      assert_equal "Jamis", Developer.first.name
      Developer.unscoped.where("name = 'David'") do
        assert_equal "David", Developer.first.name
        Developer.unscoped.where("name = 'Maiha'") do
          assert_nil Developer.first
        end
        # ensure that scoping is restored
        assert_equal "David", Developer.first.name
      end
      # ensure that scoping is restored
      assert_equal "Jamis", Developer.first.name
    end
  end

  # `create_with` values from the innermost scoping block win for creates.
  def test_nested_scoped_create
    comment = Comment.create_with(post_id: 1).scoping do
      Comment.create_with(post_id: 2).scoping do
        Comment.create body: "Hey guys, nested scopes are broken. Please fix!"
      end
    end
    assert_equal 2, comment.post_id
  end

  # An `unscoped` relation's create_with replaces (not merges with) the
  # outer block's create_with attributes.
  def test_nested_exclusive_scope_for_create
    comment = Comment.create_with(body: "Hey guys, nested scopes are broken. Please fix!").scoping do
      Comment.unscoped.create_with(post_id: 1).scoping do
        assert_predicate Comment.new.body, :blank?
        Comment.create body: "Hey guys"
      end
    end
    assert_equal 1, comment.post_id
    assert_equal "Hey guys", comment.body
  end
end
# Verifies that class-level `scoping` does not leak into has_many
# association queries, and that association proxies forward class methods.
class HasManyScopingTest < ActiveRecord::TestCase
  fixtures :comments, :posts, :people, :references

  def setup
    # Post 1 has two comments in the fixtures (asserted below).
    @welcome = Post.find(1)
  end

  # Class methods called on the association proxy delegate to the model.
  def test_forwarding_of_static_methods
    assert_equal "a comment...", Comment.what_are_you
    assert_equal "a comment...", @welcome.comments.what_are_you
  end

  # Named scopes are reachable through the association and are narrowed
  # to the owner's comments.
  def test_forwarding_to_scoped
    assert_equal 5, Comment.search_by_type("Comment").size
    assert_equal 2, @welcome.comments.search_by_type("Comment").size
  end

  # A class-level scope (even one matching nothing) must not narrow the
  # association's own query.
  def test_nested_scope_finder
    Comment.where("1=0").scoping do
      assert_equal 2, @welcome.comments.count
      assert_equal "a comment...", @welcome.comments.what_are_you
    end
    Comment.where("1=1").scoping do
      assert_equal 2, @welcome.comments.count
      assert_equal "a comment...", @welcome.comments.what_are_you
    end
  end

  # `none` scoping on the class likewise leaves the association untouched.
  def test_none_scoping
    Comment.none.scoping do
      assert_equal 2, @welcome.comments.count
      assert_equal "a comment...", @welcome.comments.what_are_you
    end
    Comment.where("1=1").scoping do
      assert_equal 2, @welcome.comments.count
      assert_equal "a comment...", @welcome.comments.what_are_you
    end
  end

  # Associations targeting a default-scoped class keep that default scope.
  def test_should_maintain_default_scope_on_associations
    magician = BadReference.find(1)
    assert_equal [magician], people(:michael).bad_references
  end

  # The association's own conditions override the target's default scope.
  def test_should_default_scope_on_associations_is_overridden_by_association_conditions
    reference = references(:michael_unicyclist).becomes(BadReference)
    assert_equal [reference], people(:michael).fixed_bad_references
  end

  # The default scope also applies when the association is eager loaded.
  def test_should_maintain_default_scope_on_eager_loaded_associations
    michael = Person.where(id: people(:michael).id).includes(:bad_references).first
    magician = BadReference.find(1)
    assert_equal [magician], michael.bad_references
  end

  # With all_queries: true, even association reloads pick up the scope.
  def test_scoping_applies_to_all_queries_on_has_many_when_set
    @welcome.comments.update_all(author_id: 1)
    comments_sql = capture_sql { @welcome.comments.to_a }.last
    assert_no_match(/author_id/, comments_sql)
    Comment.where(author_id: 1).scoping(all_queries: true) do
      scoped_comments_sql = capture_sql { @welcome.comments.reload.to_a }.last
      assert_match(/author_id/, scoped_comments_sql)
    end
    unscoped_comments_sql = capture_sql { @welcome.comments.reload.to_a }.last
    assert_no_match(/author_id/, unscoped_comments_sql)
  end
end
# Verifies that class-level `scoping` does not leak into, or override,
# the query of a has_and_belongs_to_many association.
# Fix: the original closing `end` had dataset-table residue
# ("| ruby | github | ...") fused onto it, breaking the file's syntax.
class HasAndBelongsToManyScopingTest < ActiveRecord::TestCase
  fixtures :posts, :categories, :categories_posts

  def setup
    # Post 1 has two categories in the fixtures (asserted below).
    @welcome = Post.find(1)
  end

  # Class methods invoked on the association proxy delegate to the model.
  def test_forwarding_of_static_methods
    assert_equal "a category...", Category.what_are_you
    assert_equal "a category...", @welcome.categories.what_are_you
  end

  # A class-level scope (even an impossible one) must not narrow the
  # association: @welcome's categories stay fully visible.
  def test_nested_scope_finder
    Category.where("1=0").scoping do
      assert_equal 2, @welcome.categories.count
      assert_equal "a category...", @welcome.categories.what_are_you
    end
    Category.where("1=1").scoping do
      assert_equal 2, @welcome.categories.count
      assert_equal "a category...", @welcome.categories.what_are_you
    end
  end

  # `none` scoping on the class likewise leaves the association untouched.
  def test_none_scoping
    Category.none.scoping do
      assert_equal 2, @welcome.categories.count
      assert_equal "a category...", @welcome.categories.what_are_you
    end
    Category.where("1=1").scoping do
      assert_equal 2, @welcome.categories.count
      assert_equal "a category...", @welcome.categories.what_are_you
    end
  end
end
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""AsyncS3Connection module tests."""
__author__ = "andy@emailscrubbed.com (Andy Kimball)"
import os
import random
import unittest
from tornado import options, httpclient, simple_httpclient
from viewfinder.backend.storage.async_s3 import AsyncS3Connection, S3RetryPolicy
from viewfinder.backend.base import base_options, secrets
from viewfinder.backend.base.testing import BaseTestCase, LogMatchTestCase
try:
import pycurl
except ImportError:
pycurl = None
@unittest.skip("needs aws credentials")
@unittest.skipIf('NO_NETWORK' in os.environ, 'no network')
class AsyncS3TestCase(BaseTestCase, LogMatchTestCase):
  """Integration tests for AsyncS3Connection against live S3.

  Skipped by default: they need real AWS credentials and network access.
  Fixes vs. the original: `_RestoreHTTPClientConfig` no longer falls
  through and clobbers `_impl_class`/`_impl_kwargs` after a successful
  `_restore_configuration` call (mirrors `_SaveHTTPClientConfig`), a dead
  `assertTrue(1)` assertion was removed, and dataset residue fused onto
  the final line was dropped.
  """

  def setUp(self):
    super(AsyncS3TestCase, self).setUp()
    # Init secrets with the unencrypted 'goviewfinder.com' domain.
    options.options.domain = 'goviewfinder.com'
    secrets.InitSecretsForTest()
    self.bucket = 'test-goviewfinder-com'
    # Randomize the key so concurrent runs don't collide on the object.
    self.key = 'test/hello%d' % random.randint(1, 1000000)

  def tearDown(self):
    """Best-effort delete of the test object, then standard teardown."""
    def _OnCompletedDelete(response):
      self.stop()

    asyncS3 = AsyncS3Connection(aws_access_key_id=secrets.GetSecret('aws_access_key_id'),
                                aws_secret_access_key=secrets.GetSecret('aws_secret_access_key'))
    asyncS3.make_request('DELETE', self.bucket, self.key, callback=_OnCompletedDelete)
    self.wait()
    super(AsyncS3TestCase, self).tearDown()

  def testMakeByteRequest(self):
    """Try several successful AsyncS3Connection.make_request operations using a byte string value."""
    self._TestMakeRequest('abc 123\n\0\xc3\xb1')

  def testMakeUnicodeRequest(self):
    """Try calling AsyncS3Connection.make_request with a Unicode string (not supported)."""
    self.assertRaises(AssertionError, self._TestMakeRequest, u'abc 123\n\0\u1000')

  # Tornado 2.3 introduces _save_configuration and _restore_configuration.
  # When running on 2.2, implement them locally (in 3.0 the _impl variables
  # are being renamed, so we can't use our local versions all the time).
  def _SaveHTTPClientConfig(self):
    cls = httpclient.AsyncHTTPClient
    if hasattr(cls, '_save_configuration'):
      return cls._save_configuration()
    return cls._impl_class, cls._impl_kwargs

  def _RestoreHTTPClientConfig(self, saved):
    cls = httpclient.AsyncHTTPClient
    if hasattr(cls, '_restore_configuration'):
      cls._restore_configuration(saved)
    else:
      # Tornado 2.2 fallback: `saved` is the (_impl_class, _impl_kwargs)
      # pair captured by _SaveHTTPClientConfig above.
      cls._impl_class, cls._impl_kwargs = saved

  def testMakeRequestError(self):
    """Trigger errors in AsyncS3Connection.make_request using the Simple HTTP async client."""
    saved = self._SaveHTTPClientConfig()
    try:
      httpclient.AsyncHTTPClient.configure(None)
      self.assertIsInstance(httpclient.AsyncHTTPClient(io_loop=self.io_loop),
                            simple_httpclient.SimpleAsyncHTTPClient)
      self._TestMakeRequestError()
    finally:
      self._RestoreHTTPClientConfig(saved)

  @unittest.skipIf(pycurl is None, 'pycurl not available')
  def testMakeRequestCurlError(self):
    """Trigger errors in AsyncS3Connection.make_request using the Curl HTTP async client."""
    from tornado import curl_httpclient
    saved = self._SaveHTTPClientConfig()
    try:
      httpclient.AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')
      self.assertIsInstance(httpclient.AsyncHTTPClient(io_loop=self.io_loop),
                            curl_httpclient.CurlAsyncHTTPClient)
      self._TestMakeRequestError()
    finally:
      self._RestoreHTTPClientConfig(saved)

  def _TestMakeRequest(self, value):
    """PUT `value` to the test key, GET it back, and compare body/headers."""
    asyncS3 = AsyncS3Connection(host='s3.amazonaws.com',
                                aws_access_key_id=secrets.GetSecret('aws_access_key_id'),
                                aws_secret_access_key=secrets.GetSecret('aws_secret_access_key'))

    def _OnCompletedGet(response):
      self.assertEqual(response.body, value if type(value) is str else value.encode('utf-8'))
      self.assertEqual(response.headers['Content-Type'], 'text/plain; charset=utf-8')
      self.stop()

    def _OnCompletedPut(response):
      self.assertFalse(response.error)
      # Read the object back only after the PUT has completed.
      asyncS3.make_request('GET', self.bucket, self.key, callback=_OnCompletedGet)

    asyncS3.make_request('PUT', self.bucket, self.key, headers={'Content-Type' : 'text/plain; charset=utf-8'},
                         body=value, callback=_OnCompletedPut)
    self.wait(timeout=30)

  def _TestMakeRequestError(self):
    """Force a retried error (bad host) and a non-retried 403 (bad key)."""
    def _OnErrorRetry(response):
      self.assertTrue(response.error)
      self.assertLogMatches('(Retrying function after){1}', 'Retry should have occurred once')
      self.stop()

    def _OnErrorNoRetry(response):
      self.assertTrue(response.error)
      self.assertNotLogMatches('Retrying function after', 'Retry should not happen on HTTP 403 error')

    # Unknown host: a network-level error, retried once (max_tries=2).
    asyncS3 = AsyncS3Connection(host='unknown', aws_access_key_id=secrets.GetSecret('aws_access_key_id'),
                                aws_secret_access_key=secrets.GetSecret('aws_secret_access_key'),
                                retry_policy=S3RetryPolicy(max_tries=2, min_delay=0))
    asyncS3.make_request('GET', self.bucket, self.key, callback=_OnErrorRetry)

    # Bad access key: an HTTP 403 error that must not be retried.
    # NOTE(review): this callback does not call self.stop(); the wait
    # below is ended by _OnErrorRetry — confirm ordering is intended.
    asyncS3 = AsyncS3Connection(aws_access_key_id='unknown',
                                aws_secret_access_key=secrets.GetSecret('aws_secret_access_key'))
    asyncS3.make_request('GET', self.bucket, self.key, callback=_OnErrorNoRetry)
    self.wait(timeout=30)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.