code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
module.exports = "c"; | javascript | github | https://github.com/webpack/webpack | examples/extra-async-chunk-advanced/c.js |
#
# Copyright 2019-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
#
"""
Functionality to facilitate keeping code compatible with Python 2 & Python 3.
Implementations for Python 2.
:author: Kenneth Hoste (Ghent University)
"""
# these are not used here, but imported from here in other places
import ConfigParser as configparser # noqa
import json
import subprocess
import urllib2 as std_urllib # noqa
from HTMLParser import HTMLParser # noqa
from string import letters as ascii_letters # noqa
from string import lowercase as ascii_lowercase # noqa
from StringIO import StringIO # noqa
from urllib import urlencode # noqa
from urllib2 import HTTPError, HTTPSHandler, Request, URLError, build_opener, urlopen # noqa
# OrderedDict moved into the standard library in Python 2.7; fall back to
# EasyBuild's bundled implementation on older interpreters.
try:
    # Python 2.7
    from collections import OrderedDict  # noqa
except ImportError:
    # only needed to keep supporting Python 2.6
    from easybuild.tools.ordereddict import OrderedDict  # noqa
# Module-level aliases so callers can use the same names under Python 2 and 3.
# reload function (built-in in Python 2)
reload = reload
# string type that can be used in 'isinstance' calls (covers str + unicode in Python 2)
string_type = basestring
# trivial wrapper for json.loads (Python 3 version is less trivial)
json_loads = json.loads
def subprocess_popen_text(cmd, **kwargs):
    """Spawn `cmd` via subprocess.Popen, capturing stdout and stderr in pipes.

    Any extra keyword arguments are forwarded to subprocess.Popen unchanged.
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
    return proc
def raise_with_traceback(exception_class, message, traceback):
    """Raise exception of specified class with given message and traceback."""
    # Python 2-only three-expression raise form (a syntax error under Python 3,
    # hence the noqa for E999); the Python 3 module provides its own version.
    raise exception_class, message, traceback  # noqa: E999
def extract_method_name(method_func):
    """Extract method name from lambda function."""
    # func_code is the Python 2 spelling of __code__; co_names lists the global
    # names referenced by the lambda body, which are joined into one identifier.
    return '_'.join(method_func.func_code.co_names)
def mk_wrapper_baseclass(metaclass):
    """Return a wrapper base class bound to `metaclass` (Python 2 __metaclass__ syntax)."""

    class WrapperBase(object):
        """
        Wrapper class that provides proxy access to an instance of some internal instance.
        """
        # __metaclass__ is how Python 2 attaches a metaclass; __wraps__ is
        # expected to be overridden by subclasses with the type being proxied.
        __metaclass__ = metaclass
        __wraps__ = None

    return WrapperBase
def sort_looseversions(looseversions):
    """Sort list of (values including) LooseVersion instances."""
    # with Python 2, we can safely use 'sorted' on LooseVersion instances
    # (but we can't in Python 3, see https://bugs.python.org/issue14894)
    return sorted(looseversions) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use base64::Engine;
use minisign::{
sign, KeyPair as KP, PublicKey, PublicKeyBox, SecretKey, SecretKeyBox, SignatureBox,
};
use std::{
fs::{self, File, OpenOptions},
io::{BufReader, BufWriter, Write},
path::{Path, PathBuf},
str,
time::{SystemTime, UNIX_EPOCH},
};
use crate::error::{Context, ErrorExt};
/// A key pair (`PublicKey` and `SecretKey`).
#[derive(Clone, Debug)]
pub struct KeyPair {
    /// Base64-encoded minisign public key box (see `generate_key`).
    pub pk: String,
    /// Base64-encoded minisign secret key box, encrypted with the user's password.
    pub sk: String,
}
/// Create `path` (creating any missing parent directories first) and return a
/// buffered writer over the new file.
fn create_file(path: &Path) -> crate::Result<BufWriter<File>> {
    // Make sure the containing directory exists before touching the file.
    if let Some(dir) = path.parent() {
        fs::create_dir_all(dir).fs_context("failed to create directory", dir.to_path_buf())?;
    }
    let handle = File::create(path).fs_context("failed to create file", path.to_path_buf())?;
    Ok(BufWriter::new(handle))
}
/// Generate base64 encoded keypair
///
/// The secret key is encrypted with `password`. Both keys are returned as
/// base64-encoded minisign "box" strings. Errors are propagated instead of
/// panicking, matching the `.context(...)` style of the rest of this module.
pub fn generate_key(password: Option<String>) -> crate::Result<KeyPair> {
    let KP { pk, sk } =
        KP::generate_encrypted_keypair(password).context("failed to generate keypair")?;
    // Serialize both keys into their textual minisign box representation.
    let pk_box_str = pk.to_box().context("failed to box public key")?.to_string();
    let sk_box_str = sk
        .to_box(None)
        .context("failed to box secret key")?
        .to_string();
    // Wrap each box string in an extra base64 layer for storage/transport.
    let encoded_pk = base64::engine::general_purpose::STANDARD.encode(pk_box_str);
    let encoded_sk = base64::engine::general_purpose::STANDARD.encode(sk_box_str);
    Ok(KeyPair {
        pk: encoded_pk,
        sk: encoded_sk,
    })
}
/// Transform a base64 String to readable string for the main signer
pub fn decode_key<S: AsRef<[u8]>>(base64_key: S) -> crate::Result<String> {
    // Base64-decode first, then validate the bytes as UTF-8 text.
    let raw = base64::engine::general_purpose::STANDARD
        .decode(base64_key)
        .context("failed to decode base64 key")?;
    let text = str::from_utf8(&raw).context("failed to convert base64 to utf8")?;
    Ok(text.to_string())
}
/// Save KeyPair to disk
///
/// Writes the secret key to `sk_path` and the public key next to it
/// (`<sk_path>.pub`). Refuses to overwrite an existing secret key unless
/// `force` is set. Returns the canonicalized (secret, public) key paths.
pub fn save_keypair<P>(
    force: bool,
    sk_path: P,
    key: &str,
    pubkey: &str,
) -> crate::Result<(PathBuf, PathBuf)>
where
    P: AsRef<Path>,
{
    let sk_path = sk_path.as_ref();
    // The public key lives beside the secret key with a ".pub" suffix appended.
    let pubkey_path = format!("{}.pub", sk_path.display());
    let pk_path = Path::new(&pubkey_path);
    // Only the secret key is guarded by --force; see below for the public key.
    if sk_path.exists() {
        if !force {
            crate::error::bail!(
                "Key generation aborted:\n{} already exists\nIf you really want to overwrite the existing key pair, add the --force switch to force this operation.",
                sk_path.display()
            );
        } else {
            std::fs::remove_file(sk_path)
                .fs_context("failed to remove secret key file", sk_path.to_path_buf())?;
        }
    }
    // A stale public key is always replaced unconditionally.
    if pk_path.exists() {
        std::fs::remove_file(pk_path)
            .fs_context("failed to remove public key file", pk_path.to_path_buf())?;
    }
    // Write + explicit flush so errors surface here rather than on drop.
    let write_file = |mut writer: BufWriter<File>, contents: &str| -> std::io::Result<()> {
        write!(writer, "{contents:}")?;
        writer.flush()?;
        Ok(())
    };
    write_file(create_file(sk_path)?, key)
        .fs_context("failed to write secret key", sk_path.to_path_buf())?;
    write_file(create_file(pk_path)?, pubkey)
        .fs_context("failed to write public key", pk_path.to_path_buf())?;
    Ok((
        fs::canonicalize(sk_path).fs_context(
            "failed to canonicalize secret key path",
            sk_path.to_path_buf(),
        )?,
        fs::canonicalize(pk_path).fs_context(
            "failed to canonicalize public key path",
            pk_path.to_path_buf(),
        )?,
    ))
}
/// Sign files
///
/// Signs `bin_path` with `secret_key`, writing the base64-encoded signature to
/// `<bin_path>.sig` (the ".sig" is appended to any existing extension, not
/// substituted for it). Returns the canonicalized signature path and the
/// parsed `SignatureBox`.
pub fn sign_file<P>(secret_key: &SecretKey, bin_path: P) -> crate::Result<(PathBuf, SignatureBox)>
where
    P: AsRef<Path>,
{
    let bin_path = bin_path.as_ref();
    // We need to append .sig at the end it's where the signature will be stored
    // TODO: use with_added_extension when we bump MSRV to > 1.91'
    let signature_path = if let Some(ext) = bin_path.extension() {
        let mut extension = ext.to_os_string();
        extension.push(".sig");
        bin_path.with_extension(extension)
    } else {
        bin_path.with_extension("sig")
    };
    // The trusted comment is covered by the signature; it records when and
    // what was signed.
    let trusted_comment = format!(
        "timestamp:{}\tfile:{}",
        unix_timestamp(),
        bin_path.file_name().unwrap().to_string_lossy()
    );
    let data_reader = open_data_file(bin_path)?;
    let signature_box = sign(
        None,
        secret_key,
        data_reader,
        Some(trusted_comment.as_str()),
        Some("signature from tauri secret key"),
    )
    .context("failed to sign file")?;
    // The on-disk .sig file holds the signature base64-encoded.
    let encoded_signature =
        base64::engine::general_purpose::STANDARD.encode(signature_box.to_string());
    std::fs::write(&signature_path, encoded_signature.as_bytes())
        .fs_context("failed to write signature file", signature_path.clone())?;
    Ok((
        fs::canonicalize(&signature_path)
            .fs_context("failed to canonicalize signature file", &signature_path)?,
        signature_box,
    ))
}
/// Gets the updater secret key from the given private key and password.
pub fn secret_key<S: AsRef<[u8]>>(
    private_key: S,
    password: Option<String>,
) -> crate::Result<SecretKey> {
    // Unwrap the base64 layer, parse the minisign box, then decrypt it with
    // the supplied password.
    let decoded_secret = decode_key(private_key).context("failed to decode base64 secret key")?;
    SecretKeyBox::from_string(&decoded_secret)
        .context("failed to load updater private key")?
        .into_secret_key(password)
        .context("incorrect updater private key password")
}
/// Gets the updater public key from the given base64-encoded public key.
///
/// (The previous doc comment was copy-pasted from `secret_key` and wrongly
/// described this as returning a secret key.)
pub fn pub_key<S: AsRef<[u8]>>(public_key: S) -> crate::Result<PublicKey> {
    let decoded_public = decode_key(public_key).context("failed to decode base64 pubkey")?;
    let pk_box =
        PublicKeyBox::from_string(&decoded_public).context("failed to load updater pubkey")?;
    let pk = pk_box
        .into_public_key()
        .context("failed to convert updater pubkey")?;
    Ok(pk)
}
/// Seconds elapsed since the Unix epoch.
///
/// Panics only if the system clock is set before 1970-01-01.
fn unix_timestamp() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is incorrect")
        .as_secs()
}
/// Open `data_path` read-only, wrapped in a `BufReader` for streaming reads
/// (used to feed file contents to the minisign signer).
fn open_data_file<P>(data_path: P) -> crate::Result<BufReader<File>>
where
    P: AsRef<Path>,
{
    let data_path = data_path.as_ref();
    let file = OpenOptions::new()
        .read(true)
        .open(data_path)
        .fs_context("failed to open data file", data_path.to_path_buf())?;
    Ok(BufReader::new(file))
}
#[cfg(test)]
mod tests {
    // Base64-encoded minisign secret key fixture (not a production key).
    const PRIVATE_KEY: &str = "dW50cnVzdGVkIGNvbW1lbnQ6IHJzaWduIGVuY3J5cHRlZCBzZWNyZXQga2V5ClJXUlRZMEl5dkpDN09RZm5GeVAzc2RuYlNzWVVJelJRQnNIV2JUcGVXZUplWXZXYXpqUUFBQkFBQUFBQUFBQUFBQUlBQUFBQTZrN2RnWGh5dURxSzZiL1ZQSDdNcktiaHRxczQwMXdQelRHbjRNcGVlY1BLMTBxR2dpa3I3dDE1UTVDRDE4MXR4WlQwa1BQaXdxKy9UU2J2QmVSNXhOQWFDeG1GSVllbUNpTGJQRkhhTnROR3I5RmdUZi90OGtvaGhJS1ZTcjdZU0NyYzhQWlQ5cGM9Cg==";

    // minisign >=0.7.4,<0.8.0 couldn't handle empty passwords.
    #[test]
    fn empty_password_is_valid() {
        // Signing a scratch file with an explicit empty password must succeed.
        let path = std::env::temp_dir().join("minisign-password-text.txt");
        std::fs::write(&path, b"TAURI").expect("failed to write test file");
        let secret_key =
            super::secret_key(PRIVATE_KEY, Some("".into())).expect("failed to resolve secret key");
        super::sign_file(&secret_key, &path).expect("failed to sign file");
    }
} | rust | github | https://github.com/tauri-apps/tauri | crates/tauri-cli/src/helpers/updater_signature.rs |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template import base
from django.template import defaultfilters
from django.utils import safestring
# Template-filter registry used by the @register.filter decorator below.
register = base.Library()
@register.filter(is_safe=True)
@defaultfilters.stringfilter
def shellfilter(value):
"""Replace HTML chars for shell usage."""
replacements = {'\\': '\\\\',
'`': '\`',
"'": "\\'",
'"': '\\"'}
for search, repl in replacements.items():
value = value.replace(search, repl)
return safestring.mark_safe(value) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Basic Authorization tests for the OSF.'''
from __future__ import absolute_import
import unittest
import mock
from nose.tools import * # noqa PEP8 asserts
from website.addons.twofactor.tests import _valid_code
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, AuthUserFactory
class TestAuthBasicAuthentication(OsfTestCase):
    """HTTP Basic Auth tests against project pages, with and without two-factor auth."""

    # Fixed TOTP secret shared by the two-factor tests below.
    TOTP_SECRET = 'b8f85986068f8079aa9d'

    def setUp(self):
        super(TestAuthBasicAuthentication, self).setUp()
        self.user1 = AuthUserFactory()
        self.user2 = AuthUserFactory()

        # Test projects for which a given user DOES and DOES NOT have appropriate permissions
        self.reachable_project = ProjectFactory(title="Private Project User 1", is_public=False, creator=self.user1)
        self.unreachable_project = ProjectFactory(title="Private Project User 2", is_public=False, creator=self.user2)
        self.reachable_url = self.reachable_project.web_url_for('view_project')
        self.unreachable_url = self.unreachable_project.web_url_for('view_project')

    def test_missing_credential_fails(self):
        # No credentials at all: redirected to the login page rather than a 401.
        res = self.app.get(self.unreachable_url, auth=None, expect_errors=True)
        assert_equal(res.status_code, 302)
        assert_true('Location' in res.headers)
        assert_true('/login' in res.headers['Location'])

    def test_invalid_credential_fails(self):
        res = self.app.get(self.unreachable_url, auth=(self.user1.username, 'invalid password'), expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_true('<h2 id=\'error\' data-http-status-code="401">Unauthorized</h2>' in res.body)

    def test_valid_credential_authenticates_and_has_permissions(self):
        res = self.app.get(self.reachable_url, auth=self.user1.auth)
        assert_equal(res.status_code, 200)

    def test_valid_credential_authenticates_but_user_lacks_object_permissions(self):
        # Valid login but the project belongs to another user: 403, not 401.
        res = self.app.get(self.unreachable_url, auth=self.user1.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_valid_credential_but_twofactor_required(self):
        user1_addon = self.user1.get_or_add_addon('twofactor')
        user1_addon.totp_drift = 1
        user1_addon.totp_secret = self.TOTP_SECRET
        user1_addon.is_confirmed = True
        user1_addon.save()
        # Two-factor is enabled but no OTP header was sent.
        res = self.app.get(self.reachable_url, auth=self.user1.auth, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_true('<h2 id=\'error\' data-http-status-code="401">Unauthorized</h2>' in res.body)

    def test_valid_credential_twofactor_invalid_otp(self):
        user1_addon = self.user1.get_or_add_addon('twofactor')
        user1_addon.totp_drift = 1
        user1_addon.totp_secret = self.TOTP_SECRET
        user1_addon.is_confirmed = True
        user1_addon.save()
        res = self.app.get(self.reachable_url, auth=self.user1.auth, headers={'X-OSF-OTP': 'invalid otp'}, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_true('<h2 id=\'error\' data-http-status-code="401">Unauthorized</h2>' in res.body)

    def test_valid_credential_twofactor_valid_otp(self):
        user1_addon = self.user1.get_or_add_addon('twofactor')
        user1_addon.totp_drift = 1
        user1_addon.totp_secret = self.TOTP_SECRET
        user1_addon.is_confirmed = True
        user1_addon.save()
        res = self.app.get(self.reachable_url, auth=self.user1.auth, headers={'X-OSF-OTP': _valid_code(self.TOTP_SECRET)})
        assert_equal(res.status_code, 200) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#
# Copyright (c) 2001-2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Test the HTML builder.
"""
import TestSCons
test = TestSCons.TestSCons()
# Skip the whole test when neither libxml2/libxslt nor lxml Python bindings
# are importable. Catch ImportError explicitly: the original bare `except:`
# would also swallow SystemExit and KeyboardInterrupt.
try:
    import libxml2
    import libxslt
except ImportError:
    try:
        import lxml
    except ImportError:
        test.skip_test('Cannot find installed Python binding for libxml2 or lxml, skipping test.\n')
# Copy the test fixture project into the working directory.
test.dir_fixture('image')

# Normal invocation: building must produce the HTML output.
test.run()
test.must_exist(test.workpath('manual.html'))

# Cleanup: `-c` must remove the generated HTML again.
test.run(arguments='-c')
test.must_not_exist(test.workpath('manual.html'))

test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import (NamedUrlSessionWizardView,
NamedUrlCookieWizardView)
from django.contrib.formtools.tests.wizard.forms import get_request, Step1, Step2
class NamedWizardTests(object):
    """Shared tests for named-URL wizards; subclasses supply the storage-specific POST data."""
    urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'

    def setUp(self):
        self.testuser, created = User.objects.get_or_create(username='testuser1')
        # Step 1 needs a valid user pk, which only exists once the test user is created.
        self.wizard_step_data[0]['form1-user'] = self.testuser.pk

    def test_initial_call(self):
        # The start URL redirects to the first step's named URL.
        response = self.client.get(reverse('%s_start' % self.wizard_urlname))
        self.assertEqual(response.status_code, 302)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        wizard = response.context['wizard']
        self.assertEqual(wizard['steps'].current, 'form1')
        self.assertEqual(wizard['steps'].step0, 0)
        self.assertEqual(wizard['steps'].step1, 1)
        self.assertEqual(wizard['steps'].last, 'form4')
        self.assertEqual(wizard['steps'].prev, None)
        self.assertEqual(wizard['steps'].next, 'form2')
        self.assertEqual(wizard['steps'].count, 4)
        self.assertEqual(wizard['url_name'], self.wizard_urlname)

    def test_initial_call_with_params(self):
        get_params = {'getvar1': 'getval1', 'getvar2': 'getval2'}
        response = self.client.get(reverse('%s_start' % self.wizard_urlname),
                                   get_params)
        self.assertEqual(response.status_code, 302)

        # Test for proper redirect GET parameters
        location = response['Location']
        self.assertNotEqual(location.find('?'), -1)
        querydict = QueryDict(location[location.find('?') + 1:])
        self.assertEqual(dict(querydict.items()), get_params)

    def test_form_post_error(self):
        # Posting step data without the required fields re-renders step 1 with errors.
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_1_data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        self.assertEqual(response.context['wizard']['form'].errors,
                         {'name': [u'This field is required.'],
                          'user': [u'This field is required.']})

    def test_form_post_success(self):
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        wizard = response.context['wizard']
        self.assertEqual(wizard['steps'].current, 'form2')
        self.assertEqual(wizard['steps'].step0, 1)
        self.assertEqual(wizard['steps'].prev, 'form1')
        self.assertEqual(wizard['steps'].next, 'form3')

    def test_form_stepback(self):
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        # Posting wizard_goto_step navigates back to the previous step.
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={
                'step': response.context['wizard']['steps'].current
            }), {'wizard_goto_step': response.context['wizard']['steps'].prev})
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')

    def test_form_jump(self):
        # Named URLs allow jumping straight to an arbitrary step via GET.
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form3'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form3')

    def test_form_finish(self):
        # Walk all four steps and verify the collected form data at the end.
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        post_data = self.wizard_step_data[1]
        # This test module itself doubles as the uploaded-file fixture.
        post_data['form2-file1'] = open(__file__)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            post_data)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form3')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form4')
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        all_data = response.context['form_list']
        self.assertEqual(all_data[1]['file1'].read(), open(__file__).read())
        del all_data[1]['file1']
        self.assertEqual(all_data, [
            {'name': u'Pony', 'thirsty': True, 'user': self.testuser},
            {'address1': u'123 Main St', 'address2': u'Djangoland'},
            {'random_crap': u'blah blah'},
            [{'random_crap': u'blah blah'}, {'random_crap': u'blah blah'}]])

    def test_cleaned_data(self):
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        post_data = self.wizard_step_data[1]
        post_data['form2-file1'] = open(__file__)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            post_data)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        step2_url = reverse(self.wizard_urlname, kwargs={'step': 'form2'})
        response = self.client.get(step2_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        # Revisiting step 2 must still expose the previously uploaded file.
        self.assertEqual(response.context['wizard']['form'].files['form2-file1'].read(), open(__file__).read())
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        all_data = response.context['all_cleaned_data']
        self.assertEqual(all_data['file1'].read(), open(__file__).read())
        del all_data['file1']
        self.assertEqual(
            all_data,
            {'name': u'Pony', 'thirsty': True, 'user': self.testuser,
             'address1': u'123 Main St', 'address2': u'Djangoland',
             'random_crap': u'blah blah', 'formset-form4': [
                 {'random_crap': u'blah blah'},
                 {'random_crap': u'blah blah'}
             ]})

    def test_manipulated_data(self):
        response = self.client.get(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        post_data = self.wizard_step_data[1]
        post_data['form2-file1'] = open(__file__)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            post_data)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[2])
        loc = response['Location']
        response = self.client.get(loc)
        self.assertEqual(response.status_code, 200, loc)
        # Dropping the cookies simulates tampered/expired wizard storage mid-run.
        self.client.cookies.pop('sessionid', None)
        self.client.cookies.pop('wizard_cookie_contact_wizard', None)
        response = self.client.post(
            reverse(self.wizard_urlname,
                    kwargs={'step': response.context['wizard']['steps'].current}),
            self.wizard_step_data[3])
        self.assertEqual(response.status_code, 200)
        # With its storage gone, the wizard restarts from the first step.
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')

    def test_form_reset(self):
        response = self.client.post(
            reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
            self.wizard_step_data[0])
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form2')
        # The ?reset=1 query argument discards stored data and restarts the wizard.
        response = self.client.get(
            '%s?reset=1' % reverse('%s_start' % self.wizard_urlname))
        self.assertEqual(response.status_code, 302)
        response = self.client.get(response['Location'])
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['wizard']['steps'].current, 'form1')
class NamedSessionWizardTests(NamedWizardTests, TestCase):
    """Runs the shared named-wizard tests against the session storage backend."""
    wizard_urlname = 'nwiz_session'
    # Deliberately incomplete step-1 POST (management data only) for the error test.
    wizard_step_1_data = {
        'session_contact_wizard-current_step': 'form1',
    }
    # Valid POST payloads for each of the four wizard steps, in order.
    wizard_step_data = (
        {
            'form1-name': 'Pony',
            'form1-thirsty': '2',
            'session_contact_wizard-current_step': 'form1',
        },
        {
            'form2-address1': '123 Main St',
            'form2-address2': 'Djangoland',
            'session_contact_wizard-current_step': 'form2',
        },
        {
            'form3-random_crap': 'blah blah',
            'session_contact_wizard-current_step': 'form3',
        },
        {
            'form4-INITIAL_FORMS': '0',
            'form4-TOTAL_FORMS': '2',
            'form4-MAX_NUM_FORMS': '0',
            'form4-0-random_crap': 'blah blah',
            'form4-1-random_crap': 'blah blah',
            'session_contact_wizard-current_step': 'form4',
        }
    )
class NamedCookieWizardTests(NamedWizardTests, TestCase):
    """Runs the shared named-wizard tests against the cookie storage backend."""
    wizard_urlname = 'nwiz_cookie'
    # Deliberately incomplete step-1 POST (management data only) for the error test.
    wizard_step_1_data = {
        'cookie_contact_wizard-current_step': 'form1',
    }
    # Valid POST payloads for each of the four wizard steps, in order.
    wizard_step_data = (
        {
            'form1-name': 'Pony',
            'form1-thirsty': '2',
            'cookie_contact_wizard-current_step': 'form1',
        },
        {
            'form2-address1': '123 Main St',
            'form2-address2': 'Djangoland',
            'cookie_contact_wizard-current_step': 'form2',
        },
        {
            'form3-random_crap': 'blah blah',
            'cookie_contact_wizard-current_step': 'form3',
        },
        {
            'form4-INITIAL_FORMS': '0',
            'form4-TOTAL_FORMS': '2',
            'form4-MAX_NUM_FORMS': '0',
            'form4-0-random_crap': 'blah blah',
            'form4-1-random_crap': 'blah blah',
            'cookie_contact_wizard-current_step': 'form4',
        }
    )
class NamedFormTests(object):
    """Tests that call wizard views directly rather than through the test client."""
    urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'

    def test_revalidation(self):
        request = get_request()
        testform = self.formwizard_class.as_view(
            [('start', Step1), ('step2', Step2)],
            url_name=self.wizard_urlname)
        response, instance = testform(request, step='done')
        # Rendering 'done' with no stored data must fail revalidation and
        # reset the wizard back to its first step.
        instance.render_done(None)
        self.assertEqual(instance.storage.current_step, 'start')
class TestNamedUrlSessionWizardView(NamedUrlSessionWizardView):
    """Session-backed wizard whose dispatch() also hands back the view instance."""

    def dispatch(self, request, *args, **kwargs):
        # Return the instance alongside the response so tests can inspect storage.
        result = super(TestNamedUrlSessionWizardView, self).dispatch(request, *args, **kwargs)
        return result, self
class TestNamedUrlCookieWizardView(NamedUrlCookieWizardView):
    """Cookie-backed wizard whose dispatch() also hands back the view instance."""

    def dispatch(self, request, *args, **kwargs):
        # Return the instance alongside the response so tests can inspect storage.
        result = super(TestNamedUrlCookieWizardView, self).dispatch(request, *args, **kwargs)
        return result, self
class NamedSessionFormTests(NamedFormTests, TestCase):
    """Direct-view tests bound to the session storage wizard."""
    formwizard_class = TestNamedUrlSessionWizardView
    wizard_urlname = 'nwiz_session'
class NamedCookieFormTests(NamedFormTests, TestCase):
    """Direct-view tests bound to the cookie storage wizard."""
    formwizard_class = TestNamedUrlCookieWizardView
    wizard_urlname = 'nwiz_cookie' | unknown | codeparrot/codeparrot-clean | ||
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKR_SM_MODEL
class EUCKRProber(MultiByteCharSetProber):
    """Charset prober for the EUC-KR (Korean) encoding."""

    def __init__(self):
        super(EUCKRProber, self).__init__()
        # State machine validates EUC-KR byte sequences; the distribution
        # analyzer scores character-frequency plausibility.
        self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
        self.distribution_analyzer = EUCKRDistributionAnalysis()
        self.reset()

    @property
    def charset_name(self):
        return "EUC-KR"

    @property
    def language(self):
        return "Korean" | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Unit tests for the `corpora.Dictionary` class.
"""
from collections import Mapping
import logging
import tempfile
import unittest
import os
import os.path
import scipy
import gensim
from gensim.corpora import Dictionary
from six import PY3
from six.moves import zip
# sample data files are located in the same folder
module_path = os.path.dirname(__file__)
def get_tmpfile(suffix):
    """Build a path for `suffix` inside the system-wide temporary directory."""
    tmp_dir = tempfile.gettempdir()
    return os.path.join(tmp_dir, suffix)
class TestDictionary(unittest.TestCase):
def setUp(self):
self.texts = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
def testDocFreqOneDoc(self):
texts = [['human', 'interface', 'computer']]
d = Dictionary(texts)
expected = {0: 1, 1: 1, 2: 1}
self.assertEqual(d.dfs, expected)
def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
# two docs
texts = [['human'], ['human']]
d = Dictionary(texts)
expected = {0: 2}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
# three docs
texts = [['human'], ['human'], ['human']]
d = Dictionary(texts)
expected = {0: 3}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
# four docs
texts = [['human'], ['human'], ['human'], ['human']]
d = Dictionary(texts)
expected = {0: 4}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
def testDocFreqForOneDocWithSeveralWord(self):
# two words
texts = [['human', 'cat']]
d = Dictionary(texts)
expected = {0: 1, 1: 1}
self.assertEqual(d.dfs, expected)
# three words
texts = [['human', 'cat', 'minors']]
d = Dictionary(texts)
expected = {0: 1, 1: 1, 2: 1}
self.assertEqual(d.dfs, expected)
def testBuild(self):
d = Dictionary(self.texts)
# Since we don't specify the order in which dictionaries are built,
# we cannot reliably test for the mapping; only the keys and values.
expected_keys = list(range(12))
expected_values = [2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
self.assertEqual(sorted(d.dfs.keys()), expected_keys)
self.assertEqual(sorted(d.dfs.values()), expected_values)
expected_keys = sorted(['computer', 'eps', 'graph', 'human',
'interface', 'minors', 'response', 'survey',
'system', 'time', 'trees', 'user'])
expected_values = list(range(12))
self.assertEqual(sorted(d.token2id.keys()), expected_keys)
self.assertEqual(sorted(d.token2id.values()), expected_values)
def testMerge(self):
d = Dictionary(self.texts)
f = Dictionary(self.texts[:3])
g = Dictionary(self.texts[3:])
f.merge_with(g)
self.assertEqual(sorted(d.token2id.keys()), sorted(f.token2id.keys()))
def testFilter(self):
d = Dictionary(self.texts)
d.filter_extremes(no_below=2, no_above=1.0, keep_n=4)
expected = {0: 3, 1: 3, 2: 3, 3: 3}
self.assertEqual(d.dfs, expected)
def testFilterTokens(self):
self.maxDiff = 10000
d = Dictionary(self.texts)
removed_word = d[0]
d.filter_tokens([0])
expected = {'computer': 0, 'eps': 8, 'graph': 10, 'human': 1,
'interface': 2, 'minors': 11, 'response': 3, 'survey': 4,
'system': 5, 'time': 6, 'trees': 9, 'user': 7}
del expected[removed_word]
self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))
expected[removed_word] = len(expected)
d.add_documents([[removed_word]])
self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))
def test_doc2bow(self):
d = Dictionary([["žluťoučký"], ["žluťoučký"]])
# pass a utf8 string
self.assertEqual(d.doc2bow(["žluťoučký"]), [(0, 1)])
# doc2bow must raise a TypeError if passed a string instead of array of strings by accident
self.assertRaises(TypeError, d.doc2bow, "žluťoučký")
# unicode must be converted to utf8
self.assertEqual(d.doc2bow([u'\u017elu\u0165ou\u010dk\xfd']), [(0, 1)])
def test_saveAsText_and_loadFromText(self):
"""`Dictionary` can be saved as textfile and loaded again from textfile. """
tmpf = get_tmpfile('dict_test.txt')
for sort_by_word in [True, False]:
d = Dictionary(self.texts)
d.save_as_text(tmpf, sort_by_word=sort_by_word)
self.assertTrue(os.path.exists(tmpf))
d_loaded = Dictionary.load_from_text(tmpf)
self.assertNotEqual(d_loaded, None)
self.assertEqual(d_loaded.token2id, d.token2id)
def test_from_corpus(self):
"""build `Dictionary` from an existing corpus"""
documents = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
for document in documents]
# remove words that appear only once
all_tokens = sum(texts, [])
tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
texts = [[word for word in text if word not in tokens_once]
for text in texts]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
# Create dictionary from corpus without a token map
dictionary_from_corpus = Dictionary.from_corpus(corpus)
dict_token2id_vals = sorted(dictionary.token2id.values())
dict_from_corpus_vals = sorted(dictionary_from_corpus.token2id.values())
self.assertEqual(dict_token2id_vals, dict_from_corpus_vals)
self.assertEqual(dictionary.dfs, dictionary_from_corpus.dfs)
self.assertEqual(dictionary.num_docs, dictionary_from_corpus.num_docs)
self.assertEqual(dictionary.num_pos, dictionary_from_corpus.num_pos)
self.assertEqual(dictionary.num_nnz, dictionary_from_corpus.num_nnz)
# Create dictionary from corpus with an id=>token map
dictionary_from_corpus_2 = Dictionary.from_corpus(corpus, id2word=dictionary)
self.assertEqual(dictionary.token2id, dictionary_from_corpus_2.token2id)
self.assertEqual(dictionary.dfs, dictionary_from_corpus_2.dfs)
self.assertEqual(dictionary.num_docs, dictionary_from_corpus_2.num_docs)
self.assertEqual(dictionary.num_pos, dictionary_from_corpus_2.num_pos)
self.assertEqual(dictionary.num_nnz, dictionary_from_corpus_2.num_nnz)
# Ensure Sparse2Corpus is compatible with from_corpus
bow = gensim.matutils.Sparse2Corpus(scipy.sparse.rand(10, 100))
dictionary = Dictionary.from_corpus(bow)
self.assertEqual(dictionary.num_docs, 100)
def test_dict_interface(self):
"""Test Python 2 dict-like interface in both Python 2 and 3."""
d = Dictionary(self.texts)
self.assertTrue(isinstance(d, Mapping))
self.assertEqual(list(zip(d.keys(), d.values())), list(d.items()))
# Even in Py3, we want the iter* members.
self.assertEqual(list(d.items()), list(d.iteritems()))
self.assertEqual(list(d.keys()), list(d.iterkeys()))
self.assertEqual(list(d.values()), list(d.itervalues()))
# XXX Do we want list results from the dict members in Py3 too?
if not PY3:
self.assertTrue(isinstance(d.items(), list))
self.assertTrue(isinstance(d.keys(), list))
self.assertTrue(isinstance(d.values(), list))
#endclass TestDictionary
if __name__ == '__main__':
logging.basicConfig(level=logging.WARNING)
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
/*
* This is a curses module for Python.
*
* Based on prior work by Lance Ellinghaus and Oliver Andrich
* Version 1.2 of this module: Copyright 1994 by Lance Ellinghouse,
* Cathedral City, California Republic, United States of America.
*
* Version 1.5b1, heavily extended for ncurses by Oliver Andrich:
* Copyright 1996,1997 by Oliver Andrich, Koblenz, Germany.
*
* Tidied for Python 1.6, and currently maintained by <amk@amk.ca>.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this source file to use, copy, modify, merge, or publish it
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or in any new file that contains a substantial portion of
* this file.
*
* THE AUTHOR MAKES NO REPRESENTATIONS ABOUT THE SUITABILITY OF
* THE SOFTWARE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT
* EXPRESS OR IMPLIED WARRANTY. THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE TO YOU OR ANY OTHER PARTY FOR ANY SPECIAL,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE, STRICT LIABILITY OR
* ANY OTHER ACTION ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
A number of SysV or ncurses functions don't have wrappers yet; if you
need a given function, add it and send a patch. See
https://www.python.org/dev/patches/ for instructions on how to submit
patches to Python.
Here's a list of currently unsupported functions:
addchnstr addchstr color_set define_key
del_curterm delscreen dupwin inchnstr inchstr innstr keyok
mcprint mvaddchnstr mvaddchstr mvcur mvinchnstr
mvinchstr mvinnstr mmvwaddchnstr mvwaddchstr
mvwinchnstr mvwinchstr mvwinnstr newterm
restartterm ripoffline scr_dump
scr_init scr_restore scr_set scrl set_curterm set_term setterm
tgetent tgetflag tgetnum tgetstr tgoto timeout tputs
vidattr vidputs waddchnstr waddchstr
wcolor_set winchnstr winchstr winnstr wmouse_trafo wscrl
Low-priority:
slk_attr slk_attr_off slk_attr_on slk_attr_set slk_attroff
slk_attron slk_attrset slk_clear slk_color slk_init slk_label
slk_noutrefresh slk_refresh slk_restore slk_set slk_touch
Menu extension (ncurses and probably SYSV):
current_item free_item free_menu item_count item_description
item_index item_init item_name item_opts item_opts_off
item_opts_on item_term item_userptr item_value item_visible
menu_back menu_driver menu_fore menu_format menu_grey
menu_init menu_items menu_mark menu_opts menu_opts_off
menu_opts_on menu_pad menu_pattern menu_request_by_name
menu_request_name menu_spacing menu_sub menu_term menu_userptr
menu_win new_item new_menu pos_menu_cursor post_menu
scale_menu set_current_item set_item_init set_item_opts
set_item_term set_item_userptr set_item_value set_menu_back
set_menu_fore set_menu_format set_menu_grey set_menu_init
set_menu_items set_menu_mark set_menu_opts set_menu_pad
set_menu_pattern set_menu_spacing set_menu_sub set_menu_term
set_menu_userptr set_menu_win set_top_row top_row unpost_menu
Form extension (ncurses and probably SYSV):
current_field data_ahead data_behind dup_field
dynamic_fieldinfo field_arg field_back field_buffer
field_count field_fore field_index field_info field_init
field_just field_opts field_opts_off field_opts_on field_pad
field_status field_term field_type field_userptr form_driver
form_fields form_init form_opts form_opts_off form_opts_on
form_page form_request_by_name form_request_name form_sub
form_term form_userptr form_win free_field free_form
link_field link_fieldtype move_field new_field new_form
new_page pos_form_cursor post_form scale_form
set_current_field set_field_back set_field_buffer
set_field_fore set_field_init set_field_just set_field_opts
set_field_pad set_field_status set_field_term set_field_type
set_field_userptr set_fieldtype_arg set_fieldtype_choice
set_form_fields set_form_init set_form_opts set_form_page
set_form_sub set_form_term set_form_userptr set_form_win
set_max_field set_new_page unpost_form
*/
/* Release Number */
static const char PyCursesVersion[] = "2.2";
/* Includes */
#ifndef Py_BUILD_CORE_BUILTIN
# define Py_BUILD_CORE_MODULE 1
#endif
#include "Python.h"
#include "pycore_capsule.h" // _PyCapsule_SetTraverse()
#include "pycore_long.h" // _PyLong_GetZero()
#include "pycore_structseq.h" // _PyStructSequence_NewType()
#include "pycore_fileutils.h" // _Py_set_inheritable
#ifdef __hpux
#define STRICT_SYSV_CURSES
#endif
#define CURSES_MODULE
#include "py_curses.h"
#if defined(HAVE_TERM_H) || defined(__sgi)
/* For termname, longname, putp, tigetflag, tigetnum, tigetstr, tparm
which are not declared in SysV curses and for setupterm. */
#include <term.h>
/* Including <term.h> #defines many common symbols. */
#undef lines
#undef columns
#endif
#ifdef HAVE_LANGINFO_H
#include <langinfo.h>
#endif
#if !defined(NCURSES_VERSION) && (defined(sgi) || defined(__sun) || defined(SCO5))
#define STRICT_SYSV_CURSES /* Don't use ncurses extensions */
typedef chtype attr_t; /* No attr_t type is available */
#endif
#if defined(_AIX)
#define STRICT_SYSV_CURSES
#endif
#if defined(HAVE_NCURSESW) && NCURSES_EXT_FUNCS+0 >= 20170401 && NCURSES_EXT_COLORS+0 >= 20170401
#define _NCURSES_EXTENDED_COLOR_FUNCS 1
#else
#define _NCURSES_EXTENDED_COLOR_FUNCS 0
#endif
#if _NCURSES_EXTENDED_COLOR_FUNCS
#define _CURSES_COLOR_VAL_TYPE int
#define _CURSES_COLOR_NUM_TYPE int
#define _CURSES_INIT_COLOR_FUNC init_extended_color
#define _CURSES_INIT_PAIR_FUNC init_extended_pair
#define _COLOR_CONTENT_FUNC extended_color_content
#define _CURSES_PAIR_CONTENT_FUNC extended_pair_content
#else
#define _CURSES_COLOR_VAL_TYPE short
#define _CURSES_COLOR_NUM_TYPE short
#define _CURSES_INIT_COLOR_FUNC init_color
#define _CURSES_INIT_PAIR_FUNC init_pair
#define _COLOR_CONTENT_FUNC color_content
#define _CURSES_PAIR_CONTENT_FUNC pair_content
#endif /* _NCURSES_EXTENDED_COLOR_FUNCS */
/* Per-module state, retrieved via PyModule_GetState()/PyType_GetModuleState(). */
typedef struct {
    PyObject *error; // the _curses.error exception type (owned by the module)
    PyTypeObject *window_type; // heap type for window objects; exposed by PyCursesWindow_Type
} cursesmodule_state;
/* Return the module state for 'module'. The state is always allocated
   for this module, so the result is never NULL. */
static inline cursesmodule_state *
get_cursesmodule_state(PyObject *module)
{
    cursesmodule_state *state = (cursesmodule_state *)PyModule_GetState(module);
    assert(state != NULL);
    return state;
}
/* Return the module state attached to the module that defined 'cls'. */
static inline cursesmodule_state *
get_cursesmodule_state_by_cls(PyTypeObject *cls)
{
    cursesmodule_state *state = (cursesmodule_state *)PyType_GetModuleState(cls);
    assert(state != NULL);
    return state;
}
/* Return the module state for a window object, going through its type. */
static inline cursesmodule_state *
get_cursesmodule_state_by_win(PyCursesWindowObject *win)
{
    PyTypeObject *tp = Py_TYPE(win);
    return get_cursesmodule_state_by_cls(tp);
}
/* Downcast a generic 'PyObject *self' to the window object type. */
#define _PyCursesWindowObject_CAST(op) ((PyCursesWindowObject *)(op))
/*[clinic input]
module _curses
class _curses.window "PyCursesWindowObject *" "clinic_state()->window_type"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=ae6cb623018f2cbc]*/
/* Indicate whether the module has already been loaded or not. */
static int curses_module_loaded = 0;
/* Tells whether setupterm() has been called to initialise terminfo. */
static int curses_setupterm_called = FALSE;
/* Tells whether initscr() has been called to initialise curses. */
static int curses_initscr_called = FALSE;
/* Tells whether start_color() has been called to initialise color usage. */
static int curses_start_color_called = FALSE;
/* Encoding of the screen; NULL until set (presumably during initscr() —
   the assignment is elsewhere in this file). */
static const char *curses_screen_encoding = NULL;
/* Utility Error Procedures */
/* Raise _curses.error with a message describing which curses function
 * (and optionally which Python-level function) failed.
 *
 * - both names NULL: use 'default_message' verbatim;
 * - only 'curses_funcname': short format with 'return_value';
 * - both names given: verbose format naming both functions.
 *
 * Must be called with no exception currently set. */
static void
_curses_format_error(cursesmodule_state *state,
                     const char *curses_funcname,
                     const char *python_funcname,
                     const char *return_value,
                     const char *default_message)
{
    assert(!PyErr_Occurred());
    if (curses_funcname == NULL && python_funcname == NULL) {
        PyErr_SetString(state->error, default_message);
        return;
    }
    if (python_funcname == NULL) {
        (void)PyErr_Format(state->error, CURSES_ERROR_FORMAT,
                           curses_funcname, return_value);
        return;
    }
    (void)PyErr_Format(state->error, CURSES_ERROR_VERBOSE_FORMAT,
                       curses_funcname, python_funcname, return_value);
}
/*
* Format a curses error for a function that returned ERR.
*
* Specify a non-NULL 'python_funcname' when the latter differs from
* 'curses_funcname'. If both names are NULL, uses the 'catchall_ERR'
* message instead.
*/
/* Raise _curses.error for a curses call that returned ERR. */
static void
_curses_set_error(cursesmodule_state *state,
                  const char *curses_funcname,
                  const char *python_funcname)
{
    const char *retval_repr = "ERR";
    _curses_format_error(state, curses_funcname, python_funcname,
                         retval_repr, catchall_ERR);
}
/*
* Format a curses error for a function that returned NULL.
*
* Specify a non-NULL 'python_funcname' when the latter differs from
* 'curses_funcname'. If both names are NULL, uses the 'catchall_NULL'
* message instead.
*/
/* Raise _curses.error for a curses call that returned NULL. */
static inline void
_curses_set_null_error(cursesmodule_state *state,
                       const char *curses_funcname,
                       const char *python_funcname)
{
    const char *retval_repr = "NULL";
    _curses_format_error(state, curses_funcname, python_funcname,
                         retval_repr, catchall_NULL);
}
/* Same as _curses_set_error() for a module object. */
/* Module-object variant of _curses_set_error(): resolve the state first. */
static void
curses_set_error(PyObject *module,
                 const char *curses_funcname,
                 const char *python_funcname)
{
    _curses_set_error(get_cursesmodule_state(module),
                      curses_funcname, python_funcname);
}
/* Same as _curses_set_null_error() for a module object. */
/* Module-object variant of _curses_set_null_error(). */
static void
curses_set_null_error(PyObject *module,
                      const char *curses_funcname,
                      const char *python_funcname)
{
    _curses_set_null_error(get_cursesmodule_state(module),
                           curses_funcname, python_funcname);
}
/* Same as _curses_set_error() for a Window object. */
/* Window-object variant of _curses_set_error(). */
static void
curses_window_set_error(PyCursesWindowObject *win,
                        const char *curses_funcname,
                        const char *python_funcname)
{
    _curses_set_error(get_cursesmodule_state_by_win(win),
                      curses_funcname, python_funcname);
}
/* Same as _curses_set_null_error() for a Window object. */
/* Window-object variant of _curses_set_null_error(). */
static void
curses_window_set_null_error(PyCursesWindowObject *win,
                             const char *curses_funcname,
                             const char *python_funcname)
{
    _curses_set_null_error(get_cursesmodule_state_by_win(win),
                           curses_funcname, python_funcname);
}
/* Utility Checking Procedures */
/*
* Function to check that 'funcname' has been called by testing
* the 'called' boolean. If an error occurs, an exception is
* set and this returns 0. Otherwise, this returns 1.
*
* Since this function can be called in functions that do not
* have a direct access to the module's state, '_curses.error'
* is imported on demand.
*
* Use _PyCursesStatefulCheckFunction() if the module is given.
*/
/* Verify that 'funcname' has been called ('called' is TRUE); return 1 if so.
 * Otherwise raise _curses.error ("must call ... first") and return 0.
 * The exception type is imported on demand because this helper can run
 * without direct access to the module state. */
static int
_PyCursesCheckFunction(int called, const char *funcname)
{
    if (called != TRUE) {
        PyObject *exc = PyImport_ImportModuleAttrString("_curses", "error");
        if (exc != NULL) {
            PyErr_Format(exc, CURSES_ERROR_MUST_CALL_FORMAT, funcname);
            Py_DECREF(exc);
        }
        /* Either the format call or the failed import left an exception. */
        assert(PyErr_Occurred());
        return 0;
    }
    return 1;
}
/*
* Function to check that 'funcname' has been called by testing
* the 'called'' boolean. If an error occurs, a PyCursesError is
* set and this returns 0. Otherwise this returns 1.
*
* The exception type is obtained from the 'module' state.
*/
/* Same check as _PyCursesCheckFunction(), but the exception type is taken
 * directly from the given module's state. Return 1 on success, 0 with an
 * exception set otherwise. */
static int
_PyCursesStatefulCheckFunction(PyObject *module,
                               int called, const char *funcname)
{
    if (called != TRUE) {
        cursesmodule_state *state = get_cursesmodule_state(module);
        PyErr_Format(state->error, CURSES_ERROR_MUST_CALL_FORMAT, funcname);
        return 0;
    }
    return 1;
}
/* Guard macros: each raises "must call X first" through
 * _PyCursesStatefulCheckFunction() and returns 0 from the enclosing
 * function when the prerequisite call has not been made. */

/* Require a prior setupterm() call. */
#define PyCursesStatefulSetupTermCalled(MODULE)                 \
    do {                                                        \
        if (!_PyCursesStatefulCheckFunction(MODULE,             \
                                            curses_setupterm_called, \
                                            "setupterm"))       \
        {                                                       \
            return 0;                                           \
        }                                                       \
    } while (0)

/* Require a prior initscr() call. */
#define PyCursesStatefulInitialised(MODULE)                     \
    do {                                                        \
        if (!_PyCursesStatefulCheckFunction(MODULE,             \
                                            curses_initscr_called, \
                                            "initscr"))         \
        {                                                       \
            return 0;                                           \
        }                                                       \
    } while (0)

/* Require a prior start_color() call. */
#define PyCursesStatefulInitialisedColor(MODULE)                \
    do {                                                        \
        if (!_PyCursesStatefulCheckFunction(MODULE,             \
                                            curses_start_color_called, \
                                            "start_color"))     \
        {                                                       \
            return 0;                                           \
        }                                                       \
    } while (0)
/* Utility Functions */
/*
* Check the return code from a curses function, returning None
* on success and setting an exception on error.
*/
/*
* Return None if 'code' is different from ERR (implementation-defined).
* Otherwise, set an exception using curses_set_error() and the remaining
* arguments, and return NULL.
*/
/* Map a curses return code to a Python result: any value other than ERR
 * yields None; ERR raises _curses.error (via curses_set_error()) and
 * yields NULL. */
static PyObject *
curses_check_err(PyObject *module, int code,
                 const char *curses_funcname,
                 const char *python_funcname)
{
    if (code == ERR) {
        curses_set_error(module, curses_funcname, python_funcname);
        return NULL;
    }
    Py_RETURN_NONE;
}
/* Same as curses_check_err() for a Window object. */
/* Window-object variant of curses_check_err(). */
static PyObject *
curses_window_check_err(PyCursesWindowObject *win, int code,
                        const char *curses_funcname,
                        const char *python_funcname)
{
    if (code == ERR) {
        curses_window_set_error(win, curses_funcname, python_funcname);
        return NULL;
    }
    Py_RETURN_NONE;
}
/* Convert an object to a byte (an integer of type chtype):
 *
 *   - int
 *   - bytes of length 1
 *   - str of length 1 (code points above 128 are encoded with the window's
 *     encoding, or the screen encoding when 'win' is NULL, and must encode
 *     to a single byte)
 *
 * On success write the result into *ch and return 1; on error (invalid
 * type or integer overflow) set an exception and return 0. */
static int
PyCurses_ConvertToChtype(PyCursesWindowObject *win, PyObject *obj, chtype *ch)
{
    long value;
    if (PyBytes_Check(obj)) {
        if (PyBytes_GET_SIZE(obj) != 1) {
            PyErr_Format(PyExc_TypeError,
                         "expect int or bytes or str of length 1, "
                         "got a bytes of length %zd",
                         PyBytes_GET_SIZE(obj));
            return 0;
        }
        value = (unsigned char)PyBytes_AsString(obj)[0];
    }
    else if (PyUnicode_Check(obj)) {
        if (PyUnicode_GET_LENGTH(obj) != 1) {
            PyErr_Format(PyExc_TypeError,
                         "expect int or bytes or str of length 1, "
                         "got a str of length %zi",
                         PyUnicode_GET_LENGTH(obj));
            return 0;
        }
        value = PyUnicode_READ_CHAR(obj, 0);
        if (128 < value) {
            /* Non-ASCII code point: encode it and accept the result only
               if it encodes to exactly one byte. */
            PyObject *bytes;
            const char *encoding;
            if (win)
                encoding = win->encoding;
            else
                encoding = curses_screen_encoding;
            bytes = PyUnicode_AsEncodedString(obj, encoding, NULL);
            if (bytes == NULL)
                return 0;
            if (PyBytes_GET_SIZE(bytes) == 1)
                value = (unsigned char)PyBytes_AS_STRING(bytes)[0];
            else
                value = -1; /* multi-byte result: reported as overflow below */
            Py_DECREF(bytes);
            if (value < 0)
                goto overflow;
        }
    }
    else if (PyLong_CheckExact(obj)) {
        int long_overflow;
        value = PyLong_AsLongAndOverflow(obj, &long_overflow);
        if (long_overflow)
            goto overflow;
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "expect int or bytes or str of length 1, got %s",
                     Py_TYPE(obj)->tp_name);
        return 0;
    }
    /* Round-trip check: the value must fit in a chtype unchanged. */
    *ch = (chtype)value;
    if ((long)*ch != value)
        goto overflow;
    return 1;

overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "byte doesn't fit in chtype");
    return 0;
}
/* Convert an object to a byte (chtype) or a character (cchar_t):
 *
 *   - int
 *   - bytes of length 1
 *   - str of length 1
 *
 * Return:
 *   - 2 if obj is a character (written into *wch) -- ncursesw builds only
 *   - 1 if obj is a byte (written into *ch)
 *   - 0 on error: raise an exception */
static int
PyCurses_ConvertToCchar_t(PyCursesWindowObject *win, PyObject *obj,
                          chtype *ch
#ifdef HAVE_NCURSESW
                          , wchar_t *wch
#endif
                          )
{
    long value;
#ifdef HAVE_NCURSESW
    wchar_t buffer[2];
#endif
    if (PyUnicode_Check(obj)) {
#ifdef HAVE_NCURSESW
        /* PyUnicode_AsWideChar() returns the number of wide characters
           written; anything other than exactly 1 means the string was not
           of length 1. */
        if (PyUnicode_AsWideChar(obj, buffer, 2) != 1) {
            PyErr_Format(PyExc_TypeError,
                         "expect int or bytes or str of length 1, "
                         "got a str of length %zi",
                         PyUnicode_GET_LENGTH(obj));
            return 0;
        }
        *wch = buffer[0];
        return 2;
#else
        /* Without ncursesw, fall back to the single-byte conversion. */
        return PyCurses_ConvertToChtype(win, obj, ch);
#endif
    }
    else if (PyBytes_Check(obj)) {
        if (PyBytes_GET_SIZE(obj) != 1) {
            PyErr_Format(PyExc_TypeError,
                         "expect int or bytes or str of length 1, "
                         "got a bytes of length %zd",
                         PyBytes_GET_SIZE(obj));
            return 0;
        }
        value = (unsigned char)PyBytes_AsString(obj)[0];
    }
    else if (PyLong_CheckExact(obj)) {
        int overflow;
        value = PyLong_AsLongAndOverflow(obj, &overflow);
        if (overflow) {
            PyErr_SetString(PyExc_OverflowError,
                            "int doesn't fit in long");
            return 0;
        }
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "expect int or bytes or str of length 1, got %s",
                     Py_TYPE(obj)->tp_name);
        return 0;
    }
    /* Byte path: the value must fit in a chtype unchanged. */
    *ch = (chtype)value;
    if ((long)*ch != value) {
        PyErr_Format(PyExc_OverflowError,
                     "byte doesn't fit in chtype");
        return 0;
    }
    return 1;
}
/* Convert an object to a byte string (char*) or a wide character string
   (wchar_t*). Return:

     - 2 if obj is a character string (written into *wstr; the caller owns
       the buffer) -- ncursesw builds only
     - 1 if obj is a byte string (written into *bytes as a new reference)
     - 0 on error: raise an exception */
static int
PyCurses_ConvertToString(PyCursesWindowObject *win, PyObject *obj,
                         PyObject **bytes, wchar_t **wstr)
{
    char *str;
    if (PyUnicode_Check(obj)) {
#ifdef HAVE_NCURSESW
        assert (wstr != NULL);
        *wstr = PyUnicode_AsWideCharString(obj, NULL);
        if (*wstr == NULL)
            return 0;
        return 2;
#else
        assert (wstr == NULL);
        /* Encode using the window's encoding. */
        *bytes = PyUnicode_AsEncodedString(obj, win->encoding, NULL);
        if (*bytes == NULL)
            return 0;
        /* check for embedded null bytes */
        if (PyBytes_AsStringAndSize(*bytes, &str, NULL) < 0) {
            Py_CLEAR(*bytes);
            return 0;
        }
        return 1;
#endif
    }
    else if (PyBytes_Check(obj)) {
        *bytes = Py_NewRef(obj);
        /* check for embedded null bytes */
        if (PyBytes_AsStringAndSize(*bytes, &str, NULL) < 0) {
            /* Drop the reference taken just above. */
            Py_DECREF(obj);
            return 0;
        }
        return 1;
    }
    PyErr_Format(PyExc_TypeError, "expect bytes or str, got %s",
                 Py_TYPE(obj)->tp_name);
    return 0;
}
/* Argument converter for color numbers where negative values are allowed
 * and collapsed to -1 (the terminal's default color). Values >= COLORS
 * are rejected. Writes an int into *ptr; returns 1 on success, 0 with an
 * exception set on failure. */
static int
color_allow_default_converter(PyObject *arg, void *ptr)
{
    int overflow;
    long color_number = PyLong_AsLongAndOverflow(arg, &overflow);
    if (color_number == -1 && PyErr_Occurred()) {
        return 0;
    }
    if (overflow > 0 || color_number >= COLORS) {
        PyErr_Format(PyExc_ValueError,
                     "Color number is greater than COLORS-1 (%d).",
                     COLORS - 1);
        return 0;
    }
    if (overflow < 0 || color_number < 0) {
        /* Any negative value means "default color". */
        color_number = -1;
    }
    *(int *)ptr = (int)color_number;
    return 1;
}
/* Strict variant of color_allow_default_converter(): negative color
 * numbers (including the -1 "default") are rejected as well. */
static int
color_converter(PyObject *arg, void *ptr)
{
    if (!color_allow_default_converter(arg, ptr)) {
        return 0;
    }
    int color_number = *(int *)ptr;
    if (color_number < 0) {
        PyErr_SetString(PyExc_ValueError,
                        "Color number is less than 0.");
        return 0;
    }
    return 1;
}
/*[python input]
class color_converter(CConverter):
type = 'int'
converter = 'color_converter'
[python start generated code]*/
/*[python end generated code: output=da39a3ee5e6b4b0d input=4260d2b6e66b3709]*/
/*[python input]
class color_allow_default_converter(CConverter):
type = 'int'
converter = 'color_allow_default_converter'
[python start generated code]*/
/*[python end generated code: output=da39a3ee5e6b4b0d input=975602bc058a872d]*/
/* Argument converter for color pair numbers. With extended color support
 * the pair number may span the whole int range; otherwise it must be
 * below COLOR_PAIRS. Negative values are always rejected. Writes an int
 * into *ptr; returns 1 on success, 0 with an exception set on failure. */
static int
pair_converter(PyObject *arg, void *ptr)
{
    long pair_number;
    int overflow;
    pair_number = PyLong_AsLongAndOverflow(arg, &overflow);
    if (pair_number == -1 && PyErr_Occurred())
        return 0;
#if _NCURSES_EXTENDED_COLOR_FUNCS
    if (overflow > 0 || pair_number > INT_MAX) {
        PyErr_Format(PyExc_ValueError,
                     "Color pair is greater than maximum (%d).",
                     INT_MAX);
        return 0;
    }
#else
    if (overflow > 0 || pair_number >= COLOR_PAIRS) {
        PyErr_Format(PyExc_ValueError,
                     "Color pair is greater than COLOR_PAIRS-1 (%d).",
                     COLOR_PAIRS - 1);
        return 0;
    }
#endif
    /* NOTE: this 'else if' chains with whichever 'if' was compiled above. */
    else if (overflow < 0 || pair_number < 0) {
        PyErr_SetString(PyExc_ValueError,
                        "Color pair is less than 0.");
        return 0;
    }
    *(int *)ptr = (int)pair_number;
    return 1;
}
/*[python input]
class pair_converter(CConverter):
type = 'int'
converter = 'pair_converter'
[python start generated code]*/
/*[python end generated code: output=da39a3ee5e6b4b0d input=1a918ae6a1b32af7]*/
/* Argument converter for an RGB color component: curses components are
 * scaled 0..1000 and stored in a short. Writes a short into *ptr;
 * returns 1 on success, 0 with an exception set on failure. */
static int
component_converter(PyObject *arg, void *ptr)
{
    int overflow;
    long component = PyLong_AsLongAndOverflow(arg, &overflow);
    if (component == -1 && PyErr_Occurred()) {
        return 0;
    }
    if (overflow > 0 || component > 1000) {
        PyErr_SetString(PyExc_ValueError,
                        "Color component is greater than 1000");
        return 0;
    }
    if (overflow < 0 || component < 0) {
        PyErr_SetString(PyExc_ValueError,
                        "Color component is less than 0");
        return 0;
    }
    *(short *)ptr = (short)component;
    return 1;
}
/*[python input]
class component_converter(CConverter):
type = 'short'
converter = 'component_converter'
[python start generated code]*/
/*[python end generated code: output=da39a3ee5e6b4b0d input=38e9be01d33927fb]*/
/*****************************************************************************
The Window Object
******************************************************************************/
/*
* Macros for creating a PyCursesWindowObject object's method.
*
* Parameters
*
* X The name of the curses C function or macro to invoke.
* TYPE The function parameter(s) type.
* ERGSTR The format string for construction of the return value.
* PARSESTR The format string for argument parsing.
*/
/* No arguments; curses call returns int checked against ERR. */
#define Window_NoArgNoReturnFunction(X)                                 \
    static PyObject *PyCursesWindow_ ## X                               \
    (PyObject *op, PyObject *Py_UNUSED(ignored))                        \
    {                                                                   \
        PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);    \
        int code = X(self->win);                                        \
        return curses_window_check_err(self, code, # X, NULL);          \
    }

/* No arguments; curses call returns a truth value -> Python bool. */
#define Window_NoArgTrueFalseFunction(X)                                \
    static PyObject * PyCursesWindow_ ## X                              \
    (PyObject *op, PyObject *Py_UNUSED(ignored))                        \
    {                                                                   \
        PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);    \
        return PyBool_FromLong(X(self->win));                           \
    }

/* No arguments; curses call cannot fail -> always None. */
#define Window_NoArgNoReturnVoidFunction(X)                             \
    static PyObject * PyCursesWindow_ ## X                              \
    (PyObject *op, PyObject *Py_UNUSED(ignored))                        \
    {                                                                   \
        PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);    \
        X(self->win);                                                   \
        Py_RETURN_NONE;                                                 \
    }

/* No arguments; curses macro writes two out-values -> 2-tuple built
   with format string ERGSTR. */
#define Window_NoArg2TupleReturnFunction(X, TYPE, ERGSTR)               \
    static PyObject * PyCursesWindow_ ## X                              \
    (PyObject *op, PyObject *Py_UNUSED(ignored))                        \
    {                                                                   \
        TYPE arg1, arg2;                                                \
        PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);    \
        X(self->win, arg1, arg2);                                       \
        return Py_BuildValue(ERGSTR, arg1, arg2);                       \
    }

/* One argument parsed with PARSESTR; curses call cannot fail. */
#define Window_OneArgNoReturnVoidFunction(X, TYPE, PARSESTR)            \
    static PyObject * PyCursesWindow_ ## X                              \
    (PyObject *op, PyObject *args)                                      \
    {                                                                   \
        TYPE arg1;                                                      \
        if (!PyArg_ParseTuple(args, PARSESTR, &arg1)) {                 \
            return NULL;                                                \
        }                                                               \
        PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);    \
        X(self->win, arg1);                                             \
        Py_RETURN_NONE;                                                 \
    }

/* One argument; curses call returns int checked against ERR. */
#define Window_OneArgNoReturnFunction(X, TYPE, PARSESTR)                \
    static PyObject * PyCursesWindow_ ## X                              \
    (PyObject *op, PyObject *args)                                      \
    {                                                                   \
        TYPE arg1;                                                      \
        if (!PyArg_ParseTuple(args, PARSESTR, &arg1)) {                 \
            return NULL;                                                \
        }                                                               \
        PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);    \
        int code = X(self->win, arg1);                                  \
        return curses_window_check_err(self, code, # X, NULL);          \
    }

/* Two arguments; curses call returns int checked against ERR. */
#define Window_TwoArgNoReturnFunction(X, TYPE, PARSESTR)                \
    static PyObject * PyCursesWindow_ ## X                              \
    (PyObject *op, PyObject *args)                                      \
    {                                                                   \
        TYPE arg1, arg2;                                                \
        if (!PyArg_ParseTuple(args,PARSESTR, &arg1, &arg2)) {           \
            return NULL;                                                \
        }                                                               \
        PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);    \
        int code = X(self->win, arg1, arg2);                            \
        return curses_window_check_err(self, code, # X, NULL);          \
    }
/* ------------- WINDOW routines --------------- */

/* No-argument methods whose curses call is checked against ERR. */
Window_NoArgNoReturnFunction(untouchwin)
Window_NoArgNoReturnFunction(touchwin)
Window_NoArgNoReturnFunction(redrawwin)
Window_NoArgNoReturnFunction(winsertln)
Window_NoArgNoReturnFunction(werase)
Window_NoArgNoReturnFunction(wdeleteln)

/* No-argument predicate. */
Window_NoArgTrueFalseFunction(is_wintouched)

/* No-argument methods whose curses call cannot fail. */
Window_NoArgNoReturnVoidFunction(wsyncup)
Window_NoArgNoReturnVoidFunction(wsyncdown)
Window_NoArgNoReturnVoidFunction(wstandend)
Window_NoArgNoReturnVoidFunction(wstandout)
Window_NoArgNoReturnVoidFunction(wcursyncup)
Window_NoArgNoReturnVoidFunction(wclrtoeol)
Window_NoArgNoReturnVoidFunction(wclrtobot)
Window_NoArgNoReturnVoidFunction(wclear)

/* One-argument void methods. */
Window_OneArgNoReturnVoidFunction(idcok, int, "i;True(1) or False(0)")
#ifdef HAVE_CURSES_IMMEDOK
Window_OneArgNoReturnVoidFunction(immedok, int, "i;True(1) or False(0)")
#endif
Window_OneArgNoReturnVoidFunction(wtimeout, int, "i;delay")

/* Coordinate-pair getters returning (y, x) tuples. */
Window_NoArg2TupleReturnFunction(getyx, int, "ii")
Window_NoArg2TupleReturnFunction(getbegyx, int, "ii")
Window_NoArg2TupleReturnFunction(getmaxyx, int, "ii")
Window_NoArg2TupleReturnFunction(getparyx, int, "ii")

/* One-argument methods checked against ERR. */
Window_OneArgNoReturnFunction(clearok, int, "i;True(1) or False(0)")
Window_OneArgNoReturnFunction(idlok, int, "i;True(1) or False(0)")
Window_OneArgNoReturnFunction(keypad, int, "i;True(1) or False(0)")
Window_OneArgNoReturnFunction(leaveok, int, "i;True(1) or False(0)")
Window_OneArgNoReturnFunction(nodelay, int, "i;True(1) or False(0)")
Window_OneArgNoReturnFunction(notimeout, int, "i;True(1) or False(0)")
Window_OneArgNoReturnFunction(scrollok, int, "i;True(1) or False(0)")
Window_OneArgNoReturnFunction(winsdelln, int, "i;nlines")
#ifdef HAVE_CURSES_SYNCOK
Window_OneArgNoReturnFunction(syncok, int, "i;True(1) or False(0)")
#endif

/* Two-argument methods checked against ERR. */
Window_TwoArgNoReturnFunction(mvwin, int, "ii;y,x")
Window_TwoArgNoReturnFunction(mvderwin, int, "ii;y,x")
Window_TwoArgNoReturnFunction(wmove, int, "ii;y,x")
#ifndef STRICT_SYSV_CURSES
Window_TwoArgNoReturnFunction(wresize, int, "ii;lines,columns")
#endif
/* Allocation and deallocation of Window Objects */
/*
 * Allocate and initialise a new window object wrapping 'win'.
 *
 * 'encoding' may be NULL, in which case the console code page (Windows),
 * the locale's CODESET, or finally "utf-8" is used. 'orig' is the parent
 * window for subwindows (NULL otherwise); a strong reference to it is
 * taken so it outlives this window.
 *
 * Return a new reference, or NULL with an exception set on failure.
 */
static PyObject *
PyCursesWindow_New(cursesmodule_state *state,
                   WINDOW *win, const char *encoding,
                   PyCursesWindowObject *orig)
{
#if defined(MS_WINDOWS)
    /* Declared at function scope: 'encoding' may point into this buffer
       until _PyMem_Strdup() below has copied it. (Previously this was
       'char *buffer[100]' — an array of pointers — declared inside the
       'if' block, so PyOS_snprintf() received the wrong type and size and
       the buffer went out of scope before being duplicated.) */
    char buffer[100];
#endif
    if (encoding == NULL) {
#if defined(MS_WINDOWS)
        UINT cp = GetConsoleOutputCP();
        if (cp != 0) {
            PyOS_snprintf(buffer, sizeof(buffer), "cp%u", cp);
            encoding = buffer;
        }
#elif defined(CODESET)
        const char *codeset = nl_langinfo(CODESET);
        if (codeset != NULL && codeset[0] != 0) {
            encoding = codeset;
        }
#endif
        if (encoding == NULL) {
            /* Last-resort default. */
            encoding = "utf-8";
        }
    }
    PyCursesWindowObject *wo = PyObject_GC_New(PyCursesWindowObject,
                                               state->window_type);
    if (wo == NULL) {
        return NULL;
    }
    wo->win = win;
    wo->encoding = _PyMem_Strdup(encoding);
    if (wo->encoding == NULL) {
        Py_DECREF(wo);
        PyErr_NoMemory();
        return NULL;
    }
    wo->orig = orig;
    Py_XINCREF(orig);
    PyObject_GC_Track((PyObject *)wo);
    return (PyObject *)wo;
}
/* Deallocate a window: delete the underlying WINDOW (unless it is stdscr
 * or NULL), free the encoding buffer, drop the parent-window reference,
 * free the object, and release the heap type last. */
static void
PyCursesWindow_dealloc(PyObject *self)
{
    PyTypeObject *window_type = Py_TYPE(self);
    /* Untrack before tearing anything down so the GC never sees a
       half-destroyed object. */
    PyObject_GC_UnTrack(self);
    PyCursesWindowObject *wo = (PyCursesWindowObject *)self;
    if (wo->win != stdscr && wo->win != NULL) {
        if (delwin(wo->win) == ERR) {
            /* dealloc cannot propagate exceptions; report as unraisable. */
            curses_window_set_error(wo, "delwin", "__del__");
            PyErr_FormatUnraisable("Exception ignored in delwin()");
        }
    }
    if (wo->encoding != NULL) {
        PyMem_Free(wo->encoding);
    }
    Py_XDECREF(wo->orig);
    window_type->tp_free(self);
    /* Instances of heap types hold a reference to their type. */
    Py_DECREF(window_type);
}
/* GC traversal: heap-type instances must visit their type; the parent
 * window reference is also visited. */
static int
PyCursesWindow_traverse(PyObject *self, visitproc visit, void *arg)
{
    PyCursesWindowObject *wo = _PyCursesWindowObject_CAST(self);
    Py_VISIT(Py_TYPE(self));
    Py_VISIT(wo->orig);
    return 0;
}
/* Addch, Addstr, Addnstr */
/*[clinic input]
_curses.window.addch
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
ch: object
Character to add.
[
attr: long(c_default="A_NORMAL") = _curses.A_NORMAL
Attributes for the character.
]
/
Paint the character.
Paint character ch at (y, x) with attributes attr,
overwriting any character previously painted at that location.
By default, the character position and attributes are the
current settings for the window object.
[clinic start generated code]*/
static PyObject *
_curses_window_addch_impl(PyCursesWindowObject *self, int group_left_1,
                          int y, int x, PyObject *ch, int group_right_1,
                          long attr)
/*[clinic end generated code: output=00f4c37af3378f45 input=95ce131578458196]*/
{
    int coordinates_group = group_left_1;
    int rtn;
    /* type: 0 = conversion failed (exception set), 1 = narrow chtype in
       'cch', 2 = wide character in 'wstr' (only with HAVE_NCURSESW). */
    int type;
    chtype cch = 0;
#ifdef HAVE_NCURSESW
    wchar_t wstr[2];
    cchar_t wcval;
#endif
    const char *funcname;
#ifdef HAVE_NCURSESW
    type = PyCurses_ConvertToCchar_t(self, ch, &cch, wstr);
    if (type == 2) {
        wstr[1] = L'\0';
        /* Combine the wide character with the attributes and color pair
           into a complex character before painting it. */
        rtn = setcchar(&wcval, wstr, attr, PAIR_NUMBER(attr), NULL);
        if (rtn == ERR) {
            curses_window_set_error(self, "setcchar", "addch");
            return NULL;
        }
        if (coordinates_group) {
            rtn = mvwadd_wch(self->win,y,x, &wcval);
            funcname = "mvwadd_wch";
        }
        else {
            rtn = wadd_wch(self->win, &wcval);
            funcname = "wadd_wch";
        }
    }
    else
#else
    type = PyCurses_ConvertToCchar_t(self, ch, &cch);
#endif
    /* Narrow path: OR the attributes directly into the chtype. */
    if (type == 1) {
        if (coordinates_group) {
            rtn = mvwaddch(self->win,y,x, cch | (attr_t) attr);
            funcname = "mvwaddch";
        }
        else {
            rtn = waddch(self->win, cch | (attr_t) attr);
            funcname = "waddch";
        }
    }
    else {
        /* type == 0: conversion already set an exception. */
        return NULL;
    }
    return curses_window_check_err(self, rtn, funcname, "addch");
}
#ifdef HAVE_NCURSESW
/* Release the wide-character buffer allocated by
   PyCurses_ConvertToString() when it returned string type 2; a no-op for
   the narrow (bytes) case.  Without HAVE_NCURSESW no wide buffer can
   exist, so the macro expands to nothing. */
#define curses_release_wstr(STRTYPE, WSTR)  \
    do {                                    \
        if ((STRTYPE) == 2) {               \
            PyMem_Free((WSTR));             \
        }                                   \
    } while (0)
#else
#define curses_release_wstr(_STRTYPE, _WSTR)
#endif
static int
curses_wattrset(PyCursesWindowObject *self, long attr, const char *funcname)
{
    /* Apply 'attr' as the window's attribute set.  On failure, raise a
       curses error attributed to the Python-level 'funcname' and return
       -1; return 0 on success. */
    int rc = wattrset(self->win, attr);
    if (rc == ERR) {
        curses_window_set_error(self, "wattrset", funcname);
        return -1;
    }
    return 0;
}
/*[clinic input]
_curses.window.addstr
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
str: object
String to add.
[
attr: long
Attributes for characters.
]
/
Paint the string.
Paint the string str at (y, x) with attributes attr,
overwriting anything previously on the display.
By default, the character position and attributes are the
current settings for the window object.
[clinic start generated code]*/
static PyObject *
_curses_window_addstr_impl(PyCursesWindowObject *self, int group_left_1,
                           int y, int x, PyObject *str, int group_right_1,
                           long attr)
/*[clinic end generated code: output=65a928ea85ff3115 input=ff6cbb91448a22a3]*/
{
    int rtn;
    /* strtype: 0 = conversion failure (exception set), 1 = byte string
       in 'bytesobj', 2 = wide string in 'wstr' (HAVE_NCURSESW only). */
    int strtype;
    PyObject *bytesobj = NULL;
#ifdef HAVE_NCURSESW
    wchar_t *wstr = NULL;
#endif
    attr_t attr_old = A_NORMAL;
    int use_xy = group_left_1, use_attr = group_right_1;
    const char *funcname;
#ifdef HAVE_NCURSESW
    strtype = PyCurses_ConvertToString(self, str, &bytesobj, &wstr);
#else
    strtype = PyCurses_ConvertToString(self, str, &bytesobj, NULL);
#endif
    if (strtype == 0) {
        return NULL;
    }
    if (use_attr) {
        /* Temporarily switch the window attributes; restored below. */
        attr_old = getattrs(self->win);
        if (curses_wattrset(self, attr, "addstr") < 0) {
            curses_release_wstr(strtype, wstr);
            return NULL;
        }
    }
#ifdef HAVE_NCURSESW
    if (strtype == 2) {
        if (use_xy) {
            rtn = mvwaddwstr(self->win,y,x,wstr);
            funcname = "mvwaddwstr";
        }
        else {
            rtn = waddwstr(self->win,wstr);
            funcname = "waddwstr";
        }
        PyMem_Free(wstr);
    }
    else
#endif
    {
        const char *str = PyBytes_AS_STRING(bytesobj);
        if (use_xy) {
            rtn = mvwaddstr(self->win,y,x,str);
            funcname = "mvwaddstr";
        }
        else {
            rtn = waddstr(self->win,str);
            funcname = "waddstr";
        }
        Py_DECREF(bytesobj);
    }
    if (rtn == ERR) {
        curses_window_set_error(self, funcname, "addstr");
        return NULL;
    }
    if (use_attr) {
        /* Restore the attributes saved before painting. */
        rtn = wattrset(self->win, attr_old);
        return curses_window_check_err(self, rtn, "wattrset", "addstr");
    }
    Py_RETURN_NONE;
}
/*[clinic input]
_curses.window.addnstr
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
str: object
String to add.
n: int
Maximal number of characters.
[
attr: long
Attributes for characters.
]
/
Paint at most n characters of the string.
Paint at most n characters of the string str at (y, x) with
attributes attr, overwriting anything previously on the display.
By default, the character position and attributes are the
current settings for the window object.
[clinic start generated code]*/
static PyObject *
_curses_window_addnstr_impl(PyCursesWindowObject *self, int group_left_1,
                            int y, int x, PyObject *str, int n,
                            int group_right_1, long attr)
/*[clinic end generated code: output=6d21cee2ce6876d9 input=72718415c2744a2a]*/
{
    int rtn;
    /* strtype: 0 = conversion failure (exception set), 1 = byte string
       in 'bytesobj', 2 = wide string in 'wstr' (HAVE_NCURSESW only). */
    int strtype;
    PyObject *bytesobj = NULL;
#ifdef HAVE_NCURSESW
    wchar_t *wstr = NULL;
#endif
    attr_t attr_old = A_NORMAL;
    int use_xy = group_left_1, use_attr = group_right_1;
    const char *funcname;
#ifdef HAVE_NCURSESW
    strtype = PyCurses_ConvertToString(self, str, &bytesobj, &wstr);
#else
    strtype = PyCurses_ConvertToString(self, str, &bytesobj, NULL);
#endif
    if (strtype == 0)
        return NULL;
    if (use_attr) {
        /* Temporarily switch the window attributes; restored below. */
        attr_old = getattrs(self->win);
        if (curses_wattrset(self, attr, "addnstr") < 0) {
            curses_release_wstr(strtype, wstr);
            return NULL;
        }
    }
#ifdef HAVE_NCURSESW
    if (strtype == 2) {
        if (use_xy) {
            rtn = mvwaddnwstr(self->win,y,x,wstr,n);
            funcname = "mvwaddnwstr";
        }
        else {
            rtn = waddnwstr(self->win,wstr,n);
            funcname = "waddnwstr";
        }
        PyMem_Free(wstr);
    }
    else
#endif
    {
        const char *str = PyBytes_AS_STRING(bytesobj);
        if (use_xy) {
            rtn = mvwaddnstr(self->win,y,x,str,n);
            funcname = "mvwaddnstr";
        }
        else {
            rtn = waddnstr(self->win,str,n);
            funcname = "waddnstr";
        }
        Py_DECREF(bytesobj);
    }
    if (rtn == ERR) {
        curses_window_set_error(self, funcname, "addnstr");
        return NULL;
    }
    if (use_attr) {
        /* Restore the attributes saved before painting. */
        rtn = wattrset(self->win, attr_old);
        return curses_window_check_err(self, rtn, "wattrset", "addnstr");
    }
    Py_RETURN_NONE;
}
/*[clinic input]
_curses.window.bkgd
ch: object
Background character.
attr: long(c_default="A_NORMAL") = _curses.A_NORMAL
Background attributes.
/
Set the background property of the window.
[clinic start generated code]*/
static PyObject *
_curses_window_bkgd_impl(PyCursesWindowObject *self, PyObject *ch, long attr)
/*[clinic end generated code: output=058290afb2cf4034 input=634015bcb339283d]*/
{
    /* Convert the Python character, then apply it together with the
       attributes as the window background of every position. */
    chtype bg;
    if (!PyCurses_ConvertToChtype(self, ch, &bg)) {
        return NULL;
    }
    return curses_window_check_err(self, wbkgd(self->win, bg | attr),
                                   "wbkgd", "bkgd");
}
/*[clinic input]
_curses.window.attroff
attr: long
/
Remove attribute attr from the "background" set.
[clinic start generated code]*/
static PyObject *
_curses_window_attroff_impl(PyCursesWindowObject *self, long attr)
/*[clinic end generated code: output=8a2fcd4df682fc64 input=786beedf06a7befe]*/
{
    /* Clear the given attribute bits from the window's current set. */
    return curses_window_check_err(self,
                                   wattroff(self->win, (attr_t)attr),
                                   "wattroff", "attroff");
}
/*[clinic input]
_curses.window.attron
attr: long
/
Add attribute attr to the "background" set.
[clinic start generated code]*/
static PyObject *
_curses_window_attron_impl(PyCursesWindowObject *self, long attr)
/*[clinic end generated code: output=7afea43b237fa870 input=b57f824e1bf58326]*/
{
    /* OR the given attribute bits into the window's current set. */
    return curses_window_check_err(self,
                                   wattron(self->win, (attr_t)attr),
                                   "wattron", "attron");
}
/*[clinic input]
_curses.window.attrset
attr: long
/
Set the "background" set of attributes.
[clinic start generated code]*/
static PyObject *
_curses_window_attrset_impl(PyCursesWindowObject *self, long attr)
/*[clinic end generated code: output=84e379bff20c0433 input=42e400c0d0154ab5]*/
{
    /* Replace the window's attribute set wholesale. */
    return curses_window_check_err(self,
                                   wattrset(self->win, (attr_t)attr),
                                   "wattrset", "attrset");
}
/*[clinic input]
_curses.window.bkgdset
ch: object
Background character.
attr: long(c_default="A_NORMAL") = _curses.A_NORMAL
Background attributes.
/
Set the window's background.
[clinic start generated code]*/
static PyObject *
_curses_window_bkgdset_impl(PyCursesWindowObject *self, PyObject *ch,
                            long attr)
/*[clinic end generated code: output=8cb994fc4d7e2496 input=e09c682425c9e45b]*/
{
    /* Set the background property used for subsequently written
       characters; wbkgdset() has no error return to check. */
    chtype bg;
    if (!PyCurses_ConvertToChtype(self, ch, &bg)) {
        return NULL;
    }
    wbkgdset(self->win, bg | attr);
    Py_RETURN_NONE;
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.border
ls: object(c_default="NULL") = _curses.ACS_VLINE
Left side.
rs: object(c_default="NULL") = _curses.ACS_VLINE
Right side.
ts: object(c_default="NULL") = _curses.ACS_HLINE
Top side.
bs: object(c_default="NULL") = _curses.ACS_HLINE
Bottom side.
tl: object(c_default="NULL") = _curses.ACS_ULCORNER
Upper-left corner.
tr: object(c_default="NULL") = _curses.ACS_URCORNER
Upper-right corner.
bl: object(c_default="NULL") = _curses.ACS_LLCORNER
Bottom-left corner.
br: object(c_default="NULL") = _curses.ACS_LRCORNER
Bottom-right corner.
/
Draw a border around the edges of the window.
Each parameter specifies the character to use for a specific part of the
border. The characters can be specified as integers or as one-character
strings. A 0 value for any parameter will cause the default character to be
used for that parameter.
[clinic start generated code]*/
static PyObject *
_curses_window_border_impl(PyCursesWindowObject *self, PyObject *ls,
                           PyObject *rs, PyObject *ts, PyObject *bs,
                           PyObject *tl, PyObject *tr, PyObject *bl,
                           PyObject *br)
/*[clinic end generated code: output=670ef38d3d7c2aa3 input=adaafca87488ee35]*/
{
    chtype ch[8];
    int i, rtn;
    /* Clear the array of parameters */
    /* A zero chtype makes wborder() use its built-in default for that
       position, so unspecified (NULL) arguments simply stay 0. */
    for(i=0; i<8; i++)
        ch[i] = 0;
    /* Convert each given argument into its slot; a failed conversion has
       already set an exception. */
#define CONVERTTOCHTYPE(obj, i) \
    if ((obj) != NULL && !PyCurses_ConvertToChtype(self, (obj), &ch[(i)])) \
        return NULL;
    CONVERTTOCHTYPE(ls, 0);
    CONVERTTOCHTYPE(rs, 1);
    CONVERTTOCHTYPE(ts, 2);
    CONVERTTOCHTYPE(bs, 3);
    CONVERTTOCHTYPE(tl, 4);
    CONVERTTOCHTYPE(tr, 5);
    CONVERTTOCHTYPE(bl, 6);
    CONVERTTOCHTYPE(br, 7);
#undef CONVERTTOCHTYPE
    rtn = wborder(self->win,
                  ch[0], ch[1], ch[2], ch[3],
                  ch[4], ch[5], ch[6], ch[7]);
    return curses_window_check_err(self, rtn, "wborder", "border");
}
/*[clinic input]
_curses.window.box
[
verch: object(c_default="_PyLong_GetZero()") = 0
Left and right side.
horch: object(c_default="_PyLong_GetZero()") = 0
Top and bottom side.
]
/
Draw a border around the edges of the window.
Similar to border(), but both ls and rs are verch and both ts and bs are
horch. The default corner characters are always used by this function.
[clinic start generated code]*/
static PyObject *
_curses_window_box_impl(PyCursesWindowObject *self, int group_right_1,
                        PyObject *verch, PyObject *horch)
/*[clinic end generated code: output=f3fcb038bb287192 input=f00435f9c8c98f60]*/
{
    /* Without arguments both characters stay 0, which tells box() to use
       its default line-drawing characters. */
    chtype vch = 0, hch = 0;
    if (group_right_1) {
        /* Short-circuit keeps the original conversion order: verch
           first, then horch; either failure has set an exception. */
        if (!PyCurses_ConvertToChtype(self, verch, &vch)
            || !PyCurses_ConvertToChtype(self, horch, &hch))
        {
            return NULL;
        }
    }
    return curses_window_check_err(self, box(self->win, vch, hch),
                                   "box", NULL);
}
/* Portability shim: on ncurses (and systems where mvwdelch is an
   expression) the macro can be used directly; elsewhere wrap it in a
   function so it can be used where an expression is required. */
#if defined(HAVE_NCURSES_H) || defined(MVWDELCH_IS_EXPRESSION)
#define py_mvwdelch mvwdelch
#else
int py_mvwdelch(WINDOW *w, int y, int x)
{
    mvwdelch(w,y,x);
    /* On HP/UX, mvwdelch already returns. On other systems,
       we may well run into this return statement. */
    return 0;
}
#endif
/* Portability shim for is_pad(): prefer the library function/macro when
   available, otherwise peek at the WINDOW flags directly. */
#if defined(HAVE_CURSES_IS_PAD)
// is_pad() is defined, either as a macro or as a function
#define py_is_pad(win)      is_pad(win)
#elif defined(WINDOW_HAS_FLAGS)
// is_pad() is not defined, but we can inspect WINDOW structure members
#define py_is_pad(win)      ((win) ? ((win)->_flags & _ISPAD) != 0 : FALSE)
#endif
/* chgat, added by Fabian Kreutz <fabian.kreutz at gmx.net> */
#ifdef HAVE_CURSES_WCHGAT
PyDoc_STRVAR(_curses_window_chgat__doc__,
"chgat([y, x,] [n=-1,] attr)\n"
"Set the attributes of characters.\n"
"\n"
" y\n"
" Y-coordinate.\n"
" x\n"
" X-coordinate.\n"
" n\n"
" Number of characters.\n"
" attr\n"
" Attributes for characters.\n"
"\n"
"Set the attributes of num characters at the current cursor position, or at\n"
"position (y, x) if supplied. If no value of num is given or num = -1, the\n"
"attribute will be set on all the characters to the end of the line. This\n"
"function does not move the cursor. The changed line will be touched using\n"
"the touchline() method so that the contents will be redisplayed by the next\n"
"window refresh.");
/* Hand-written (non-clinic) because the argument groups [y, x] and [n]
   are both optional, which Argument Clinic cannot express. */
static PyObject *
PyCursesWindow_ChgAt(PyObject *op, PyObject *args)
{
    PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);
    int rtn;
    const char *funcname;
    int x, y;
    int num = -1;
    short color;
    attr_t attr = A_NORMAL;
    long lattr;
    int use_xy = FALSE;
    /* Dispatch on argument count: (attr), (n, attr), (y, x, attr) or
       (y, x, n, attr). */
    switch (PyTuple_Size(args)) {
    case 1:
        if (!PyArg_ParseTuple(args,"l;attr", &lattr))
            return NULL;
        attr = lattr;
        break;
    case 2:
        if (!PyArg_ParseTuple(args,"il;n,attr", &num, &lattr))
            return NULL;
        attr = lattr;
        break;
    case 3:
        if (!PyArg_ParseTuple(args,"iil;y,x,attr", &y, &x, &lattr))
            return NULL;
        attr = lattr;
        use_xy = TRUE;
        break;
    case 4:
        if (!PyArg_ParseTuple(args,"iiil;y,x,n,attr", &y, &x, &num, &lattr))
            return NULL;
        attr = lattr;
        use_xy = TRUE;
        break;
    default:
        PyErr_SetString(PyExc_TypeError,
                        "_curses.window.chgat requires 1 to 4 arguments");
        return NULL;
    }
    /* wchgat() takes the color pair and the attribute bits separately. */
    color = (short) PAIR_NUMBER(attr);
    attr = attr & A_ATTRIBUTES;
    if (use_xy) {
        rtn = mvwchgat(self->win,y,x,num,attr,color,NULL);
        funcname = "mvwchgat";
    } else {
        getyx(self->win,y,x);
        rtn = wchgat(self->win,num,attr,color,NULL);
        funcname = "wchgat";
    }
    if (rtn == ERR) {
        curses_window_set_error(self, funcname, "chgat");
        return NULL;
    }
    /* wchgat() does not mark the line dirty; touch it so the change is
       repainted on the next refresh. */
    rtn = touchline(self->win,y,1);
    return curses_window_check_err(self, rtn, "touchline", "chgat");
}
#endif
/*[clinic input]
_curses.window.delch
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
/
Delete any character at (y, x).
[clinic start generated code]*/
static PyObject *
_curses_window_delch_impl(PyCursesWindowObject *self, int group_right_1,
                          int y, int x)
/*[clinic end generated code: output=22e77bb9fa11b461 input=d2f79e630a4fc6d0]*/
{
    /* Choose the cursor-moving variant when (y, x) was supplied. */
    int rtn;
    const char *funcname;
    if (group_right_1) {
        rtn = py_mvwdelch(self->win, y, x);
        funcname = "mvwdelch";
    }
    else {
        rtn = wdelch(self->win);
        funcname = "wdelch";
    }
    return curses_window_check_err(self, rtn, funcname, "delch");
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.derwin
[
nlines: int = 0
Height.
ncols: int = 0
Width.
]
begin_y: int
Top side y-coordinate.
begin_x: int
Left side x-coordinate.
/
Create a sub-window (window-relative coordinates).
derwin() is the same as calling subwin(), except that begin_y and begin_x
are relative to the origin of the window, rather than relative to the entire
screen.
[clinic start generated code]*/
static PyObject *
_curses_window_derwin_impl(PyCursesWindowObject *self, int group_left_1,
                           int nlines, int ncols, int begin_y, int begin_x)
/*[clinic end generated code: output=7924b112d9f70d6e input=ebe95ded1c284c8e]*/
{
    /* Create the derived window, then wrap it with 'self' recorded as the
       parent so the sub-window keeps it alive. */
    WINDOW *sub = derwin(self->win, nlines, ncols, begin_y, begin_x);
    if (sub == NULL) {
        curses_window_set_null_error(self, "derwin", NULL);
        return NULL;
    }
    cursesmodule_state *state = get_cursesmodule_state_by_win(self);
    return PyCursesWindow_New(state, sub, NULL, self);
}
/*[clinic input]
_curses.window.echochar
ch: object
Character to add.
attr: long(c_default="A_NORMAL") = _curses.A_NORMAL
Attributes for the character.
/
Add character ch with attribute attr, and refresh.
[clinic start generated code]*/
static PyObject *
_curses_window_echochar_impl(PyCursesWindowObject *self, PyObject *ch,
                             long attr)
/*[clinic end generated code: output=13e7dd875d4b9642 input=e7f34b964e92b156]*/
{
    chtype ch_;
    if (!PyCurses_ConvertToChtype(self, ch, &ch_))
        return NULL;
    int rtn;
    const char *funcname;
#ifdef py_is_pad
    /* Pads cannot be refreshed with wrefresh(); pechochar() is the pad
       counterpart of wechochar(). */
    if (py_is_pad(self->win)) {
        rtn = pechochar(self->win, ch_ | (attr_t)attr);
        funcname = "pechochar";
    }
    else
#endif
    {
        rtn = wechochar(self->win, ch_ | (attr_t)attr);
        funcname = "wechochar";
    }
    return curses_window_check_err(self, rtn, funcname, "echochar");
}
#ifdef NCURSES_MOUSE_VERSION
/*[clinic input]
@permit_long_summary
_curses.window.enclose
y: int
Y-coordinate.
x: int
X-coordinate.
/
Return True if the screen-relative coordinates are enclosed by the window.
[clinic start generated code]*/
static PyObject *
_curses_window_enclose_impl(PyCursesWindowObject *self, int y, int x)
/*[clinic end generated code: output=8679beef50502648 input=9ba7c894cffe5507]*/
{
    /* wenclose() returns a bool-like value and has no error return. */
    return PyBool_FromLong(wenclose(self->win, y, x));
}
#endif
/*[clinic input]
_curses.window.getbkgd
Return the window's current background character/attribute pair.
[clinic start generated code]*/
static PyObject *
_curses_window_getbkgd_impl(PyCursesWindowObject *self)
/*[clinic end generated code: output=3ff953412b0e6028 input=7cf1f59a31f89df4]*/
{
    /* Fetch the combined background character/attribute pair. */
    chtype bkgd = getbkgd(self->win);
    if (bkgd == (chtype)ERR) {
        curses_window_set_error(self, "getbkgd", NULL);
        return NULL;
    }
    return PyLong_FromLong(bkgd);
}
static PyObject *
curses_check_signals_on_input_error(PyCursesWindowObject *self,
                                    const char *curses_funcname,
                                    const char *python_funcname)
{
    /* Called after a blocking input call returned ERR: first give any
       pending signal handler (e.g. KeyboardInterrupt) the chance to
       raise; only if none did, raise curses.error ourselves.  Always
       returns NULL with an exception set. */
    assert(!PyErr_Occurred());
    if (PyErr_CheckSignals()) {
        return NULL;
    }
    cursesmodule_state *state = get_cursesmodule_state_by_win(self);
    PyErr_Format(state->error, "%s() (called by %s()): no input",
                 curses_funcname, python_funcname);
    return NULL;
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.getch
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
/
Get a character code from terminal keyboard.
The integer returned does not have to be in ASCII range: function keys,
keypad keys and so on return numbers higher than 256. In no-delay mode, -1
is returned if there is no input, else getch() waits until a key is pressed.
[clinic start generated code]*/
static PyObject *
_curses_window_getch_impl(PyCursesWindowObject *self, int group_right_1,
                          int y, int x)
/*[clinic end generated code: output=e1639e87d545e676 input=9a053077373e2a30]*/
{
    int rtn;
    /* Release the GIL: wgetch() may block waiting for a keypress. */
    Py_BEGIN_ALLOW_THREADS
    if (!group_right_1) {
        rtn = wgetch(self->win);
    }
    else {
        rtn = mvwgetch(self->win, y, x);
    }
    Py_END_ALLOW_THREADS
    if (rtn == ERR) {
        // We suppress ERR returned by wgetch() in nodelay mode
        // after we handled possible interruption signals.
        if (PyErr_CheckSignals()) {
            return NULL;
        }
        // ERR is an implementation detail, so to be on the safe side,
        // we forcibly set the return value to -1 as documented above.
        rtn = -1;
    }
    return PyLong_FromLong(rtn);
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.getkey
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
/
Get a character (string) from terminal keyboard.
Returning a string instead of an integer, as getch() does. Function keys,
keypad keys and other special keys return a multibyte string containing the
key name. In no-delay mode, an exception is raised if there is no input.
[clinic start generated code]*/
static PyObject *
_curses_window_getkey_impl(PyCursesWindowObject *self, int group_right_1,
                           int y, int x)
/*[clinic end generated code: output=8490a182db46b10f input=5177f03fb6c31ea6]*/
{
    int rtn;
    /* Release the GIL: wgetch() may block waiting for a keypress. */
    Py_BEGIN_ALLOW_THREADS
    if (!group_right_1) {
        rtn = wgetch(self->win);
    }
    else {
        rtn = mvwgetch(self->win, y, x);
    }
    Py_END_ALLOW_THREADS
    if (rtn == ERR) {
        /* wgetch() returns ERR in nodelay mode */
        const char *funcname = group_right_1 ? "mvwgetch" : "wgetch";
        return curses_check_signals_on_input_error(self, funcname, "getkey");
    } else if (rtn <= 255) {
        /* Ordinary single-byte key: return it as a 1-character string. */
#ifdef NCURSES_VERSION_MAJOR
#if NCURSES_VERSION_MAJOR*100+NCURSES_VERSION_MINOR <= 507
        /* Work around a bug in ncurses 5.7 and earlier */
        if (rtn < 0) {
            rtn += 256;
        }
#endif
#endif
        return PyUnicode_FromOrdinal(rtn);
    } else {
        /* Function/keypad key: return its symbolic name (e.g. "KEY_UP");
           keyname() may return NULL for unknown codes. */
        const char *knp = keyname(rtn);
        return PyUnicode_FromString((knp == NULL) ? "" : knp);
    }
}
#ifdef HAVE_NCURSESW
/*[clinic input]
_curses.window.get_wch
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
/
Get a wide character from terminal keyboard.
Return a character for most keys, or an integer for function keys,
keypad keys, and other special keys.
[clinic start generated code]*/
static PyObject *
_curses_window_get_wch_impl(PyCursesWindowObject *self, int group_right_1,
                            int y, int x)
/*[clinic end generated code: output=9f4f86e91fe50ef3 input=dd7e5367fb49dc48]*/
{
    int ct;
    wint_t rtn;
    /* Release the GIL: wget_wch() may block waiting for a keypress. */
    Py_BEGIN_ALLOW_THREADS
    if (!group_right_1) {
        ct = wget_wch(self->win ,&rtn);
    }
    else {
        ct = mvwget_wch(self->win, y, x, &rtn);
    }
    Py_END_ALLOW_THREADS
    if (ct == ERR) {
        /* wget_wch() returns ERR in nodelay mode */
        const char *funcname = group_right_1 ? "mvwget_wch" : "wget_wch";
        return curses_check_signals_on_input_error(self, funcname, "get_wch");
    }
    /* KEY_CODE_YES means rtn is a function-key code (int); otherwise it
       is an ordinary wide character. */
    if (ct == KEY_CODE_YES)
        return PyLong_FromLong(rtn);
    else
        return PyUnicode_FromOrdinal(rtn);
}
#endif
/*
 * Helper function for parsing parameters from getstr() and instr().
 * This function is necessary because Argument Clinic does not know
 * how to handle nested optional groups with default values inside.
 *
 * Accepted shapes: (), (n), (y, x) and (y, x, n); *use_xy reports
 * whether coordinates were given.
 *
 * Return 1 on success and 0 on failure, similar to PyArg_ParseTuple().
 */
static int
curses_clinic_parse_optional_xy_n(PyObject *args,
                                  int *y, int *x, unsigned int *n, int *use_xy,
                                  const char *qualname)
{
    Py_ssize_t nargs = PyTuple_GET_SIZE(args);
    *use_xy = (nargs == 2 || nargs == 3);
    if (nargs == 0) {
        return 1;
    }
    if (nargs == 1) {
        return PyArg_ParseTuple(args, "O&;n",
                                _PyLong_UnsignedInt_Converter, n);
    }
    if (nargs == 2) {
        return PyArg_ParseTuple(args, "ii;y,x", y, x);
    }
    if (nargs == 3) {
        return PyArg_ParseTuple(args, "iiO&;y,x,n", y, x,
                                _PyLong_UnsignedInt_Converter, n);
    }
    PyErr_Format(PyExc_TypeError, "%s requires 0 to 3 arguments",
                 qualname);
    return 0;
}
PyDoc_STRVAR(_curses_window_getstr__doc__,
"getstr([[y, x,] n=2047])\n"
"Read a string from the user, with primitive line editing capacity.\n"
"\n"
" y\n"
" Y-coordinate.\n"
" x\n"
" X-coordinate.\n"
" n\n"
" Maximal number of characters.");
/* Hand-written (non-clinic) because the nested optional groups cannot be
   expressed by Argument Clinic; see curses_clinic_parse_optional_xy_n(). */
static PyObject *
PyCursesWindow_getstr(PyObject *op, PyObject *args)
{
    PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);
    int rtn, use_xy = 0, y = 0, x = 0;
    /* Cap the buffer at 2048 bytes: 2047 characters plus trailing NUL. */
    unsigned int max_buf_size = 2048;
    unsigned int n = max_buf_size - 1;
    /* Bug fix: report this method's own qualified name in argument-count
       errors (it previously claimed to be "_curses.window.instr"). */
    if (!curses_clinic_parse_optional_xy_n(args, &y, &x, &n, &use_xy,
                                           "_curses.window.getstr"))
    {
        return NULL;
    }
    n = Py_MIN(n, max_buf_size - 1);
    PyBytesWriter *writer = PyBytesWriter_Create(n + 1);
    if (writer == NULL) {
        return NULL;
    }
    char *buf = PyBytesWriter_GetData(writer);
    if (use_xy) {
        Py_BEGIN_ALLOW_THREADS
#ifdef STRICT_SYSV_CURSES
        /* SysV curses lacks mvwgetnstr(); emulate it with wmove(). */
        rtn = wmove(self->win, y, x) == ERR
                ? ERR
                : wgetnstr(self->win, buf, n);
#else
        rtn = mvwgetnstr(self->win, y, x, buf, n);
#endif
        Py_END_ALLOW_THREADS
    }
    else {
        Py_BEGIN_ALLOW_THREADS
        rtn = wgetnstr(self->win, buf, n);
        Py_END_ALLOW_THREADS
    }
    if (rtn == ERR) {
        /* Historical behavior: no input yields b"", not an exception. */
        PyBytesWriter_Discard(writer);
        return Py_GetConstant(Py_CONSTANT_EMPTY_BYTES);
    }
    /* wgetnstr() NUL-terminates; trim the bytes object to actual size. */
    return PyBytesWriter_FinishWithSize(writer, strlen(buf));
}
/*[clinic input]
_curses.window.hline
[
y: int
Starting Y-coordinate.
x: int
Starting X-coordinate.
]
ch: object
Character to draw.
n: int
Line length.
[
attr: long(c_default="A_NORMAL") = _curses.A_NORMAL
Attributes for the characters.
]
/
Display a horizontal line.
[clinic start generated code]*/
static PyObject *
_curses_window_hline_impl(PyCursesWindowObject *self, int group_left_1,
                          int y, int x, PyObject *ch, int n,
                          int group_right_1, long attr)
/*[clinic end generated code: output=c00d489d61fc9eef input=81a4dea47268163e]*/
{
    chtype line_ch;
    if (!PyCurses_ConvertToChtype(self, ch, &line_ch)) {
        return NULL;
    }
    /* Move the cursor first when a starting (y, x) was supplied. */
    if (group_left_1 && wmove(self->win, y, x) == ERR) {
        curses_window_set_error(self, "wmove", "hline");
        return NULL;
    }
    return curses_window_check_err(self,
                                   whline(self->win,
                                          line_ch | (attr_t)attr, n),
                                   "whline", "hline");
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.insch
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
ch: object
Character to insert.
[
attr: long(c_default="A_NORMAL") = _curses.A_NORMAL
Attributes for the character.
]
/
Insert a character before the current or specified position.
All characters to the right of the cursor are shifted one position right, with
the rightmost characters on the line being lost.
[clinic start generated code]*/
static PyObject *
_curses_window_insch_impl(PyCursesWindowObject *self, int group_left_1,
                          int y, int x, PyObject *ch, int group_right_1,
                          long attr)
/*[clinic end generated code: output=ade8cfe3a3bf3e34 input=3f2a230cb09fed5a]*/
{
    int rtn;
    chtype ch_ = 0;
    if (!PyCurses_ConvertToChtype(self, ch, &ch_))
        return NULL;
    const char *funcname;
    if (!group_left_1) {
        rtn = winsch(self->win, ch_ | (attr_t)attr);
        funcname = "winsch";
    }
    else {
        rtn = mvwinsch(self->win, y, x, ch_ | (attr_t)attr);
        /* Bug fix: the curses function is mvwinsch(); the error message
           previously used the misspelled name "mvwwinsch". */
        funcname = "mvwinsch";
    }
    return curses_window_check_err(self, rtn, funcname, "insch");
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.inch
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
/
Return the character at the given position in the window.
The bottom 8 bits are the character proper, and upper bits are the attributes.
[clinic start generated code]*/
static PyObject *
_curses_window_inch_impl(PyCursesWindowObject *self, int group_right_1,
                         int y, int x)
/*[clinic end generated code: output=97ca8581baaafd06 input=a5846f315464dc86]*/
{
    /* Read the character+attributes cell, optionally moving first. */
    chtype result;
    const char *funcname;
    if (group_right_1) {
        result = mvwinch(self->win, y, x);
        funcname = "mvwinch";
    }
    else {
        result = winch(self->win);
        funcname = "winch";
    }
    if (result == (chtype)ERR) {
        curses_window_set_error(self, funcname, "inch");
        return NULL;
    }
    return PyLong_FromUnsignedLong(result);
}
PyDoc_STRVAR(_curses_window_instr__doc__,
"instr([y, x,] n=2047)\n"
"Return a string of characters, extracted from the window.\n"
"\n"
" y\n"
" Y-coordinate.\n"
" x\n"
" X-coordinate.\n"
" n\n"
" Maximal number of characters.\n"
"\n"
"Return a string of characters, extracted from the window starting at the\n"
"current cursor position, or at y, x if specified. Attributes are stripped\n"
"from the characters. If n is specified, instr() returns a string at most\n"
"n characters long (exclusive of the trailing NUL).");
/* Hand-written (non-clinic) because the nested optional groups cannot be
   expressed by Argument Clinic; see curses_clinic_parse_optional_xy_n(). */
static PyObject *
PyCursesWindow_instr(PyObject *op, PyObject *args)
{
    PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);
    int rtn, use_xy = 0, y = 0, x = 0;
    /* Cap the buffer at 2048 bytes: 2047 characters plus trailing NUL. */
    unsigned int max_buf_size = 2048;
    unsigned int n = max_buf_size - 1;
    if (!curses_clinic_parse_optional_xy_n(args, &y, &x, &n, &use_xy,
                                           "_curses.window.instr"))
    {
        return NULL;
    }
    n = Py_MIN(n, max_buf_size - 1);
    PyBytesWriter *writer = PyBytesWriter_Create(n + 1);
    if (writer == NULL) {
        return NULL;
    }
    char *buf = PyBytesWriter_GetData(writer);
    if (use_xy) {
        rtn = mvwinnstr(self->win, y, x, buf, n);
    }
    else {
        rtn = winnstr(self->win, buf, n);
    }
    if (rtn == ERR) {
        /* Historical behavior: failure yields b"", not an exception. */
        PyBytesWriter_Discard(writer);
        return Py_GetConstant(Py_CONSTANT_EMPTY_BYTES);
    }
    /* winnstr() NUL-terminates; trim the bytes object to actual size. */
    return PyBytesWriter_FinishWithSize(writer, strlen(buf));
}
/*[clinic input]
_curses.window.insstr
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
str: object
String to insert.
[
attr: long
Attributes for characters.
]
/
Insert the string before the current or specified position.
Insert a character string (as many characters as will fit on the line)
before the character under the cursor. All characters to the right of
the cursor are shifted right, with the rightmost characters on the line
being lost. The cursor position does not change (after moving to y, x,
if specified).
[clinic start generated code]*/
static PyObject *
_curses_window_insstr_impl(PyCursesWindowObject *self, int group_left_1,
                           int y, int x, PyObject *str, int group_right_1,
                           long attr)
/*[clinic end generated code: output=c259a5265ad0b777 input=6827cddc6340a7f3]*/
{
    int rtn;
    /* strtype: 0 = conversion failure (exception set), 1 = byte string
       in 'bytesobj', 2 = wide string in 'wstr' (HAVE_NCURSESW only). */
    int strtype;
    PyObject *bytesobj = NULL;
#ifdef HAVE_NCURSESW
    wchar_t *wstr = NULL;
#endif
    attr_t attr_old = A_NORMAL;
    int use_xy = group_left_1, use_attr = group_right_1;
    const char *funcname;
#ifdef HAVE_NCURSESW
    strtype = PyCurses_ConvertToString(self, str, &bytesobj, &wstr);
#else
    strtype = PyCurses_ConvertToString(self, str, &bytesobj, NULL);
#endif
    if (strtype == 0)
        return NULL;
    if (use_attr) {
        /* Temporarily switch the window attributes; restored below. */
        attr_old = getattrs(self->win);
        if (curses_wattrset(self, attr, "insstr") < 0) {
            curses_release_wstr(strtype, wstr);
            return NULL;
        }
    }
#ifdef HAVE_NCURSESW
    if (strtype == 2) {
        if (use_xy) {
            rtn = mvwins_wstr(self->win,y,x,wstr);
            funcname = "mvwins_wstr";
        }
        else {
            rtn = wins_wstr(self->win,wstr);
            funcname = "wins_wstr";
        }
        PyMem_Free(wstr);
    }
    else
#endif
    {
        const char *str = PyBytes_AS_STRING(bytesobj);
        if (use_xy) {
            rtn = mvwinsstr(self->win,y,x,str);
            funcname = "mvwinsstr";
        }
        else {
            rtn = winsstr(self->win,str);
            funcname = "winsstr";
        }
        Py_DECREF(bytesobj);
    }
    if (rtn == ERR) {
        curses_window_set_error(self, funcname, "insstr");
        return NULL;
    }
    if (use_attr) {
        /* Restore the attributes saved before inserting. */
        rtn = wattrset(self->win, attr_old);
        return curses_window_check_err(self, rtn, "wattrset", "insstr");
    }
    Py_RETURN_NONE;
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.insnstr
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
str: object
String to insert.
n: int
Maximal number of characters.
[
attr: long
Attributes for characters.
]
/
Insert at most n characters of the string.
Insert a character string (as many characters as will fit on the line)
before the character under the cursor, up to n characters. If n is zero
or negative, the entire string is inserted. All characters to the right
of the cursor are shifted right, with the rightmost characters on the line
being lost. The cursor position does not change (after moving to y, x, if
specified).
[clinic start generated code]*/
static PyObject *
_curses_window_insnstr_impl(PyCursesWindowObject *self, int group_left_1,
                            int y, int x, PyObject *str, int n,
                            int group_right_1, long attr)
/*[clinic end generated code: output=971a32ea6328ec8b input=dcdc554102fbcd5d]*/
{
    int rtn;
    /* strtype: 0 = conversion failure (exception set), 1 = byte string
       in 'bytesobj', 2 = wide string in 'wstr' (HAVE_NCURSESW only). */
    int strtype;
    PyObject *bytesobj = NULL;
#ifdef HAVE_NCURSESW
    wchar_t *wstr = NULL;
#endif
    attr_t attr_old = A_NORMAL;
    int use_xy = group_left_1, use_attr = group_right_1;
    const char *funcname;
#ifdef HAVE_NCURSESW
    strtype = PyCurses_ConvertToString(self, str, &bytesobj, &wstr);
#else
    strtype = PyCurses_ConvertToString(self, str, &bytesobj, NULL);
#endif
    if (strtype == 0)
        return NULL;
    if (use_attr) {
        /* Temporarily switch the window attributes; restored below. */
        attr_old = getattrs(self->win);
        if (curses_wattrset(self, attr, "insnstr") < 0) {
            curses_release_wstr(strtype, wstr);
            return NULL;
        }
    }
#ifdef HAVE_NCURSESW
    if (strtype == 2) {
        if (use_xy) {
            rtn = mvwins_nwstr(self->win,y,x,wstr,n);
            funcname = "mvwins_nwstr";
        }
        else {
            rtn = wins_nwstr(self->win,wstr,n);
            funcname = "wins_nwstr";
        }
        PyMem_Free(wstr);
    }
    else
#endif
    {
        const char *str = PyBytes_AS_STRING(bytesobj);
        if (use_xy) {
            rtn = mvwinsnstr(self->win,y,x,str,n);
            funcname = "mvwinsnstr";
        }
        else {
            rtn = winsnstr(self->win,str,n);
            funcname = "winsnstr";
        }
        Py_DECREF(bytesobj);
    }
    if (rtn == ERR) {
        curses_window_set_error(self, funcname, "insnstr");
        return NULL;
    }
    if (use_attr) {
        /* Restore the attributes saved before inserting. */
        rtn = wattrset(self->win, attr_old);
        return curses_window_check_err(self, rtn, "wattrset", "insnstr");
    }
    Py_RETURN_NONE;
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.is_linetouched
line: int
Line number.
/
Return True if the specified line was modified, otherwise return False.
Raise a curses.error exception if line is not valid for the given window.
[clinic start generated code]*/
static PyObject *
_curses_window_is_linetouched_impl(PyCursesWindowObject *self, int line)
/*[clinic end generated code: output=ad4a4edfee2db08c input=af71c040b951c467]*/
{
    /* is_linetouched() returns ERR for an out-of-range line number. */
    int touched = is_linetouched(self->win, line);
    if (touched == ERR) {
        curses_window_set_error(self, "is_linetouched", NULL);
        return NULL;
    }
    return PyBool_FromLong(touched);
}
#ifdef py_is_pad
/*[clinic input]
@permit_long_docstring_body
_curses.window.noutrefresh
[
pminrow: int
pmincol: int
sminrow: int
smincol: int
smaxrow: int
smaxcol: int
]
/
Mark for refresh but wait.
This function updates the data structure representing the desired state of the
window, but does not force an update of the physical screen. To accomplish
that, call doupdate().
[clinic start generated code]*/
static PyObject *
_curses_window_noutrefresh_impl(PyCursesWindowObject *self,
                                int group_right_1, int pminrow, int pmincol,
                                int sminrow, int smincol, int smaxrow,
                                int smaxcol)
/*[clinic end generated code: output=809a1f3c6a03e23e input=b39fe8fc79b9980b]*/
#else
/*[clinic input]
@permit_long_docstring_body
_curses.window.noutrefresh
Mark for refresh but wait.
This function updates the data structure representing the desired state of the
window, but does not force an update of the physical screen. To accomplish
that, call doupdate().
[clinic start generated code]*/
static PyObject *
_curses_window_noutrefresh_impl(PyCursesWindowObject *self)
/*[clinic end generated code: output=6ef6dec666643fee input=6a9f59ae5e4c139e]*/
#endif
{
    int rtn;
#ifdef py_is_pad
    if (py_is_pad(self->win)) {
        /* A pad has no fixed screen position: the six coordinates are
           required to say which part of the pad goes where. */
        if (!group_right_1) {
            PyErr_SetString(PyExc_TypeError,
                            "noutrefresh() called for a pad "
                            "requires 6 arguments");
            return NULL;
        }
        Py_BEGIN_ALLOW_THREADS
        rtn = pnoutrefresh(self->win, pminrow, pmincol,
                           sminrow, smincol, smaxrow, smaxcol);
        Py_END_ALLOW_THREADS
        return curses_window_check_err(self, rtn,
                                       "pnoutrefresh", "noutrefresh");
    }
    /* Conversely, ordinary windows accept no coordinates. */
    if (group_right_1) {
        PyErr_SetString(PyExc_TypeError,
                        "noutrefresh() takes no arguments (6 given)");
        return NULL;
    }
#endif
    Py_BEGIN_ALLOW_THREADS
    rtn = wnoutrefresh(self->win);
    Py_END_ALLOW_THREADS
    return curses_window_check_err(self, rtn, "wnoutrefresh", "noutrefresh");
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.overlay
    destwin: object(type="PyCursesWindowObject *", subclass_of="clinic_state()->window_type")
    [
    sminrow: int
    smincol: int
    dminrow: int
    dmincol: int
    dmaxrow: int
    dmaxcol: int
    ]
    /
Overlay the window on top of destwin.
The windows need not be the same size, only the overlapping region is copied.
This copy is non-destructive, which means that the current background
character does not overwrite the old contents of destwin.
To get fine-grained control over the copied region, the second form of
overlay() can be used. sminrow and smincol are the upper-left coordinates
of the source window, and the other variables mark a rectangle in the
destination window.
[clinic start generated code]*/
static PyObject *
_curses_window_overlay_impl(PyCursesWindowObject *self,
                            PyCursesWindowObject *destwin, int group_right_1,
                            int sminrow, int smincol, int dminrow,
                            int dmincol, int dmaxrow, int dmaxcol)
/*[clinic end generated code: output=82bb2c4cb443ca58 input=dd6af34deb892a65]*/
{
    int rtn;
    /* With the optional coordinate group, copywin() copies just the given
       rectangle; the final TRUE selects the non-destructive (overlay)
       behaviour.  Without the group, overlay() copies the whole
       overlapping region. */
    if (group_right_1) {
        rtn = copywin(self->win, destwin->win, sminrow, smincol,
                      dminrow, dmincol, dmaxrow, dmaxcol, TRUE);
        return curses_window_check_err(self, rtn, "copywin", "overlay");
    }
    else {
        rtn = overlay(self->win, destwin->win);
        return curses_window_check_err(self, rtn, "overlay", NULL);
    }
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.overwrite
    destwin: object(type="PyCursesWindowObject *", subclass_of="clinic_state()->window_type")
    [
    sminrow: int
    smincol: int
    dminrow: int
    dmincol: int
    dmaxrow: int
    dmaxcol: int
    ]
    /
Overwrite the window on top of destwin.
The windows need not be the same size, in which case only the overlapping
region is copied. This copy is destructive, which means that the current
background character overwrites the old contents of destwin.
To get fine-grained control over the copied region, the second form of
overwrite() can be used. sminrow and smincol are the upper-left coordinates
of the source window, the other variables mark a rectangle in the destination
window.
[clinic start generated code]*/
static PyObject *
_curses_window_overwrite_impl(PyCursesWindowObject *self,
                              PyCursesWindowObject *destwin,
                              int group_right_1, int sminrow, int smincol,
                              int dminrow, int dmincol, int dmaxrow,
                              int dmaxcol)
/*[clinic end generated code: output=12ae007d1681be28 input=e84d8ebdf1c09596]*/
{
    int rtn;
    /* Same structure as overlay(), but the final FALSE passed to
       copywin() selects the destructive (overwrite) behaviour. */
    if (group_right_1) {
        rtn = copywin(self->win, destwin->win, sminrow, smincol,
                      dminrow, dmincol, dmaxrow, dmaxcol, FALSE);
        return curses_window_check_err(self, rtn, "copywin", "overwrite");
    }
    else {
        rtn = overwrite(self->win, destwin->win);
        return curses_window_check_err(self, rtn, "overwrite", NULL);
    }
}
/*[clinic input]
_curses.window.putwin
    file: object
    /
Write all data associated with the window into the provided file object.
This information can be later retrieved using the getwin() function.
[clinic start generated code]*/
static PyObject *
_curses_window_putwin_impl(PyCursesWindowObject *self, PyObject *file)
/*[clinic end generated code: output=fdae68ac59b0281b input=0608648e09c8ea0a]*/
{
    /* putwin() can only write to a FILE*, while 'file' may be any Python
       object with a write() method.  Simulate the call by dumping the
       window into a temporary FILE*, then reading the bytes back and
       passing them to file.write(). */
    FILE *fp;
    PyObject *res = NULL;
    fp = tmpfile();
    if (fp == NULL)
        return PyErr_SetFromErrno(PyExc_OSError);
    /* Don't leak the temporary file descriptor to child processes. */
    if (_Py_set_inheritable(fileno(fp), 0, NULL) < 0)
        goto exit;
    res = curses_window_check_err(self, putwin(self->win, fp), "putwin", NULL);
    if (res == NULL)
        goto exit;
    /* Rewind before reading the serialized window data back.
       (Was fseek(fp, 0, 0); SEEK_SET is the idiomatic spelling.) */
    fseek(fp, 0, SEEK_SET);
    while (1) {
        char buf[BUFSIZ];
        Py_ssize_t n = fread(buf, 1, BUFSIZ, fp);
        if (n <= 0)
            break;
        Py_DECREF(res);
        /* "y#" builds a bytes object of length n from buf. */
        res = PyObject_CallMethod(file, "write", "y#", buf, n);
        if (res == NULL)
            break;
    }
exit:
    fclose(fp);
    return res;
}
/*[clinic input]
_curses.window.redrawln
    beg: int
        Starting line number.
    num: int
        The number of lines.
    /
Mark the specified lines corrupted.
They should be completely redrawn on the next refresh() call.
[clinic start generated code]*/
static PyObject *
_curses_window_redrawln_impl(PyCursesWindowObject *self, int beg, int num)
/*[clinic end generated code: output=ea216e334f9ce1b4 input=152155e258a77a7a]*/
{
    /* wredrawln() marks 'num' lines starting at 'beg' as corrupted;
       ERR is mapped to a curses.error by the check helper. */
    int rtn = wredrawln(self->win,beg, num);
    return curses_window_check_err(self, rtn, "wredrawln", "redrawln");
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.refresh
    [
    pminrow: int
    pmincol: int
    sminrow: int
    smincol: int
    smaxrow: int
    smaxcol: int
    ]
    /
Update the display immediately.
Synchronize actual screen with previous drawing/deleting methods.
The 6 optional arguments can only be specified when the window is a pad
created with newpad(). The additional parameters are needed to indicate
what part of the pad and screen are involved. pminrow and pmincol specify
the upper left-hand corner of the rectangle to be displayed in the pad.
sminrow, smincol, smaxrow, and smaxcol specify the edges of the rectangle to
be displayed on the screen. The lower right-hand corner of the rectangle to
be displayed in the pad is calculated from the screen coordinates, since the
rectangles must be the same size. Both rectangles must be entirely contained
within their respective structures. Negative values of pminrow, pmincol,
sminrow, or smincol are treated as if they were zero.
[clinic start generated code]*/
static PyObject *
_curses_window_refresh_impl(PyCursesWindowObject *self, int group_right_1,
                            int pminrow, int pmincol, int sminrow,
                            int smincol, int smaxrow, int smaxcol)
/*[clinic end generated code: output=42199543115e6e63 input=65405c03290496a6]*/
{
    int rtn;
#ifdef py_is_pad
    /* Same argument policy as noutrefresh(): the six rectangle
       coordinates are mandatory for a pad and rejected otherwise.
       group_right_1 is non-zero iff they were supplied. */
    if (py_is_pad(self->win)) {
        if (!group_right_1) {
            PyErr_SetString(PyExc_TypeError,
                            "refresh() for a pad requires 6 arguments");
            return NULL;
        }
        Py_BEGIN_ALLOW_THREADS
        rtn = prefresh(self->win, pminrow, pmincol,
                       sminrow, smincol, smaxrow, smaxcol);
        Py_END_ALLOW_THREADS
        return curses_window_check_err(self, rtn, "prefresh", "refresh");
    }
#endif
    if (group_right_1) {
        PyErr_SetString(PyExc_TypeError,
                        "refresh() takes no arguments (6 given)");
        return NULL;
    }
    Py_BEGIN_ALLOW_THREADS
    rtn = wrefresh(self->win);
    Py_END_ALLOW_THREADS
    return curses_window_check_err(self, rtn, "wrefresh", "refresh");
}
/*[clinic input]
_curses.window.setscrreg
    top: int
        First line number.
    bottom: int
        Last line number.
    /
Define a software scrolling region.
All scrolling actions will take place in this region.
[clinic start generated code]*/
static PyObject *
_curses_window_setscrreg_impl(PyCursesWindowObject *self, int top,
                              int bottom)
/*[clinic end generated code: output=486ab5db218d2b1a input=1b517b986838bf0e]*/
{
    /* wsetscrreg() returns ERR for invalid regions; mapped to an
       exception by the check helper. */
    int rtn = wsetscrreg(self->win, top, bottom);
    return curses_window_check_err(self, rtn, "wsetscrreg", "setscrreg");
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.subwin
    [
    nlines: int = 0
        Height.
    ncols: int = 0
        Width.
    ]
    begin_y: int
        Top side y-coordinate.
    begin_x: int
        Left side x-coordinate.
    /
Create a sub-window (screen-relative coordinates).
By default, the sub-window will extend from the specified position to the
lower right corner of the window.
[clinic start generated code]*/
static PyObject *
_curses_window_subwin_impl(PyCursesWindowObject *self, int group_left_1,
                           int nlines, int ncols, int begin_y, int begin_x)
/*[clinic end generated code: output=93e898afc348f59a input=5292cf610e2f3585]*/
{
    WINDOW *win;
    const char *funcname;
    /* printf("Subwin: %i %i %i %i \n", nlines, ncols, begin_y, begin_x); */
#ifdef py_is_pad
    /* A sub-surface of a pad must be created with subpad() instead of
       subwin(). */
    if (py_is_pad(self->win)) {
        win = subpad(self->win, nlines, ncols, begin_y, begin_x);
        funcname = "subpad";
    }
    else
#endif
    {
        win = subwin(self->win, nlines, ncols, begin_y, begin_x);
        funcname = "subwin";
    }
    if (win == NULL) {
        curses_window_set_null_error(self, funcname, "subwin");
        return NULL;
    }
    /* Wrap the new WINDOW*, inheriting this window's encoding; self is
       also passed (presumably as the parent window -- see
       PyCursesWindow_New). */
    cursesmodule_state *state = get_cursesmodule_state_by_win(self);
    return PyCursesWindow_New(state, win, self->encoding, self);
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.scroll
    [
    lines: int = 1
        Number of lines to scroll.
    ]
    /
Scroll the screen or scrolling region.
Scroll upward if the argument is positive and downward if it is negative.
[clinic start generated code]*/
static PyObject *
_curses_window_scroll_impl(PyCursesWindowObject *self, int group_right_1,
                           int lines)
/*[clinic end generated code: output=4541a8a11852d360 input=386456524c550113]*/
{
    /* group_right_1 is non-zero when the optional line count was given:
       use wscrl() then, and plain scroll() (a single line) otherwise. */
    int code;
    const char *called;
    if (group_right_1) {
        code = wscrl(self->win, lines);
        called = "wscrl";
    }
    else {
        code = scroll(self->win);
        called = "scroll";
    }
    return curses_window_check_err(self, code, called, "scroll");
}
/*[clinic input]
@permit_long_docstring_body
_curses.window.touchline
    start: int
    count: int
    [
    changed: bool = True
    ]
    /
Pretend count lines have been changed, starting with line start.
If changed is supplied, it specifies whether the affected lines are marked
as having been changed (changed=True) or unchanged (changed=False).
[clinic start generated code]*/
static PyObject *
_curses_window_touchline_impl(PyCursesWindowObject *self, int start,
                              int count, int group_right_1, int changed)
/*[clinic end generated code: output=65d05b3f7438c61d input=36e13b6f5eb591f5]*/
{
    /* When the optional 'changed' flag is present (group_right_1 set),
       wtouchln() can mark the lines either changed or unchanged;
       without it, touchline() is used. */
    int code;
    const char *called;
    if (group_right_1) {
        code = wtouchln(self->win, start, count, changed);
        called = "wtouchln";
    }
    else {
        code = touchline(self->win, start, count);
        called = "touchline";
    }
    return curses_window_check_err(self, code, called, "touchline");
}
/*[clinic input]
_curses.window.vline
    [
    y: int
        Starting Y-coordinate.
    x: int
        Starting X-coordinate.
    ]
    ch: object
        Character to draw.
    n: int
        Line length.
    [
    attr: long(c_default="A_NORMAL") = _curses.A_NORMAL
        Attributes for the character.
    ]
    /
Display a vertical line.
[clinic start generated code]*/
static PyObject *
_curses_window_vline_impl(PyCursesWindowObject *self, int group_left_1,
                          int y, int x, PyObject *ch, int n,
                          int group_right_1, long attr)
/*[clinic end generated code: output=287ad1cc8982217f input=a6f2dc86a4648b32]*/
{
    chtype ch_;
    /* Convert the Python object to a curses chtype; the helper sets an
       exception and returns false on failure. */
    if (!PyCurses_ConvertToChtype(self, ch, &ch_))
        return NULL;
    /* If a start position (y, x) was supplied, move the cursor there
       before drawing. */
    if (group_left_1) {
        if (wmove(self->win, y, x) == ERR) {
            curses_window_set_error(self, "wmove", "vline");
            return NULL;
        }
    }
    int rtn = wvline(self->win, ch_ | (attr_t)attr, n);
    return curses_window_check_err(self, rtn, "wvline", "vline");
}
static PyObject *
PyCursesWindow_get_encoding(PyObject *op, void *closure)
{
    /* Getter for window.encoding: return the encoding name as a str. */
    PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);
    return PyUnicode_FromString(self->encoding);
}
static int
PyCursesWindow_set_encoding(PyObject *op, PyObject *value, void *Py_UNUSED(ignored))
{
    /* Setter for window.encoding: validate the new value and replace the
       window's owned copy of the encoding name. */
    PyCursesWindowObject *self = _PyCursesWindowObject_CAST(op);
    PyObject *ascii;
    char *encoding;
    /* It is illegal to del win.encoding */
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError,
                        "encoding may not be deleted");
        return -1;
    }
    if (!PyUnicode_Check(value)) {
        PyErr_SetString(PyExc_TypeError,
                        "setting encoding to a non-string");
        return -1;
    }
    /* Encoding names must be ASCII; this conversion also validates. */
    ascii = PyUnicode_AsASCIIString(value);
    if (ascii == NULL)
        return -1;
    /* Keep a private heap copy; the window owns self->encoding. */
    encoding = _PyMem_Strdup(PyBytes_AS_STRING(ascii));
    Py_DECREF(ascii);
    if (encoding == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    PyMem_Free(self->encoding);
    self->encoding = encoding;
    return 0;
}
#define clinic_state() (get_cursesmodule_state_by_cls(Py_TYPE(self)))
#include "clinic/_cursesmodule.c.h"
#undef clinic_state
/* Method table for the window type.  The *_METHODDEF entries are emitted
   by Argument Clinic (see clinic/_cursesmodule.c.h included above); the
   plain entries wrap hand-written PyCursesWindow_* functions.  Some
   hand-written wrappers (chgat, getstr, instr) still reuse the
   clinic-generated docstrings. */
static PyMethodDef PyCursesWindow_methods[] = {
    _CURSES_WINDOW_ADDCH_METHODDEF
    _CURSES_WINDOW_ADDNSTR_METHODDEF
    _CURSES_WINDOW_ADDSTR_METHODDEF
    _CURSES_WINDOW_ATTROFF_METHODDEF
    _CURSES_WINDOW_ATTRON_METHODDEF
    _CURSES_WINDOW_ATTRSET_METHODDEF
    _CURSES_WINDOW_BKGD_METHODDEF
#ifdef HAVE_CURSES_WCHGAT
    {
        "chgat", PyCursesWindow_ChgAt, METH_VARARGS,
        _curses_window_chgat__doc__
    },
#endif
    _CURSES_WINDOW_BKGDSET_METHODDEF
    _CURSES_WINDOW_BORDER_METHODDEF
    _CURSES_WINDOW_BOX_METHODDEF
    {"clear", PyCursesWindow_wclear, METH_NOARGS},
    {"clearok", PyCursesWindow_clearok, METH_VARARGS},
    {"clrtobot", PyCursesWindow_wclrtobot, METH_NOARGS},
    {"clrtoeol", PyCursesWindow_wclrtoeol, METH_NOARGS},
    {"cursyncup", PyCursesWindow_wcursyncup, METH_NOARGS},
    _CURSES_WINDOW_DELCH_METHODDEF
    {"deleteln", PyCursesWindow_wdeleteln, METH_NOARGS},
    _CURSES_WINDOW_DERWIN_METHODDEF
    _CURSES_WINDOW_ECHOCHAR_METHODDEF
    _CURSES_WINDOW_ENCLOSE_METHODDEF
    {"erase", PyCursesWindow_werase, METH_NOARGS},
    {"getbegyx", PyCursesWindow_getbegyx, METH_NOARGS},
    _CURSES_WINDOW_GETBKGD_METHODDEF
    _CURSES_WINDOW_GETCH_METHODDEF
    _CURSES_WINDOW_GETKEY_METHODDEF
    _CURSES_WINDOW_GET_WCH_METHODDEF
    {"getmaxyx", PyCursesWindow_getmaxyx, METH_NOARGS},
    {"getparyx", PyCursesWindow_getparyx, METH_NOARGS},
    {
        "getstr", PyCursesWindow_getstr, METH_VARARGS,
        _curses_window_getstr__doc__
    },
    {"getyx", PyCursesWindow_getyx, METH_NOARGS},
    _CURSES_WINDOW_HLINE_METHODDEF
    {"idcok", PyCursesWindow_idcok, METH_VARARGS},
    {"idlok", PyCursesWindow_idlok, METH_VARARGS},
#ifdef HAVE_CURSES_IMMEDOK
    {"immedok", PyCursesWindow_immedok, METH_VARARGS},
#endif
    _CURSES_WINDOW_INCH_METHODDEF
    _CURSES_WINDOW_INSCH_METHODDEF
    {"insdelln", PyCursesWindow_winsdelln, METH_VARARGS},
    {"insertln", PyCursesWindow_winsertln, METH_NOARGS},
    _CURSES_WINDOW_INSNSTR_METHODDEF
    _CURSES_WINDOW_INSSTR_METHODDEF
    {
        "instr", PyCursesWindow_instr, METH_VARARGS,
        _curses_window_instr__doc__
    },
    _CURSES_WINDOW_IS_LINETOUCHED_METHODDEF
    {"is_wintouched", PyCursesWindow_is_wintouched, METH_NOARGS},
    {"keypad", PyCursesWindow_keypad, METH_VARARGS},
    {"leaveok", PyCursesWindow_leaveok, METH_VARARGS},
    {"move", PyCursesWindow_wmove, METH_VARARGS},
    {"mvderwin", PyCursesWindow_mvderwin, METH_VARARGS},
    {"mvwin", PyCursesWindow_mvwin, METH_VARARGS},
    {"nodelay", PyCursesWindow_nodelay, METH_VARARGS},
    {"notimeout", PyCursesWindow_notimeout, METH_VARARGS},
    _CURSES_WINDOW_NOUTREFRESH_METHODDEF
    _CURSES_WINDOW_OVERLAY_METHODDEF
    _CURSES_WINDOW_OVERWRITE_METHODDEF
    _CURSES_WINDOW_PUTWIN_METHODDEF
    _CURSES_WINDOW_REDRAWLN_METHODDEF
    {"redrawwin", PyCursesWindow_redrawwin, METH_NOARGS},
    _CURSES_WINDOW_REFRESH_METHODDEF
#ifndef STRICT_SYSV_CURSES
    {"resize", PyCursesWindow_wresize, METH_VARARGS},
#endif
    _CURSES_WINDOW_SCROLL_METHODDEF
    {"scrollok", PyCursesWindow_scrollok, METH_VARARGS},
    _CURSES_WINDOW_SETSCRREG_METHODDEF
    {"standend", PyCursesWindow_wstandend, METH_NOARGS},
    {"standout", PyCursesWindow_wstandout, METH_NOARGS},
    /* subpad deliberately shares the subwin implementation and docstring. */
    {"subpad", _curses_window_subwin, METH_VARARGS, _curses_window_subwin__doc__},
    _CURSES_WINDOW_SUBWIN_METHODDEF
    {"syncdown", PyCursesWindow_wsyncdown, METH_NOARGS},
#ifdef HAVE_CURSES_SYNCOK
    {"syncok", PyCursesWindow_syncok, METH_VARARGS},
#endif
    {"syncup", PyCursesWindow_wsyncup, METH_NOARGS},
    {"timeout", PyCursesWindow_wtimeout, METH_VARARGS},
    _CURSES_WINDOW_TOUCHLINE_METHODDEF
    {"touchwin", PyCursesWindow_touchwin, METH_NOARGS},
    {"untouchwin", PyCursesWindow_untouchwin, METH_NOARGS},
    _CURSES_WINDOW_VLINE_METHODDEF
    {NULL, NULL} /* sentinel */
};
/* Attribute descriptors for the window type. */
static PyGetSetDef PyCursesWindow_getsets[] = {
    {
        "encoding",
        PyCursesWindow_get_encoding,
        PyCursesWindow_set_encoding,
        /* Fixed docstring: the previous text ("the typecode character
           used to create the array") was copy-pasted from arraymodule.c
           and did not describe this attribute. */
        "the string encoding used by the window"
    },
    {NULL, NULL, NULL, NULL } /* sentinel */
};
/* Heap-type definition for _curses.window.  Instantiation from Python is
   disallowed; windows are created via module functions such as newwin(). */
static PyType_Slot PyCursesWindow_Type_slots[] = {
    {Py_tp_methods, PyCursesWindow_methods},
    {Py_tp_getset, PyCursesWindow_getsets},
    {Py_tp_dealloc, PyCursesWindow_dealloc},
    {Py_tp_traverse, PyCursesWindow_traverse},
    {0, NULL}
};
static PyType_Spec PyCursesWindow_Type_spec = {
    .name = "_curses.window",
    .basicsize = sizeof(PyCursesWindowObject),
    .flags = Py_TPFLAGS_DEFAULT
             | Py_TPFLAGS_DISALLOW_INSTANTIATION
             | Py_TPFLAGS_IMMUTABLETYPE
             | Py_TPFLAGS_HEAPTYPE
             | Py_TPFLAGS_HAVE_GC,
    .slots = PyCursesWindow_Type_slots
};
/* -------------------------------------------------------*/
/*
* Macros for implementing simple module's methods.
*
* Parameters
*
* X The name of the curses C function or macro to invoke.
* FLAG When false, prefixes the function name with 'no' at runtime,
* This parameter is present in the signature and auto-generated
* by Argument Clinic.
*
* These macros should only be used for generating the body of
* the module's methods since they need a module reference.
*
* The Python function name must be the same as the curses function name (X).
*/
/* X() returns ERR on failure; map that to a curses.error. */
#define NoArgNoReturnFunctionBody(X) \
{ \
    PyCursesStatefulInitialised(module); \
    return curses_check_err(module, X(), # X, NULL); \
}
/* Call X() when FLAG is true, otherwise its 'no'-prefixed counterpart
   (e.g. cbreak()/nocbreak()). */
#define NoArgOrFlagNoReturnFunctionBody(X, FLAG) \
{ \
    PyCursesStatefulInitialised(module); \
    int rtn; \
    const char *funcname; \
    if (FLAG) { \
        rtn = X(); \
        funcname = # X; \
    } \
    else { \
        rtn = no ## X(); \
        funcname = "no" # X; \
    } \
    return curses_check_err(module, rtn, funcname, # X); \
}
/* X() returns an int; ERR becomes a curses.error, anything else a
   Python int. */
#define NoArgReturnIntFunctionBody(X) \
{ \
    PyCursesStatefulInitialised(module); \
    int rtn = X(); \
    if (rtn == ERR) { \
        curses_set_error(module, # X, NULL); \
        return NULL; \
    } \
    return PyLong_FromLong(rtn); \
}
/* X() returns a C string; NULL becomes a curses.error, anything else a
   Python bytes object. */
#define NoArgReturnStringFunctionBody(X) \
{ \
    PyCursesStatefulInitialised(module); \
    const char *res = X(); \
    if (res == NULL) { \
        curses_set_null_error(module, # X, NULL); \
        return NULL; \
    } \
    return PyBytes_FromString(res); \
}
/* X() returns a truth value; no failure case is checked. */
#define NoArgTrueFalseFunctionBody(X) \
{ \
    PyCursesStatefulInitialised(module); \
    return PyBool_FromLong(X()); \
}
/* X() returns nothing; always return None. */
#define NoArgNoReturnVoidFunctionBody(X) \
{ \
    PyCursesStatefulInitialised(module); \
    X(); \
    Py_RETURN_NONE; \
}
/*********************************************************************
Global Functions
**********************************************************************/
#ifdef HAVE_CURSES_FILTER
/*[clinic input]
_curses.filter
[clinic start generated code]*/
static PyObject *
_curses_filter_impl(PyObject *module)
/*[clinic end generated code: output=fb5b8a3642eb70b5 input=668c75a6992d3624]*/
{
    /* not checking for PyCursesInitialised here since filter() must
       be called before initscr() */
    filter();
    Py_RETURN_NONE;
}
#endif
/* The module functions below expand the NoArg*FunctionBody macros defined
   earlier; the macro argument names the curses function to call. */
/*[clinic input]
_curses.baudrate
Return the output speed of the terminal in bits per second.
[clinic start generated code]*/
static PyObject *
_curses_baudrate_impl(PyObject *module)
/*[clinic end generated code: output=3c63c6c401d7d9c0 input=921f022ed04a0fd9]*/
NoArgReturnIntFunctionBody(baudrate)
/*[clinic input]
_curses.beep
Emit a short attention sound.
[clinic start generated code]*/
static PyObject *
_curses_beep_impl(PyObject *module)
/*[clinic end generated code: output=425274962abe49a2 input=a35698ca7d0162bc]*/
NoArgNoReturnFunctionBody(beep)
/*[clinic input]
@permit_long_summary
_curses.can_change_color
Return True if the programmer can change the colors displayed by the terminal.
[clinic start generated code]*/
static PyObject *
_curses_can_change_color_impl(PyObject *module)
/*[clinic end generated code: output=359df8c3c77d8bf1 input=8315c364ba1e5b4c]*/
NoArgTrueFalseFunctionBody(can_change_color)
/*[clinic input]
_curses.cbreak
    flag: bool = True
        If false, the effect is the same as calling nocbreak().
    /
Enter cbreak mode.
In cbreak mode (sometimes called "rare" mode) normal tty line buffering is
turned off and characters are available to be read one by one. However,
unlike raw mode, special characters (interrupt, quit, suspend, and flow
control) retain their effects on the tty driver and calling program.
Calling first raw() then cbreak() leaves the terminal in cbreak mode.
[clinic start generated code]*/
static PyObject *
_curses_cbreak_impl(PyObject *module, int flag)
/*[clinic end generated code: output=9f9dee9664769751 input=c7d0bddda93016c1]*/
NoArgOrFlagNoReturnFunctionBody(cbreak, flag)
/*[clinic input]
@permit_long_docstring_body
_curses.color_content
    color_number: color
        The number of the color (0 - (COLORS-1)).
    /
Return the red, green, and blue (RGB) components of the specified color.
A 3-tuple is returned, containing the R, G, B values for the given color,
which will be between 0 (no component) and 1000 (maximum amount of component).
[clinic start generated code]*/
static PyObject *
_curses_color_content_impl(PyObject *module, int color_number)
/*[clinic end generated code: output=17b466df7054e0de input=baffe25b351eb916]*/
{
    _CURSES_COLOR_VAL_TYPE r,g,b;
    PyCursesStatefulInitialised(module);
    PyCursesStatefulInitialisedColor(module);
    /* _COLOR_CONTENT_FUNC is a build-time selected curses function
       (presumably color_content or its extended variant -- see the macro
       definition earlier in the file). */
    if (_COLOR_CONTENT_FUNC(color_number, &r, &g, &b) == ERR) {
        const char *funcname = Py_STRINGIFY(_COLOR_CONTENT_FUNC);
        curses_set_error(module, funcname, "color_content");
        return NULL;
    }
    return Py_BuildValue("(iii)", r, g, b);
}
/*[clinic input]
_curses.color_pair
    pair_number: int
        The number of the color pair.
    /
Return the attribute value for displaying text in the specified color.
This attribute value can be combined with A_STANDOUT, A_REVERSE, and the
other A_* attributes. pair_number() is the counterpart to this function.
[clinic start generated code]*/
static PyObject *
_curses_color_pair_impl(PyObject *module, int pair_number)
/*[clinic end generated code: output=60718abb10ce9feb input=6034e9146f343802]*/
{
    PyCursesStatefulInitialised(module);
    PyCursesStatefulInitialisedColor(module);
    /* COLOR_PAIR() maps a pair number to an attribute value. */
    return PyLong_FromLong(COLOR_PAIR(pair_number));
}
/*[clinic input]
_curses.curs_set
    visibility: int
        0 for invisible, 1 for normal visible, or 2 for very visible.
    /
Set the cursor state.
If the terminal supports the visibility requested, the previous cursor
state is returned; otherwise, an exception is raised. On many terminals,
the "visible" mode is an underline cursor and the "very visible" mode is
a block cursor.
[clinic start generated code]*/
static PyObject *
_curses_curs_set_impl(PyObject *module, int visibility)
/*[clinic end generated code: output=ee8e62483b1d6cd4 input=81a7924a65d29504]*/
{
    int erg;
    PyCursesStatefulInitialised(module);
    /* curs_set() returns the previous visibility, or ERR if the terminal
       does not support the requested mode. */
    erg = curs_set(visibility);
    if (erg == ERR) {
        curses_set_error(module, "curs_set", NULL);
        return NULL;
    }
    return PyLong_FromLong((long) erg);
}
/* Terminal-mode save wrappers and output helpers; bodies expand the
   NoArg*FunctionBody macros defined above. */
/*[clinic input]
_curses.def_prog_mode
Save the current terminal mode as the "program" mode.
The "program" mode is the mode when the running program is using curses.
Subsequent calls to reset_prog_mode() will restore this mode.
[clinic start generated code]*/
static PyObject *
_curses_def_prog_mode_impl(PyObject *module)
/*[clinic end generated code: output=05d5a351fff874aa input=768b9cace620dda5]*/
NoArgNoReturnFunctionBody(def_prog_mode)
/*[clinic input]
_curses.def_shell_mode
Save the current terminal mode as the "shell" mode.
The "shell" mode is the mode when the running program is not using curses.
Subsequent calls to reset_shell_mode() will restore this mode.
[clinic start generated code]*/
static PyObject *
_curses_def_shell_mode_impl(PyObject *module)
/*[clinic end generated code: output=d6e42f5c768f860f input=5ead21f6f0baa894]*/
NoArgNoReturnFunctionBody(def_shell_mode)
/*[clinic input]
_curses.delay_output
    ms: int
        Duration in milliseconds.
    /
Insert a pause in output.
[clinic start generated code]*/
static PyObject *
_curses_delay_output_impl(PyObject *module, int ms)
/*[clinic end generated code: output=b6613a67f17fa4f4 input=5316457f5f59196c]*/
{
    PyCursesStatefulInitialised(module);
    return curses_check_err(module, delay_output(ms), "delay_output", NULL);
}
/*[clinic input]
_curses.doupdate
Update the physical screen to match the virtual screen.
[clinic start generated code]*/
static PyObject *
_curses_doupdate_impl(PyObject *module)
/*[clinic end generated code: output=f34536975a75680c input=8da80914432a6489]*/
NoArgNoReturnFunctionBody(doupdate)
/*[clinic input]
_curses.echo
    flag: bool = True
        If false, the effect is the same as calling noecho().
    /
Enter echo mode.
In echo mode, each character input is echoed to the screen as it is entered.
[clinic start generated code]*/
static PyObject *
_curses_echo_impl(PyObject *module, int flag)
/*[clinic end generated code: output=03acb2ddfa6c8729 input=86cd4d5bb1d569c0]*/
NoArgOrFlagNoReturnFunctionBody(echo, flag)
/*[clinic input]
_curses.endwin
De-initialize the library, and return terminal to normal status.
[clinic start generated code]*/
static PyObject *
_curses_endwin_impl(PyObject *module)
/*[clinic end generated code: output=c0150cd96d2f4128 input=e172cfa43062f3fa]*/
NoArgNoReturnFunctionBody(endwin)
/*[clinic input]
_curses.erasechar
Return the user's current erase character.
[clinic start generated code]*/
static PyObject *
_curses_erasechar_impl(PyObject *module)
/*[clinic end generated code: output=3df305dc6b926b3f input=628c136c3c5758d3]*/
{
    char ch;
    PyCursesStatefulInitialised(module);
    /* Return the single erase character as a 1-byte bytes object. */
    ch = erasechar();
    return PyBytes_FromStringAndSize(&ch, 1);
}
/*[clinic input]
@permit_long_docstring_body
_curses.flash
Flash the screen.
That is, change it to reverse-video and then change it back in a short interval.
[clinic start generated code]*/
static PyObject *
_curses_flash_impl(PyObject *module)
/*[clinic end generated code: output=488b8a0ebd9ea9b8 input=dd33d718e6edf436]*/
NoArgNoReturnFunctionBody(flash)
/*[clinic input]
_curses.flushinp
Flush all input buffers.
This throws away any typeahead that has been typed by the user and has not
yet been processed by the program.
[clinic start generated code]*/
static PyObject *
_curses_flushinp_impl(PyObject *module)
/*[clinic end generated code: output=7e7a1fc1473960f5 input=59d042e705cef5ec]*/
NoArgNoReturnVoidFunctionBody(flushinp)
#ifdef getsyx
/*[clinic input]
_curses.getsyx
Return the current coordinates of the virtual screen cursor.
Return a (y, x) tuple. If leaveok is currently true, return (-1, -1).
[clinic start generated code]*/
static PyObject *
_curses_getsyx_impl(PyObject *module)
/*[clinic end generated code: output=c8e6c3f42349a038 input=9e1f862f3b4f7cba]*/
{
    int x = 0;
    int y = 0;
    PyCursesStatefulInitialised(module);
    /* getsyx() is a macro (see the surrounding #ifdef) that stores the
       coordinates into y and x; there is no error return to check. */
    getsyx(y, x);
    return Py_BuildValue("(ii)", y, x);
}
#endif
#ifdef NCURSES_MOUSE_VERSION
/*[clinic input]
_curses.getmouse
Retrieve the queued mouse event.
After getch() returns KEY_MOUSE to signal a mouse event, this function
returns a 5-tuple (id, x, y, z, bstate).
[clinic start generated code]*/
static PyObject *
_curses_getmouse_impl(PyObject *module)
/*[clinic end generated code: output=ccf4242546b9cfa8 input=5b756ee6f5b481b1]*/
{
    int rtn;
    MEVENT event;
    PyCursesStatefulInitialised(module);
    rtn = getmouse( &event );
    if (rtn == ERR) {
        curses_set_error(module, "getmouse", NULL);
        return NULL;
    }
    /* Pack the MEVENT fields as (short, int, int, int, unsigned long). */
    return Py_BuildValue("(hiiik)",
                         (short)event.id,
                         (int)event.x, (int)event.y, (int)event.z,
                         (unsigned long) event.bstate);
}
/*[clinic input]
_curses.ungetmouse
    id: short
    x: int
    y: int
    z: int
    bstate: unsigned_long(bitwise=True)
    /
Push a KEY_MOUSE event onto the input queue.
The following getmouse() will return the given state data.
[clinic start generated code]*/
static PyObject *
_curses_ungetmouse_impl(PyObject *module, short id, int x, int y, int z,
                        unsigned long bstate)
/*[clinic end generated code: output=3430c9b0fc5c4341 input=fd650b2ca5a01e8f]*/
{
    /* Build an MEVENT from the arguments and push it back on the queue. */
    MEVENT event;
    PyCursesStatefulInitialised(module);
    event.id = id;
    event.x = x;
    event.y = y;
    event.z = z;
    event.bstate = bstate;
    return curses_check_err(module, ungetmouse(&event), "ungetmouse", NULL);
}
#endif
/*[clinic input]
_curses.getwin
    file: object
    /
Read window related data stored in the file by an earlier putwin() call.
The routine then creates and initializes a new window using that data,
returning the new window object.
[clinic start generated code]*/
static PyObject *
_curses_getwin(PyObject *module, PyObject *file)
/*[clinic end generated code: output=a79e0df3379af756 input=f713d2bba0e4c929]*/
{
    /* getwin() can only read from a FILE*, while 'file' may be any Python
       object with a read() method.  Copy the Python-level bytes into a
       temporary FILE* and let getwin() parse that. */
    FILE *fp;
    PyObject *data;
    size_t datalen;
    WINDOW *win;
    PyObject *res = NULL;
    PyCursesStatefulInitialised(module);
    fp = tmpfile();
    if (fp == NULL)
        return PyErr_SetFromErrno(PyExc_OSError);
    /* Don't leak the temporary file descriptor to child processes. */
    if (_Py_set_inheritable(fileno(fp), 0, NULL) < 0)
        goto error;
    data = PyObject_CallMethod(file, "read", NULL);
    if (data == NULL)
        goto error;
    if (!PyBytes_Check(data)) {
        PyErr_Format(PyExc_TypeError,
                     "f.read() returned %.100s instead of bytes",
                     Py_TYPE(data)->tp_name);
        Py_DECREF(data);
        goto error;
    }
    datalen = PyBytes_GET_SIZE(data);
    if (fwrite(PyBytes_AS_STRING(data), 1, datalen, fp) != datalen) {
        PyErr_SetFromErrno(PyExc_OSError);
        Py_DECREF(data);
        goto error;
    }
    Py_DECREF(data);
    /* Rewind so getwin() reads from the start of the dump.
       (Was fseek(fp, 0, 0); SEEK_SET is the idiomatic spelling.) */
    fseek(fp, 0, SEEK_SET);
    win = getwin(fp);
    if (win == NULL) {
        curses_set_null_error(module, "getwin", NULL);
        goto error;
    }
    cursesmodule_state *state = get_cursesmodule_state(module);
    res = PyCursesWindow_New(state, win, NULL, NULL);
error:
    fclose(fp);
    return res;
}
/*[clinic input]
_curses.halfdelay
    tenths: byte
        Maximal blocking delay in tenths of seconds (1 - 255).
    /
Enter half-delay mode.
Use nocbreak() to leave half-delay mode.
[clinic start generated code]*/
static PyObject *
_curses_halfdelay_impl(PyObject *module, unsigned char tenths)
/*[clinic end generated code: output=e92cdf0ef33c0663 input=e42dce7259c15100]*/
{
    PyCursesStatefulInitialised(module);
    return curses_check_err(module, halfdelay(tenths), "halfdelay", NULL);
}
/* Terminal-capability predicates; bodies expand NoArgTrueFalseFunctionBody. */
/*[clinic input]
_curses.has_colors
Return True if the terminal can display colors; otherwise, return False.
[clinic start generated code]*/
static PyObject *
_curses_has_colors_impl(PyObject *module)
/*[clinic end generated code: output=db5667483139e3e2 input=b2ec41b739d896c6]*/
NoArgTrueFalseFunctionBody(has_colors)
/*[clinic input]
_curses.has_ic
Return True if the terminal has insert- and delete-character capabilities.
[clinic start generated code]*/
static PyObject *
_curses_has_ic_impl(PyObject *module)
/*[clinic end generated code: output=6be24da9cb1268fe input=9bc2d3a797cc7324]*/
NoArgTrueFalseFunctionBody(has_ic)
/*[clinic input]
_curses.has_il
Return True if the terminal has insert- and delete-line capabilities.
[clinic start generated code]*/
static PyObject *
_curses_has_il_impl(PyObject *module)
/*[clinic end generated code: output=d45bd7788ff9f5f4 input=cd939d5607ee5427]*/
NoArgTrueFalseFunctionBody(has_il)
#ifdef HAVE_CURSES_HAS_KEY
/*[clinic input]
_curses.has_key
    key: int
        Key number.
    /
Return True if the current terminal type recognizes a key with that value.
[clinic start generated code]*/
static PyObject *
_curses_has_key_impl(PyObject *module, int key)
/*[clinic end generated code: output=19ad48319414d0b1 input=78bd44acf1a4997c]*/
{
    PyCursesStatefulInitialised(module);
    return PyBool_FromLong(has_key(key));
}
#endif
/*[clinic input]
_curses.init_color
    color_number: color
        The number of the color to be changed (0 - (COLORS-1)).
    r: component
        Red component (0 - 1000).
    g: component
        Green component (0 - 1000).
    b: component
        Blue component (0 - 1000).
    /
Change the definition of a color.
When init_color() is used, all occurrences of that color on the screen
immediately change to the new definition. This function is a no-op on
most terminals; it is active only if can_change_color() returns true.
[clinic start generated code]*/
static PyObject *
_curses_init_color_impl(PyObject *module, int color_number, short r, short g,
                        short b)
/*[clinic end generated code: output=d7ed71b2d818cdf2 input=ae2b8bea0f152c80]*/
{
    PyCursesStatefulInitialised(module);
    PyCursesStatefulInitialisedColor(module);
    /* _CURSES_INIT_COLOR_FUNC is a build-time selected curses function
       (presumably init_color or its extended variant -- see the macro
       definition earlier in the file). */
    return curses_check_err(module,
                            _CURSES_INIT_COLOR_FUNC(color_number, r, g, b),
                            Py_STRINGIFY(_CURSES_INIT_COLOR_FUNC),
                            NULL);
}
/*[clinic input]
_curses.init_pair
    pair_number: pair
        The number of the color-pair to be changed (1 - (COLOR_PAIRS-1)).
    fg: color_allow_default
        Foreground color number (-1 - (COLORS-1)).
    bg: color_allow_default
        Background color number (-1 - (COLORS-1)).
    /
Change the definition of a color-pair.
If the color-pair was previously initialized, the screen is refreshed and
all occurrences of that color-pair are changed to the new definition.
[clinic start generated code]*/
static PyObject *
_curses_init_pair_impl(PyObject *module, int pair_number, int fg, int bg)
/*[clinic end generated code: output=a0bba03d2bbc3ee6 input=54b421b44c12c389]*/
{
    PyCursesStatefulInitialised(module);
    PyCursesStatefulInitialisedColor(module);
    if (_CURSES_INIT_PAIR_FUNC(pair_number, fg, bg) == ERR) {
        /* Give a more precise error message for the common failure mode
           of an out-of-range pair number. */
        if (pair_number >= COLOR_PAIRS) {
            PyErr_Format(PyExc_ValueError,
                         "Color pair is greater than COLOR_PAIRS-1 (%d).",
                         COLOR_PAIRS - 1);
        }
        else {
            const char *funcname = Py_STRINGIFY(_CURSES_INIT_PAIR_FUNC);
            curses_set_error(module, funcname, "init_pair");
        }
        return NULL;
    }
    Py_RETURN_NONE;
}
/*[clinic input]
_curses.initscr

Initialize the library.

Return a WindowObject which represents the whole screen.
[clinic start generated code]*/

static PyObject *
_curses_initscr_impl(PyObject *module)
/*[clinic end generated code: output=619fb68443810b7b input=514f4bce1821f6b5]*/
{
    WINDOW *win;

    /* initscr() must only be called once per process: on repeated calls,
       just refresh stdscr and hand out a fresh wrapper for it. */
    if (curses_initscr_called) {
        cursesmodule_state *state = get_cursesmodule_state(module);
        int code = wrefresh(stdscr);
        if (code == ERR) {
            _curses_set_null_error(state, "wrefresh", "initscr");
            return NULL;
        }
        return PyCursesWindow_New(state, stdscr, NULL, NULL);
    }

    win = initscr();
    if (win == NULL) {
        curses_set_null_error(module, "initscr", NULL);
        return NULL;
    }

    /* A successful initscr() also performs terminal setup, so both
       module-level flags are set together. */
    curses_initscr_called = curses_setupterm_called = TRUE;

    PyObject *module_dict = PyModule_GetDict(module); // borrowed
    if (module_dict == NULL) {
        return NULL;
    }

/* This was moved from initcurses() because it core dumped on SGI,
   where they're not defined until you've called initscr() */
/* Helper: store an int constant in the module dict; bails out of the
   enclosing function with NULL on failure. */
#define SetDictInt(NAME, VALUE)                                     \
    do {                                                            \
        PyObject *value = PyLong_FromLong((long)(VALUE));           \
        if (value == NULL) {                                        \
            return NULL;                                            \
        }                                                           \
        int rc = PyDict_SetItemString(module_dict, (NAME), value);  \
        Py_DECREF(value);                                           \
        if (rc < 0) {                                               \
            return NULL;                                            \
        }                                                           \
    } while (0)

    /* Here are some graphic symbols you can use */
    SetDictInt("ACS_ULCORNER", (ACS_ULCORNER));
    SetDictInt("ACS_LLCORNER", (ACS_LLCORNER));
    SetDictInt("ACS_URCORNER", (ACS_URCORNER));
    SetDictInt("ACS_LRCORNER", (ACS_LRCORNER));
    SetDictInt("ACS_LTEE",     (ACS_LTEE));
    SetDictInt("ACS_RTEE",     (ACS_RTEE));
    SetDictInt("ACS_BTEE",     (ACS_BTEE));
    SetDictInt("ACS_TTEE",     (ACS_TTEE));
    SetDictInt("ACS_HLINE",    (ACS_HLINE));
    SetDictInt("ACS_VLINE",    (ACS_VLINE));
    SetDictInt("ACS_PLUS",     (ACS_PLUS));
#if !defined(__hpux) || defined(HAVE_NCURSES_H)
    /* On HP/UX 11, these are of type cchar_t, which is not an
       integral type. If this is a problem on more platforms, a
       configure test should be added to determine whether ACS_S1
       is of integral type. */
    SetDictInt("ACS_S1",       (ACS_S1));
    SetDictInt("ACS_S9",       (ACS_S9));
    SetDictInt("ACS_DIAMOND",  (ACS_DIAMOND));
    SetDictInt("ACS_CKBOARD",  (ACS_CKBOARD));
    SetDictInt("ACS_DEGREE",   (ACS_DEGREE));
    SetDictInt("ACS_PLMINUS",  (ACS_PLMINUS));
    SetDictInt("ACS_BULLET",   (ACS_BULLET));
    SetDictInt("ACS_LARROW",   (ACS_LARROW));
    SetDictInt("ACS_RARROW",   (ACS_RARROW));
    SetDictInt("ACS_DARROW",   (ACS_DARROW));
    SetDictInt("ACS_UARROW",   (ACS_UARROW));
    SetDictInt("ACS_BOARD",    (ACS_BOARD));
    SetDictInt("ACS_LANTERN",  (ACS_LANTERN));
    SetDictInt("ACS_BLOCK",    (ACS_BLOCK));
#endif
    /* SystemV-style aliases for the corner/tee/line symbols above. */
    SetDictInt("ACS_BSSB",     (ACS_ULCORNER));
    SetDictInt("ACS_SSBB",     (ACS_LLCORNER));
    SetDictInt("ACS_BBSS",     (ACS_URCORNER));
    SetDictInt("ACS_SBBS",     (ACS_LRCORNER));
    SetDictInt("ACS_SBSS",     (ACS_RTEE));
    SetDictInt("ACS_SSSB",     (ACS_LTEE));
    SetDictInt("ACS_SSBS",     (ACS_BTEE));
    SetDictInt("ACS_BSSS",     (ACS_TTEE));
    SetDictInt("ACS_BSBS",     (ACS_HLINE));
    SetDictInt("ACS_SBSB",     (ACS_VLINE));
    SetDictInt("ACS_SSSS",     (ACS_PLUS));
    /* The following are never available with strict SYSV curses */
#ifdef ACS_S3
    SetDictInt("ACS_S3",       (ACS_S3));
#endif
#ifdef ACS_S7
    SetDictInt("ACS_S7",       (ACS_S7));
#endif
#ifdef ACS_LEQUAL
    SetDictInt("ACS_LEQUAL",   (ACS_LEQUAL));
#endif
#ifdef ACS_GEQUAL
    SetDictInt("ACS_GEQUAL",   (ACS_GEQUAL));
#endif
#ifdef ACS_PI
    SetDictInt("ACS_PI",       (ACS_PI));
#endif
#ifdef ACS_NEQUAL
    SetDictInt("ACS_NEQUAL",   (ACS_NEQUAL));
#endif
#ifdef ACS_STERLING
    SetDictInt("ACS_STERLING", (ACS_STERLING));
#endif

    SetDictInt("LINES", LINES);
    SetDictInt("COLS", COLS);
#undef SetDictInt

    cursesmodule_state *state = get_cursesmodule_state(module);
    PyObject *winobj = PyCursesWindow_New(state, win, NULL, NULL);
    if (winobj == NULL) {
        return NULL;
    }
    /* Remember the screen window's encoding in module-global state. */
    curses_screen_encoding = ((PyCursesWindowObject *)winobj)->encoding;
    return winobj;
}
/*[clinic input]
_curses.setupterm

    term: str(accept={str, NoneType}) = None
        Terminal name.
        If omitted, the value of the TERM environment variable will be used.
    fd: int = -1
        File descriptor to which any initialization sequences will be sent.
        If not supplied, the file descriptor for sys.stdout will be used.

Initialize the terminal.
[clinic start generated code]*/

static PyObject *
_curses_setupterm_impl(PyObject *module, const char *term, int fd)
/*[clinic end generated code: output=4584e587350f2848 input=4511472766af0c12]*/
{
    int err;

    if (fd == -1) {
        /* No fd given: fall back to the descriptor of sys.stdout. */
        PyObject* sys_stdout;
        if (PySys_GetOptionalAttrString("stdout", &sys_stdout) < 0) {
            return NULL;
        }

        if (sys_stdout == NULL || sys_stdout == Py_None) {
            cursesmodule_state *state = get_cursesmodule_state(module);
            PyErr_SetString(state->error, "lost sys.stdout");
            Py_XDECREF(sys_stdout);
            return NULL;
        }

        fd = PyObject_AsFileDescriptor(sys_stdout);
        Py_DECREF(sys_stdout);

        if (fd == -1) {
            return NULL;
        }
    }

    /* Call setupterm() only once per process; if it was already done,
       the call is skipped entirely (short-circuit). */
    if (!curses_setupterm_called && setupterm((char *)term, fd, &err) == ERR) {
        const char* s = "setupterm: unknown error";

        /* setupterm() reports the failure reason through err:
           0 -> terminal not found, -1 -> terminfo database not found. */
        if (err == 0) {
            s = "setupterm: could not find terminal";
        } else if (err == -1) {
            s = "setupterm: could not find terminfo database";
        }

        cursesmodule_state *state = get_cursesmodule_state(module);
        PyErr_SetString(state->error, s);
        return NULL;
    }

    curses_setupterm_called = TRUE;

    Py_RETURN_NONE;
}
#if defined(NCURSES_EXT_FUNCS) && NCURSES_EXT_FUNCS >= 20081102
// https://invisible-island.net/ncurses/NEWS.html#index-t20080119
/*[clinic input]
_curses.get_escdelay

Gets the curses ESCDELAY setting.

Gets the number of milliseconds to wait after reading an escape character,
to distinguish between an individual escape character entered on the
keyboard from escape sequences sent by cursor and function keys.
[clinic start generated code]*/

static PyObject *
_curses_get_escdelay_impl(PyObject *module)
/*[clinic end generated code: output=222fa1a822555d60 input=be2d5b3dd974d0a4]*/
{
    /* ESCDELAY is a global exposed by the ncurses extension API. */
    return PyLong_FromLong(ESCDELAY);
}

/*[clinic input]
_curses.set_escdelay
    ms: int
        length of the delay in milliseconds.
    /

Sets the curses ESCDELAY setting.

Sets the number of milliseconds to wait after reading an escape character,
to distinguish between an individual escape character entered on the
keyboard from escape sequences sent by cursor and function keys.
[clinic start generated code]*/

static PyObject *
_curses_set_escdelay_impl(PyObject *module, int ms)
/*[clinic end generated code: output=43818efbf7980ac4 input=7796fe19f111e250]*/
{
    /* Validate here so the caller gets ValueError rather than curses.error. */
    if (ms <= 0) {
        PyErr_SetString(PyExc_ValueError, "ms must be > 0");
        return NULL;
    }

    return curses_check_err(module, set_escdelay(ms), "set_escdelay", NULL);
}

/*[clinic input]
_curses.get_tabsize

Gets the curses TABSIZE setting.

Gets the number of columns used by the curses library when converting a tab
character to spaces as it adds the tab to a window.
[clinic start generated code]*/

static PyObject *
_curses_get_tabsize_impl(PyObject *module)
/*[clinic end generated code: output=7e9e51fb6126fbdf input=74af86bf6c9f5d7e]*/
{
    /* TABSIZE is a global exposed by the ncurses extension API. */
    return PyLong_FromLong(TABSIZE);
}

/*[clinic input]
_curses.set_tabsize
    size: int
        rendered cell width of a tab character.
    /

Sets the curses TABSIZE setting.

Sets the number of columns used by the curses library when converting a tab
character to spaces as it adds the tab to a window.
[clinic start generated code]*/

static PyObject *
_curses_set_tabsize_impl(PyObject *module, int size)
/*[clinic end generated code: output=c1de5a76c0daab1e input=78cba6a3021ad061]*/
{
    /* Validate here so the caller gets ValueError rather than curses.error. */
    if (size <= 0) {
        PyErr_SetString(PyExc_ValueError, "size must be > 0");
        return NULL;
    }

    return curses_check_err(module, set_tabsize(size), "set_tabsize", NULL);
}
#endif
/*[clinic input]
_curses.intrflush

    flag: bool
    /

[clinic start generated code]*/

static PyObject *
_curses_intrflush_impl(PyObject *module, int flag)
/*[clinic end generated code: output=c1986df35e999a0f input=c65fe2ef973fe40a]*/
{
    PyCursesStatefulInitialised(module);

    /* The WINDOW* argument of intrflush() is ignored by curses, hence NULL. */
    return curses_check_err(module, intrflush(NULL, flag), "intrflush", NULL);
}

/*[clinic input]
_curses.isendwin

Return True if endwin() has been called.
[clinic start generated code]*/

static PyObject *
_curses_isendwin_impl(PyObject *module)
/*[clinic end generated code: output=d73179e4a7e1eb8c input=6cdb01a7ebf71397]*/
/* Boilerplate body: calls isendwin() and returns its result as a bool. */
NoArgTrueFalseFunctionBody(isendwin)

#ifdef HAVE_CURSES_IS_TERM_RESIZED
/*[clinic input]
@permit_long_summary
_curses.is_term_resized

    nlines: int
        Height.
    ncols: int
        Width.
    /

Return True if resize_term() would modify the window structure, False otherwise.
[clinic start generated code]*/

static PyObject *
_curses_is_term_resized_impl(PyObject *module, int nlines, int ncols)
/*[clinic end generated code: output=aafe04afe50f1288 input=5792a3f40cecb010]*/
{
    PyCursesStatefulInitialised(module);

    return PyBool_FromLong(is_term_resized(nlines, ncols));
}
#endif /* HAVE_CURSES_IS_TERM_RESIZED */

/*[clinic input]
_curses.keyname

    key: int
        Key number.
    /

Return the name of specified key.
[clinic start generated code]*/

static PyObject *
_curses_keyname_impl(PyObject *module, int key)
/*[clinic end generated code: output=fa2675ab3f4e056b input=ee4b1d0f243a2a2b]*/
{
    const char *knp;

    PyCursesStatefulInitialised(module);

    if (key < 0) {
        PyErr_SetString(PyExc_ValueError, "invalid key number");
        return NULL;
    }
    knp = keyname(key);

    /* keyname() returns NULL for unknown keys; map that to b"". */
    return PyBytes_FromString((knp == NULL) ? "" : knp);
}

/*[clinic input]
_curses.killchar

Return the user's current line kill character.
[clinic start generated code]*/

static PyObject *
_curses_killchar_impl(PyObject *module)
/*[clinic end generated code: output=31c3a45b2c528269 input=1ff171c38df5ccad]*/
{
    char ch;

    ch = killchar();

    /* Result is a single-byte bytes object. */
    return PyBytes_FromStringAndSize(&ch, 1);
}
/*[clinic input]
@permit_long_docstring_body
_curses.longname

Return the terminfo long name field describing the current terminal.

The maximum length of a verbose description is 128 characters. It is defined
only after the call to initscr().
[clinic start generated code]*/

static PyObject *
_curses_longname_impl(PyObject *module)
/*[clinic end generated code: output=fdf30433727ef568 input=5de06852f2230ddb]*/
/* Boilerplate body: calls longname() and returns its result as bytes. */
NoArgReturnStringFunctionBody(longname)

/*[clinic input]
_curses.meta

    yes: bool
    /

Enable/disable meta keys.

If yes is True, allow 8-bit characters to be input. If yes is False,
allow only 7-bit characters.
[clinic start generated code]*/

static PyObject *
_curses_meta_impl(PyObject *module, int yes)
/*[clinic end generated code: output=22f5abda46a605d8 input=cfe7da79f51d0e30]*/
{
    PyCursesStatefulInitialised(module);

    return curses_check_err(module, meta(stdscr, yes), "meta", NULL);
}

#ifdef NCURSES_MOUSE_VERSION
/*[clinic input]
_curses.mouseinterval

    interval: int
        Time in milliseconds.
    /

Set and retrieve the maximum time between press and release in a click.

Set the maximum time that can elapse between press and release events in
order for them to be recognized as a click, and return the previous interval
value.
[clinic start generated code]*/

static PyObject *
_curses_mouseinterval_impl(PyObject *module, int interval)
/*[clinic end generated code: output=c4f5ff04354634c5 input=75aaa3f0db10ac4e]*/
{
    PyCursesStatefulInitialised(module);

    /* mouseinterval() returns the previous interval, or ERR on failure. */
    int value = mouseinterval(interval);
    if (value == ERR) {
        curses_set_error(module, "mouseinterval", NULL);
        return NULL;
    }
    return PyLong_FromLong(value);
}

/*[clinic input]
@permit_long_summary
_curses.mousemask

    newmask: unsigned_long(bitwise=True)
    /

Set the mouse events to be reported, and return a tuple (availmask, oldmask).

Return a tuple (availmask, oldmask). availmask indicates which of the
specified mouse events can be reported; on complete failure it returns 0.
oldmask is the previous value of the given window's mouse event mask.
If this function is never called, no mouse events are ever reported.
[clinic start generated code]*/

static PyObject *
_curses_mousemask_impl(PyObject *module, unsigned long newmask)
/*[clinic end generated code: output=9406cf1b8a36e485 input=b92ff4fbe5ce61b1]*/
{
    mmask_t oldmask, availmask;

    PyCursesStatefulInitialised(module);

    /* mousemask() has no error return; availmask == 0 means nothing
       could be enabled. */
    availmask = mousemask((mmask_t)newmask, &oldmask);
    return Py_BuildValue("(kk)",
                         (unsigned long)availmask, (unsigned long)oldmask);
}
#endif
/*[clinic input]
_curses.napms -> int

    ms: int
        Duration in milliseconds.
    /

Sleep for specified time.
[clinic start generated code]*/

static int
_curses_napms_impl(PyObject *module, int ms)
/*[clinic end generated code: output=5f292a6a724491bd input=c6d6e01f2f1df9f7]*/
{
    /* Manual initialisation check: the clinic return converter is int,
       so the usual PyObject*-returning macro cannot be used here; the
       helper reports -1 (with an exception set) when not initialised. */
    if (!_PyCursesStatefulCheckFunction(module,
                                        curses_initscr_called,
                                        "initscr")) {
        return -1;
    }
    return napms(ms);
}

/*[clinic input]
_curses.newpad

    nlines: int
        Height.
    ncols: int
        Width.
    /

Create and return a pointer to a new pad data structure.
[clinic start generated code]*/

static PyObject *
_curses_newpad_impl(PyObject *module, int nlines, int ncols)
/*[clinic end generated code: output=de52a56eb1098ec9 input=93f1272f240d8894]*/
{
    WINDOW *win;

    PyCursesStatefulInitialised(module);

    win = newpad(nlines, ncols);

    if (win == NULL) {
        curses_set_null_error(module, "newpad", NULL);
        return NULL;
    }

    /* Wrap the raw WINDOW* in a curses window object. */
    cursesmodule_state *state = get_cursesmodule_state(module);
    return PyCursesWindow_New(state, win, NULL, NULL);
}

/*[clinic input]
_curses.newwin

    nlines: int
        Height.
    ncols: int
        Width.
    [
    begin_y: int = 0
        Top side y-coordinate.
    begin_x: int = 0
        Left side x-coordinate.
    ]
    /

Return a new window.

By default, the window will extend from the specified position to the lower
right corner of the screen.
[clinic start generated code]*/

static PyObject *
_curses_newwin_impl(PyObject *module, int nlines, int ncols,
                    int group_right_1, int begin_y, int begin_x)
/*[clinic end generated code: output=c1e0a8dc8ac2826c input=29312c15a72a003d]*/
{
    WINDOW *win;

    PyCursesStatefulInitialised(module);

    /* group_right_1 (set by clinic when the optional [begin_y, begin_x]
       group was supplied) is not needed: the defaults are 0 either way. */
    win = newwin(nlines,ncols,begin_y,begin_x);
    if (win == NULL) {
        curses_set_null_error(module, "newwin", NULL);
        return NULL;
    }

    cursesmodule_state *state = get_cursesmodule_state(module);
    return PyCursesWindow_New(state, win, NULL, NULL);
}
/*[clinic input]
_curses.nl

    flag: bool = True
        If false, the effect is the same as calling nonl().
    /

Enter newline mode.

This mode translates the return key into newline on input, and translates
newline into return and line-feed on output. Newline mode is initially on.
[clinic start generated code]*/

static PyObject *
_curses_nl_impl(PyObject *module, int flag)
/*[clinic end generated code: output=b39cc0ffc9015003 input=18e3e9c6e8cfcf6f]*/
/* Boilerplate body: calls nl() or nonl() depending on flag. */
NoArgOrFlagNoReturnFunctionBody(nl, flag)

/*[clinic input]
_curses.nocbreak

Leave cbreak mode.

Return to normal "cooked" mode with line buffering.
[clinic start generated code]*/

static PyObject *
_curses_nocbreak_impl(PyObject *module)
/*[clinic end generated code: output=eabf3833a4fbf620 input=e4b65f7d734af400]*/
NoArgNoReturnFunctionBody(nocbreak)

/*[clinic input]
_curses.noecho

Leave echo mode.

Echoing of input characters is turned off.
[clinic start generated code]*/

static PyObject *
_curses_noecho_impl(PyObject *module)
/*[clinic end generated code: output=cc95ab45bc98f41b input=76714df529e614c3]*/
NoArgNoReturnFunctionBody(noecho)

/*[clinic input]
_curses.nonl

Leave newline mode.

Disable translation of return into newline on input, and disable low-level
translation of newline into newline/return on output.
[clinic start generated code]*/

static PyObject *
_curses_nonl_impl(PyObject *module)
/*[clinic end generated code: output=99e917e9715770c6 input=9d37dd122d3022fc]*/
NoArgNoReturnFunctionBody(nonl)

/*[clinic input]
_curses.noqiflush

Disable queue flushing.

When queue flushing is disabled, normal flush of input and output queues
associated with the INTR, QUIT and SUSP characters will not be done.
[clinic start generated code]*/

static PyObject *
_curses_noqiflush_impl(PyObject *module)
/*[clinic end generated code: output=8b95a4229bbf0877 input=ba3e6b2e3e54c4df]*/
/* noqiflush() returns void, hence the "Void" body macro. */
NoArgNoReturnVoidFunctionBody(noqiflush)

/*[clinic input]
_curses.noraw

Leave raw mode.

Return to normal "cooked" mode with line buffering.
[clinic start generated code]*/

static PyObject *
_curses_noraw_impl(PyObject *module)
/*[clinic end generated code: output=39894e5524c430cc input=6ec86692096dffb5]*/
NoArgNoReturnFunctionBody(noraw)
/*[clinic input]
_curses.pair_content

    pair_number: pair
        The number of the color pair (0 - (COLOR_PAIRS-1)).
    /

Return a tuple (fg, bg) containing the colors for the requested color pair.
[clinic start generated code]*/

static PyObject *
_curses_pair_content_impl(PyObject *module, int pair_number)
/*[clinic end generated code: output=4a726dd0e6885f3f input=03970f840fc7b739]*/
{
    _CURSES_COLOR_NUM_TYPE f, b;

    PyCursesStatefulInitialised(module);
    PyCursesStatefulInitialisedColor(module);

    if (_CURSES_PAIR_CONTENT_FUNC(pair_number, &f, &b) == ERR) {
        /* Distinguish an out-of-range pair number (ValueError) from any
           other curses failure (curses.error). */
        if (pair_number >= COLOR_PAIRS) {
            PyErr_Format(PyExc_ValueError,
                         "Color pair is greater than COLOR_PAIRS-1 (%d).",
                         COLOR_PAIRS - 1);
        }
        else {
            const char *funcname = Py_STRINGIFY(_CURSES_PAIR_CONTENT_FUNC);
            curses_set_error(module, funcname, "pair_content");
        }
        return NULL;
    }

    return Py_BuildValue("(ii)", f, b);
}

/*[clinic input]
_curses.pair_number

    attr: int
    /

Return the number of the color-pair set by the specified attribute value.

color_pair() is the counterpart to this function.
[clinic start generated code]*/

static PyObject *
_curses_pair_number_impl(PyObject *module, int attr)
/*[clinic end generated code: output=85bce7d65c0aa3f4 input=d478548e33f5e61a]*/
{
    PyCursesStatefulInitialised(module);
    PyCursesStatefulInitialisedColor(module);

    /* PAIR_NUMBER() extracts the pair number from an attribute value. */
    return PyLong_FromLong(PAIR_NUMBER(attr));
}

/*[clinic input]
_curses.putp

    string: str(accept={robuffer})
    /

Emit the value of a specified terminfo capability for the current terminal.

Note that the output of putp() always goes to standard output.
[clinic start generated code]*/

static PyObject *
_curses_putp_impl(PyObject *module, const char *string)
/*[clinic end generated code: output=e98081d1b8eb5816 input=1601faa828b44cb3]*/
{
    return curses_check_err(module, putp(string), "putp", NULL);
}

/*[clinic input]
_curses.qiflush

    flag: bool = True
        If false, the effect is the same as calling noqiflush().
    /

Enable queue flushing.

If queue flushing is enabled, all output in the display driver queue
will be flushed when the INTR, QUIT and SUSP characters are read.
[clinic start generated code]*/

static PyObject *
_curses_qiflush_impl(PyObject *module, int flag)
/*[clinic end generated code: output=9167e862f760ea30 input=6ec8b3e2b717ec40]*/
{
    PyCursesStatefulInitialised(module);

    /* Both qiflush() and noqiflush() return void: no error to report. */
    if (flag) {
        qiflush();
    }
    else {
        noqiflush();
    }
    Py_RETURN_NONE;
}
#if defined(HAVE_CURSES_RESIZETERM) || defined(HAVE_CURSES_RESIZE_TERM)
/* Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES
 * and _curses.COLS. Returns 1 on success and 0 on failure. */
static int
update_lines_cols(PyObject *private_module)
{
    PyObject *exposed_module = NULL, *o = NULL;

    /* The public "curses" package re-exports LINES/COLS, so both the
       public and the private (_curses) module dicts must be refreshed. */
    exposed_module = PyImport_ImportModule("curses");
    if (exposed_module == NULL) {
        goto error;
    }
    PyObject *exposed_module_dict = PyModule_GetDict(exposed_module); // borrowed
    if (exposed_module_dict == NULL) {
        goto error;
    }
    PyObject *private_module_dict = PyModule_GetDict(private_module); // borrowed
    if (private_module_dict == NULL) {
        goto error;
    }

    o = PyLong_FromLong(LINES);
    if (o == NULL) {
        goto error;
    }
    if (PyDict_SetItemString(exposed_module_dict, "LINES", o) < 0) {
        goto error;
    }
    if (PyDict_SetItemString(private_module_dict, "LINES", o) < 0) {
        goto error;
    }
    Py_DECREF(o);

    o = PyLong_FromLong(COLS);
    if (o == NULL) {
        goto error;
    }
    if (PyDict_SetItemString(exposed_module_dict, "COLS", o) < 0) {
        goto error;
    }
    if (PyDict_SetItemString(private_module_dict, "COLS", o) < 0) {
        goto error;
    }
    Py_DECREF(o);
    Py_DECREF(exposed_module);
    return 1;

error:
    /* o may still hold a reference if a SetItem failed before the DECREF. */
    Py_XDECREF(o);
    Py_XDECREF(exposed_module);
    return 0;
}

/*[clinic input]
_curses.update_lines_cols

[clinic start generated code]*/

static PyObject *
_curses_update_lines_cols_impl(PyObject *module)
/*[clinic end generated code: output=423f2b1e63ed0f75 input=5f065ab7a28a5d90]*/
{
    /* Thin Python-visible wrapper; the helper sets the exception. */
    if (!update_lines_cols(module)) {
        return NULL;
    }
    Py_RETURN_NONE;
}
#endif
/*[clinic input]
_curses.raw

    flag: bool = True
        If false, the effect is the same as calling noraw().
    /

Enter raw mode.

In raw mode, normal line buffering and processing of interrupt, quit,
suspend, and flow control keys are turned off; characters are presented to
curses input functions one by one.
[clinic start generated code]*/

static PyObject *
_curses_raw_impl(PyObject *module, int flag)
/*[clinic end generated code: output=a750e4b342be015b input=4b447701389fb4df]*/
/* Boilerplate body: calls raw() or noraw() depending on flag. */
NoArgOrFlagNoReturnFunctionBody(raw, flag)

/*[clinic input]
@permit_long_summary
_curses.reset_prog_mode

Restore the terminal to "program" mode, as previously saved by def_prog_mode().
[clinic start generated code]*/

static PyObject *
_curses_reset_prog_mode_impl(PyObject *module)
/*[clinic end generated code: output=15eb765abf0b6575 input=a8b44b5261c8cf3a]*/
NoArgNoReturnFunctionBody(reset_prog_mode)

/*[clinic input]
@permit_long_summary
_curses.reset_shell_mode

Restore the terminal to "shell" mode, as previously saved by def_shell_mode().
[clinic start generated code]*/

static PyObject *
_curses_reset_shell_mode_impl(PyObject *module)
/*[clinic end generated code: output=0238de2962090d33 input=f5224034a2c95931]*/
NoArgNoReturnFunctionBody(reset_shell_mode)

/*[clinic input]
_curses.resetty

Restore terminal mode.
[clinic start generated code]*/

static PyObject *
_curses_resetty_impl(PyObject *module)
/*[clinic end generated code: output=ff4b448e80a7cd63 input=940493de03624bb0]*/
NoArgNoReturnFunctionBody(resetty)

#ifdef HAVE_CURSES_RESIZETERM
/*[clinic input]
_curses.resizeterm

    nlines: short
        Height.
    ncols: short
        Width.
    /

Resize the standard and current windows to the specified dimensions.

Adjusts other bookkeeping data used by the curses library that record the
window dimensions (in particular the SIGWINCH handler).
[clinic start generated code]*/

static PyObject *
_curses_resizeterm_impl(PyObject *module, short nlines, short ncols)
/*[clinic end generated code: output=4de3abab50c67f02 input=414e92a63e3e9899]*/
{
    PyObject *result;
    int code;

    PyCursesStatefulInitialised(module);

    code = resizeterm(nlines, ncols);
    result = curses_check_err(module, code, "resizeterm", NULL);
    if (!result)
        return NULL;
    /* Keep the Python-visible LINES/COLS constants in sync with the
       new terminal size. */
    if (!update_lines_cols(module)) {
        Py_DECREF(result);
        return NULL;
    }
    return result;
}
#endif
#ifdef HAVE_CURSES_RESIZE_TERM
/*[clinic input]
@permit_long_docstring_body
_curses.resize_term

    nlines: short
        Height.
    ncols: short
        Width.
    /

Backend function used by resizeterm(), performing most of the work.

When resizing the windows, resize_term() blank-fills the areas that are
extended. The calling application should fill in these areas with appropriate
data. The resize_term() function attempts to resize all windows. However,
due to the calling convention of pads, it is not possible to resize these
without additional interaction with the application.
[clinic start generated code]*/

static PyObject *
_curses_resize_term_impl(PyObject *module, short nlines, short ncols)
/*[clinic end generated code: output=46c6d749fa291dbd input=ebfa840f6b5f03fa]*/
{
    PyObject *result;
    int code;

    PyCursesStatefulInitialised(module);

    code = resize_term(nlines, ncols);
    result = curses_check_err(module, code, "resize_term", NULL);
    if (!result)
        return NULL;
    /* Keep the Python-visible LINES/COLS constants in sync with the
       new terminal size. */
    if (!update_lines_cols(module)) {
        Py_DECREF(result);
        return NULL;
    }
    return result;
}
#endif /* HAVE_CURSES_RESIZE_TERM */

/*[clinic input]
_curses.savetty

Save terminal mode.
[clinic start generated code]*/

static PyObject *
_curses_savetty_impl(PyObject *module)
/*[clinic end generated code: output=6babc49f12b42199 input=fce6b2b7d2200102]*/
NoArgNoReturnFunctionBody(savetty)

#ifdef getsyx
/*[clinic input]
_curses.setsyx

    y: int
        Y-coordinate.
    x: int
        X-coordinate.
    /

Set the virtual screen cursor.

If y and x are both -1, then leaveok is set.
[clinic start generated code]*/

static PyObject *
_curses_setsyx_impl(PyObject *module, int y, int x)
/*[clinic end generated code: output=23dcf753511a2464 input=fa7f2b208e10a557]*/
{
    PyCursesStatefulInitialised(module);

    /* setsyx() returns void: nothing to check. */
    setsyx(y,x);

    Py_RETURN_NONE;
}
#endif
/*[clinic input]
_curses.start_color

Initializes eight basic colors and global variables COLORS and COLOR_PAIRS.

Must be called if the programmer wants to use colors, and before any other
color manipulation routine is called. It is good practice to call this
routine right after initscr().

It also restores the colors on the terminal to the values they had when the
terminal was just turned on.
[clinic start generated code]*/

static PyObject *
_curses_start_color_impl(PyObject *module)
/*[clinic end generated code: output=8b772b41d8090ede input=0ca0ecb2b77e1a12]*/
{
    PyCursesStatefulInitialised(module);

    if (start_color() == ERR) {
        curses_set_error(module, "start_color", NULL);
        return NULL;
    }

    /* Gate for the PyCursesStatefulInitialisedColor checks elsewhere. */
    curses_start_color_called = TRUE;

    PyObject *module_dict = PyModule_GetDict(module); // borrowed
    if (module_dict == NULL) {
        return NULL;
    }
/* COLORS and COLOR_PAIRS are only meaningful after start_color(), so the
   module-level constants are published here. */
#define DICT_ADD_INT_VALUE(NAME, VALUE)                             \
    do {                                                            \
        PyObject *value = PyLong_FromLong((long)(VALUE));           \
        if (value == NULL) {                                        \
            return NULL;                                            \
        }                                                           \
        int rc = PyDict_SetItemString(module_dict, (NAME), value);  \
        Py_DECREF(value);                                           \
        if (rc < 0) {                                               \
            return NULL;                                            \
        }                                                           \
    } while (0)

    DICT_ADD_INT_VALUE("COLORS", COLORS);
    DICT_ADD_INT_VALUE("COLOR_PAIRS", COLOR_PAIRS);
#undef DICT_ADD_INT_VALUE

    Py_RETURN_NONE;
}

/*[clinic input]
_curses.termattrs

Return a logical OR of all video attributes supported by the terminal.
[clinic start generated code]*/

static PyObject *
_curses_termattrs_impl(PyObject *module)
/*[clinic end generated code: output=b06f437fce1b6fc4 input=0559882a04f84d1d]*/
/* Boilerplate body: calls termattrs() and returns its result as an int. */
NoArgReturnIntFunctionBody(termattrs)

/*[clinic input]
@permit_long_summary
_curses.termname

Return the value of the environment variable TERM, truncated to 14 characters.
[clinic start generated code]*/

static PyObject *
_curses_termname_impl(PyObject *module)
/*[clinic end generated code: output=96375577ebbd67fd input=c34f724d8ce8fc4e]*/
NoArgReturnStringFunctionBody(termname)
/*[clinic input]
_curses.tigetflag

    capname: str
        The terminfo capability name.
    /

Return the value of the Boolean capability.

The value -1 is returned if capname is not a Boolean capability, or 0 if
it is canceled or absent from the terminal description.
[clinic start generated code]*/

static PyObject *
_curses_tigetflag_impl(PyObject *module, const char *capname)
/*[clinic end generated code: output=8853c0e55542195b input=b0787af9e3e9a6ce]*/
{
    /* Only setupterm() is needed, not a full initscr(). */
    PyCursesStatefulSetupTermCalled(module);

    /* The cast drops const: old curses prototypes take a plain char*. */
    return PyLong_FromLong( (long) tigetflag( (char *)capname ) );
}

/*[clinic input]
_curses.tigetnum

    capname: str
        The terminfo capability name.
    /

Return the value of the numeric capability.

The value -2 is returned if capname is not a numeric capability, or -1 if
it is canceled or absent from the terminal description.
[clinic start generated code]*/

static PyObject *
_curses_tigetnum_impl(PyObject *module, const char *capname)
/*[clinic end generated code: output=46f8b0a1b5dff42f input=5cdf2f410b109720]*/
{
    PyCursesStatefulSetupTermCalled(module);

    return PyLong_FromLong( (long) tigetnum( (char *)capname ) );
}

/*[clinic input]
_curses.tigetstr

    capname: str
        The terminfo capability name.
    /

Return the value of the string capability.

None is returned if capname is not a string capability, or is canceled or
absent from the terminal description.
[clinic start generated code]*/

static PyObject *
_curses_tigetstr_impl(PyObject *module, const char *capname)
/*[clinic end generated code: output=f22b576ad60248f3 input=36644df25c73c0a7]*/
{
    PyCursesStatefulSetupTermCalled(module);

    capname = tigetstr( (char *)capname );
    /* tigetstr() returns (char*)-1 for "not a string capability" and
       NULL for "canceled or absent"; both map to None. */
    if (capname == NULL || capname == (char*) -1) {
        Py_RETURN_NONE;
    }
    return PyBytes_FromString( capname );
}

/*[clinic input]
_curses.tparm

    str: str(accept={robuffer})
        Parameterized byte string obtained from the terminfo database.
    i1: int = 0
    i2: int = 0
    i3: int = 0
    i4: int = 0
    i5: int = 0
    i6: int = 0
    i7: int = 0
    i8: int = 0
    i9: int = 0
    /

Instantiate the specified byte string with the supplied parameters.
[clinic start generated code]*/

static PyObject *
_curses_tparm_impl(PyObject *module, const char *str, int i1, int i2, int i3,
                   int i4, int i5, int i6, int i7, int i8, int i9)
/*[clinic end generated code: output=599f62b615c667ff input=5e30b15786f032aa]*/
{
    char* result = NULL;

    PyCursesStatefulSetupTermCalled(module);

    /* tparm() takes up to nine parameters; unused ones default to 0. */
    result = tparm((char *)str,i1,i2,i3,i4,i5,i6,i7,i8,i9);
    if (!result) {
        curses_set_null_error(module, "tparm", NULL);
        return NULL;
    }

    return PyBytes_FromString(result);
}
#ifdef HAVE_CURSES_TYPEAHEAD
/*[clinic input]
_curses.typeahead

    fd: int
        File descriptor.
    /

Specify that the file descriptor fd be used for typeahead checking.

If fd is -1, then no typeahead checking is done.
[clinic start generated code]*/

static PyObject *
_curses_typeahead_impl(PyObject *module, int fd)
/*[clinic end generated code: output=084bb649d7066583 input=f2968d8e1805051b]*/
{
    PyCursesStatefulInitialised(module);

    return curses_check_err(module, typeahead(fd), "typeahead", NULL);
}
#endif

/*[clinic input]
_curses.unctrl

    ch: object
    /

Return a string which is a printable representation of the character ch.

Control characters are displayed as a caret followed by the character,
for example as ^C. Printing characters are left as they are.
[clinic start generated code]*/

static PyObject *
_curses_unctrl(PyObject *module, PyObject *ch)
/*[clinic end generated code: output=8e07fafc430c9434 input=cd1e35e16cd1ace4]*/
{
    chtype ch_;

    PyCursesStatefulInitialised(module);

    /* Accepts an int or a 1-character str/bytes; the converter raises
       TypeError otherwise. */
    if (!PyCurses_ConvertToChtype(NULL, ch, &ch_))
        return NULL;

    const char *res = unctrl(ch_);
    if (res == NULL) {
        curses_set_null_error(module, "unctrl", NULL);
        return NULL;
    }

    return PyBytes_FromString(res);
}

/*[clinic input]
_curses.ungetch

    ch: object
    /

Push ch so the next getch() will return it.
[clinic start generated code]*/

static PyObject *
_curses_ungetch(PyObject *module, PyObject *ch)
/*[clinic end generated code: output=9b19d8268376d887 input=6681e6ae4c42e5eb]*/
{
    chtype ch_;

    PyCursesStatefulInitialised(module);

    if (!PyCurses_ConvertToChtype(NULL, ch, &ch_))
        return NULL;

    return curses_check_err(module, ungetch(ch_), "ungetch", NULL);
}
#ifdef HAVE_NCURSESW
/* Convert an object to a character (wchar_t):

   - int
   - str of length 1

   Return 1 on success, 0 on error (with an exception set). */
static int
PyCurses_ConvertToWchar_t(PyObject *obj,
                          wchar_t *wch)
{
    if (PyUnicode_Check(obj)) {
        wchar_t buffer[2];
        /* A str argument must convert to exactly one wchar_t; a
           character outside the wchar_t range (e.g. a non-BMP character
           on a 16-bit-wchar_t platform) converts to two and is rejected. */
        if (PyUnicode_AsWideChar(obj, buffer, 2) != 1) {
            PyErr_Format(PyExc_TypeError,
                         "expect int or str of length 1, "
                         "got a str of length %zi",
                         PyUnicode_GET_LENGTH(obj));
            return 0;
        }
        *wch = buffer[0];
        /* Honor the documented contract: 1 on success.  (This branch
           used to return 2; callers only test for truthiness, so the
           change is observable-behavior compatible.) */
        return 1;
    }
    else if (PyLong_CheckExact(obj)) {
        long value;
        int overflow;
        value = PyLong_AsLongAndOverflow(obj, &overflow);
        if (overflow) {
            PyErr_SetString(PyExc_OverflowError,
                            "int doesn't fit in long");
            return 0;
        }
        *wch = (wchar_t)value;
        /* Round-trip check: reject values truncated by the cast. */
        if ((long)*wch != value) {
            PyErr_Format(PyExc_OverflowError,
                         "character doesn't fit in wchar_t");
            return 0;
        }
        return 1;
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "expect int or str of length 1, got %s",
                     Py_TYPE(obj)->tp_name);
        return 0;
    }
}
/*[clinic input]
_curses.unget_wch

    ch: object
    /

Push ch so the next get_wch() will return it.
[clinic start generated code]*/

static PyObject *
_curses_unget_wch(PyObject *module, PyObject *ch)
/*[clinic end generated code: output=1974c9fb01d37863 input=0d56dc65a46feebb]*/
{
    wchar_t wch;

    PyCursesStatefulInitialised(module);

    /* Accepts an int or a 1-character str; the converter sets the
       exception on failure. */
    if (!PyCurses_ConvertToWchar_t(ch, &wch))
        return NULL;

    return curses_check_err(module, unget_wch(wch), "unget_wch", NULL);
}
#endif
#ifdef HAVE_CURSES_USE_ENV
/*[clinic input]
@permit_long_docstring_body
_curses.use_env

    flag: bool
    /

Use environment variables LINES and COLUMNS.

If used, this function should be called before initscr() or newterm() are
called.

When flag is False, the values of lines and columns specified in the terminfo
database will be used, even if environment variables LINES and COLUMNS (used
by default) are set, or if curses is running in a window (in which case
default behavior would be to use the window size if LINES and COLUMNS are
not set).
[clinic start generated code]*/

static PyObject *
_curses_use_env_impl(PyObject *module, int flag)
/*[clinic end generated code: output=b2c445e435c0b164 input=eaa9047ec73c27d3]*/
{
    /* No initialisation check: use_env() is meant to be called before
       initscr(). */
    use_env(flag);
    Py_RETURN_NONE;
}
#endif

#ifndef STRICT_SYSV_CURSES
/*[clinic input]
_curses.use_default_colors

Equivalent to assume_default_colors(-1, -1).
[clinic start generated code]*/

static PyObject *
_curses_use_default_colors_impl(PyObject *module)
/*[clinic end generated code: output=a3b81ff71dd901be input=99ff0b7c69834d1f]*/
{
    int code;

    PyCursesStatefulInitialised(module);
    PyCursesStatefulInitialisedColor(module);

    code = use_default_colors();
    return curses_check_err(module, code, "use_default_colors", NULL);
}

/*[clinic input]
_curses.assume_default_colors
    fg: int
    bg: int
    /

Allow use of default values for colors on terminals supporting this feature.

Assign terminal default foreground/background colors to color number -1.
Change the definition of the color-pair 0 to (fg, bg).

Use this to support transparency in your application.
[clinic start generated code]*/

static PyObject *
_curses_assume_default_colors_impl(PyObject *module, int fg, int bg)
/*[clinic end generated code: output=54985397a7d2b3a5 input=7fe301712ef3e9fb]*/
{
    int code;

    PyCursesStatefulInitialised(module);
    PyCursesStatefulInitialisedColor(module);

    code = assume_default_colors(fg, bg);
    return curses_check_err(module, code, "assume_default_colors", NULL);
}
#endif /* STRICT_SYSV_CURSES */
#ifdef NCURSES_VERSION

PyDoc_STRVAR(ncurses_version__doc__,
"curses.ncurses_version\n\
\n\
Ncurses version information as a named tuple.");

static PyStructSequence_Field ncurses_version_fields[] = {
    {"major", "Major release number"},
    {"minor", "Minor release number"},
    {"patch", "Patch release number"},
    {0}
};

static PyStructSequence_Desc ncurses_version_desc = {
    "curses.ncurses_version",  /* name */
    ncurses_version__doc__,    /* doc */
    ncurses_version_fields,    /* fields */
    3
};

/* Build the curses.ncurses_version struct sequence instance.
   Returns a new reference, or NULL with an exception set. */
static PyObject *
make_ncurses_version(PyTypeObject *type)
{
    PyObject *ncurses_version = PyStructSequence_New(type);
    if (ncurses_version == NULL) {
        return NULL;
    }
    /* Prefer the runtime-reported version string over the compile-time
       header macros, since the loaded library may differ. */
    const char *str = curses_version();
    unsigned long major = 0, minor = 0, patch = 0;
    if (!str || sscanf(str, "%*[^0-9]%lu.%lu.%lu", &major, &minor, &patch) < 3) {
        // Fallback to header version, which cannot be that wrong
        major = NCURSES_VERSION_MAJOR;
        minor = NCURSES_VERSION_MINOR;
        patch = NCURSES_VERSION_PATCH;
    }
#define SET_VERSION_COMPONENT(INDEX, VALUE)                     \
    do {                                                        \
        PyObject *o = PyLong_FromLong(VALUE);                   \
        if (o == NULL) {                                        \
            Py_DECREF(ncurses_version);                         \
            return NULL;                                        \
        }                                                       \
        PyStructSequence_SET_ITEM(ncurses_version, INDEX, o);   \
    } while (0)

    SET_VERSION_COMPONENT(0, major);
    SET_VERSION_COMPONENT(1, minor);
    SET_VERSION_COMPONENT(2, patch);
#undef SET_VERSION_COMPONENT

    return ncurses_version;
}

#endif /* NCURSES_VERSION */
/*[clinic input]
_curses.has_extended_color_support

Return True if the module supports extended colors; otherwise, return False.

Extended color support allows more than 256 color-pairs for terminals
that support more than 16 colors (e.g. xterm-256color).
[clinic start generated code]*/

static PyObject *
_curses_has_extended_color_support_impl(PyObject *module)
/*[clinic end generated code: output=68f1be2b57d92e22 input=4b905f046e35ee9f]*/
{
    /* Decided at compile time from the available ncurses ext_color API. */
    return PyBool_FromLong(_NCURSES_EXTENDED_COLOR_FUNCS);
}
/* List of functions defined in the module.

   Each *_METHODDEF macro is produced by Argument Clinic and expands to a
   PyMethodDef initializer, or to nothing when the underlying curses
   function is unavailable on this platform. */

static PyMethodDef cursesmodule_methods[] = {
    _CURSES_BAUDRATE_METHODDEF
    _CURSES_BEEP_METHODDEF
    _CURSES_CAN_CHANGE_COLOR_METHODDEF
    _CURSES_CBREAK_METHODDEF
    _CURSES_COLOR_CONTENT_METHODDEF
    _CURSES_COLOR_PAIR_METHODDEF
    _CURSES_CURS_SET_METHODDEF
    _CURSES_DEF_PROG_MODE_METHODDEF
    _CURSES_DEF_SHELL_MODE_METHODDEF
    _CURSES_DELAY_OUTPUT_METHODDEF
    _CURSES_DOUPDATE_METHODDEF
    _CURSES_ECHO_METHODDEF
    _CURSES_ENDWIN_METHODDEF
    _CURSES_ERASECHAR_METHODDEF
    _CURSES_FILTER_METHODDEF
    _CURSES_FLASH_METHODDEF
    _CURSES_FLUSHINP_METHODDEF
    _CURSES_GETMOUSE_METHODDEF
    _CURSES_UNGETMOUSE_METHODDEF
    _CURSES_GETSYX_METHODDEF
    _CURSES_GETWIN_METHODDEF
    _CURSES_HAS_COLORS_METHODDEF
    _CURSES_HAS_EXTENDED_COLOR_SUPPORT_METHODDEF
    _CURSES_HAS_IC_METHODDEF
    _CURSES_HAS_IL_METHODDEF
    _CURSES_HAS_KEY_METHODDEF
    _CURSES_HALFDELAY_METHODDEF
    _CURSES_INIT_COLOR_METHODDEF
    _CURSES_INIT_PAIR_METHODDEF
    _CURSES_INITSCR_METHODDEF
    _CURSES_INTRFLUSH_METHODDEF
    _CURSES_ISENDWIN_METHODDEF
    _CURSES_IS_TERM_RESIZED_METHODDEF
    _CURSES_KEYNAME_METHODDEF
    _CURSES_KILLCHAR_METHODDEF
    _CURSES_LONGNAME_METHODDEF
    _CURSES_META_METHODDEF
    _CURSES_MOUSEINTERVAL_METHODDEF
    _CURSES_MOUSEMASK_METHODDEF
    _CURSES_NAPMS_METHODDEF
    _CURSES_NEWPAD_METHODDEF
    _CURSES_NEWWIN_METHODDEF
    _CURSES_NL_METHODDEF
    _CURSES_NOCBREAK_METHODDEF
    _CURSES_NOECHO_METHODDEF
    _CURSES_NONL_METHODDEF
    _CURSES_NOQIFLUSH_METHODDEF
    _CURSES_NORAW_METHODDEF
    _CURSES_PAIR_CONTENT_METHODDEF
    _CURSES_PAIR_NUMBER_METHODDEF
    _CURSES_PUTP_METHODDEF
    _CURSES_QIFLUSH_METHODDEF
    _CURSES_RAW_METHODDEF
    _CURSES_RESET_PROG_MODE_METHODDEF
    _CURSES_RESET_SHELL_MODE_METHODDEF
    _CURSES_RESETTY_METHODDEF
    _CURSES_RESIZETERM_METHODDEF
    _CURSES_RESIZE_TERM_METHODDEF
    _CURSES_SAVETTY_METHODDEF
    /* get/set_escdelay need a sufficiently recent ncurses extension. */
#if defined(NCURSES_EXT_FUNCS) && NCURSES_EXT_FUNCS >= 20081102
    _CURSES_GET_ESCDELAY_METHODDEF
    _CURSES_SET_ESCDELAY_METHODDEF
#endif
    _CURSES_GET_TABSIZE_METHODDEF
    _CURSES_SET_TABSIZE_METHODDEF
    _CURSES_SETSYX_METHODDEF
    _CURSES_SETUPTERM_METHODDEF
    _CURSES_START_COLOR_METHODDEF
    _CURSES_TERMATTRS_METHODDEF
    _CURSES_TERMNAME_METHODDEF
    _CURSES_TIGETFLAG_METHODDEF
    _CURSES_TIGETNUM_METHODDEF
    _CURSES_TIGETSTR_METHODDEF
    _CURSES_TPARM_METHODDEF
    _CURSES_TYPEAHEAD_METHODDEF
    _CURSES_UNCTRL_METHODDEF
    _CURSES_UNGETCH_METHODDEF
    _CURSES_UPDATE_LINES_COLS_METHODDEF
    _CURSES_UNGET_WCH_METHODDEF
    _CURSES_USE_ENV_METHODDEF
    _CURSES_USE_DEFAULT_COLORS_METHODDEF
    _CURSES_ASSUME_DEFAULT_COLORS_METHODDEF
    {NULL, NULL}        /* sentinel */
};
/* Module C API */

/* Function versions of the 3 functions for testing whether curses has been
   initialised or not. Exposed through the capsule so that other extension
   modules (e.g. _curses_panel) can perform the same checks.

   NOTE(review): presumably _PyCursesCheckFunction() sets an exception when
   the prerequisite call has not been made -- confirm against its
   definition, which is outside this chunk. */

static inline int
curses_capi_setupterm_called(void)
{
    return _PyCursesCheckFunction(curses_setupterm_called, "setupterm");
}

static inline int
curses_capi_initscr_called(void)
{
    return _PyCursesCheckFunction(curses_initscr_called, "initscr");
}

static inline int
curses_capi_start_color_called(void)
{
    return _PyCursesCheckFunction(curses_start_color_called, "start_color");
}
/* Allocate the C API pointer array stored in the _C_API capsule.
   Returns NULL with an exception set on allocation failure. */
static void *
curses_capi_new(cursesmodule_state *state)
{
    assert(state->window_type != NULL);
    void **capi = (void **)PyMem_Calloc(PyCurses_API_pointers, sizeof(void *));
    if (capi == NULL) {
        PyErr_NoMemory();
        return NULL;
    }
    /* Slot 0 holds a strong reference to the window type; the remaining
       slots are plain function pointers (no ownership). */
    capi[0] = (void *)Py_NewRef(state->window_type);
    capi[1] = curses_capi_setupterm_called;
    capi[2] = curses_capi_initscr_called;
    capi[3] = curses_capi_start_color_called;
    return (void *)capi;
}

/* Release the pointer array created by curses_capi_new(). */
static void
curses_capi_free(void *capi)
{
    assert(capi != NULL);
    void **capi_ptr = (void **)capi;
    // In free-threaded builds, capi_ptr[0] may have been already cleared
    // by curses_capi_capsule_destructor(), hence the use of Py_XDECREF().
    Py_XDECREF(capi_ptr[0]);  // decref curses window type
    PyMem_Free(capi_ptr);
}
/* Module C API Capsule */

/* Capsule destructor: frees the pointer array and its window-type ref. */
static void
curses_capi_capsule_destructor(PyObject *op)
{
    void *capi = PyCapsule_GetPointer(op, PyCurses_CAPSULE_NAME);
    curses_capi_free(capi);
}

/* GC traverse hook: the capsule indirectly owns the window type. */
static int
curses_capi_capsule_traverse(PyObject *op, visitproc visit, void *arg)
{
    void **capi_ptr = PyCapsule_GetPointer(op, PyCurses_CAPSULE_NAME);
    assert(capi_ptr != NULL);
    Py_VISIT(capi_ptr[0]);  // visit curses window type
    return 0;
}

/* GC clear hook: drop the window-type reference to break cycles. */
static int
curses_capi_capsule_clear(PyObject *op)
{
    void **capi_ptr = PyCapsule_GetPointer(op, PyCurses_CAPSULE_NAME);
    assert(capi_ptr != NULL);
    Py_CLEAR(capi_ptr[0]);  // clear curses window type
    return 0;
}

/* Wrap the C API array in a GC-aware capsule.
   On success the capsule owns 'capi'; on failure the caller keeps it. */
static PyObject *
curses_capi_capsule_new(void *capi)
{
    PyObject *capsule = PyCapsule_New(capi, PyCurses_CAPSULE_NAME,
                                      curses_capi_capsule_destructor);
    if (capsule == NULL) {
        return NULL;
    }
    if (_PyCapsule_SetTraverse(capsule,
                               curses_capi_capsule_traverse,
                               curses_capi_capsule_clear) < 0)
    {
        Py_DECREF(capsule);
        return NULL;
    }
    return capsule;
}
/* Module initialization and cleanup functions */

/* GC traverse: visit the objects held in the per-module state. */
static int
cursesmodule_traverse(PyObject *mod, visitproc visit, void *arg)
{
    cursesmodule_state *state = get_cursesmodule_state(mod);
    Py_VISIT(state->error);
    Py_VISIT(state->window_type);
    return 0;
}

/* GC clear: drop the per-module state references. */
static int
cursesmodule_clear(PyObject *mod)
{
    cursesmodule_state *state = get_cursesmodule_state(mod);
    Py_CLEAR(state->error);
    Py_CLEAR(state->window_type);
    return 0;
}

/* Module finalizer: clears state and re-enables loading the module. */
static void
cursesmodule_free(void *mod)
{
    (void)cursesmodule_clear((PyObject *)mod);
    curses_module_loaded = 0;  // allow reloading once garbage-collected
}
/* Py_mod_exec slot: populate the module with the window type, the C API
   capsule, the curses.error exception, version info and the many symbolic
   constants (attributes, colors, mouse buttons, KEY_* codes).
   Returns 0 on success, -1 with an exception set on failure. */
static int
cursesmodule_exec(PyObject *module)
{
    /* curses keeps process-global state, so a second load is refused. */
    if (curses_module_loaded) {
        PyErr_SetString(PyExc_ImportError,
                        "module 'curses' can only be loaded once per process");
        return -1;
    }
    curses_module_loaded = 1;

    cursesmodule_state *state = get_cursesmodule_state(module);

    /* Initialize object type */
    state->window_type = (PyTypeObject *)PyType_FromModuleAndSpec(
        module, &PyCursesWindow_Type_spec, NULL);
    if (state->window_type == NULL) {
        return -1;
    }
    if (PyModule_AddType(module, state->window_type) < 0) {
        return -1;
    }

    /* Add some symbolic constants to the module */
    PyObject *module_dict = PyModule_GetDict(module);
    if (module_dict == NULL) {
        return -1;
    }

    /* Create the C API object */
    void *capi = curses_capi_new(state);
    if (capi == NULL) {
        return -1;
    }
    /* Add a capsule for the C API */
    PyObject *capi_capsule = curses_capi_capsule_new(capi);
    if (capi_capsule == NULL) {
        /* The capsule did not take ownership, so free 'capi' here. */
        curses_capi_free(capi);
        return -1;
    }
    int rc = PyDict_SetItemString(module_dict, "_C_API", capi_capsule);
    Py_DECREF(capi_capsule);
    if (rc < 0) {
        return -1;
    }

    /* For exception curses.error */
    state->error = PyErr_NewException("_curses.error", NULL, NULL);
    if (state->error == NULL) {
        return -1;
    }
    rc = PyDict_SetItemString(module_dict, "error", state->error);
    if (rc < 0) {
        return -1;
    }

    /* Make the version available */
    PyObject *curses_version = PyBytes_FromString(PyCursesVersion);
    if (curses_version == NULL) {
        return -1;
    }
    rc = PyDict_SetItemString(module_dict, "version", curses_version);
    if (rc < 0) {
        Py_DECREF(curses_version);
        return -1;
    }
    rc = PyDict_SetItemString(module_dict, "__version__", curses_version);
    Py_CLEAR(curses_version);
    if (rc < 0) {
        return -1;
    }

#ifdef NCURSES_VERSION
    /* ncurses_version */
    PyTypeObject *version_type;
    version_type = _PyStructSequence_NewType(&ncurses_version_desc,
                                             Py_TPFLAGS_DISALLOW_INSTANTIATION);
    if (version_type == NULL) {
        return -1;
    }
    PyObject *ncurses_version = make_ncurses_version(version_type);
    /* The instance keeps the type alive; our reference can go. */
    Py_DECREF(version_type);
    if (ncurses_version == NULL) {
        return -1;
    }
    rc = PyDict_SetItemString(module_dict, "ncurses_version", ncurses_version);
    Py_CLEAR(ncurses_version);
    if (rc < 0) {
        return -1;
    }
#endif /* NCURSES_VERSION */

/* Helper: add an int constant to the module dict, bailing out on error. */
#define SetDictInt(NAME, VALUE)                                     \
    do {                                                            \
        PyObject *value = PyLong_FromLong((long)(VALUE));           \
        if (value == NULL) {                                        \
            return -1;                                              \
        }                                                           \
        int rc = PyDict_SetItemString(module_dict, (NAME), value);  \
        Py_DECREF(value);                                           \
        if (rc < 0) {                                               \
            return -1;                                              \
        }                                                           \
    } while (0)

    SetDictInt("ERR", ERR);
    SetDictInt("OK", OK);

    /* Here are some attributes you can add to chars to print */
    SetDictInt("A_ATTRIBUTES", A_ATTRIBUTES);
    SetDictInt("A_NORMAL", A_NORMAL);
    SetDictInt("A_STANDOUT", A_STANDOUT);
    SetDictInt("A_UNDERLINE", A_UNDERLINE);
    SetDictInt("A_REVERSE", A_REVERSE);
    SetDictInt("A_BLINK", A_BLINK);
    SetDictInt("A_DIM", A_DIM);
    SetDictInt("A_BOLD", A_BOLD);
    SetDictInt("A_ALTCHARSET", A_ALTCHARSET);
    SetDictInt("A_INVIS", A_INVIS);
    SetDictInt("A_PROTECT", A_PROTECT);
    SetDictInt("A_CHARTEXT", A_CHARTEXT);
    SetDictInt("A_COLOR", A_COLOR);

    /* The following are never available with strict SYSV curses */
#ifdef A_HORIZONTAL
    SetDictInt("A_HORIZONTAL", A_HORIZONTAL);
#endif
#ifdef A_LEFT
    SetDictInt("A_LEFT", A_LEFT);
#endif
#ifdef A_LOW
    SetDictInt("A_LOW", A_LOW);
#endif
#ifdef A_RIGHT
    SetDictInt("A_RIGHT", A_RIGHT);
#endif
#ifdef A_TOP
    SetDictInt("A_TOP", A_TOP);
#endif
#ifdef A_VERTICAL
    SetDictInt("A_VERTICAL", A_VERTICAL);
#endif

    /* ncurses extension */
#ifdef A_ITALIC
    SetDictInt("A_ITALIC", A_ITALIC);
#endif

    SetDictInt("COLOR_BLACK", COLOR_BLACK);
    SetDictInt("COLOR_RED", COLOR_RED);
    SetDictInt("COLOR_GREEN", COLOR_GREEN);
    SetDictInt("COLOR_YELLOW", COLOR_YELLOW);
    SetDictInt("COLOR_BLUE", COLOR_BLUE);
    SetDictInt("COLOR_MAGENTA", COLOR_MAGENTA);
    SetDictInt("COLOR_CYAN", COLOR_CYAN);
    SetDictInt("COLOR_WHITE", COLOR_WHITE);

#ifdef NCURSES_MOUSE_VERSION
    /* Mouse-related constants */
    SetDictInt("BUTTON1_PRESSED", BUTTON1_PRESSED);
    SetDictInt("BUTTON1_RELEASED", BUTTON1_RELEASED);
    SetDictInt("BUTTON1_CLICKED", BUTTON1_CLICKED);
    SetDictInt("BUTTON1_DOUBLE_CLICKED", BUTTON1_DOUBLE_CLICKED);
    SetDictInt("BUTTON1_TRIPLE_CLICKED", BUTTON1_TRIPLE_CLICKED);
    SetDictInt("BUTTON2_PRESSED", BUTTON2_PRESSED);
    SetDictInt("BUTTON2_RELEASED", BUTTON2_RELEASED);
    SetDictInt("BUTTON2_CLICKED", BUTTON2_CLICKED);
    SetDictInt("BUTTON2_DOUBLE_CLICKED", BUTTON2_DOUBLE_CLICKED);
    SetDictInt("BUTTON2_TRIPLE_CLICKED", BUTTON2_TRIPLE_CLICKED);
    SetDictInt("BUTTON3_PRESSED", BUTTON3_PRESSED);
    SetDictInt("BUTTON3_RELEASED", BUTTON3_RELEASED);
    SetDictInt("BUTTON3_CLICKED", BUTTON3_CLICKED);
    SetDictInt("BUTTON3_DOUBLE_CLICKED", BUTTON3_DOUBLE_CLICKED);
    SetDictInt("BUTTON3_TRIPLE_CLICKED", BUTTON3_TRIPLE_CLICKED);
    SetDictInt("BUTTON4_PRESSED", BUTTON4_PRESSED);
    SetDictInt("BUTTON4_RELEASED", BUTTON4_RELEASED);
    SetDictInt("BUTTON4_CLICKED", BUTTON4_CLICKED);
    SetDictInt("BUTTON4_DOUBLE_CLICKED", BUTTON4_DOUBLE_CLICKED);
    SetDictInt("BUTTON4_TRIPLE_CLICKED", BUTTON4_TRIPLE_CLICKED);
    /* Button 5 only exists in newer mouse protocol versions. */
#if NCURSES_MOUSE_VERSION > 1
    SetDictInt("BUTTON5_PRESSED", BUTTON5_PRESSED);
    SetDictInt("BUTTON5_RELEASED", BUTTON5_RELEASED);
    SetDictInt("BUTTON5_CLICKED", BUTTON5_CLICKED);
    SetDictInt("BUTTON5_DOUBLE_CLICKED", BUTTON5_DOUBLE_CLICKED);
    SetDictInt("BUTTON5_TRIPLE_CLICKED", BUTTON5_TRIPLE_CLICKED);
#endif
    SetDictInt("BUTTON_SHIFT", BUTTON_SHIFT);
    SetDictInt("BUTTON_CTRL", BUTTON_CTRL);
    SetDictInt("BUTTON_ALT", BUTTON_ALT);
    SetDictInt("ALL_MOUSE_EVENTS", ALL_MOUSE_EVENTS);
    SetDictInt("REPORT_MOUSE_POSITION", REPORT_MOUSE_POSITION);
#endif

    /* Now set everything up for KEY_ variables */
    for (int keycode = KEY_MIN; keycode < KEY_MAX; keycode++) {
        const char *key_name = keyname(keycode);
        if (key_name == NULL || strcmp(key_name, "UNKNOWN KEY") == 0) {
            continue;
        }
        if (strncmp(key_name, "KEY_F(", 6) == 0) {
            /* Rewrite "KEY_F(n)" to the identifier-friendly "KEY_Fn" by
               dropping the parentheses. */
            char *fn_key_name = PyMem_Malloc(strlen(key_name) + 1);
            if (!fn_key_name) {
                PyErr_NoMemory();
                return -1;
            }
            const char *p1 = key_name;
            char *p2 = fn_key_name;
            while (*p1) {
                if (*p1 != '(' && *p1 != ')') {
                    *p2 = *p1;
                    p2++;
                }
                p1++;
            }
            *p2 = (char)0;
            PyObject *p_keycode = PyLong_FromLong((long)keycode);
            if (p_keycode == NULL) {
                PyMem_Free(fn_key_name);
                return -1;
            }
            int rc = PyDict_SetItemString(module_dict, fn_key_name, p_keycode);
            Py_DECREF(p_keycode);
            PyMem_Free(fn_key_name);
            if (rc < 0) {
                return -1;
            }
        }
        else {
            SetDictInt(key_name, keycode);
        }
    }
    SetDictInt("KEY_MIN", KEY_MIN);
    SetDictInt("KEY_MAX", KEY_MAX);
#undef SetDictInt
    return 0;
}
/* Initialization function for the module */

static PyModuleDef_Slot cursesmodule_slots[] = {
    {Py_mod_exec, cursesmodule_exec},
    /* curses keeps process-global state, so subinterpreters are refused. */
    {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED},
    {Py_mod_gil, Py_MOD_GIL_NOT_USED},
    {0, NULL}
};

static struct PyModuleDef cursesmodule = {
    PyModuleDef_HEAD_INIT,
    .m_name = "_curses",
    .m_size = sizeof(cursesmodule_state),
    .m_methods = cursesmodule_methods,
    .m_slots = cursesmodule_slots,
    .m_traverse = cursesmodule_traverse,
    .m_clear = cursesmodule_clear,
    .m_free = cursesmodule_free
};

/* Multi-phase init: returns the module definition; the interpreter then
   runs the Py_mod_exec slot above. */
PyMODINIT_FUNC
PyInit__curses(void)
{
    return PyModuleDef_Init(&cursesmodule);
}
from __future__ import absolute_import
import abc
class MinHashIndexBackendTestMixin(object):
    """Shared behavioural tests for MinHash similarity-index backends.

    Concrete test cases mix this in and supply the backend under test via
    the abstract ``index`` property.

    NOTE(review): ``__meta__`` is not a recognised attribute -- it does not
    install ``ABCMeta`` as the metaclass (that would be ``__metaclass__`` on
    Python 2, or ``metaclass=`` on Python 3), so the abstract declarations
    below are not actually enforced. Confirm whether enforcement was
    intended.
    """

    __meta__ = abc.ABCMeta

    @abc.abstractproperty
    def index(self):
        # The MinHash index backend under test; must be provided by the
        # mixing-in test case.
        pass

    def test_basic(self):
        """Record, compare, classify and delete against a single index."""
        self.index.record("example", "1", [("index", "hello world")])
        self.index.record("example", "2", [("index", "hello world")])
        self.index.record("example", "3", [("index", "jello world")])
        self.index.record("example", "4", [("index", "yellow world"), ("index", "mellow world")])
        self.index.record("example", "5", [("index", "pizza world")])

        # comparison, without thresholding
        results = self.index.compare("example", "1", [("index", 0)])
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents
        assert results[2][0] in ("3", "4")  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ("3", "4")
        assert results[4][0] == "5"

        # comparison, low threshold
        results = self.index.compare("example", "1", [("index", 6)])
        assert len(results) == 4
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents
        assert results[2][0] in ("3", "4")  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ("3", "4")

        # comparison, high threshold (exact match)
        results = self.index.compare("example", "1", [("index", self.index.bands)])
        assert len(results) == 2
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents

        # comparison, candidate limit (with lexicographical collision sort)
        results = self.index.compare("example", "1", [("index", 0)], limit=1)
        assert len(results) == 1
        assert results[0] == ("1", [1.0])

        # classification, without thresholding
        results = self.index.classify("example", [("index", 0, "hello world")])
        assert results[0:2] == [("1", [1.0]), ("2", [1.0])]
        assert results[2][0] in ("3", "4")  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ("3", "4")
        assert results[4][0] == "5"

        # classification, low threshold
        results = self.index.classify("example", [("index", 6, "hello world")])
        assert len(results) == 4
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents
        assert results[2][0] in ("3", "4")  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ("3", "4")

        # classification, high threshold (exact match)
        results = self.index.classify("example", [("index", self.index.bands, "hello world")])
        assert len(results) == 2
        assert results[0] == ("1", [1.0])
        assert results[1] == ("2", [1.0])  # identical contents

        # classification, candidate limit (with lexicographical collision sort)
        results = self.index.classify("example", [("index", 0, "hello world")], limit=1)
        assert len(results) == 1
        assert results[0] == ("1", [1.0])

        # deleting a key removes it from subsequent comparisons
        self.index.delete("example", [("index", "3")])
        assert [key for key, _ in self.index.compare("example", "1", [("index", 0)])] == [
            "1",
            "2",
            "4",
            "5",
        ]

    def test_multiple_index(self):
        """Compare/classify across two indices, including empty-query handling."""
        self.index.record("example", "1", [("index:a", "hello world"), ("index:b", "hello world")])
        self.index.record("example", "2", [("index:a", "hello world"), ("index:b", "hello world")])
        self.index.record("example", "3", [("index:a", "hello world"), ("index:b", "pizza world")])
        self.index.record("example", "4", [("index:a", "hello world")])
        self.index.record("example", "5", [("index:b", "hello world")])

        # comparison, without thresholding
        results = self.index.compare("example", "1", [("index:a", 0), ("index:b", 0)])
        assert len(results) == 5
        assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
        assert results[2][0] == "3"
        assert results[2][1][0] == 1.0
        assert results[3] == ("4", [1.0, 0.0])
        assert results[4] == ("5", [0.0, 1.0])

        # comparison, candidate limit (with lexicographical collision sort)
        results = self.index.compare("example", "1", [("index:a", 0), ("index:b", 0)], limit=4)
        assert len(results) == 4
        assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
        assert results[2][0] == "3"
        assert results[2][1][0] == 1.0
        assert results[3] == ("4", [1.0, 0.0])

        # classification, without thresholding
        results = self.index.classify(
            "example", [("index:a", 0, "hello world"), ("index:b", 0, "hello world")]
        )
        assert len(results) == 5
        assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
        assert results[2][0] == "3"
        assert results[2][1][0] == 1.0
        assert results[3] == ("4", [1.0, 0.0])
        assert results[4] == ("5", [0.0, 1.0])

        # classification, with thresholding (low)
        results = self.index.classify(
            "example",
            [
                ("index:a", self.index.bands, "pizza world"),  # no direct hits
                ("index:b", 8, "pizza world"),  # one direct hit
            ],
        )
        assert len(results) == 1
        assert results[0][0] == "3"
        # this should have a value since it's similar even thought it was not
        # considered as a candidate for this index
        assert results[0][1][0] > 0
        assert results[0][1][1] == 1.0

        # classification, with thresholding (high)
        results = self.index.classify(
            "example",
            [
                ("index:a", self.index.bands, "pizza world"),  # no direct hits
                ("index:b", self.index.bands, "hello world"),  # 3 direct hits
            ],
        )
        assert len(results) == 3
        assert results[0][0] == "1"  # tie btw first 2 items is broken by lex sort
        assert results[0][1][0] > 0
        assert results[0][1][1] == 1.0
        assert results[1][0] == "2"
        assert results[1][1][0] > 0
        assert results[1][1][1] == 1.0
        assert results[2] == ("5", [0.0, 1.0])

        # classification, candidate limit (with lexicographical collision sort)
        results = self.index.classify(
            "example", [("index:a", 0, "hello world"), ("index:b", 0, "hello world")], limit=4
        )
        assert len(results) == 4
        assert results[:2] == [("1", [1.0, 1.0]), ("2", [1.0, 1.0])]
        assert results[2][0] == "3"
        assert results[2][1][0] == 1.0
        assert results[3] == ("4", [1.0, 0.0])

        # empty query: an empty feature yields None similarity for that index
        assert (
            self.index.classify("example", [("index:a", 0, "hello world"), ("index:b", 0, "")])
            == self.index.compare("example", "4", [("index:a", 0), ("index:b", 0)])
            == [("4", [1.0, None]), ("1", [1.0, 0.0]), ("2", [1.0, 0.0]), ("3", [1.0, 0.0])]
        )

    def test_merge(self):
        """Merging combines signatures; merging into an empty key is a move."""
        self.index.record("example", "1", [("index", ["foo", "bar"])])
        self.index.record("example", "2", [("index", ["baz"])])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]

        self.index.merge("example", "1", [("index", "2")])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [0.5])]

        # merge into an empty key should act as a move
        self.index.merge("example", "2", [("index", "1")])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("2", [0.5])]

    def test_flush_scoped(self):
        """Flushing a single scope empties that scope's index."""
        self.index.record("example", "1", [("index", ["foo", "bar"])])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
        self.index.flush("example", ["index"])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == []

    def test_flush_unscoped(self):
        """Flushing with the '*' wildcard empties the index across scopes."""
        self.index.record("example", "1", [("index", ["foo", "bar"])])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == [("1", [1.0])]
        self.index.flush("*", ["index"])
        assert self.index.classify("example", [("index", 0, ["foo", "bar"])]) == []

    @abc.abstractmethod
    def test_export_import(self):
        # Backend-specific: must round-trip exported state through import.
        pass
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Pure-Python RC4 implementation."""
from .rc4 import RC4
from .cryptomath import *
def new(key):
    """Create a new pure-Python RC4 cipher object keyed with *key*.

    *key* is indexed byte-wise (e.g. a bytearray); see Python_RC4.
    """
    return Python_RC4(key)
class Python_RC4(RC4):
    """Pure-Python RC4 stream cipher (key-scheduling + PRGA)."""

    def __init__(self, keyBytes):
        RC4.__init__(self, keyBytes, "python")
        # Key-scheduling algorithm: permute the 256-entry state array
        # according to the key bytes.
        state = list(range(256))
        swap_index = 0
        key_length = len(keyBytes)
        for position in range(256):
            swap_index = (swap_index + state[position] +
                          keyBytes[position % key_length]) % 256
            state[position], state[swap_index] = state[swap_index], state[position]
        self.S = state
        self.i = 0
        self.j = 0

    def encrypt(self, plaintextBytes):
        """XOR each input byte with the next keystream byte (PRGA)."""
        output = plaintextBytes[:]
        S = self.S
        i, j = self.i, self.j
        for position in range(len(output)):
            i = (i + 1) % 256
            j = (j + S[i]) % 256
            S[i], S[j] = S[j], S[i]
            output[position] ^= S[(S[i] + S[j]) % 256]
        # Persist the generator state so successive calls continue the stream.
        self.i, self.j = i, j
        return output

    def decrypt(self, ciphertext):
        """RC4 is symmetric: decryption is the same keystream XOR."""
        return self.encrypt(ciphertext)
#!/usr/bin/env python
# Copyright (C) 2013-2014 Jean-Francois Romang (jromang@posteo.de)
# Shivkumar Shivaji ()
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
from flask import Flask
import tornado.web
import tornado.wsgi
from tornado.websocket import WebSocketHandler
from tornado.ioloop import IOLoop
from multiprocessing.pool import ThreadPool
from utilities import *
import queue
from web.picoweb import picoweb as pw
import chess.pgn as pgn
import json
import datetime
# Worker pool used by WebDisplay.run_background to run jobs off the
# tornado I/O loop thread.
_workers = ThreadPool(5)
class ChannelHandler(tornado.web.RequestHandler):
    """Receives spectator actions over HTTP POST and rebroadcasts positions
    (as PGN + FEN) to every connected WebSocket client."""

    def initialize(self, shared=None):
        # Dict shared with the other handlers and the display thread.
        self.shared = shared

    def create_game_header(self, game):
        """Populate the standard PGN headers from the shared system/game info.

        BUGFIX: post() below called self.create_game_header(), but no such
        method existed on this class (it lived on WebDisplay), so every
        'broadcast' action raised AttributeError. This mirrors that method,
        reading the same self.shared state.
        """
        game.headers["Result"] = "*"
        game.headers["White"] = "User"
        game.headers["WhiteElo"] = "*"
        game.headers["BlackElo"] = "2900"
        game.headers["Black"] = "Picochess"
        game.headers["Event"] = "Game"
        game.headers["EventDate"] = datetime.datetime.now().date().strftime('%Y-%m-%d')
        game.headers["Site"] = "Pi"
        if 'system_info' in self.shared:
            game.headers["Site"] = self.shared['system_info']['location']
        if 'game_info' in self.shared:
            game.headers["Black"] = "Picochess" if "mode_string" in self.shared["game_info"] and self.shared["game_info"]["mode_string"] == Mode.PLAY_BLACK else "User"
            game.headers["White"] = "Picochess" if game.headers["Black"] == "User" else "User"
            comp_color = "Black" if game.headers["Black"] == "Picochess" else "White"
            if "level" in self.shared["game_info"]:
                game.headers[comp_color + "Elo"] = "Level {0}".format(self.shared["game_info"]["level"])
            else:
                game.headers[comp_color + "Elo"] = "2900"
            if "time_control_string" in self.shared["game_info"]:
                game.headers["Event"] = "Time " + self.shared["game_info"]["time_control_string"]

    def post(self):
        action = self.get_argument("action")
        if action == 'broadcast':
            fen = self.get_argument("fen")
            # moveStack arrives as a JSON-encoded list of SAN moves.
            move_stack = self.get_argument("moveStack")
            move_stack = json.loads(move_stack)
            game = pgn.Game()
            self.create_game_header(game)
            # Replay the SAN moves into a linear PGN variation.
            tmp = game
            for move in move_stack:
                tmp = tmp.add_variation(tmp.board().parse_san(move))
            exporter = pgn.StringExporter()
            game.export(exporter, headers=True, comments=False, variations=False)
            r = {'type': 'broadcast', 'msg': 'Received position from Spectators!', 'pgn': str(exporter), 'fen': fen}
            EventHandler.write_to_clients(r)
class EventHandler(WebSocketHandler):
    """WebSocket endpoint used to push events to every connected browser."""

    # All currently open sockets, shared across handler instances.
    clients = set()

    def initialize(self, shared=None):
        self.shared = shared

    def open(self):
        # Register this connection so broadcasts reach it.
        EventHandler.clients.add(self)

    def on_close(self):
        # Drop the closed connection from the broadcast set.
        EventHandler.clients.remove(self)

    @classmethod
    def write_to_clients(cls, msg):
        """Send *msg* to every connected client."""
        for connection in cls.clients:
            connection.write_message(msg)
class DGTHandler(tornado.web.RequestHandler):
    """Serves the most recently cached DGT move message."""

    def initialize(self, shared=None):
        self.shared = shared

    def get(self, *args, **kwargs):
        requested = self.get_argument("action")
        # Any other action is silently ignored (empty response).
        if requested != "get_last_move":
            return
        self.write(self.shared['last_dgt_move_msg'])
class InfoHandler(tornado.web.RequestHandler):
    """Serves the cached system information dict."""

    def initialize(self, shared=None):
        self.shared = shared

    def get(self, *args, **kwargs):
        requested = self.get_argument("action")
        # Any other action is silently ignored (empty response).
        if requested != "get_system_info":
            return
        self.write(self.shared['system_info'])
class PGNHandler(tornado.web.RequestHandler):
    """Offers the current game as a downloadable PGN file."""

    def initialize(self, shared=None):
        self.shared = shared

    def get(self, *args, **kwargs):
        requested = self.get_argument("action")
        # Any other action is silently ignored (empty response).
        if requested != "get_pgn_file":
            return
        # Mark the response as an attachment so browsers save it as game.pgn.
        self.set_header('Content-Type', 'text/pgn')
        self.set_header('Content-Disposition', 'attachment; filename=game.pgn')
        self.write(self.shared['last_dgt_move_msg']['pgn'])
class WebServer(Observable, threading.Thread):
    """HTTP/WebSocket front-end: binds port 80 and serves the picoweb app
    plus the JSON/WebSocket endpoints defined above."""

    def __init__(self):
        shared = {}
        # The display thread consumes engine/board messages and caches the
        # state that the request handlers read out of `shared`.
        WebDisplay(shared).start()
        super(WebServer, self).__init__()
        wsgi_app = tornado.wsgi.WSGIContainer(pw)
        application = tornado.web.Application([
            (r'/event', EventHandler, dict(shared=shared)),
            (r'/dgt', DGTHandler, dict(shared=shared)),
            (r'/pgn', PGNHandler, dict(shared=shared)),
            (r'/info', InfoHandler, dict(shared=shared)),
            (r'/channel', ChannelHandler, dict(shared=shared)),
            # Everything else falls through to the picoweb Flask app.
            (r'.*', tornado.web.FallbackHandler, {'fallback': wsgi_app})
        ])
        # NOTE(review): binding port 80 usually requires elevated
        # privileges -- confirm deployment expectations.
        application.listen(80)

    def run(self):
        # Blocks forever servicing the tornado I/O loop.
        IOLoop.instance().start()
class WebDisplay(Display, threading.Thread):
    """Consumes engine/board messages and mirrors them to web clients.

    Runs as a thread that drains ``self.message_queue`` (provided by the
    ``Display`` base class -- TODO confirm) and schedules the handling of
    each message on the tornado I/O loop.
    """

    def __init__(self, shared):
        super(WebDisplay, self).__init__()
        # Mutable dict shared with the HTTP handlers; holds cached state
        # such as 'last_dgt_move_msg', 'system_info' and 'game_info'.
        self.shared = shared

    @staticmethod
    def run_background(func, callback, args=(), kwds=None):
        """Run *func* on the worker pool and deliver its result to
        *callback* on the tornado I/O loop thread."""
        if not kwds:
            kwds = {}

        def _callback(result):
            # Hop back onto the I/O loop before invoking the callback.
            IOLoop.instance().add_callback(lambda: callback(result))

        _workers.apply_async(func, args, kwds, _callback)

    def create_game_header(self, game):
        """Fill in the standard PGN headers from the shared game/system info."""
        game.headers["Result"] = "*"
        game.headers["White"] = "User"
        game.headers["WhiteElo"] = "*"
        game.headers["BlackElo"] = "2900"
        game.headers["Black"] = "Picochess"
        game.headers["Event"] = "Game"
        game.headers["EventDate"] = datetime.datetime.now().date().strftime('%Y-%m-%d')
        game.headers["Site"] = "Pi"
        if 'system_info' in self.shared:
            game.headers["Site"] = self.shared['system_info']['location']
        if 'game_info' in self.shared:
            # Which side the engine plays decides the White/Black names.
            game.headers["Black"] = "Picochess" if "mode_string" in self.shared["game_info"] and self.shared["game_info"]["mode_string"] == Mode.PLAY_BLACK else "User"
            game.headers["White"] = "Picochess" if game.headers["Black"] == "User" else "User"
            comp_color = "Black" if game.headers["Black"] == "Picochess" else "White"
            if "level" in self.shared["game_info"]:
                game.headers[comp_color + "Elo"] = "Level {0}".format(self.shared["game_info"]["level"])
            else:
                game.headers[comp_color + "Elo"] = "2900"
            if "time_control_string" in self.shared["game_info"]:
                game.headers["Event"] = "Time " + self.shared["game_info"]["time_control_string"]

    def create_game_info(self):
        """Ensure the shared 'game_info' sub-dict exists."""
        if 'game_info' not in self.shared:
            self.shared['game_info'] = {}

    def task(self, message):
        """Handle one message from the queue; runs on the I/O loop thread.

        NOTE(review): messages are compared with ``==`` against Message/Event
        members from the wildcard `utilities` import -- presumably those
        enums implement value-based equality; confirm against utilities.py.
        """
        if message == Message.BOOK_MOVE:
            EventHandler.write_to_clients({'msg': 'Book move'})
        elif message == Message.UCI_OPTION_LIST:
            self.shared['uci_options'] = message.options
        elif message == Message.SYSTEM_INFO:
            self.shared['system_info'] = message.info
        elif message == Event.OPENING_BOOK:  # Process opening book
            self.create_game_info()
            self.shared['game_info']['book'] = message.book
        elif message == Event.SET_MODE:  # Process interaction mode
            self.create_game_info()
            self.shared['game_info']['mode_string'] = message.mode_string
        elif message == Event.SET_TIME_CONTROL:
            self.create_game_info()
            self.shared['game_info']['time_control_string'] = message.time_control_string
        elif message == Event.LEVEL:
            self.create_game_info()
            self.shared['game_info']['level'] = message.level
        elif message == Message.START_NEW_GAME:
            EventHandler.write_to_clients({'msg': 'New game'})
        elif message == Message.SEARCH_STARTED:
            EventHandler.write_to_clients({'msg': 'Thinking..'})
        elif message == Message.COMPUTER_MOVE or message == Message.USER_MOVE or message == Message.REVIEW_MODE_MOVE:
            # Rebuild the full PGN from the game's move stack and push the
            # new position (PGN + FEN) to all connected clients.
            game = pgn.Game()
            self.create_game_header(game)
            tmp = game
            move_stack = message.game.move_stack
            for move in move_stack:
                tmp = tmp.add_variation(move)
            exporter = pgn.StringExporter()
            game.export(exporter, headers=True, comments=False, variations=False)
            fen = message.game.fen()
            pgn_str = str(exporter)
            r = {'move': str(message.move), 'pgn': pgn_str, 'fen': fen}
            if message == Message.COMPUTER_MOVE:
                r['msg'] = 'Computer move: ' + str(message.move)
            elif message == Message.USER_MOVE:
                r['msg'] = 'User move: ' + str(message.move)
            # Cache the last move so DGTHandler/PGNHandler can serve it.
            self.shared['last_dgt_move_msg'] = r
            EventHandler.write_to_clients(r)

    def create_task(self, msg):
        # Defer handling onto the tornado I/O loop thread.
        IOLoop.instance().add_callback(callback=lambda: self.task(msg))

    def run(self):
        """Thread main loop: forward every queued message to the I/O loop."""
        while True:
            # Check if we have something to display
            message = self.message_queue.get()
            self.create_task(message)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
###########################################################
#
# Simple executor script for Batch class methods.
#
# The script is concatenated on the fly with the required
# batch system class definition
#
# 15.11.2014
# Author: A.T.
#
###########################################################
# Script template appended after a Batch-system class definition (see the
# header comment above): it decodes the URL-quoted JSON argument, invokes
# the requested method on the named batch class, and prints the URL-quoted
# JSON result after a start marker for the caller to parse.
executeBatchContent = """
if __name__ == "__main__":

  import sys
  import json
  from six.moves.urllib.parse import quote as urlquote
  from six.moves.urllib.parse import unquote as urlunquote

  arguments = sys.argv[1]
  inputDict = json.loads(urlunquote(arguments))

  method = inputDict.pop('Method')
  batchSystem = inputDict.pop('BatchSystem')
  batch = locals()[batchSystem]()

  try:
    result = getattr(batch, method)(**inputDict)
  except Exception as x:
    result = 'Exception: %s' % str(x)

  resultJson = urlquote(json.dumps(result))
  print("============= Start output ===============")
  print(resultJson)
"""
import json
import html5lib
def parse(path="html5ents.xml"):
    """Parse the W3C named-entity table into an lxml-backed tree.

    BUGFIX: the original passed an open() result straight to html5lib and
    leaked the file handle; the context manager closes it once the parse
    (which reads the stream eagerly) has finished.
    """
    with open(path) as fp:
        return html5lib.parse(fp, treebuilder="lxml")
def entity_table(tree):
    """Map entity name -> replacement characters, one entry per table row."""
    namespaces = {"h": "http://www.w3.org/1999/xhtml"}
    table = {}
    for row in tree.xpath("//h:tbody/h:tr", namespaces=namespaces):
        # Column 0 holds the entity name, column 1 the codepoint list.
        name = entity_name("".join(row[0].xpath(".//text()")))
        table[name] = entity_characters(row[1].text)
    return table
def entity_name(inp):
return inp.strip()
def entity_characters(inp):
return "".join(codepoint_to_character(item)
for item in inp.split()
if item)
def codepoint_to_character(inp):
return ("\\U000" + inp[2:]).decode("unicode-escape")
def make_tests_json(entities):
test_list = make_test_list(entities)
tests_json = {"tests":
[make_test(*item) for item in test_list]
}
return tests_json
def make_test(name, characters, good):
return {
"description": test_description(name, good),
"input": "&%s" % name,
"output": test_expected(name, characters, good)
}
def test_description(name, good):
with_semicolon = name.endswith(";")
semicolon_text = {True: "with a semi-colon",
False: "without a semi-colon"}[with_semicolon]
if good:
text = "Named entity: %s %s" % (name, semicolon_text)
else:
text = "Bad named entity: %s %s" % (name, semicolon_text)
return text
def test_expected(name, characters, good):
rv = []
if not good or not name.endswith(";"):
rv.append("ParseError")
rv.append(["Character", characters])
return rv
def make_test_list(entities):
tests = []
for entity_name, characters in entities.items():
if entity_name.endswith(";") and not subentity_exists(entity_name, entities):
tests.append((entity_name[:-1], "&" + entity_name[:-1], False))
tests.append((entity_name, characters, True))
return sorted(tests)
def subentity_exists(entity_name, entities):
for i in range(1, len(entity_name)):
if entity_name[:-i] in entities:
return True
return False
def make_entities_code(entities):
entities_text = "\n".join(" \"%s\": u\"%s\"," % (
name, entities[name].encode(
"unicode-escape").replace("\"", "\\\""))
for name in sorted(entities.keys()))
return """entities = {
%s
}""" % entities_text
def main():
entities = entity_table(parse())
tests_json = make_tests_json(entities)
json.dump(tests_json, open("namedEntities.test", "w"), indent=4)
code = make_entities_code(entities)
open("entities_constants.py", "w").write(code)
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
package kotlinx.coroutines.flow
import kotlinx.coroutines.testing.*
import kotlinx.coroutines.*
import kotlinx.coroutines.channels.*
import kotlin.test.*
import kotlin.time.Duration.Companion.milliseconds
class DebounceTest : TestBase() {
@Test
fun testBasic() = withVirtualTime {
expect(1)
val flow = flow {
expect(3)
emit("A")
delay(1500)
emit("B")
delay(500)
emit("C")
delay(250)
emit("D")
delay(2000)
emit("E")
expect(4)
}
expect(2)
val result = flow.debounce(1000).toList()
assertEquals(listOf("A", "D", "E"), result)
finish(5)
}
@Test
fun testSingleNull() = runTest {
val flow = flowOf<Int?>(null).debounce(Long.MAX_VALUE)
assertNull(flow.single())
}
@Test
fun testBasicWithNulls() = withVirtualTime {
expect(1)
val flow = flow {
expect(3)
emit("A")
delay(1500)
emit("B")
delay(500)
emit("C")
delay(250)
emit(null)
delay(2000)
emit(null)
expect(4)
}
expect(2)
val result = flow.debounce(1000).toList()
assertEquals(listOf("A", null, null), result)
finish(5)
}
@Test
fun testEmpty() = runTest {
val flow = emptyFlow<Int>().debounce(Long.MAX_VALUE)
assertNull(flow.singleOrNull())
}
@Test
fun testScalar() = withVirtualTime {
val flow = flowOf(1, 2, 3).debounce(1000)
assertEquals(3, flow.single())
finish(1)
}
@Test
fun testPace() = withVirtualTime {
val flow = flow {
expect(1)
repeat(10) {
emit(-it)
delay(99)
}
repeat(10) {
emit(it)
delay(101)
}
expect(2)
}.debounce(100)
assertEquals((0..9).toList(), flow.toList())
finish(3)
}
@Test
fun testUpstreamError()= testUpstreamError(TimeoutCancellationException(""))
@Test
fun testUpstreamErrorCancellation() = testUpstreamError(TimeoutCancellationException(""))
private inline fun <reified T: Throwable> testUpstreamError(cause: T) = runTest {
val latch = Channel<Unit>()
val flow = flow {
expect(1)
emit(1)
expect(2)
latch.receive()
throw cause
}.debounce(1).map {
latch.send(Unit)
hang { expect(3) }
}
assertFailsWith<T>(flow)
finish(4)
}
@Test
fun testUpstreamErrorIsolatedContext() = runTest {
val latch = Channel<Unit>()
val flow = flow {
assertEquals("upstream", NamedDispatchers.name())
expect(1)
emit(1)
expect(2)
latch.receive()
throw TestException()
}.flowOn(NamedDispatchers("upstream")).debounce(1).map {
latch.send(Unit)
hang { expect(3) }
}
assertFailsWith<TestException>(flow)
finish(4)
}
@Test
fun testUpstreamErrorDebounceNotTriggered() = runTest {
val flow = flow {
expect(1)
emit(1)
expect(2)
throw TestException()
}.debounce(Long.MAX_VALUE).map {
expectUnreached()
}
assertFailsWith<TestException>(flow)
finish(3)
}
@Test
fun testUpstreamErrorDebounceNotTriggeredInIsolatedContext() = runTest {
val flow = flow {
expect(1)
emit(1)
expect(2)
throw TestException()
}.flowOn(NamedDispatchers("source")).debounce(Long.MAX_VALUE).map {
expectUnreached()
}
assertFailsWith<TestException>(flow)
finish(3)
}
@Test
fun testDownstreamError() = runTest {
val flow = flow {
expect(1)
emit(1)
hang { expect(3) }
}.debounce(100).map {
expect(2)
yield()
throw TestException()
}
assertFailsWith<TestException>(flow)
finish(4)
}
@Test
fun testDownstreamErrorIsolatedContext() = runTest {
val flow = flow {
assertEquals("upstream", NamedDispatchers.name())
expect(1)
emit(1)
hang { expect(3) }
}.flowOn(NamedDispatchers("upstream")).debounce(100).map {
expect(2)
yield()
throw TestException()
}
assertFailsWith<TestException>(flow)
finish(4)
}
@Test
fun testDurationBasic() = withVirtualTime {
expect(1)
val flow = flow {
expect(3)
emit("A")
delay(1500.milliseconds)
emit("B")
delay(500.milliseconds)
emit("C")
delay(250.milliseconds)
emit("D")
delay(2000.milliseconds)
emit("E")
expect(4)
}
expect(2)
val result = flow.debounce(1000.milliseconds).toList()
assertEquals(listOf("A", "D", "E"), result)
finish(5)
}
@Test
fun testDebounceSelectorBasic() = withVirtualTime {
expect(1)
val flow = flow {
expect(3)
emit(1)
delay(90)
emit(2)
delay(90)
emit(3)
delay(1010)
emit(4)
delay(1010)
emit(5)
expect(4)
}
expect(2)
val result = flow.debounce {
if (it == 1) {
0
} else {
1000
}
}.toList()
assertEquals(listOf(1, 3, 4, 5), result)
finish(5)
}
@Test
fun testZeroDebounceTime() = withVirtualTime {
expect(1)
val flow = flow {
expect(3)
emit("A")
emit("B")
emit("C")
expect(4)
}
expect(2)
val result = flow.debounce(0).toList()
assertEquals(listOf("A", "B", "C"), result)
finish(5)
}
@Test
fun testZeroDebounceTimeSelector() = withVirtualTime {
expect(1)
val flow = flow {
expect(3)
emit("A")
emit("B")
expect(4)
}
expect(2)
val result = flow.debounce { 0 }.toList()
assertEquals(listOf("A", "B"), result)
finish(5)
}
@Test
fun testDebounceDurationSelectorBasic() = withVirtualTime {
expect(1)
val flow = flow {
expect(3)
emit("A")
delay(1500.milliseconds)
emit("B")
delay(500.milliseconds)
emit("C")
delay(250.milliseconds)
emit("D")
delay(2000.milliseconds)
emit("E")
expect(4)
}
expect(2)
val result = flow.debounce {
if (it == "C") {
0.milliseconds
} else {
1000.milliseconds
}
}.toList()
assertEquals(listOf("A", "C", "D", "E"), result)
finish(5)
}
@Test
fun testFailsWithIllegalArgument() {
val flow = emptyFlow<Int>()
assertFailsWith<IllegalArgumentException> { flow.debounce(-1) }
}
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | kotlinx-coroutines-core/common/test/flow/operators/DebounceTest.kt |
#! /usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import traceback
from org.apache.nifi.processor import Processor
from org.apache.nifi.processor import Relationship
from org.apache.nifi.components import PropertyDescriptor
from org.apache.nifi.processor.util import StandardValidators
class UpdateAttributes(Processor) :
__rel_success = Relationship.Builder().description("Success").name("success").build()
def __init__(self) :
pass
def initialize(self, context) :
pass
def getRelationships(self) :
return set([self.__rel_success])
def validate(self, context) :
pass
def getPropertyDescriptors(self) :
descriptor = PropertyDescriptor.Builder().name("for-attributes").addValidator(StandardValidators.NON_EMPTY_VALIDATOR).build()
return [descriptor]
def onPropertyModified(self, descriptor, newValue, oldValue) :
pass
def onTrigger(self, context, sessionFactory) :
session = sessionFactory.createSession()
try :
# ensure there is work to do
flowfile = session.get()
if flowfile is None :
return
# extract some attribute values
fromPropertyValue = context.getProperty("for-attributes").getValue()
fromAttributeValue = flowfile.getAttribute("for-attributes")
# set an attribute
flowfile = session.putAttribute(flowfile, "from-property", fromPropertyValue)
flowfile = session.putAttribute(flowfile, "from-attribute", fromAttributeValue)
# transfer
session.transfer(flowfile, self.__rel_success)
session.commit()
except :
print sys.exc_info()[0]
print "Exception in TestReader:"
print '-' * 60
traceback.print_exc(file=sys.stdout)
print '-' * 60
session.rollback(true)
raise
processor = UpdateAttributes() | unknown | codeparrot/codeparrot-clean | ||
import unittest
import numpy as np
import snappy
#JAI = snappy.jpy.get_type('javax.media.jai.JAI')
#JAI.getDefaultInstance().getTileCache().setMemoryCapacity(128 * 1000 * 1000)
test_product_file = './MER_RR__1P.N1'
class TestBeamIO(unittest.TestCase):
def setUp(self):
self.product = snappy.ProductIO.readProduct(test_product_file)
self.assertIsNotNone(self.product)
def tearDown(self):
self.product.dispose()
def test_readPixels_performance(self):
w = self.product.getSceneRasterWidth()
h = self.product.getSceneRasterHeight()
b = self.product.getBand('radiance_13')
a = np.zeros(w, dtype=np.float32)
import time
t0 = time.time()
for y in range(h):
b.readPixels(0, 0, w, 1, a)
t1 = time.time()
dt = t1 - t0
print('Band.readPixels(): w =', w, ', dtype=np.float32:', h, 'calls in', dt*1000, 'ms, that is ', dt*1000/y, 'ms per call')
def test_readValidMask_performance(self):
w = self.product.getSceneRasterWidth()
h = self.product.getSceneRasterHeight()
b = self.product.getBand('radiance_13')
a = np.zeros(w, dtype=np.bool)
import time
t0 = time.time()
for y in range(h):
b.readValidMask(0, 0, w, 1, a)
t1 = time.time()
dt = t1 - t0
print('Band.readValidMask(): w =', w, ', dtype=np.bool:', h, 'calls in', dt*1000, 'ms, that is ', dt*1000/y, 'ms per call')
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'looper.ui'
#
# Created: Sun Aug 31 11:04:09 2014
# by: PyQt5 UI code generator 5.3.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(530, 420)
MainWindow.setMinimumSize(QtCore.QSize(530, 420))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 10, 531, 381))
self.tabWidget.setObjectName("tabWidget")
self.loopTab = QtWidgets.QWidget()
self.loopTab.setObjectName("loopTab")
self.frame = QtWidgets.QFrame(self.loopTab)
self.frame.setGeometry(QtCore.QRect(10, 10, 511, 291))
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.loopListView = QtWidgets.QListView(self.frame)
self.loopListView.setGeometry(QtCore.QRect(10, 10, 491, 271))
self.loopListView.setObjectName("loopListView")
self.addLoopButton = QtWidgets.QToolButton(self.loopTab)
self.addLoopButton.setGeometry(QtCore.QRect(480, 310, 23, 25))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.addLoopButton.setFont(font)
self.addLoopButton.setObjectName("addLoopButton")
self.removeLoopButton = QtWidgets.QToolButton(self.loopTab)
self.removeLoopButton.setGeometry(QtCore.QRect(500, 310, 23, 25))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.removeLoopButton.setFont(font)
self.removeLoopButton.setObjectName("removeLoopButton")
self.tabWidget.addTab(self.loopTab, "")
self.mappingTab = QtWidgets.QWidget()
self.mappingTab.setObjectName("mappingTab")
self.frame_2 = QtWidgets.QFrame(self.mappingTab)
self.frame_2.setGeometry(QtCore.QRect(10, 10, 511, 291))
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.mappingTableView = QtWidgets.QTableView(self.frame_2)
self.mappingTableView.setGeometry(QtCore.QRect(10, 10, 491, 271))
self.mappingTableView.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.mappingTableView.setTextElideMode(QtCore.Qt.ElideNone)
self.mappingTableView.setCornerButtonEnabled(False)
self.mappingTableView.setObjectName("mappingTableView")
self.mappingTableView.verticalHeader().setVisible(False)
self.addMappingButton = QtWidgets.QToolButton(self.mappingTab)
self.addMappingButton.setGeometry(QtCore.QRect(480, 310, 23, 25))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.addMappingButton.setFont(font)
self.addMappingButton.setObjectName("addMappingButton")
self.removeMappingButton = QtWidgets.QToolButton(self.mappingTab)
self.removeMappingButton.setGeometry(QtCore.QRect(500, 310, 23, 25))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.removeMappingButton.setFont(font)
self.removeMappingButton.setObjectName("removeMappingButton")
self.tabWidget.addTab(self.mappingTab, "")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 530, 25))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
self.addLoopButton.clicked.connect(MainWindow.newLoop)
self.removeLoopButton.clicked.connect(MainWindow.removeLoops)
self.addMappingButton.clicked.connect(MainWindow.newMapping)
self.removeMappingButton.clicked.connect(MainWindow.removeMappings)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "JACK MIDI Looper"))
self.addLoopButton.setText(_translate("MainWindow", "+"))
self.removeLoopButton.setText(_translate("MainWindow", "-"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.loopTab), _translate("MainWindow", "Loops"))
self.addMappingButton.setText(_translate("MainWindow", "+"))
self.removeMappingButton.setText(_translate("MainWindow", "-"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.mappingTab), _translate("MainWindow", "MIDI Mappings")) | unknown | codeparrot/codeparrot-clean | ||
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range) | unknown | codeparrot/codeparrot-clean | ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import pytest
from sqlalchemy import select
from airflow.models.dagbag import DagPriorityParsingRequest, DBDagBag
from tests_common.test_utils.api_fastapi import _check_last_log
from tests_common.test_utils.db import clear_db_dag_parsing_requests, clear_db_logs, parse_and_sync_to_db
from tests_common.test_utils.paths import AIRFLOW_CORE_SOURCES_PATH
pytestmark = pytest.mark.db_test
EXAMPLE_DAG_FILE = AIRFLOW_CORE_SOURCES_PATH / "airflow" / "example_dags" / "example_simplest_dag.py"
TEST_DAG_ID = "example_simplest_dag"
NOT_READABLE_DAG_ID = "latest_only_with_trigger"
TEST_MULTIPLE_DAGS_ID = "asset_produces_1"
class TestDagParsingEndpoint:
@staticmethod
def clear_db():
clear_db_dag_parsing_requests()
@pytest.fixture(autouse=True)
def setup(self, session) -> None:
self.clear_db()
clear_db_logs()
def test_201_and_400_requests(self, url_safe_serializer, session, test_client):
parse_and_sync_to_db(EXAMPLE_DAG_FILE)
test_dag = DBDagBag(load_op_links=False).get_latest_version_of_dag(TEST_DAG_ID, session=session)
# grab the token
token = test_client.get(f"/dags/{TEST_DAG_ID}").json()["file_token"]
# First parsing request
url = f"/parseDagFile/{token}"
response = test_client.put(url, headers={"Accept": "application/json"})
assert response.status_code == 201
parsing_requests = session.scalars(select(DagPriorityParsingRequest)).all()
assert len(parsing_requests) == 1
assert parsing_requests[0].bundle_name == "dags-folder"
assert parsing_requests[0].relative_fileloc == test_dag.relative_fileloc
_check_last_log(session, dag_id=None, event="reparse_dag_file", logical_date=None)
# Duplicate file parsing request
response = test_client.put(url, headers={"Accept": "application/json"})
assert response.status_code == 409
parsing_requests = session.scalars(select(DagPriorityParsingRequest)).all()
assert len(parsing_requests) == 1
assert parsing_requests[0].bundle_name == "dags-folder"
assert parsing_requests[0].relative_fileloc == test_dag.relative_fileloc
_check_last_log(session, dag_id=None, event="reparse_dag_file", logical_date=None)
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.put(
"/parseDagFile/token", headers={"Accept": "application/json"}
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.put("/parseDagFile/token", headers={"Accept": "application/json"})
assert response.status_code == 403
def test_bad_file_request(self, url_safe_serializer, session, test_client):
payload = {"bundle_name": "some_bundle", "relative_fileloc": "/some/random/file.py"}
url = f"/parseDagFile/{url_safe_serializer.dumps(payload)}"
response = test_client.put(url, headers={"Accept": "application/json"})
assert response.status_code == 404
parsing_requests = session.scalars(select(DagPriorityParsingRequest)).all()
assert parsing_requests == [] | python | github | https://github.com/apache/airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dag_parsing.py |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"strings"
"testing"
"github.com/hashicorp/cli"
)
func testSSHCommand(tb testing.TB) (*cli.MockUi, *SSHCommand) {
tb.Helper()
ui := cli.NewMockUi()
return ui, &SSHCommand{
BaseCommand: &BaseCommand{
UI: ui,
},
}
}
func TestSSHCommand_Run(t *testing.T) {
t.Parallel()
t.Skip("Need a way to setup target infrastructure")
}
func TestParseSSHCommand(t *testing.T) {
t.Parallel()
_, cmd := testSSHCommand(t)
tests := []struct {
name string
args []string
hostname string
username string
port string
err error
}{
{
"Parse just a hostname",
[]string{
"hostname",
},
"hostname",
"",
"",
nil,
},
{
"Parse the standard username@hostname",
[]string{
"username@hostname",
},
"hostname",
"username",
"",
nil,
},
{
"Parse the username out of -o User=username",
[]string{
"-o", "User=username",
"hostname",
},
"hostname",
"username",
"",
nil,
},
{
"If the username is specified with -o User=username and realname@hostname prefer realname@",
[]string{
"-o", "User=username",
"realname@hostname",
},
"hostname",
"realname",
"",
nil,
},
{
"Parse the port out of -o Port=2222",
[]string{
"-o", "Port=2222",
"hostname",
},
"hostname",
"",
"2222",
nil,
},
{
"Parse the port out of -p 2222",
[]string{
"-p", "2222",
"hostname",
},
"hostname",
"",
"2222",
nil,
},
{
"If port is defined with -o Port=2222 and -p 2244 prefer -p",
[]string{
"-p", "2244",
"-o", "Port=2222",
"hostname",
},
"hostname",
"",
"2244",
nil,
},
{
"Ssh args with a command",
[]string{
"hostname",
"command",
},
"hostname",
"",
"",
nil,
},
{
"Flags after the ssh command are not passed because they are part of the command",
[]string{
"username@hostname",
"command",
"-p 22",
},
"hostname",
"username",
"",
nil,
},
{
"Allow single args which don't have a value",
[]string{
"-v",
"hostname",
},
"hostname",
"",
"",
nil,
},
{
"Allow single args before and after the hostname and command",
[]string{
"-v",
"hostname",
"-v",
"command",
"-v",
},
"hostname",
"",
"",
nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
hostname, username, port, err := cmd.parseSSHCommand(test.args)
if err != test.err {
t.Errorf("got error: %q want %q", err, test.err)
}
if hostname != test.hostname {
t.Errorf("got hostname: %q want %q", hostname, test.hostname)
}
if username != test.username {
t.Errorf("got username: %q want %q", username, test.username)
}
if port != test.port {
t.Errorf("got port: %q want %q", port, test.port)
}
})
}
}
func TestIsSingleSSHArg(t *testing.T) {
t.Parallel()
_, cmd := testSSHCommand(t)
tests := []struct {
name string
arg string
want bool
}{
{
"-v is a single ssh arg",
"-v",
true,
},
{
"-o is NOT a single ssh arg",
"-o",
false,
},
{
"Repeated args like -vvv is still a single ssh arg",
"-vvv",
true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
got := cmd.isSingleSSHArg(test.arg)
if got != test.want {
t.Errorf("arg %q got %v want %v", test.arg, got, test.want)
}
})
}
}
// TestSSHCommandOmitFlagWarning checks if flags warning messages are printed
// in the output of the CLI command or not. If so, it will fail.
func TestSSHCommandOmitFlagWarning(t *testing.T) {
t.Parallel()
ui, cmd := testSSHCommand(t)
_ = cmd.Run([]string{"-mode", "ca", "-role", "otp_key_role", "user@1.2.3.4", "-extraFlag", "bug"})
combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
if strings.Contains(combined, "Command flags must be provided before positional arguments. The following arguments will not be parsed as flags") {
t.Fatalf("ssh command displayed flag warnings")
}
} | go | github | https://github.com/hashicorp/vault | command/ssh_test.go |
from test import test_support as support
# If we end up with a significant number of tests that don't require
# threading, this test module should be split.  Right now we skip
# them all if we don't have threading.
threading = support.import_module('threading')

from contextlib import contextmanager
import imaplib
import os.path
import SocketServer
import time

# Fix: import from the package-qualified module, matching the
# `from test import test_support as support` line above.  A bare
# "from test_support import ..." only resolves when the suite happens
# to be run from inside the Lib/test directory itself.
from test.test_support import reap_threads, verbose

import unittest

try:
    import ssl
except ImportError:
    ssl = None

# Path to the SSL certificate; filled in by test_main() when the
# network resource is enabled.
CERTFILE = None
class TestImaplib(unittest.TestCase):
    """Offline checks for imaplib helpers (no network required)."""

    def test_that_Time2Internaldate_returns_a_result(self):
        # The produced INTERNALDATE string depends on the machine's
        # timezone, so we only verify that conversion succeeds for every
        # supported input form rather than checking the exact value.
        samples = (
            2000000000,
            2000000000.0,
            time.localtime(2000000000),
            '"18-May-2033 05:33:20 +0200"',
        )
        for value in samples:
            imaplib.Time2Internaldate(value)
# When the ssl module is available, provide a TCPServer variant that wraps
# each accepted socket in SSL; otherwise supply placeholder names so the
# rest of the module still imports cleanly.
if ssl:

    class SecureTCPServer(SocketServer.TCPServer):

        def get_request(self):
            # Accept, then wrap the raw socket so the request handler
            # transparently reads/writes the encrypted stream.
            newsocket, fromaddr = self.socket.accept()
            connstream = ssl.wrap_socket(newsocket,
                                         server_side=True,
                                         certfile=CERTFILE)
            return connstream, fromaddr

    IMAP4_SSL = imaplib.IMAP4_SSL

else:

    class SecureTCPServer:
        pass

    IMAP4_SSL = None
class SimpleIMAPHandler(SocketServer.StreamRequestHandler):
    # Minimal in-process IMAP4 server: greets the client, then dispatches
    # each received command line to a cmd_<NAME> method when one exists.

    timeout = 1

    def _send(self, message):
        # Write a raw protocol line to the client, logging it when verbose.
        if verbose: print "SENT:", message.strip()
        self.wfile.write(message)

    def handle(self):
        # Send a welcome message.
        self._send('* OK IMAP4rev1\r\n')
        while 1:
            # Gather up input until we receive a line terminator or we timeout.
            # Accumulate read(1) because it's simpler to handle the differences
            # between naked sockets and SSL sockets.
            line = ''
            while 1:
                try:
                    part = self.rfile.read(1)
                    if part == '':
                        # Naked sockets return empty strings..
                        return
                    line += part
                except IOError:
                    # ..but SSLSockets throw exceptions.
                    return
                if line.endswith('\r\n'):
                    break

            if verbose: print 'GOT:', line.strip()
            # IMAP command lines look like: "<tag> <command> [args...]".
            splitline = line.split()
            tag = splitline[0]
            cmd = splitline[1]
            args = splitline[2:]

            if hasattr(self, 'cmd_%s' % (cmd,)):
                getattr(self, 'cmd_%s' % (cmd,))(tag, args)
            else:
                self._send('%s BAD %s unknown\r\n' % (tag, cmd))

    def cmd_CAPABILITY(self, tag, args):
        self._send('* CAPABILITY IMAP4rev1\r\n')
        self._send('%s OK CAPABILITY completed\r\n' % (tag,))
class BaseThreadedNetworkedTests(unittest.TestCase):
    # Shared machinery for the networked tests; concrete subclasses set
    # `server_class` (the SocketServer variant) and `imap_class` (the
    # matching imaplib client class).

    def make_server(self, addr, hdlr):
        # Start `hdlr` serving on `addr` in a daemon thread and return
        # (server, thread) so the caller can shut it down deterministically.

        class MyServer(self.server_class):
            def handle_error(self, request, client_address):
                # Fail fast: close everything and re-raise instead of the
                # default traceback-and-continue behaviour.
                self.close_request(request)
                self.server_close()
                raise

        if verbose: print "creating server"
        server = MyServer(addr, hdlr)
        self.assertEquals(server.server_address, server.socket.getsockname())
        if verbose:
            print "server created"
            print "ADDR =", addr
            print "CLASS =", self.server_class
            print "HDLR =", server.RequestHandlerClass

        t = threading.Thread(
            name='%s serving' % self.server_class,
            target=server.serve_forever,
            # Short poll interval to make the test finish quickly.
            # Time between requests is short enough that we won't wake
            # up spuriously too many times.
            kwargs={'poll_interval':0.01})
        t.daemon = True  # In case this function raises.
        t.start()
        if verbose: print "server running"
        return server, t

    def reap_server(self, server, thread):
        # Stop serve_forever() and join the serving thread.
        if verbose: print "waiting for server"
        server.shutdown()
        thread.join()
        if verbose: print "done"

    @contextmanager
    def reaped_server(self, hdlr):
        # Context manager guaranteeing the server is reaped even when the
        # test body raises.
        server, thread = self.make_server((support.HOST, 0), hdlr)
        try:
            yield server
        finally:
            self.reap_server(server, thread)

    @reap_threads
    def test_connect(self):
        with self.reaped_server(SimpleIMAPHandler) as server:
            client = self.imap_class(*server.server_address)
            client.shutdown()

    @reap_threads
    def test_issue5949(self):
        # Regression test: a truncated welcome line must raise IMAP4.abort
        # rather than hanging or crashing.
        class EOFHandler(SocketServer.StreamRequestHandler):
            def handle(self):
                # EOF without sending a complete welcome message.
                self.wfile.write('* OK')

        with self.reaped_server(EOFHandler) as server:
            self.assertRaises(imaplib.IMAP4.abort,
                              self.imap_class, *server.server_address)
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
    # Plain-TCP variant of the networked tests.
    server_class = SocketServer.TCPServer
    imap_class = imaplib.IMAP4
@unittest.skipUnless(ssl, "SSL not available")
class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
    # SSL variant; skipped entirely when the ssl module is unavailable.
    server_class = SecureTCPServer
    imap_class = IMAP4_SSL
def test_main():
    # Always run the offline tests; add the networked (and SSL) test
    # classes only when the 'network' resource is enabled.
    tests = [TestImaplib]

    if support.is_resource_enabled('network'):
        if ssl:
            global CERTFILE
            CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
                                    "keycert.pem")
            if not os.path.exists(CERTFILE):
                raise support.TestFailed("Can't read certificate files!")
        tests.extend([ThreadedNetworkedTests, ThreadedNetworkedTestsSSL])

    support.run_unittest(*tests)
if __name__ == "__main__":
    # Running this file directly opts in to the network-dependent tests.
    support.use_resources = ['network']
    test_main()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import time
import uuid
import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg
from keystone.openstack.common import excutils
from keystone.openstack.common.gettextutils import _ # noqa
from keystone.openstack.common import network_utils
from keystone.openstack.common.rpc import amqp as rpc_amqp
from keystone.openstack.common.rpc import common as rpc_common
from keystone.openstack.common import sslutils
# Configuration options for the kombu/RabbitMQ RPC driver; registered on the
# global CONF object at import time.
kombu_opts = [
    cfg.StrOpt('kombu_ssl_version',
               default='',
               help='SSL version to use (valid only if SSL enabled). '
                    'valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
                    'be available on some distributions'
               ),
    cfg.StrOpt('kombu_ssl_keyfile',
               default='',
               help='SSL key file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_certfile',
               default='',
               help='SSL cert file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_ca_certs',
               default='',
               help=('SSL certification authority file '
                     '(valid only if SSL enabled)')),
    cfg.StrOpt('rabbit_host',
               default='localhost',
               help='The RabbitMQ broker address where a single node is used'),
    cfg.IntOpt('rabbit_port',
               default=5672,
               help='The RabbitMQ broker port where a single node is used'),
    cfg.ListOpt('rabbit_hosts',
                default=['$rabbit_host:$rabbit_port'],
                help='RabbitMQ HA cluster host:port pairs'),
    cfg.BoolOpt('rabbit_use_ssl',
                default=False,
                help='connect over SSL for RabbitMQ'),
    cfg.StrOpt('rabbit_userid',
               default='guest',
               help='the RabbitMQ userid'),
    cfg.StrOpt('rabbit_password',
               default='guest',
               help='the RabbitMQ password',
               secret=True),
    cfg.StrOpt('rabbit_virtual_host',
               default='/',
               help='the RabbitMQ virtual host'),
    cfg.IntOpt('rabbit_retry_interval',
               default=1,
               help='how frequently to retry connecting with RabbitMQ'),
    cfg.IntOpt('rabbit_retry_backoff',
               default=2,
               help='how long to backoff for between retries when connecting '
                    'to RabbitMQ'),
    cfg.IntOpt('rabbit_max_retries',
               default=0,
               help='maximum retries with trying to connect to RabbitMQ '
                    '(the default of 0 implies an infinite retry count)'),
    cfg.BoolOpt('rabbit_ha_queues',
                default=False,
                help='use H/A queues in RabbitMQ (x-ha-policy: all).'
                     'You need to wipe RabbitMQ database when '
                     'changing this option.'),
]

cfg.CONF.register_opts(kombu_opts)

LOG = rpc_common.LOG
def _get_queue_arguments(conf):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, channel, callback, tag, **kwargs):
        """Declare a queue on an amqp channel.

        'channel' is the amqp channel to use
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        queue name, exchange name, and other kombu options are
        passed in here as a dictionary.
        """
        self.callback = callback
        self.tag = str(tag)
        self.kwargs = kwargs
        self.queue = None
        # When False, messages whose callback raises are requeued via
        # reject() instead of being acknowledged (see _callback_handler).
        self.ack_on_error = kwargs.get('ack_on_error', True)
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-declare the queue after a rabbit reconnect."""
        self.channel = channel
        self.kwargs['channel'] = channel
        self.queue = kombu.entity.Queue(**self.kwargs)
        self.queue.declare()

    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .reject()'ed.
        Rejection is better than waiting for the message to timeout.
        Rejected messages are immediately requeued.
        """

        ack_msg = False
        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
            ack_msg = True
        except Exception:
            if self.ack_on_error:
                ack_msg = True
                LOG.exception(_("Failed to process message"
                                " ... skipping it."))
            else:
                LOG.exception(_("Failed to process message"
                                " ... will requeue."))
        finally:
            # Exactly one of ack/reject runs, even if the callback raised.
            if ack_msg:
                message.ack()
            else:
                message.reject()

    def consume(self, *args, **kwargs):
        """Actually declare the consumer on the amqp channel. This will
        start the flow of messages from the queue. Using the
        Connection.iterconsume() iterator will process the messages,
        calling the appropriate callback.

        If a callback is specified in kwargs, use that. Otherwise,
        use the callback passed during __init__()

        If kwargs['nowait'] is True, then this call will block until
        a message is read.
        """
        options = {'consumer_tag': self.tag}
        options['nowait'] = kwargs.get('nowait', False)
        callback = kwargs.get('callback', self.callback)
        if not callback:
            raise ValueError("No callback defined")

        def _callback(raw_message):
            # Convert the transport-level message before dispatching.
            message = self.channel.message_to_python(raw_message)
            self._callback_handler(message, callback)

        self.queue.consume(*args, callback=_callback, **options)

    def cancel(self):
        """Cancel the consuming from the queue, if it has started."""
        try:
            self.queue.cancel(self.tag)
        except KeyError as e:
            # NOTE(comstud): Kludge to get around a amqplib bug
            if str(e) != "u'%s'" % self.tag:
                raise
        self.queue = None
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
        """Init a 'direct' queue.

        'channel' is the amqp channel to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        # Start from the driver defaults, then let callers override any of
        # them via keyword arguments.
        defaults = {'durable': False,
                    'queue_arguments': _get_queue_arguments(conf),
                    'auto_delete': True,
                    'exclusive': False}
        defaults.update(kwargs)

        # Direct exchanges are named after the message id; the queue binds
        # to the same name so replies are routed straight back.
        exchange = kombu.entity.Exchange(
            name=msg_id, type='direct',
            durable=defaults['durable'],
            auto_delete=defaults['auto_delete'])
        super(DirectConsumer, self).__init__(
            channel, callback, tag,
            name=msg_id, exchange=exchange, routing_key=msg_id,
            **defaults)
class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, channel, topic, callback, tag, name=None,
                 exchange_name=None, **kwargs):
        """Init a 'topic' queue.

        :param channel: the amqp channel to use
        :param topic: the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param tag: a unique ID for the consumer on the channel
        :param name: optional queue name, defaults to topic
        :paramtype name: str

        Other kombu options may be passed as keyword arguments
        """
        # Driver defaults, overridable by the caller's keyword arguments.
        defaults = {'durable': conf.amqp_durable_queues,
                    'queue_arguments': _get_queue_arguments(conf),
                    'auto_delete': conf.amqp_auto_delete,
                    'exclusive': False}
        defaults.update(kwargs)

        # Fall back to the configured control exchange when no explicit
        # exchange name was supplied.
        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        exchange = kombu.entity.Exchange(
            name=exchange_name, type='topic',
            durable=defaults['durable'],
            auto_delete=defaults['auto_delete'])
        super(TopicConsumer, self).__init__(
            channel, callback, tag,
            name=name or topic, exchange=exchange, routing_key=topic,
            **defaults)
class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
        """Init a 'fanout' queue.

        'channel' is the amqp channel to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        # Each consumer gets its own uniquely-named queue bound to the
        # shared '<topic>_fanout' exchange, so every consumer sees every
        # message.
        exchange_name = '%s_fanout' % topic
        queue_name = '%s_fanout_%s' % (topic, uuid.uuid4().hex)

        defaults = {'durable': False,
                    'queue_arguments': _get_queue_arguments(conf),
                    'auto_delete': True,
                    'exclusive': False}
        defaults.update(kwargs)

        exchange = kombu.entity.Exchange(
            name=exchange_name, type='fanout',
            durable=defaults['durable'],
            auto_delete=defaults['auto_delete'])
        super(FanoutConsumer, self).__init__(
            channel, callback, tag,
            name=queue_name, exchange=exchange, routing_key=topic,
            **defaults)
class Publisher(object):
    """Base Publisher class."""

    def __init__(self, channel, exchange_name, routing_key, **kwargs):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        self.kwargs = kwargs
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-establish the Producer after a rabbit reconnection."""
        self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                              **self.kwargs)
        self.producer = kombu.messaging.Producer(
            exchange=self.exchange,
            channel=channel,
            routing_key=self.routing_key)

    def send(self, msg, timeout=None):
        """Send a message, optionally with a per-message TTL."""
        publish_kwargs = {}
        if timeout:
            # AMQP expresses TTL in milliseconds when set via headers.
            publish_kwargs['headers'] = {'ttl': (timeout * 1000)}
        self.producer.publish(msg, **publish_kwargs)
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""

    def __init__(self, conf, channel, msg_id, **kwargs):
        """init a 'direct' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        defaults = {'durable': False, 'auto_delete': True, 'exclusive': False}
        defaults.update(kwargs)
        # The direct exchange and routing key are both the message id.
        super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
                                              type='direct', **defaults)
class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""

    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'topic' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        defaults = {'durable': conf.amqp_durable_queues,
                    'auto_delete': conf.amqp_auto_delete,
                    'exclusive': False}
        defaults.update(kwargs)
        # Topic traffic always flows through the configured control exchange.
        super(TopicPublisher, self).__init__(
            channel, rpc_amqp.get_control_exchange(conf), topic,
            type='topic', **defaults)
class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""

    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'fanout' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        defaults = {'durable': False, 'auto_delete': True, 'exclusive': False}
        defaults.update(kwargs)
        # Fanout exchanges ignore the routing key, hence None.
        exchange = '%s_fanout' % topic
        super(FanoutPublisher, self).__init__(channel, exchange, None,
                                              type='fanout', **defaults)
class NotifyPublisher(TopicPublisher):
    """Publisher class for 'notify'."""

    def __init__(self, conf, channel, topic, **kwargs):
        self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
        self.queue_arguments = _get_queue_arguments(conf)
        super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)

    def reconnect(self, channel):
        super(NotifyPublisher, self).reconnect(channel)

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue_kwargs = dict(channel=channel,
                            exchange=self.exchange,
                            durable=self.durable,
                            name=self.routing_key,
                            routing_key=self.routing_key,
                            queue_arguments=self.queue_arguments)
        kombu.entity.Queue(**queue_kwargs).declare()
class Connection(object):
    """Connection object."""

    # Shared connection pool; populated lazily by rpc_amqp helpers.
    pool = None

    def __init__(self, conf, server_params=None):
        self.consumers = []
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        if server_params is None:
            server_params = {}
        # Keys to translate from server_params to kombu params
        server_params_to_kombu_params = {'username': 'userid'}

        ssl_params = self._fetch_ssl_params()
        # Build one kombu parameter dict per configured broker host; the
        # reconnect loop cycles through these round-robin.
        params_list = []
        for adr in self.conf.rabbit_hosts:
            hostname, port = network_utils.parse_host_port(
                adr, default_port=self.conf.rabbit_port)

            params = {
                'hostname': hostname,
                'port': port,
                'userid': self.conf.rabbit_userid,
                'password': self.conf.rabbit_password,
                'virtual_host': self.conf.rabbit_virtual_host,
            }

            # Explicit server_params override the configured defaults.
            for sp_key, value in server_params.iteritems():
                p_key = server_params_to_kombu_params.get(sp_key, sp_key)
                params[p_key] = value

            if self.conf.fake_rabbit:
                params['transport'] = 'memory'
            if self.conf.rabbit_use_ssl:
                params['ssl'] = ssl_params

            params_list.append(params)

        self.params_list = params_list

        self.memory_transport = self.conf.fake_rabbit

        self.connection = None
        self.reconnect()

    def _fetch_ssl_params(self):
        """Handles fetching what ssl params should be used for the connection
        (if any).
        """
        ssl_params = dict()

        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
        if self.conf.kombu_ssl_version:
            ssl_params['ssl_version'] = sslutils.validate_ssl_version(
                self.conf.kombu_ssl_version)
        if self.conf.kombu_ssl_keyfile:
            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
        if self.conf.kombu_ssl_certfile:
            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
        if self.conf.kombu_ssl_ca_certs:
            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
            # We might want to allow variations in the
            # future with this?
            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED

        # Return the extended behavior or just have the default behavior
        return ssl_params or True

    def _connect(self, params):
        """Connect to rabbit. Re-establish any queues that may have
        been declared before if we are reconnecting. Exceptions should
        be handled by the caller.
        """
        if self.connection:
            LOG.info(_("Reconnecting to AMQP server on "
                       "%(hostname)s:%(port)d") % params)
            try:
                self.connection.release()
            except self.connection_errors:
                pass
            # Setting this in case the next statement fails, though
            # it shouldn't be doing any network operations, yet.
            self.connection = None
        self.connection = kombu.connection.BrokerConnection(**params)
        self.connection_errors = self.connection.connection_errors
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        self.consumer_num = itertools.count(1)
        self.connection.connect()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        # Re-declare any consumers that existed before the reconnect.
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
                 params)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            # Round-robin across configured broker hosts.
            params = self.params_list[attempt % len(self.params_list)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response. (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                msg = _('Unable to connect to AMQP server on '
                        '%(hostname)s:%(port)d after %(max_retries)d '
                        'tries: %(err_str)s') % log_info
                LOG.error(msg)
                raise rpc_common.RPCException(msg)

            # Linear backoff: start at interval_start, add interval_stepping
            # per failed attempt, capped at interval_max.
            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                        'unreachable: %(err_str)s. Trying again in '
                        '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)

    def ensure(self, error_callback, method, *args, **kwargs):
        # Run `method`, transparently reconnecting and retrying forever on
        # transport-level failures; `error_callback` (if set) is invoked
        # with the exception before each reconnect.
        while True:
            try:
                return method(*args, **kwargs)
            except (self.connection_errors, socket.timeout, IOError) as e:
                if error_callback:
                    error_callback(e)
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response. (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise
                if error_callback:
                    error_callback(e)
            self.reconnect()

    def get_channel(self):
        """Convenience call for bin/clear_rabbit_queues."""
        return self.channel

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.connection.release()
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.channel.close()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        self.consumers = []

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                        "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.channel, topic, callback,
                                    self.consumer_num.next())
            self.consumers.append(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        info = {'do_consume': True}

        def _error_callback(exc):
            if isinstance(exc, socket.timeout):
                LOG.debug(_('Timed out waiting for RPC response: %s') %
                          str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))
                info['do_consume'] = True

        def _consume():
            if info['do_consume']:
                # NOTE(review): this assumes the most recently declared
                # consumer is the fanout one (it gets nowait=False) --
                # confirm against create_consumer() registration order.
                queues_head = self.consumers[:-1]  # not fanout.
                queues_tail = self.consumers[-1]  # fanout
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
                info['do_consume'] = False
            return self.connection.drain_events(timeout=timeout)

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
        """Send to a publisher based on the publisher class."""

        def _error_callback(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                            "'%(topic)s': %(err_str)s") % log_info)

        def _publish():
            publisher = cls(self.conf, self.channel, topic, **kwargs)
            publisher.send(msg, timeout)

        self.ensure(_error_callback, _publish)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.

        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None, ack_on_error=True):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ack_on_error=ack_on_error,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        self.publisher_send(TopicPublisher, topic, msg, timeout)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                it.next()
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            self.declare_fanout_consumer(topic, proxy_cb)
        else:
            self.declare_topic_consumer(topic, proxy_cb)

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)
        # pool_name becomes the queue name, so all workers in the pool
        # share a single queue.
        self.declare_topic_consumer(topic, proxy_cb, pool_name)

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
        )
        self.proxy_callbacks.append(callback_wrapper)
        self.declare_topic_consumer(
            queue_name=pool_name,
            topic=topic,
            exchange_name=exchange_name,
            callback=callback_wrapper,
            ack_on_error=ack_on_error,
        )
def create_connection(conf, new=True):
    """Create a connection."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.create_connection(conf, new, pool)
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.multicall(conf, context, topic, msg, timeout, pool)
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.call(conf, context, topic, msg, timeout, pool)
def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast(conf, context, topic, msg, pool)
def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast(conf, context, topic, msg, pool)
def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
                                   pool)
def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast_to_server(conf, context, server_params, topic,
                                          msg, pool)
def notify(conf, context, topic, msg, envelope):
    """Sends a notification event on a topic."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.notify(conf, context, topic, msg, pool, envelope)
def cleanup():
    # Dispose of the shared module-level connection pool.
    return rpc_amqp.cleanup(Connection.pool)
from django.conf.urls import include, patterns, url
# Authentication: login/logout.
urlpatterns = patterns('misago.users.views.auth',
    url(r'^login/$', 'login', name='login'),
    url(r'^logout/$', 'logout', name='logout'),
)


# Account activation (no-JS fallback views).
urlpatterns += patterns('misago.users.views.activation',
    url(r'^activation/$', 'activation_noscript', name="request_activation"),
    url(r'^activation/(?P<user_id>\d+)/(?P<token>[a-zA-Z0-9]+)/$', 'activation_noscript', name="activate_by_token"),
)


# Forgotten-password flow (no-JS fallback views).
urlpatterns += patterns('misago.users.views.forgottenpassword',
    url(r'^forgotten-password/$', 'forgotten_password_noscript', name='forgotten_password'),
    url(r'^forgotten-password/(?P<user_id>\d+)/(?P<token>[a-zA-Z0-9]+)/$', 'forgotten_password_noscript', name='forgotten_password_change_form'),
)


# Account options pages; optional token supports e-mail-confirmed changes.
urlpatterns += patterns('misago.users.views.options',
    url(r'^options/$', 'index', name='options'),
    url(r'^options/(?P<form_name>[-a-zA-Z]+)/$', 'form', name='options_form'),
    url(r'^options/(?P<form_name>[-a-zA-Z]+)/(?P<token>[-a-zA-Z0-9]+)/$', 'form', name='options_form'),
)


# User directory listings (active posters, online, by rank), with paging.
urlpatterns += patterns('',
    url(r'^users/', include(patterns('misago.users.views.lists',
        url(r'^$', 'lander', name="users"),
        url(r'^active-posters/$', 'active_posters', name="users_active_posters"),
        url(r'^active-posters/(?P<page>\d+)/$', 'active_posters', name="users_active_posters"),
        url(r'^online/$', 'online', name="users_online"),
        url(r'^online/(?P<page>\d+)/$', 'online', name="users_online"),
        url(r'^(?P<rank_slug>[-a-zA-Z0-9]+)/$', 'rank', name="users_rank"),
        url(r'^(?P<rank_slug>[-a-zA-Z0-9]+)/(?P<page>\d+)/$', 'rank', name="users_rank"),
    )))
)


# Individual user profile tabs (slug-id addressing).
urlpatterns += patterns('',
    url(r'^user/(?P<user_slug>[a-zA-Z0-9]+)-(?P<user_id>\d+)/', include(patterns('misago.users.views.profile',
        url(r'^$', 'posts', name="user_posts"),
        url(r'^threads/$', 'threads', name="user_threads"),
        url(r'^followers/$', 'followers', name="user_followers"),
        url(r'^followers/(?P<page>\d+)/$', 'followers', name="user_followers"),
        url(r'^follows/$', 'follows', name="user_follows"),
        url(r'^follows/(?P<page>\d+)/$', 'follows', name="user_follows"),
        url(r'^name-history/$', 'name_history', name="user_name_history"),
        url(r'^name-history/(?P<page>\d+)/$', 'name_history', name="user_name_history"),
        url(r'^warnings/$', 'warnings', name="user_warnings"),
        url(r'^warnings/(?P<page>\d+)/$', 'warnings', name="user_warnings"),
        url(r'^ban-details/$', 'user_ban', name="user_ban"),
        url(r'^follow/$', 'follow_user', name="follow_user"),
        url(r'^block/$', 'block_user', name="block_user"),
    )))
)


# Moderator actions against a specific user.
urlpatterns += patterns('',
    url(r'^mod-user/(?P<user_slug>[a-zA-Z0-9]+)-(?P<user_id>\d+)/', include(patterns('misago.users.views.moderation',
        url(r'^warn/$', 'warn', name='warn_user'),
        url(r'^warn/(?P<warning_id>\d+)/cancel/$', 'cancel_warning', name='cancel_warning'),
        url(r'^warn/(?P<warning_id>\d+)/delete/$', 'delete_warning', name='delete_warning'),
        url(r'^rename/$', 'rename', name='rename_user'),
        url(r'^avatar/$', 'moderate_avatar', name='moderate_avatar'),
        url(r'^signature/$', 'moderate_signature', name='moderate_signature'),
        url(r'^ban/$', 'ban_user', name='ban_user'),
        url(r'^lift-ban/$', 'lift_user_ban', name='lift_user_ban'),
        url(r'^delete/$', 'delete', name='delete_user'),
    )))
)


# Avatar serving endpoints (hashed URLs, blank fallback).
urlpatterns += patterns('',
    url(r'^user-avatar/', include(patterns('misago.users.views.avatarserver',
        url(r'^(?P<hash>[a-f0-9]+)/(?P<size>\d+)/(?P<user_id>\d+)\.png$', 'serve_user_avatar', name="user_avatar"),
        url(r'^(?P<secret>[a-f0-9]+):(?P<hash>[a-f0-9]+)/(?P<user_id>\d+)\.png$', 'serve_user_avatar_source', name="user_avatar_source"),
        url(r'^(?P<size>\d+)\.png$', 'serve_blank_avatar', name="blank_avatar"),
    )))
)
"""Simple expression that should pass with mypy."""
import operator
from collections.abc import Iterable
import numpy as np
import numpy.typing as npt
# Basic checks
array = np.array([1, 2])
def ndarray_func(x: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
return x
ndarray_func(np.array([1, 2], dtype=np.float64))
array == 1
array.dtype == float
# Dtype construction
np.dtype(float)
np.dtype(np.float64)
np.dtype(None)
np.dtype("float64")
np.dtype(np.dtype(float))
np.dtype(("U", 10))
np.dtype((np.int32, (2, 2)))
# Define the arguments on the previous line to prevent bidirectional
# type inference in mypy from broadening the types.
two_tuples_dtype = [("R", "u1"), ("G", "u1"), ("B", "u1")]
np.dtype(two_tuples_dtype)
three_tuples_dtype = [("R", "u1", 2)]
np.dtype(three_tuples_dtype)
mixed_tuples_dtype = [("R", "u1"), ("G", np.str_, 1)]
np.dtype(mixed_tuples_dtype)
shape_tuple_dtype = [("R", "u1", (2, 2))]
np.dtype(shape_tuple_dtype)
shape_like_dtype = [("R", "u1", (2, 2)), ("G", np.str_, 1)]
np.dtype(shape_like_dtype)
object_dtype = [("field1", object)]
np.dtype(object_dtype)
np.dtype((np.int32, (np.int8, 4)))
# Dtype comparison
np.dtype(float) == float
np.dtype(float) != np.float64
np.dtype(float) < None
np.dtype(float) <= "float64"
np.dtype(float) > np.dtype(float)
np.dtype(float) >= np.dtype(("U", 10))
# Iteration and indexing
def iterable_func(x: Iterable[object]) -> Iterable[object]:
return x
iterable_func(array)
list(array)
iter(array)
zip(array, array)
array[1]
array[:]
array[...]
array[:] = 0
array_2d = np.ones((3, 3))
array_2d[:2, :2]
array_2d[:2, :2] = 0
array_2d[..., 0]
array_2d[..., 0] = 2
array_2d[-1, -1] = None
array_obj = np.zeros(1, dtype=np.object_)
array_obj[0] = slice(None)
# Other special methods
len(array)
str(array)
array_scalar = np.array(1)
int(array_scalar)
float(array_scalar)
complex(array_scalar)
bytes(array_scalar)
operator.index(array_scalar)
bool(array_scalar)
# comparisons
array < 1
array <= 1
array == 1
array != 1
array > 1
array >= 1
1 < array
1 <= array
1 == array
1 != array
1 > array
1 >= array
# binary arithmetic
array + 1
1 + array
array += 1
array - 1
1 - array
array -= 1
array * 1
1 * array
array *= 1
nonzero_array = np.array([1, 2])
array / 1
1 / nonzero_array
float_array = np.array([1.0, 2.0])
float_array /= 1
array // 1
1 // nonzero_array
array //= 1
array % 1
1 % nonzero_array
array %= 1
divmod(array, 1)
divmod(1, nonzero_array)
array ** 1
1 ** array
array **= 1
array << 1
1 << array
array <<= 1
array >> 1
1 >> array
array >>= 1
array & 1
1 & array
array &= 1
array ^ 1
1 ^ array
array ^= 1
array | 1
1 | array
array |= 1
# unary arithmetic
-array
+array
abs(array)
~array
# Other methods
array.transpose()
array @ array | python | github | https://github.com/numpy/numpy | numpy/typing/tests/data/pass/simple.py |
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use icu_locale::LocaleCanonicalizer;
use icu_locale::LocaleExpander;
use icu_locale_core::Locale;
/// Shared driver for the canonicalization benchmarks.
///
/// Both `canonicalize_bench` and `canonicalize_noop_bench` were previously
/// line-for-line duplicates differing only in group name and fixture; the
/// common body lives here. Each group measures:
/// * `clone` — the per-iteration clone cost alone, so the `canonicalize`
///   numbers can be read net of the clone baseline;
/// * `canonicalize` — clone plus `LocaleCanonicalizer::canonicalize`.
fn run_canonicalize_group(c: &mut Criterion, group_name: &str, fixture_json: &str) {
    let lc = LocaleCanonicalizer::new_common();
    let mut group = c.benchmark_group(group_name);

    let data: Vec<String> =
        serde_json::from_str(fixture_json).expect("Failed to read a fixture");
    let locales: Vec<Locale> = data.iter().map(|s| s.parse().unwrap()).collect();

    group.bench_function("clone", |b| {
        b.iter(|| {
            for locale in &locales {
                let _ = black_box(locale).clone();
            }
        })
    });

    group.bench_function("canonicalize", |b| {
        b.iter(|| {
            for locale in &locales {
                let mut locale = black_box(locale).clone();
                lc.canonicalize(&mut locale);
            }
        })
    });

    group.finish();
}

/// Benchmarks canonicalization of locales that DO require rewriting.
fn canonicalize_bench(c: &mut Criterion) {
    run_canonicalize_group(
        c,
        "uncanonicalized",
        include_str!("fixtures/uncanonicalized-locales.json"),
    );
}

/// Benchmarks the no-op path: none of these locales require
/// canonicalization, so this measures the cost of calling the
/// canonicalizer on locales that will not be modified.
fn canonicalize_noop_bench(c: &mut Criterion) {
    run_canonicalize_group(c, "canonicalized", include_str!("fixtures/locales.json"));
}
/// Benchmarks `LocaleExpander::maximize` and `minimize` (likely-subtags
/// resolution) over the canonical locale fixture.
fn maximize_bench(c: &mut Criterion) {
    let expander = LocaleExpander::new_common();

    let mut group = c.benchmark_group("likelysubtags");

    let raw: Vec<String> = serde_json::from_str(include_str!("fixtures/locales.json"))
        .expect("Failed to read a fixture");
    let locales: Vec<Locale> = raw.iter().map(|s| s.parse().unwrap()).collect();

    group.bench_function("maximize", |b| {
        b.iter(|| {
            for loc in locales.iter() {
                let mut loc = loc.clone();
                expander.maximize(black_box(&mut loc.id));
            }
        })
    });

    group.bench_function("minimize", |b| {
        b.iter(|| {
            for loc in locales.iter() {
                let mut loc = loc.clone();
                expander.minimize(black_box(&mut loc.id));
            }
        })
    });

    group.finish();
}
// Register every benchmark entry point with criterion and generate `main`.
criterion_group!(
    benches,
    canonicalize_bench,
    canonicalize_noop_bench,
    maximize_bench
);
criterion_main!(benches);
''' Text annotation module '''
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
    """Fields for `TextModule` and `TextDescriptor`."""

    # The annotatable XML payload; <instructions> is split out for display
    # and the remainder is rendered as the annotatable body.
    data = String(
        help=_("XML data for the annotation"),
        scope=Scope.content,
        default=textwrap.dedent("""\
<annotatable>
<instructions>
<p>
Add the instructions to the assignment here.
</p>
</instructions>
<p>
Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
</p>
</annotatable>
"""))
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_('Text Annotation'),
    )
    # Comma-separated tag:color pairs used to auto-highlight annotations.
    instructor_tags = String(
        display_name=_("Tags for Assignments"),
        help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
        scope=Scope.settings,
        default='imagery:red,parallelism:blue',
    )
    source = String(
        display_name=_("Source/Citation"),
        help=_("Optional for citing source of any material used. Automatic citation can be done using <a href=\"http://easybib.com\">EasyBib</a>"),
        scope=Scope.settings,
        default='None',
    )
    # Format: markname;urltomark;baseline, repeated comma-separated.
    diacritics = String(
        display_name=_("Diacritic Marks"),
        help=_("Add diacritic marks to be added to a text using the comma-separated form, i.e. markname;urltomark;baseline,markname2;urltomark2;baseline2"),
        scope=Scope.settings,
        default='',
    )
    # Backend that stores the annotations themselves (external service).
    annotation_storage_url = String(
        help=_("Location of Annotation backend"),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("Url for Annotation Storage")
    )
    # Shared secret used to sign the per-user token sent to the storage
    # backend (see retrieve_token in the module).
    annotation_token_secret = String(
        help=_("Secret string for annotation storage"),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    default_tab = String(
        display_name=_("Default Annotations Tab"),
        help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
        scope=Scope.settings,
        default="myNotes",
    )
    # currently only supports one instructor, will build functionality for multiple later
    instructor_email = String(
        display_name=_("Email for 'Instructor' Annotations"),
        help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
        scope=Scope.settings,
        default="",
    )
    # NOTE(review): the help text says "Type in number" but the observed
    # values are the strings 'instructor'/'everyone' — help wording looks
    # stale; confirm before changing the user-facing string.
    annotation_mode = String(
        display_name=_("Mode for Annotation Tool"),
        help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
        scope=Scope.settings,
        default="everyone",
    )
class TextAnnotationModule(AnnotatableFields, XModule):
    ''' Text Annotation Module '''
    # Frontend asset manifests; both JS lists are empty here.
    js = {'coffee': [],
          'js': []}
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'textannotation'

    def __init__(self, *args, **kwargs):
        # Parse the XML payload once; <instructions> is split out from the
        # annotatable body text.
        super(TextAnnotationModule, self).__init__(*args, **kwargs)

        xmltree = etree.fromstring(self.data)

        self.instructions = self._extract_instructions(xmltree)
        self.content = etree.tostring(xmltree, encoding='unicode')
        self.user_email = ""
        self.is_course_staff = False
        if self.runtime.get_user_role() in ['instructor', 'staff']:
            self.is_course_staff = True
        # get_real_user is None in Studio (its runtimes have no anonymous
        # student ids — see the comment in student_view below).
        if self.runtime.get_real_user is not None:
            try:
                self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
            except Exception:  # pylint: disable=broad-except
                self.user_email = _("No email address found.")

    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        return get_instructions(xmltree)

    def student_view(self, context):
        """ Renders parameters to template. """
        context = {
            'course_key': self.runtime.course_id,
            'display_name': self.display_name_with_default,
            'tag': self.instructor_tags,
            'source': self.source,
            'instructions_html': self.instructions,
            'content_html': self.content,
            # Signed token authenticating this user to the external
            # annotation storage backend.
            'token': retrieve_token(self.user_email, self.annotation_token_secret),
            'diacritic_marks': self.diacritics,
            'annotation_storage': self.annotation_storage_url,
            'default_tab': self.default_tab,
            'instructor_email': self.instructor_email,
            'annotation_mode': self.annotation_mode,
            'is_course_staff': self.is_course_staff,
        }
        fragment = Fragment(self.system.render_template('textannotation.html', context))

        # TinyMCE already exists in Studio so we should not load the files again
        # get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
        if self.runtime.get_real_user is not None:
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
        return fragment
class TextAnnotationDescriptor(AnnotatableFields, RawDescriptor):
    """Studio-side descriptor for the text annotation module."""
    module_class = TextAnnotationModule
    mako_template = "widgets/raw-edit.html"

    @property
    def non_editable_metadata_fields(self):
        """Hide the annotation-storage settings from Studio's metadata editor."""
        fields = super(TextAnnotationDescriptor, self).non_editable_metadata_fields
        fields.extend([
            TextAnnotationDescriptor.annotation_storage_url,
            TextAnnotationDescriptor.annotation_token_secret,
        ])
        return fields
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt \
import set_perpetual_inventory, get_gl_entries, test_records as pr_test_records
class TestLandedCostVoucher(unittest.TestCase):
    """Tests for the Landed Cost Voucher doctype.

    Consistency fix: the deprecated ``assertEquals`` alias (removed in
    Python 3.12) is replaced with ``assertEqual``, matching the two calls
    that already used the modern name.
    """

    def test_landed_cost_voucher(self):
        """Landed cost is distributed onto PR items, stock value and GL entries."""
        set_perpetual_inventory(1)
        pr = frappe.copy_doc(pr_test_records[0])
        pr.submit()

        bin_details = frappe.db.get_value("Bin", {"warehouse": "_Test Warehouse - _TC",
            "item_code": "_Test Item"}, ["actual_qty", "stock_value"], as_dict=1)

        self.submit_landed_cost_voucher(pr)

        pr_lc_value = frappe.db.get_value("Purchase Receipt Item", {"parent": pr.name}, "landed_cost_voucher_amount")
        self.assertEqual(pr_lc_value, 25.0)

        bin_details_after_lcv = frappe.db.get_value("Bin", {"warehouse": "_Test Warehouse - _TC",
            "item_code": "_Test Item"}, ["actual_qty", "stock_value"], as_dict=1)

        # Quantity must be unchanged; only the valuation rises by the
        # landed cost apportioned to this item (25.0).
        self.assertEqual(bin_details.actual_qty, bin_details_after_lcv.actual_qty)
        self.assertEqual(bin_details_after_lcv.stock_value - bin_details.stock_value, 25.0)

        gl_entries = get_gl_entries("Purchase Receipt", pr.name)
        self.assertTrue(gl_entries)

        stock_in_hand_account = pr.get("items")[0].warehouse
        fixed_asset_account = pr.get("items")[1].warehouse
        expected_values = {
            stock_in_hand_account: [400.0, 0.0],
            fixed_asset_account: [400.0, 0.0],
            "Stock Received But Not Billed - _TC": [0.0, 500.0],
            "Expenses Included In Valuation - _TC": [0.0, 300.0]
        }
        for gle in gl_entries:
            self.assertEqual(expected_values[gle.account][0], gle.debit)
            self.assertEqual(expected_values[gle.account][1], gle.credit)

        set_perpetual_inventory(0)

    def test_landed_cost_voucher_for_serialized_item(self):
        """Landed cost is reflected in the serial numbers' purchase rate."""
        set_perpetual_inventory(1)
        frappe.db.sql("delete from `tabSerial No` where name in ('SN001', 'SN002', 'SN003', 'SN004', 'SN005')")
        pr = frappe.copy_doc(pr_test_records[0])
        pr.items[0].item_code = "_Test Serialized Item"
        pr.items[0].serial_no = "SN001\nSN002\nSN003\nSN004\nSN005"
        pr.submit()

        serial_no_rate = frappe.db.get_value("Serial No", "SN001", "purchase_rate")

        self.submit_landed_cost_voucher(pr)

        serial_no = frappe.db.get_value("Serial No", "SN001",
            ["warehouse", "purchase_rate"], as_dict=1)

        # The voucher adds 5.0 per unit to the serialized item's rate.
        self.assertEqual(serial_no.purchase_rate - serial_no_rate, 5.0)
        self.assertEqual(serial_no.warehouse, "_Test Warehouse - _TC")

        set_perpetual_inventory(0)

    def submit_landed_cost_voucher(self, pr):
        """Create and submit an LCV of 50.0 insurance charges against ``pr``."""
        lcv = frappe.new_doc("Landed Cost Voucher")
        lcv.company = "_Test Company"
        lcv.set("purchase_receipts", [{
            "purchase_receipt": pr.name,
            "supplier": pr.supplier,
            "posting_date": pr.posting_date,
            "grand_total": pr.base_grand_total
        }])
        lcv.set("taxes", [{
            "description": "Insurance Charges",
            "account": "_Test Account Insurance Charges - _TC",
            "amount": 50.0
        }])

        lcv.insert()
        lcv.submit()
test_records = frappe.get_test_records('Landed Cost Voucher') | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
#
# Copyright (C) 2008 Yu-Jie Lin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
import gdata.webmastertools.service
import gdata.service
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import getpass
# Prompt for Google account credentials (ClientLogin-era sample).
username = ''
password = ''

username = raw_input('Please enter your username: ')
password = getpass.getpass()

# Service client for the (now retired) Webmaster Tools GData API.
client = gdata.webmastertools.service.GWebmasterToolsService(
    email=username,
    password=password, source='PythonWebmasterToolsSample-1')

# Placeholder site/sitemap used throughout the sample; we do not own it,
# so verification calls below stay commented out.
EXAMPLE_SITE = 'http://www.example.com/'
EXAMPLE_SITEMAP = 'http://www.example.com/sitemap-index.xml'
def safeElementText(element):
    """Return an element's text, or '' when there is none.

    Callers slice the result (e.g. ``safeElementText(x)[:25]``), so this
    must never return None. The previous version returned ``element.text``
    unchanged, which is None for empty XML elements and made the slicing
    callers raise TypeError.
    """
    text = getattr(element, 'text', '')
    return text if text is not None else ''
# Authenticate (Python 2 ClientLogin flow).
print 'Logging in'
client.ProgrammaticLogin()

# Add the sample site and print a fixed-width summary of its entry.
print
print 'Adding site: %s' % EXAMPLE_SITE
entry = client.AddSite(EXAMPLE_SITE)

print
print "%-25s %25s %25s" % ('Site', 'Last Updated', 'Last Crawled')
print '='*80
print "%-25s %25s %25s" % (
    entry.title.text.replace('http://', '')[:25], entry.updated.text[:25],
    safeElementText(entry.crawled)[:25])
print " Preferred: %-23s Indexed: %5s GeoLoc: %10s" % (
    safeElementText(entry.preferred_domain)[:30], entry.indexed.text[:5],
    safeElementText(entry.geolocation)[:10])
print " Crawl rate: %-10s Verified: %5s" % (
    safeElementText(entry.crawl_rate)[:10], entry.verified.text[:5])

# Verifying a site. This sample won't do this since we don't own example.com
#client.VerifySite(EXAMPLE_SITE, 'htmlpage')

# The following needs the ownership of the site
#client.UpdateGeoLocation(EXAMPLE_SITE, 'US')
#client.UpdateCrawlRate(EXAMPLE_SITE, 'normal')
#client.UpdatePreferredDomain(EXAMPLE_SITE, 'preferwww')
#client.UpdateEnhancedImageSearch(EXAMPLE_SITE, 'true')

# Add a sitemap to the site and print its status summary.
print
print 'Adding sitemap: %s' % EXAMPLE_SITEMAP
entry = client.AddSitemap(EXAMPLE_SITE, EXAMPLE_SITEMAP)

print entry.title.text.replace('http://', '')[:80]
print " Last Updated : %29s Status: %10s" % (
    entry.updated.text[:29], entry.sitemap_status.text[:10])
print " Last Downloaded: %29s URL Count: %10s" % (
    safeElementText(entry.sitemap_last_downloaded)[:29],
    safeElementText(entry.sitemap_url_count)[:10])

# Add a mobile sitemap
#entry = client.AddMobileSitemap(EXAMPLE_SITE, 'http://.../sitemap-mobile-example.xml', 'XHTML')

# Add a news sitemap, your site must be included in Google News.
# See also http://google.com/support/webmasters/bin/answer.py?answer=42738
#entry = client.AddNewsSitemap(EXAMPLE_SITE, 'http://.../sitemap-news-example.xml', 'Label')

# Clean up: remove the sitemap and site created above.
print
print 'Deleting sitemap: %s' % EXAMPLE_SITEMAP
client.DeleteSitemap(EXAMPLE_SITE, EXAMPLE_SITEMAP)

print
print 'Deleting site: %s' % EXAMPLE_SITE
client.DeleteSite(EXAMPLE_SITE)
print
// Simulated data loader for the child route: waits half a second, then
// returns a timestamped message so re-runs are visible in the UI.
async function loader() {
  const delay = new Promise<void>((resolve) => {
    setTimeout(resolve, 500);
  });
  await delay;
  return {
    message: `Child route loader ran at ${new Date().toISOString()}`,
  };
}
// Async route component: awaits the loader and renders its message.
export async function Component() {
  const data = await loader();

  return (
    <div style={{ border: "1px solid black", padding: "10px" }}>
      <h3>Child Route</h3>
      <p>Loader data: {data.message}</p>
    </div>
  );
}
import re
from clusto_query.exceptions import (StringParseError,
ExpectedTokenError,
UnexpectedTokenError)
from clusto_query.query.objects import Attribute
from clusto_query.query.operator import (UNARY_BOOLEAN_OPERATORS,
SUFFIX_OPERATORS,
INFIX_OPERATORS,
BOOLEAN_OPERATORS)
_attribute_re = re.compile(r'([\w-]+)(\.([\w-]+))?(\:([0-9]+))?')
def _expect(token, q):
if not q or q[0] != token:
raise ExpectedTokenError(token, q[0])
else:
return q[1:]
def parse_attribute(q):
    """Parse an attribute token (name[.subkey][:number]) from the front of `q`.

    Returns (Attribute, remaining_tokens); raises StringParseError when the
    token does not look like an attribute.
    """
    match = _attribute_re.match(q[0])
    if match is None:
        raise StringParseError
    attr = Attribute(match.group(1), match.group(3), match.group(5))
    return attr, q[1:]
def parse_expression(q):
    """Parse one expression from token list `q`; return (node, rest).

    Grammar (informal):
        expr := '(' boolean ')'
              | 'attr' attribute (suffix_op | infix_op value)
              | unary_op expr
              | value (suffix_op | infix_op value)

    Fixes:
    * The operator tables were sorted with ``key=len`` over ``(op, cls)``
      tuples — ``len`` of a 2-tuple is always 2, so the intended
      longest-operator-first ordering never happened. Sort by the length
      of the operator string instead.
    * A bare trailing value (e.g. the query ``["hostname"]``) used to die
      with IndexError on ``q[0]``; raise UnexpectedTokenError instead.
    """
    if q[0] == "(":
        lhs, q = parse_boolean(q[1:])
        q = _expect(")", q)
        return lhs, q
    elif q[0] == "attr":
        lhs, q = parse_attribute(_expect("attr", q))
    elif "." in q[0]:
        raise StringParseError("Found a . in %s; missing attr?" % q[0])
    else:
        # Longest operators first, so multi-character operators win over
        # their prefixes.
        for op, kls in sorted(UNARY_BOOLEAN_OPERATORS.items(),
                              key=lambda pair: len(pair[0]), reverse=True):
            if op == q[0]:
                exp, rest = parse_expression(_expect(op, q))
                return kls(exp), rest
        lhs = q[0]
        q = q[1:]
    if not q:
        # A value with no trailing operator is a parse error, not a crash.
        raise UnexpectedTokenError(q)
    for op, kls in sorted(SUFFIX_OPERATORS.items(),
                          key=lambda pair: len(pair[0]), reverse=True):
        if q[0] == op:
            return kls(lhs), q[1:]
    for op, kls in sorted(INFIX_OPERATORS.items(),
                          key=lambda pair: len(pair[0]), reverse=True):
        if q[0] == op:
            return kls(lhs, q[1]), q[2:]
    raise UnexpectedTokenError(q)
def parse_boolean(q):
    """Parse a boolean combination of expressions; return (node, rest).

    Fix: the boolean-operator table was sorted with ``key=len`` over
    ``(op, cls)`` tuples, which is constant (2); sort by the operator
    string's length so the intended longest-first ordering holds.
    """
    lhs, rest = parse_expression(q)
    if rest:
        for op, kls in sorted(BOOLEAN_OPERATORS.items(),
                              key=lambda pair: len(pair[0]), reverse=True):
            if rest[0] == op:
                rhs, rest = parse_boolean(rest[1:])
                lhs = kls(lhs, rhs)
                break
        else:
            # No boolean operator: either we are closing a parenthesized
            # group, or the token stream is malformed.
            if rest[0] == ")":
                return lhs, rest
            else:
                raise UnexpectedTokenError(rest)
    return lhs, rest
def parse_query(q):
    """Parse a full tokenized query; returns (root_node, leftover_tokens)."""
    return parse_boolean(q)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Greyscale ℓ1-TV Denoising
=========================
This example demonstrates the use of class :class:`.tvl1.TVL1Denoise` for removing salt & pepper noise from a greyscale image using Total Variation regularization with an ℓ1 data fidelity term (ℓ1-TV denoising).
"""
from __future__ import print_function
from builtins import input
import numpy as np
from sporco.admm import tvl1
from sporco import util
from sporco import signal
from sporco import metric
from sporco import plot
"""
Load reference image.
"""
img = util.ExampleImages().image('monarch.png', scaled=True,
idxexp=np.s_[:,160:672], gray=True)
"""
Construct test image corrupted by 20% salt & pepper noise.
"""
np.random.seed(12345)
imgn = signal.spnoise(img, 0.2)
"""
Set regularization parameter and options for ℓ1-TV denoising solver. The regularization parameter used here has been manually selected for good performance.
"""
lmbda = 8e-1
opt = tvl1.TVL1Denoise.Options({'Verbose': True, 'MaxMainIter': 200,
'RelStopTol': 5e-3, 'gEvalY': False,
'AutoRho': {'Enabled': True}})
"""
Create solver object and solve, returning the the denoised image ``imgr``.
"""
b = tvl1.TVL1Denoise(imgn, lmbda, opt)
imgr = b.solve()
"""
Display solve time and denoising performance.
"""
print("TVL1Denoise solve time: %5.2f s" % b.timer.elapsed('solve'))
print("Noisy image PSNR: %5.2f dB" % metric.psnr(img, imgn))
print("Denoised image PSNR: %5.2f dB" % metric.psnr(img, imgr))
"""
Display reference, corrupted, and denoised images.
"""
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.imview(img, title='Reference', fig=fig)
plot.subplot(1, 3, 2)
plot.imview(imgn, title='Corrupted', fig=fig)
plot.subplot(1, 3, 3)
plot.imview(imgr, title=r'Restored ($\ell_1$-TV)', fig=fig)
fig.show()
"""
Get iterations statistics from solver object and plot functional value, ADMM primary and dual residuals, and automatically adjusted ADMM penalty parameter against the iteration number.
"""
its = b.getitstat()
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(1, 3, 2)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(1, 3, 3)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
# Wait for enter on keyboard
input() | unknown | codeparrot/codeparrot-clean | ||
"""Tracers that call listeners."""
from collections.abc import Awaitable, Callable
from typing import TYPE_CHECKING
from langchain_core.runnables.config import (
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
)
from langchain_core.tracers.base import AsyncBaseTracer, BaseTracer
from langchain_core.tracers.schemas import Run
if TYPE_CHECKING:
from uuid import UUID
# A synchronous run listener: receives the Run and, optionally, the config.
Listener = Callable[[Run], None] | Callable[[Run, RunnableConfig], None]
# The asynchronous counterpart; each variant must return an awaitable.
AsyncListener = (
    Callable[[Run], Awaitable[None]] | Callable[[Run, RunnableConfig], Awaitable[None]]
)
class RootListenersTracer(BaseTracer):
    """Tracer that invokes user-supplied listeners for the root run only."""

    log_missing_parent = False
    """Whether to log a warning if the parent is missing."""

    def __init__(
        self,
        *,
        config: RunnableConfig,
        on_start: Listener | None,
        on_end: Listener | None,
        on_error: Listener | None,
    ) -> None:
        """Initialize the tracer.

        Args:
            config: The runnable config passed through to each listener.
            on_start: Called once, when the root run is created.
            on_end: Called when the root run finishes without an error.
            on_error: Called when the root run finishes with an error.
        """
        super().__init__(_schema_format="original+chat")

        self.config = config
        self._arg_on_start = on_start
        self._arg_on_end = on_end
        self._arg_on_error = on_error
        self.root_id: UUID | None = None

    def _persist_run(self, run: Run) -> None:
        # Legacy hook invoked once per run tree; nothing to persist here.
        pass

    def _on_run_create(self, run: Run) -> None:
        # Only the first run created is the root; ignore all descendants.
        if self.root_id is None:
            self.root_id = run.id
            if self._arg_on_start is not None:
                call_func_with_variable_args(self._arg_on_start, run, self.config)

    def _on_run_update(self, run: Run) -> None:
        if run.id != self.root_id:
            return
        # Success dispatches to on_end, failure to on_error.
        listener = self._arg_on_end if run.error is None else self._arg_on_error
        if listener is not None:
            call_func_with_variable_args(listener, run, self.config)
class AsyncRootListenersTracer(AsyncBaseTracer):
    """Async tracer that invokes user-supplied listeners for the root run only."""

    log_missing_parent = False
    """Whether to log a warning if the parent is missing."""

    def __init__(
        self,
        *,
        config: RunnableConfig,
        on_start: AsyncListener | None,
        on_end: AsyncListener | None,
        on_error: AsyncListener | None,
    ) -> None:
        """Initialize the tracer.

        Args:
            config: The runnable config passed through to each listener.
            on_start: Awaited once, when the root run is created.
            on_end: Awaited when the root run finishes without an error.
            on_error: Awaited when the root run finishes with an error.
        """
        super().__init__(_schema_format="original+chat")

        self.config = config
        self._arg_on_start = on_start
        self._arg_on_end = on_end
        self._arg_on_error = on_error
        self.root_id: UUID | None = None

    async def _persist_run(self, run: Run) -> None:
        # Legacy hook invoked once per run tree; nothing to persist here.
        pass

    async def _on_run_create(self, run: Run) -> None:
        # Only the first run created is the root; ignore all descendants.
        if self.root_id is None:
            self.root_id = run.id
            if self._arg_on_start is not None:
                await acall_func_with_variable_args(
                    self._arg_on_start, run, self.config
                )

    async def _on_run_update(self, run: Run) -> None:
        if run.id != self.root_id:
            return
        # Success dispatches to on_end, failure to on_error.
        listener = self._arg_on_end if run.error is None else self._arg_on_error
        if listener is not None:
            await acall_func_with_variable_args(listener, run, self.config)
from django.db import models
from django_extensions.db.fields import UUIDField
from developer.models import Developer
from reporter.models import Reporter
import datetime
# Create your models here.
class Bug(models.Model):
    """A reported software bug, filed by a Reporter and assigned to a Developer."""

    guid = UUIDField(db_index=True)
    title = models.CharField(max_length=100, null=False, blank=False, db_index=True)
    description = models.TextField(max_length=600, blank=True, null=True)
    link = models.URLField(max_length=300, blank=True, null=True)
    screenshot = models.ImageField(upload_to='bugs_screenshots', null=True, blank=True)
    guidelines = models.TextField(blank=True, null=True)

    # datetime fields
    # FIX: the original declared both ``auto_now_add=True`` and
    # ``default=datetime.datetime.now()``. The call is evaluated once at
    # import time (every row would share the server start timestamp), and
    # Django treats auto_now_add and default as mutually exclusive
    # (fields.E160). auto_now_add alone gives the intended behavior.
    created_at = models.DateTimeField(auto_now_add=True)
    fixed_at = models.DateTimeField(null=True)  # set when the bug is closed

    # foreign keys
    assigned_developer = models.ForeignKey(Developer, related_name='assigned_developer', db_index=True)
    reporter = models.ForeignKey(Reporter, related_name='reporter', db_index=True)

    # Bugs categories
    type1 = 'functional'
    type2 = 'logical'
    type3 = 'UI'
    type4 = 'design'
    type5 = 'typographical'
    type6 = 'system'
    type7 = 'standards'
    type8 = 'requirements'
    CATEGORY_LIST = (
        (type1, 'Functional'),
        (type2, 'Logical'),
        (type3, 'UI'),
        (type4, 'Design'),
        (type5, 'Typographical'),
        (type6, 'System'),
        (type7, 'Standards'),
        (type8, 'Requirements'),
    )

    # Bug Statuses : (Open -> Assigned -> Closed) or (Open -> Assigned -> Cancelled or Deferred) or (Open -> Cancelled or Deferred)
    status1 = 'open'
    status2 = 'closed'
    status3 = 'cancelled'
    status4 = 'deferred'
    status5 = 'assigned'
    STATUS_LIST = (
        (status1, 'Open'),
        (status2, 'Closed'),
        (status3, 'Cancelled'),
        (status4, 'Deferred'),
        (status5, 'Assigned')
    )

    # Priority
    p1 = '1'
    p2 = '2'
    p3 = '3'
    p4 = '4'
    p5 = '5'
    PRIORITY_LIST = (
        (p1, '1'),
        (p2, '2'),
        (p3, '3'),
        (p4, '4'),
        (p5, '5')
    )

    # choice fields
    category = models.CharField(max_length=30, choices=CATEGORY_LIST, null=False, blank=False, db_index=True)
    status = models.CharField(max_length=30, choices=STATUS_LIST, null=False, db_index=True)
    priority = models.CharField(max_length=2, choices=PRIORITY_LIST, null=False, db_index=True)

    def __unicode__(self):
        return '%s <--> %s : %s, %s, %s ' % (self.title, self.guid, self.reporter, self.status, self.category,)
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: docker_network
version_added: "2.2"
short_description: Manage Docker networks
description:
- Create/remove Docker networks and connect containers to them.
- Performs largely the same function as the "docker network" CLI subcommand.
options:
name:
description:
- Name of the network to operate on.
required: true
aliases:
- network_name
connected:
description:
- List of container names or container IDs to connect to a network.
aliases:
- containers
driver:
description:
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
default: bridge
driver_options:
description:
- Dictionary of network settings. Consult docker docs for valid options and values.
force:
description:
- With state I(absent) forces disconnecting all containers from the
network prior to deleting the network. With state I(present) will
disconnect all containers, delete the network and re-create the
network. This option is required if you have changed the IPAM or
driver options and want an existing network to be updated to use the
new options.
type: bool
default: 'no'
appends:
description:
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
Use C(appends) to leave existing containers connected.
type: bool
default: 'no'
aliases:
- incremental
ipam_driver:
description:
- Specify an IPAM driver.
ipam_options:
description:
- Dictionary of IPAM options.
state:
description:
- I(absent) deletes the network. If a network has connected containers, it
cannot be deleted. Use the C(force) option to disconnect all containers
and delete the network.
- I(present) creates the network, if it does not already exist with the
specified parameters, and connects the list of containers provided via
the connected parameter. Containers not on the list will be disconnected.
An empty list will leave no containers connected to the network. Use the
C(appends) option to leave existing containers connected. Use the C(force)
options to force re-creation of the network.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
author:
- "Ben Keith (@keitwb)"
- "Chris Houseknecht (@chouseknecht)"
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Please note that the L(docker-py,https://pypi.org/project/docker-py/) Python
module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
install the C(docker) Python module. Note that both modules should I(not)
be installed at the same time."
- "The docker server >= 1.9.0"
'''
EXAMPLES = '''
- name: Create a network
docker_network:
name: network_one
- name: Remove all but selected list of containers
docker_network:
name: network_one
connected:
- container_a
- container_b
- container_c
- name: Remove a single container
docker_network:
name: network_one
connected: "{{ fulllist|difference(['container_a']) }}"
- name: Add a container to a network, leaving existing containers connected
docker_network:
name: network_one
connected:
- container_a
appends: yes
- name: Create a network with options
docker_network:
name: network_two
driver_options:
com.docker.network.bridge.name: net2
ipam_options:
subnet: '172.3.26.0/16'
gateway: 172.3.26.1
iprange: '192.168.1.0/24'
- name: Delete a network, disconnecting all containers
docker_network:
name: network_one
state: absent
force: yes
'''
RETURN = '''
facts:
description: Network inspection results for the affected network.
returned: success
type: dict
sample: {}
'''
from ansible.module_utils.docker_common import AnsibleDockerClient, DockerBaseClass, HAS_DOCKER_PY_2, HAS_DOCKER_PY_3
try:
    from docker import utils
    if HAS_DOCKER_PY_2 or HAS_DOCKER_PY_3:
        # docker-py 2.x/3.x moved the IPAM helpers into docker.types.
        from docker.types import IPAMPool, IPAMConfig
except ImportError:
    # Only swallow a genuinely missing/old docker-py; a bare `except:` would
    # also hide SystemExit/KeyboardInterrupt and unrelated failures.
    # missing docker-py handled in ansible.module_utils.docker_common
    pass
class TaskParameters(DockerBaseClass):
    """Typed view over the module parameters supplied by the Ansible client.

    Every supported option is pre-declared as ``None`` so attribute access
    is always safe, then the user-supplied module parameters are overlaid.
    """
    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client
        # Declare the expected options up front.
        for attr in ('network_name', 'connected', 'driver', 'driver_options',
                     'ipam_driver', 'ipam_options', 'appends', 'force',
                     'debug'):
            setattr(self, attr, None)
        # Overlay whatever the user actually passed to the module.
        for name, value in client.module.params.items():
            setattr(self, name, value)
def container_names_in_network(network):
    """Return the names of all containers attached to *network*.

    *network* is a docker network inspect dict; an empty/None 'Containers'
    entry yields an empty list.
    """
    containers = network['Containers']
    if not containers:
        return []
    return [entry['Name'] for entry in containers.values()]
class DockerNetworkManager(object):
    """Reconciles the requested Docker network state with reality.

    All work happens in the constructor; the outcome (changed flag, action
    log, diff, facts) is accumulated in ``self.results`` for module exit.
    """
    def __init__(self, client):
        # client: AnsibleDockerClient wrapping both the Ansible module and
        # the Docker API connection.
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {
            u'changed': False,
            u'actions': []
        }
        self.diff = self.client.module._diff
        self.existing_network = self.get_existing_network()
        # When no explicit 'connected' list was given, keep the network's
        # current membership so containers are not disconnected by accident.
        if not self.parameters.connected and self.existing_network:
            self.parameters.connected = container_names_in_network(self.existing_network)
        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()
    def get_existing_network(self):
        """Return the inspect dict of the named network, or None if absent."""
        networks = self.client.networks(names=[self.parameters.network_name])
        # check if a user is trying to find network by its Id
        if not networks:
            networks = self.client.networks(ids=[self.parameters.network_name])
        if not networks:
            return None
        else:
            return networks[0]
    def has_different_config(self, net):
        '''
        Evaluates an existing network and returns a tuple containing a boolean
        indicating if the configuration is different and a list of differences.

        :param net: the inspection output for an existing network
        :return: (bool, list)
        '''
        different = False
        differences = []
        if self.parameters.driver and self.parameters.driver != net['Driver']:
            different = True
            differences.append('driver')
        if self.parameters.driver_options:
            # Every requested driver option must be present with the same value.
            if not net.get('Options'):
                different = True
                differences.append('driver_options')
            else:
                for key, value in self.parameters.driver_options.items():
                    if not (key in net['Options']) or value != net['Options'][key]:
                        different = True
                        differences.append('driver_options.%s' % key)
        if self.parameters.ipam_driver:
            if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
                different = True
                differences.append('ipam_driver')
        if self.parameters.ipam_options:
            if not net.get('IPAM') or not net['IPAM'].get('Config'):
                different = True
                differences.append('ipam_options')
            else:
                # Only the first IPAM config entry is compared.  Module option
                # keys are lower-case while the inspect output uses CamelCase,
                # hence the case-insensitive key matching below.
                for key, value in self.parameters.ipam_options.items():
                    camelkey = None
                    for net_key in net['IPAM']['Config'][0]:
                        if key == net_key.lower():
                            camelkey = net_key
                            break
                    if not camelkey:
                        # key not found
                        different = True
                        differences.append('ipam_options.%s' % key)
                    elif net['IPAM']['Config'][0].get(camelkey) != value:
                        # key has different value
                        different = True
                        differences.append('ipam_options.%s' % key)
        return different, differences
    def create_network(self):
        """Create the network (honouring check mode) if it does not exist yet."""
        if not self.existing_network:
            ipam_pools = []
            if self.parameters.ipam_options:
                # docker-py 2.x/3.x provides IPAMPool; older versions expose
                # the equivalent helper on docker.utils.
                if HAS_DOCKER_PY_2 or HAS_DOCKER_PY_3:
                    ipam_pools.append(IPAMPool(**self.parameters.ipam_options))
                else:
                    ipam_pools.append(utils.create_ipam_pool(**self.parameters.ipam_options))
            if HAS_DOCKER_PY_2 or HAS_DOCKER_PY_3:
                ipam_config = IPAMConfig(driver=self.parameters.ipam_driver,
                                         pool_configs=ipam_pools)
            else:
                ipam_config = utils.create_ipam_config(driver=self.parameters.ipam_driver,
                                                       pool_configs=ipam_pools)
            if not self.check_mode:
                resp = self.client.create_network(self.parameters.network_name,
                                                  driver=self.parameters.driver,
                                                  options=self.parameters.driver_options,
                                                  ipam=ipam_config)
                self.existing_network = self.client.inspect_network(resp['Id'])
            # The action is recorded even in check mode.
            self.results['actions'].append("Created network %s with driver %s" % (self.parameters.network_name, self.parameters.driver))
            self.results['changed'] = True
    def remove_network(self):
        """Disconnect every attached container, then delete the network."""
        if self.existing_network:
            self.disconnect_all_containers()
            if not self.check_mode:
                self.client.remove_network(self.parameters.network_name)
            self.results['actions'].append("Removed network %s" % (self.parameters.network_name,))
            self.results['changed'] = True
    def is_container_connected(self, container_name):
        """True if the named container is attached per the cached network state."""
        return container_name in container_names_in_network(self.existing_network)
    def connect_containers(self):
        """Attach every container from 'connected' that is not attached yet."""
        for name in self.parameters.connected:
            if not self.is_container_connected(name):
                if not self.check_mode:
                    self.client.connect_container_to_network(name, self.parameters.network_name)
                self.results['actions'].append("Connected container %s" % (name,))
                self.results['changed'] = True
    def disconnect_missing(self):
        """Detach containers that are attached but absent from 'connected'."""
        if not self.existing_network:
            return
        containers = self.existing_network['Containers']
        if not containers:
            return
        for c in containers.values():
            name = c['Name']
            if name not in self.parameters.connected:
                self.disconnect_container(name)
    def disconnect_all_containers(self):
        """Detach every currently attached container (re-inspects the network)."""
        containers = self.client.inspect_network(self.parameters.network_name)['Containers']
        if not containers:
            return
        for cont in containers.values():
            self.disconnect_container(cont['Name'])
    def disconnect_container(self, container_name):
        """Detach a single container (honouring check mode) and record it."""
        if not self.check_mode:
            self.client.disconnect_container_from_network(container_name, self.parameters.network_name)
        self.results['actions'].append("Disconnected container %s" % (container_name,))
        self.results['changed'] = True
    def present(self):
        """Ensure the network exists with the requested config and membership."""
        different = False
        differences = []
        if self.existing_network:
            different, differences = self.has_different_config(self.existing_network)
        # A config mismatch (or force=yes) is resolved by destroying and
        # re-creating the network.
        if self.parameters.force or different:
            self.remove_network()
            self.existing_network = None
        self.create_network()
        self.connect_containers()
        if not self.parameters.appends:
            self.disconnect_missing()
        if self.diff or self.check_mode or self.parameters.debug:
            self.results['diff'] = differences
        # The action log is only kept for check-mode / debug runs.
        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')
        self.results['ansible_facts'] = {u'docker_network': self.get_existing_network()}
    def absent(self):
        """Ensure the network does not exist."""
        self.remove_network()
def main():
    """Module entry point: declare the accepted options and run the manager."""
    argument_spec = dict(
        network_name=dict(type='str', required=True, aliases=['name']),
        connected=dict(type='list', default=[], aliases=['containers']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        driver=dict(type='str', default='bridge'),
        driver_options=dict(type='dict', default={}),
        force=dict(type='bool', default=False),
        appends=dict(type='bool', default=False, aliases=['incremental']),
        ipam_driver=dict(type='str', default=None),
        ipam_options=dict(type='dict', default={}),
        debug=dict(type='bool', default=False)
    )
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    # The manager does all its work in the constructor; results are ready here.
    cm = DockerNetworkManager(client)
    client.module.exit_json(**cm.results)
if __name__ == '__main__':
    main()
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/samsung,exynos5410-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos5410 SoC clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
description: |
Expected external clocks, defined in DTS as fixed-rate clocks with a matching
name:
- "fin_pll" - PLL input clock from XXTI
All available clocks are defined as preprocessor macros in
include/dt-bindings/clock/exynos5410.h header.
properties:
compatible:
oneOf:
- enum:
- samsung,exynos5410-clock
clocks:
description:
Should contain an entry specifying the root clock from external
oscillator supplied through XXTI or XusbXTI pin. This clock should be
defined using standard clock bindings with "fin_pll" clock-output-name.
That clock is being passed internally to the 9 PLLs.
maxItems: 1
"#clock-cells":
const: 1
reg:
maxItems: 1
required:
- compatible
- "#clock-cells"
- reg
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/exynos5410.h>
fin_pll: osc-clock {
compatible = "fixed-clock";
clock-frequency = <24000000>;
clock-output-names = "fin_pll";
#clock-cells = <0>;
};
clock-controller@10010000 {
compatible = "samsung,exynos5410-clock";
reg = <0x10010000 0x30000>;
#clock-cells = <1>;
clocks = <&fin_pll>;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml |
#!/usr/bin/env python
# sp800_22_frequency_within_block_test.py
#
# Copyright (C) 2017 David Johnston
# This program is distributed under the terms of the GNU General Public License.
#
# This file is part of sp800_22_tests.
#
# sp800_22_tests is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sp800_22_tests is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sp800_22_tests. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import math
from fractions import Fraction
#from scipy.special import gamma, gammainc, gammaincc
from gamma_functions import *
#ones_table = [bin(i)[2:].count('1') for i in range(256)]
def count_ones_zeroes(bits):
    """Tally *bits* in a single pass and return the tuple (zeroes, ones).

    Any element that is not exactly 1 is counted as a zero.
    """
    counts = {True: 0, False: 0}
    for bit in bits:
        counts[bit == 1] += 1
    return (counts[False], counts[True])
def frequency_within_block_test(bits):
    """NIST SP800-22 frequency-within-block test.

    Splits *bits* into N blocks of M bits, compares the proportion of ones in
    each block against 1/2 via a chi-square statistic, and returns the tuple
    (success, p_value, None).  At least 100 bits of input are required.
    """
    n = len(bits)
    # Choose the partition: minimum block size 20 bits, at most 100 blocks.
    M = 20
    N = int(math.floor(n / M))
    if N > 99:
        N = 99
        M = int(math.floor(n / N))
    if n < 100:
        print("Too little data for test. Supply at least 100 bits")
        return False, 1.0, None
    print(" n = %d" % n)
    print(" N = %d" % N)
    print(" M = %d" % M)
    # Proportion of ones within each M-bit block, kept as exact fractions.
    proportions = []
    for i in range(N):
        _, ones = count_ones_zeroes(bits[i * M:(i + 1) * M])
        proportions.append(Fraction(ones, M))
    # Chi-square statistic: 4 * M * sum((pi - 1/2)^2)
    chisq = sum(4.0 * M * ((pi - Fraction(1, 2)) ** 2) for pi in proportions)
    p = gammaincc(N / 2.0, float(chisq) / 2.0)
    return (p >= 0.01), p, None
#!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import os
import stat
from optparse import OptionParser
from django.core.management import ManagementUtility
class bcolors:
    """
    ANSI escape sequences for terminal colors
    """
    # Foreground colors.
    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    # Reset: restores the terminal's default attributes.
    ENDC = "\033[0m"
    # Text attributes.
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
def create_project(parser, options, args):
    """Scaffold a new Arctic project from the bundled project template.

    ``args`` is the raw positional argument list: ``args[1]`` is the project
    name, the optional ``args[2]`` is the destination directory.
    """
    # Validate the positional arguments first.
    if len(args) < 2:
        parser.error("Please specify a name for your Arctic installation")
    elif len(args) > 3:
        parser.error("Too many arguments")
    project_name = args[1]
    dest_dir = args[2] if len(args) > 2 else ""
    # Refuse names that shadow an importable Python module/package.
    try:
        __import__(project_name)
    except ImportError:
        pass
    else:
        parser.error(
            '"{}" conflicts with the name of an existing '
            "Python module and cannot be used as a project "
            "name. Please try another name.".format(project_name)
        )
    print("Creating an Arctic project named {}".format(project_name))
    # Locate the project template that ships inside the arctic package.
    import arctic
    template_path = os.path.join(
        os.path.dirname(arctic.__file__), "project_template/start"
    )
    # Delegate the actual scaffolding to django-admin startproject.
    command = [
        "django-admin.py",
        "startproject",
        "--template=" + template_path,
        "--ext=html,rst",
        project_name,
    ]
    if dest_dir:
        command.append(dest_dir)
    ManagementUtility(command).execute()
    # startproject does not preserve the executable bit; restore it on
    # manage.py so it can be run directly.
    manage_py = os.path.join(dest_dir or project_name, "manage.py")
    mode = os.stat(manage_py).st_mode
    os.chmod(manage_py, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    print(
        "Congratulations! {0} has been created.\n"
        "The next steps are:\n"
        "- In config/settings.py change the database settings (if needed).\n"
        "- Run database migrations: {0}/manage.py migrate.\n"
        "- Create an admin user: {0}/manage.py createsuperuser.\n"
        "- Finally run the project: {0}/manage.py runserver.\n".format(
            project_name
        )
    )
def create_app(parser, options, args):
    """Scaffold a new app inside an Arctic project from the app template.

    ``args[1]`` is the app name (lower-cased), the optional ``args[2]`` is
    the destination directory.
    """
    # Validate args
    if len(args) < 2:
        parser.error("Please specify a name for your app")
    elif len(args) > 3:
        parser.error("Too many arguments")
    app_name = args[1].lower()
    try:
        dest_dir = args[2]
    except IndexError:
        dest_dir = ""
    # Make sure given name is not already in use by another
    # python package/module.
    try:
        __import__(app_name)
    except ImportError:
        pass
    else:
        parser.error(
            '"{}" conflicts with the name of an existing '
            "Python module and cannot be used as an app "
            "name. Please try another name.".format(app_name)
        )
    print(
        (
            bcolors.HEADER + "Creating an App named {}" + bcolors.ENDC + "\n"
        ).format(app_name)
    )
    # First find the path to Arctic
    import arctic
    arctic_path = os.path.dirname(arctic.__file__)
    template_path = os.path.join(arctic_path, "project_template/app")
    # Call django-admin startapp
    utility_args = [
        "django-admin.py",
        "startapp",
        "--template=" + template_path,
        app_name,
    ]
    if dest_dir:
        utility_args.append(dest_dir)
    utility = ManagementUtility(utility_args)
    utility.execute()
    # Post-creation guidance, colorized with the bcolors escape codes.
    # NOTE(review): "All of then" near the end is a typo for "All of them"
    # in the emitted text; left untouched here as it is runtime output.
    print(
        (
            "Congratulations! {0} folder has been created it contains the "
            "following structure.\n\n" + bcolors.OKBLUE + " -{0}\n"
            " ---__init__.py\n"
            " ---apps.py\n"
            " ---forms.py\n"
            " ---models.py\n"
            " ---urls.py\n"
            " ---views.py\n\n" + bcolors.ENDC + "The next steps are:\n\n"
            " Add the app name to "
            + bcolors.UNDERLINE
            + "INSTALLED_APPS"
            + bcolors.ENDC
            + " in the settings.py\n"  # NOQA
            + bcolors.OKGREEN
            + '"{0}",'
            + bcolors.ENDC
            + "\n"
            " Add the app name and path to "
            + bcolors.UNDERLINE
            + "ARCTIC_MENU"
            + bcolors.ENDC
            + " in the settings.py\n"  # NOQA
            + bcolors.OKGREEN
            + '("{1}", "{0}:list", "fa-folder"),'
            + bcolors.ENDC
            + "\n"  # NOQA
            " Add the urls to config/urls.py.\n"
            + bcolors.OKGREEN
            + 'url(r"^{0}/", include("{0}.urls", "{0}")),'
            + bcolors.ENDC
            + "\n"  # NOQA
            " Add fields in the models.py file\n"
            "- Run "
            + bcolors.OKGREEN
            + "./manage.py makemigrations {0}"
            + bcolors.ENDC
            + "\n"  # NOQA
            "- Run "
            + bcolors.OKGREEN
            + "./manage.py migrate"
            + bcolors.ENDC
            + "\n\n"  # NOQA
            "The "
            + bcolors.BOLD
            + "forms.py"
            + bcolors.ENDC
            + " has a form with all the fields in the model and \n"  # NOQA
            "the "
            + bcolors.BOLD
            + "views.py"
            + bcolors.ENDC
            + " contains views for list, create, edit and delete. \n"  # NOQA
            "All of then can be tweaked to better satisfy the needs of the "
            "project/app\n"
        ).format(app_name, app_name.capitalize())
    )
# Maps each CLI subcommand name to its handler function.
COMMANDS = {
    "start": create_project,
    "createapp": create_app,
}
def main():
    """Parse the command line and dispatch to the requested subcommand."""
    # Parse options.  The two usage lines were previously adjacent string
    # literals with no separator, so they concatenated into one run-on line;
    # the explicit "\n" keeps each invocation on its own line.
    parser = OptionParser(
        usage="Usage: arctic start project_name [directory]\n"
              "Usage: arctic createapp appname [directory]"
    )
    (options, args) = parser.parse_args()
    # Find command
    try:
        command = args[0]
    except IndexError:
        # No subcommand given: show help instead of erroring out.
        parser.print_help()
        return
    if command in COMMANDS:
        COMMANDS[command](parser, options, args)
    else:
        parser.error("Unrecognised command: " + command)
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains base REST classes for constructing client v1 servlets.
"""
from synapse.http.servlet import RestServlet
from synapse.api.urls import CLIENT_PREFIX
from .transactions import HttpTransactionStore
import re
import logging
logger = logging.getLogger(__name__)
def client_path_pattern(path_regex):
    """Compile *path_regex* anchored under the client API prefix.

    Args:
        path_regex (str): The regex string to match. This should NOT have a ^
            as this will be prefixed.

    Returns:
        SRE_Pattern
    """
    full_pattern = "^{}{}".format(CLIENT_PREFIX, path_regex)
    return re.compile(full_pattern)
class ClientV1RestServlet(RestServlet):
    """A base Synapse REST Servlet for the client version 1 API.

    Pulls the commonly-needed collaborators off the HomeServer once at
    construction time so subclasses can use them directly.
    """
    def __init__(self, hs):
        # hs: the synapse HomeServer instance this servlet serves.
        self.hs = hs
        self.handlers = hs.get_handlers()
        self.builder_factory = hs.get_event_builder_factory()
        self.auth = hs.get_v1auth()
        # Per-servlet HTTP transaction store (see .transactions).
        self.txns = HttpTransactionStore()
---
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/es-dls-overview.html
---
# How DLS works [es-dls-overview]
:::{important}
This page pertains to a specific implementation of DLS for Elastic content connectors.
Refer to [Controlling access at the document and field level](docs-content://deploy-manage/users-roles/cluster-or-deployment-auth/controlling-access-at-document-field-level.md) to learn about the {{es}} DLS feature.
:::
Document level security (DLS) enables you to control access to content at the document level. Access to each document in an index can be managed independently, based on the identities (such as usernames, emails, groups etc.) that are allowed to view it.
This feature works with the help of special access control documents that are indexed by a connector into a hidden Elasticsearch index, associated with the standard content index. If your content documents have access control fields that match the criteria defined in your access control documents, Elasticsearch will apply DLS to the documents synced by the connector.
## Core concepts [es-dls-overview-core-concepts]
At a very high level, there are two essential components that enable document level security with connectors:
* **Access control documents**: These documents define the access control policy for documents from your third party source. They live in a hidden index named with the following pattern: `.search-acl-filter-<INDEX-NAME>`. See [access control documents](#es-dls-overview-access-control-documents) for more details and an example.
* **Content documents with access control fields**: The documents that contain the synced content from your third party source must have **access control fields** that match the criteria defined in your access control documents. These documents live in an index named with the following pattern: `search-<INDEX-NAME>`.
* If a content document does not have access control fields, there will be no restrictions on who can view it.
* If the access control field is present but *empty*, no identities will have access and the document will be effectively invisible.
See [content documents](#es-dls-overview-content-documents) for more details.
## Enabling DLS [es-dls-overview-procedure]
To enable DLS, you need to perform the following steps:
1. First **enable DLS** for your connector as part of the connector configuration.
2. Run an **Access control** sync.
3. This creates a hidden access control index prefixed with `.search-acl-filter-`. For example, if you named your connector index `search-sharepoint`, the access control index would be named `.search-acl-filter-search-sharepoint`.
4. The [access control documents](#es-dls-overview-access-control-documents) on the hidden index define which identities are allowed to view documents with access control fields.
5. The access control document uses a search template to define how to filter search results based on identities.
6. Schedule recurring **Access control** syncs to update the access control documents in the hidden index.
Note the following details about content documents and syncs:
1. Remember that for DLS to work, your **content documents** must have access control fields that match the criteria defined in your access control documents. [Content documents](#es-dls-overview-content-documents) contain the actual content your users will search for. If a content document does not have access control fields, there will be no restrictions on who can view it.
2. When a user searches for content, the access control documents determine which content the user is allowed to view.
3. At *search* time documents without the `_allow_access_control` field or with allowed values in `_allow_access_control.enum` will be returned in the search results. The logic for determining whether a document has access control enabled is based on the presence or values of the `_allow_access_control*` fields.
4. Run **Content** syncs to sync your third party data source to Elasticsearch. A specific field (or fields) within these documents correlates with the query parameters in the access control documents enabling document-level security (DLS).
::::{note}
You must enable DLS for your connector *before* running the first content sync. If you have already run a content sync, you’ll need to delete all documents on the index, enable DLS, and run a new content sync.
::::
## DLS at index time [es-dls-overview-index]
### Access control documents [es-dls-overview-access-control-documents]
These documents define the access control policy for the data indexed into Elasticsearch. An example of an access control document is as follows:
```js
{
"_id": "example.user@example.com",
"identity": {
"username": "example username",
"email": "example.user@example.com"
},
"query": {
"template": {
"params": {
"access_control": [
"example.user@example.com",
"example group",
"example username"]
}
},
"source": "..."
}
}
```
% NOTCONSOLE
In this example, the identity object specifies the identity of the user that this document pertains to. The `query` object then uses a template to list the parameters that form the access control policy for this identity. It also contains the query `source`, which will specify a query to fetch all content documents the identity has access to. The `_id` could be, for example, the email address or the username of a user. The exact content and structure of `identity` depends on the corresponding implementation.
### Content documents [es-dls-overview-content-documents]
Content documents contain the actual data from your 3rd party source. A specific field (or fields) within these documents correlates with the query parameters in the access control documents enabling document-level security (DLS). Please note, the field names used to implement DLS may vary across different connectors. In the following example we’ll use the field `_allow_access_control` for specifying the access control for a user identity.
```js
{
"_id": "some-unique-id",
"key-1": "value-1",
"key-2": "value-2",
"key-3": "value-3",
"_allow_access_control": [
"example.user@example.com",
"example group",
"example username"
]
}
```
% NOTCONSOLE
### Access control sync vs content sync [es-dls-overview-sync-type-comparison]
The ingestion of documents into an Elasticsearch index is known as a sync. DLS is managed using two types of syncs:
* **Content sync**: Ingests content into an index that starts with `search-`.
* **Access control sync**: Separate, additional sync which ingests access control documents into an index that starts with `.search-acl-filter-`.
During a sync, the connector ingests the documents into the relevant index based on their type (content or access control). The access control documents determine the access control policy for the content documents.
By leveraging DLS, you can ensure that your Elasticsearch data is securely accessible to the right users or groups, based on the permissions defined in the access control documents.
## DLS at search time [es-dls-overview-search-time]
### When is an identity allowed to see a content document [es-dls-overview-search-time-identity-allowed]
A user can view a document if at least one access control element in their access control document matches an item within the document’s `_allow_access_control` field.
#### Example [es-dls-overview-search-time-example]
This section illustrates when a user has access to certain documents depending on the access control.
One access control document:
```js
{
"_id": "example.user@example.com",
"identity": {
"username": "example username",
"email": "example.user@example.com"
},
"query": {
"template": {
"params": {
"access_control": [
"example.user@example.com",
"example group",
"example username"]
}
},
"source": "..."
}
}
```
% NOTCONSOLE
Let’s see which of the following example documents these permissions can access, and why.
```js
{
"_id": "some-unique-id-1",
"_allow_access_control": [
"example.user@example.com",
"example group",
"example username"
]
}
```
% NOTCONSOLE
The user `example username` will have access to this document as they're part of the corresponding group and their username and email address are also explicitly part of `_allow_access_control`.
```js
{
"_id": "some-unique-id-2",
"_allow_access_control": [
"example group"
]
}
```
% NOTCONSOLE
The user `example username` will also have access to this document as they are part of the `example group`.
```js
{
"_id": "some-unique-id-3",
"_allow_access_control": [
"another.user@example.com"
]
}
```
% NOTCONSOLE
The user `example username` won’t have access to this document because their email does not match `another.user@example.com`.
```js
{
"_id": "some-unique-id-4",
"_allow_access_control": []
}
```
% NOTCONSOLE
No one will have access to this document as the `_allow_access_control` field is empty.
### Querying multiple indices [es-dls-overview-multiple-connectors]
This section illustrates how to define an Elasticsearch API key that has restricted read access to multiple indices that have DLS enabled.
A user might have multiple identities that define which documents they are allowed to read. We can define an Elasticsearch API key with a role descriptor for each index the user has access to.
#### Example [es-dls-overview-multiple-connectors-example]
Let’s assume we want to create an API key that combines the following user identities:
```js
GET .search-acl-filter-source1
{
"_id": "example.user@example.com",
"identity": {
"username": "example username",
"email": "example.user@example.com"
},
"query": {
"template": {
"params": {
"access_control": [
"example.user@example.com",
"source1-user-group"]
}
},
"source": "..."
}
}
```
% NOTCONSOLE
```js
GET .search-acl-filter-source2
{
"_id": "example.user@example.com",
"identity": {
"username": "example username",
"email": "example.user@example.com"
},
"query": {
"template": {
"params": {
"access_control": [
"example.user@example.com",
"source2-user-group"]
}
},
"source": "..."
}
}
```
% NOTCONSOLE
`.search-acl-filter-source1` and `.search-acl-filter-source2` define the access control identities for `source1` and `source2`.
You can create an Elasticsearch API key using an API call like this:
```console
POST /_security/api_key
{
"name": "my-api-key",
"role_descriptors": {
"role-source1": {
"indices": [
{
"names": ["source1"],
"privileges": ["read"],
"query": {
"template": {
"params": {
"access_control": [
"example.user@example.com",
"source1-user-group"]
}
},
"source": "..."
}
}
]
},
"role-source2": {
"indices": [
{
"names": ["source2"],
"privileges": ["read"],
"query": {
"template": {
"params": {
"access_control": [
"example.user@example.com",
"source2-user-group"]
}
},
"source": "..."
}
}
]
}
}
}
```
% TEST[skip:TODO]
#### Workflow guidance [es-dls-overview-multiple-connectors-workflow-guidance]
We recommend relying on the connector access control sync to automate and keep documents in sync with changes to the original content source’s user permissions.
Consider setting an `expiration` time when creating an Elasticsearch API key. When `expiration` is not set, the Elasticsearch API key will never expire.
The API key can be invalidated using the [Invalidate API Key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key). Additionally, if the user’s permission changes, you’ll need to update or recreate the Elasticsearch API key.
### Learn more [es-dls-overview-search-time-learn-more]
* [DLS in Search Applications](/reference/search-connectors/es-dls-e2e-guide.md)
* [Elasticsearch Document Level Security](docs-content://deploy-manage/users-roles/cluster-or-deployment-auth/controlling-access-at-document-field-level.md) | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/search-connectors/es-dls-overview.md |
# -*- coding: utf-8 -*-
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
# pytest plugin modules loaded for the whole test suite.
pytest_plugins = [
    "jupyter_server.pytest_plugin",
    "jupyterlab_server.pytest_plugin",
    "jupyterlab.pytest_plugin"
]
def pytest_addoption(parser):
    """
    Adds flags for py.test.

    This is called by the pytest API
    """
    group = parser.getgroup("general")
    # --quick skips tests marked 'slow'; --slow runs only those tests.
    # The actual filtering happens in pytest_collection_modifyitems.
    group.addoption('--quick', action='store_true',
                    help="Skip slow tests")
    group.addoption('--slow', action='store_true',
                    help="Run only slow tests")
def pytest_configure(config):
    # Register the custom 'slow' marker so pytest does not warn about an
    # unknown marker when tests use @pytest.mark.slow.
    config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_collection_modifyitems(config, items):
    """Apply the --quick/--slow filters by marking collected tests as skipped.

    --quick skips every test marked 'slow'; --slow skips everything else.
    If neither flag is given, the collection is left untouched.
    """
    if config.getoption("--quick"):
        marker = pytest.mark.skip(reason="skipping slow test")
        want_slow = False
    elif config.getoption("--slow"):
        marker = pytest.mark.skip(reason="skipping non-slow test")
        want_slow = True
    else:
        return
    for item in items:
        # Skip the item when its 'slow' marking does not match the filter.
        if ("slow" in item.keywords) != want_slow:
            item.add_marker(marker)
use crate::spec::{
Arch, Cc, LinkerFlavor, Lld, Os, PanicStrategy, RelocModel, Target, TargetMetadata,
TargetOptions, cvs,
};
pub(crate) fn target() -> Target {
Target {
data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
llvm_target: "riscv32".into(),
metadata: TargetMetadata {
description: None,
tier: Some(3),
host_tools: None,
std: Some(true),
},
pointer_width: 32,
arch: Arch::RiscV32,
options: TargetOptions {
families: cvs!["unix"],
os: Os::NuttX,
linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
cpu: "generic-rv32".into(),
max_atomic_width: Some(32),
llvm_abiname: "ilp32f".into(),
features: "+m,+a,+c,+f".into(),
panic_strategy: PanicStrategy::Abort,
relocation_model: RelocModel::Static,
emit_debug_gdb_scripts: false,
eh_frame_header: false,
..Default::default()
},
}
} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_target/src/spec/targets/riscv32imafc_unknown_nuttx_elf.rs |
#
# ovirt-host-deploy -- ovirt host deployer
# Copyright (C) 2017 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""cockpit configuration plugin."""
import gettext
from otopi import plugin, util
def _(m):
return gettext.dgettext(message=m, domain='ovirt-host-deploy')
@util.export
class Plugin(plugin.PluginBase):
    """Cockpit configuration plugin.

    At closeup, enables and starts the cockpit service if it is
    installed; otherwise logs a warning.
    """

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)

    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
    )
    def _closeup(self):
        self.logger.info(_('Starting Cockpit'))
        if not self.services.exists('cockpit'):
            self.logger.warning(_('Cockpit not found!'))
            return
        # enable on boot and start immediately
        self.services.startup('cockpit', True)
        self.services.state('cockpit', True)
# vim: expandtab tabstop=4 shiftwidth=4 | unknown | codeparrot/codeparrot-clean | ||
/** @import { ExportNamedDeclaration, Identifier } from 'estree' */
/** @import { Context } from '../types' */
import * as e from '../../../errors.js';
import { extract_identifiers } from '../../../utils/ast.js';
/**
 * @param {ExportNamedDeclaration} node
 * @param {Context} context
 */
export function ExportNamedDeclaration(node, context) {
	// visit children first, so bindings are correctly initialised
	context.next();

	/** Does this specifier re-export something under the name `default`? */
	const is_default_export = (/** @type {any} */ specifier) => {
		const exported = specifier.exported;
		return exported.type === 'Identifier'
			? exported.name === 'default'
			: exported.value === 'default';
	};

	// `export { x as default }` is illegal inside a script block
	if (context.state.ast_type && node.specifiers.some(is_default_export)) {
		e.module_illegal_default_export(node);
	}

	const declaration = node.declaration;

	if (declaration?.type === 'VariableDeclaration') {
		// in runes mode, forbid `export let`
		if (
			context.state.analysis.runes &&
			context.state.ast_type === 'instance' &&
			declaration.kind === 'let'
		) {
			e.legacy_export_invalid(node);
		}

		for (const declarator of declaration.declarations) {
			for (const id of extract_identifiers(declarator.id)) {
				const binding = context.state.scope.get(id.name);
				if (!binding) continue;

				// derived values cannot be exported at all
				if (binding.kind === 'derived') {
					e.derived_invalid_export(node);
				}

				// reassigned state cannot be exported
				if ((binding.kind === 'state' || binding.kind === 'raw_state') && binding.reassigned) {
					e.state_invalid_export(node);
				}
			}
		}
	}

	if (!context.state.analysis.runes) return;
	if (!declaration || context.state.ast_type !== 'instance') return;

	// record exported functions/classes/consts for the analysis phase
	if (
		declaration.type === 'FunctionDeclaration' ||
		declaration.type === 'ClassDeclaration'
	) {
		context.state.analysis.exports.push({
			name: /** @type {Identifier} */ (declaration.id).name,
			alias: null
		});
	} else if (declaration.kind === 'const') {
		for (const declarator of declaration.declarations) {
			for (const id of extract_identifiers(declarator.id)) {
				context.state.analysis.exports.push({ name: id.name, alias: null });
			}
		}
	}
}
# ply: ygen.py
#
# This is a support program that auto-generates different versions of the YACC parsing
# function with different features removed for the purposes of performance.
#
# Users should edit the method LParser.parsedebug() in yacc.py. The source code
# for that method is then used to create the other methods. See the comments in
# yacc.py for further details.
import os.path
import shutil
def get_source_range(lines, tag):
    """Locate the region delimited by '#--! <tag>-start' / '#--! <tag>-end'.

    Returns a (start, end) pair of indices such that lines[start:end] is
    the region body, excluding the marker lines themselves.

    Raises ValueError if either marker is missing.  (The original code
    silently fell through with stale loop indices — or a NameError on an
    empty input — producing garbage slice bounds.)
    """
    start_tag = '#--! %s-start' % tag
    end_tag = '#--! %s-end' % tag
    start_index = None
    end_index = None
    srclines = enumerate(lines)
    # the shared iterator means the end-marker search resumes after the
    # start marker, so nested occurrences of the start tag are not re-matched
    for index, line in srclines:
        if line.strip().startswith(start_tag):
            start_index = index
            break
    for index, line in srclines:
        if line.strip().endswith(end_tag):
            end_index = index
            break
    if start_index is None or end_index is None:
        raise ValueError('tag %r markers not found in source' % tag)
    return (start_index + 1, end_index)
def filter_section(lines, tag):
    """Drop every region delimited by '#--! <tag>' toggle comments.

    The marker lines themselves are removed as well; each marker flips
    whether subsequent lines are kept.
    """
    marker = '#--! %s' % tag
    kept = []
    keep = True
    for text in lines:
        if text.strip().startswith(marker):
            # toggle between keeping and discarding
            keep = not keep
        elif keep:
            kept.append(text)
    return kept
def main():
    """Regenerate the optimised parse methods inside yacc.py.

    Backs up yacc.py, extracts the hand-maintained parsedebug() body,
    strips the DEBUG sections to produce parseopt(), strips the TRACKING
    sections from that to produce parseopt_notrack(), then rewrites the
    file in place.
    """
    dirname = os.path.dirname(__file__)
    yacc_path = os.path.join(dirname, 'yacc.py')

    # keep a backup of the original before rewriting it
    shutil.copy2(yacc_path, os.path.join(dirname, 'yacc.py.bak'))
    with open(yacc_path, 'r') as f:
        lines = f.readlines()

    parse_start, parse_end = get_source_range(lines, 'parsedebug')
    parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
    parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')

    # the hand-edited master copy of the parser
    orig_lines = lines[parse_start:parse_end]
    # version without the DEBUG-only sections
    parseopt_lines = filter_section(orig_lines, 'DEBUG')
    # version additionally without the TRACKING sections
    parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')

    # NOTE(review): the notrack region is replaced before the parseopt
    # region — presumably it comes later in yacc.py, so replacing it first
    # keeps the earlier slice indices valid; confirm against yacc.py layout.
    lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
    lines[parseopt_start:parseopt_end] = parseopt_lines
    lines = [line.rstrip() + '\n' for line in lines]

    with open(yacc_path, 'w') as f:
        f.writelines(lines)

    print('Updated yacc.py')
# Regenerate yacc.py when invoked directly as a script.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
# Escalate every warning to an error so the example fails loudly on any
# deprecated or incorrect TDI usage.
import warnings as _warnings
_warnings.resetwarnings()
_warnings.filterwarnings('error')
# BEGIN INCLUDE
from tdi import html
from tdi.tools import htmlform
template = html.from_string("""
<html>
<body>
<p>Type your name:</p>
<form tdi="form">
<div tdi="-name">
<p tdi="*error" class="error">Error message</p>
<input tdi="*field" id="name" type="text" />
</div>
<input tdi="submit" type="submit" />
</form>
</body>
</html>
""")
class Model(object):
    """Render model wiring an HTMLForm with per-field error display.

    The form's pre-processor injects error messages (keyed by field name)
    into each field's *error subnode; the post-processor assigns tab
    indexes to the widgets.
    """
    def __init__(self, context=None):
        pass
model = Model(errors=dict(name=u'Please do enter a name!'))
template.render(model) | unknown | codeparrot/codeparrot-clean | ||
# Patchwork - automated patch tracking system
# Copyright (C) 2008 Jeremy Kerr <jk@ozlabs.org>
#
# This file is part of the Patchwork package.
#
# Patchwork is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import itertools
import datetime
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.conf import settings
from django.core.mail import EmailMessage
from django.db.models import Max, Q, F
from django.db.utils import IntegrityError
from patchwork.forms import MultiplePatchForm
from patchwork.models import Bundle, Project, BundlePatch, UserProfile, \
PatchChangeNotification, EmailOptout, EmailConfirmation
def get_patch_ids(d, prefix = 'patch_id'):
ids = []
for (k, v) in d.items():
a = k.split(':')
if len(a) != 2:
continue
if a[0] != prefix:
continue
if not v:
continue
ids.append(a[1])
return ids
class Order(object):
order_map = {
'date': 'date',
'name': 'name',
'state': 'state__ordering',
'submitter': 'submitter__name',
'delegate': 'delegate__username',
}
default_order = ('date', True)
def __init__(self, str = None, editable = False):
self.reversed = False
self.editable = editable
(self.order, self.reversed) = self.default_order
if self.editable:
return
if str is None or str == '':
return
reversed = False
if str[0] == '-':
str = str[1:]
reversed = True
if str not in self.order_map.keys():
return
self.order = str
self.reversed = reversed
def __str__(self):
str = self.order
if self.reversed:
str = '-' + str
return str
def name(self):
return self.order
def reversed_name(self):
if self.reversed:
return self.order
else:
return '-' + self.order
def apply(self, qs):
q = self.order_map[self.order]
if self.reversed:
q = '-' + q
orders = [q]
# if we're using a non-default order, add the default as a secondary
# ordering. We reverse the default if the primary is reversed.
(default_name, default_reverse) = self.default_order
if self.order != default_name:
q = self.order_map[default_name]
if self.reversed ^ default_reverse:
q = '-' + q
orders.append(q)
return qs.order_by(*orders)
bundle_actions = ['create', 'add', 'remove']
def set_bundle(user, project, action, data, patches, context):
# set up the bundle
bundle = None
if action == 'create':
bundle_name = data['bundle_name'].strip()
if '/' in bundle_name:
return ['Bundle names can\'t contain slashes']
if not bundle_name:
return ['No bundle name was specified']
if Bundle.objects.filter(owner = user, name = bundle_name).count() > 0:
return ['You already have a bundle called "%s"' % bundle_name]
bundle = Bundle(owner = user, project = project,
name = bundle_name)
bundle.save()
context.add_message("Bundle %s created" % bundle.name)
elif action =='add':
bundle = get_object_or_404(Bundle, id = data['bundle_id'])
elif action =='remove':
bundle = get_object_or_404(Bundle, id = data['removed_bundle_id'])
if not bundle:
return ['no such bundle']
for patch in patches:
if action == 'create' or action == 'add':
bundlepatch_count = BundlePatch.objects.filter(bundle = bundle,
patch = patch).count()
if bundlepatch_count == 0:
bundle.append_patch(patch)
context.add_message("Patch '%s' added to bundle %s" % \
(patch.name, bundle.name))
else:
context.add_message("Patch '%s' already in bundle %s" % \
(patch.name, bundle.name))
elif action == 'remove':
try:
bp = BundlePatch.objects.get(bundle = bundle, patch = patch)
bp.delete()
context.add_message("Patch '%s' removed from bundle %s\n" % \
(patch.name, bundle.name))
except Exception:
pass
bundle.save()
return []
def send_notifications():
date_limit = datetime.datetime.now() - \
datetime.timedelta(minutes =
settings.NOTIFICATION_DELAY_MINUTES)
# This gets funky: we want to filter out any notifications that should
# be grouped with other notifications that aren't ready to go out yet. To
# do that, we join back onto PatchChangeNotification (PCN -> Patch ->
# Person -> Patch -> max(PCN.last_modified)), filtering out any maxima
# that are with the date_limit.
qs = PatchChangeNotification.objects \
.annotate(m = Max('patch__submitter__patch__patchchangenotification'
'__last_modified')) \
.filter(m__lt = date_limit)
groups = itertools.groupby(qs.order_by('patch__submitter'),
lambda n: n.patch.submitter)
errors = []
for (recipient, notifications) in groups:
notifications = list(notifications)
projects = set([ n.patch.project.linkname for n in notifications ])
def delete_notifications():
pks = [ n.pk for n in notifications ]
PatchChangeNotification.objects.filter(pk__in = pks).delete()
if EmailOptout.is_optout(recipient.email):
delete_notifications()
continue
context = {
'site': Site.objects.get_current(),
'person': recipient,
'notifications': notifications,
'projects': projects,
}
subject = render_to_string(
'patchwork/patch-change-notification-subject.text',
context).strip()
content = render_to_string('patchwork/patch-change-notification.mail',
context)
message = EmailMessage(subject = subject, body = content,
from_email = settings.NOTIFICATION_FROM_EMAIL,
to = [recipient.email],
headers = {'Precedence': 'bulk'})
try:
message.send()
except ex:
errors.append((recipient, ex))
continue
delete_notifications()
return errors
def do_expiry():
# expire any pending confirmations
q = (Q(date__lt = datetime.datetime.now() - EmailConfirmation.validity) |
Q(active = False))
EmailConfirmation.objects.filter(q).delete()
# expire inactive users with no pending confirmation
pending_confs = EmailConfirmation.objects.values('user')
users = User.objects.filter(
is_active = False,
last_login = F('date_joined')
).exclude(
id__in = pending_confs
)
# delete users
users.delete() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp.tools import float_compare
from openerp.report import report_sxw
import openerp
class res_currency(osv.osv):
_inherit = "res.currency"
def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None):
if context is None:
context = {}
res = super(res_currency, self)._get_current_rate(cr, uid, ids, raise_on_no_rate, context=context)
if context.get('voucher_special_currency') in ids and context.get('voucher_special_currency_rate'):
res[context.get('voucher_special_currency')] = context.get('voucher_special_currency_rate')
return res
class account_voucher(osv.osv):
def _check_paid(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = any([((line.account_id.type, 'in', ('receivable', 'payable')) and line.reconcile_id) for line in voucher.move_ids])
return res
def _get_type(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('type', False)
def _get_period(self, cr, uid, context=None):
if context is None: context = {}
if context.get('period_id', False):
return context.get('period_id')
periods = self.pool.get('account.period').find(cr, uid, context=context)
return periods and periods[0] or False
def _make_journal_search(self, cr, uid, ttype, context=None):
journal_pool = self.pool.get('account.journal')
return journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
def _get_journal(self, cr, uid, context=None):
if context is None: context = {}
invoice_pool = self.pool.get('account.invoice')
journal_pool = self.pool.get('account.journal')
if context.get('invoice_id', False):
invoice = invoice_pool.browse(cr, uid, context['invoice_id'], context=context)
journal_id = journal_pool.search(cr, uid, [
('currency', '=', invoice.currency_id.id), ('company_id', '=', invoice.company_id.id)
], limit=1, context=context)
return journal_id and journal_id[0] or False
if context.get('journal_id', False):
return context.get('journal_id')
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
return context.get('search_default_journal_id')
ttype = context.get('type', 'bank')
if ttype in ('payment', 'receipt'):
ttype = 'bank'
res = self._make_journal_search(cr, uid, ttype, context=context)
return res and res[0] or False
def _get_tax(self, cr, uid, context=None):
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if not journal_id:
ttype = context.get('type', 'bank')
res = journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
if not res:
return False
journal_id = res[0]
if not journal_id:
return False
journal = journal_pool.browse(cr, uid, journal_id, context=context)
account_id = journal.default_credit_account_id or journal.default_debit_account_id
if account_id and account_id.tax_ids:
tax_id = account_id.tax_ids[0].id
return tax_id
return False
def _get_payment_rate_currency(self, cr, uid, context=None):
"""
Return the default value for field payment_rate_currency_id: the currency of the journal
if there is one, otherwise the currency of the user's company
"""
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if journal_id:
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.currency:
return journal.currency.id
#no journal given in the context, use company currency as default
return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
def _get_currency(self, cr, uid, context=None):
if context is None: context = {}
journal_pool = self.pool.get('account.journal')
journal_id = context.get('journal_id', False)
if journal_id:
if isinstance(journal_id, (list, tuple)):
# sometimes journal_id is a pair (id, display_name)
journal_id = journal_id[0]
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.currency:
return journal.currency.id
return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
def _get_partner(self, cr, uid, context=None):
if context is None: context = {}
return context.get('partner_id', False)
def _get_reference(self, cr, uid, context=None):
if context is None: context = {}
return context.get('reference', False)
def _get_narration(self, cr, uid, context=None):
if context is None: context = {}
return context.get('narration', False)
def _get_amount(self, cr, uid, context=None):
if context is None:
context= {}
return context.get('amount', 0.0)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if context is None: context = {}
return [(r['id'], (r['number'] or _('Voucher'))) for r in self.read(cr, uid, ids, ['number'], context, load='_classic_write')]
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
mod_obj = self.pool.get('ir.model.data')
if context is None: context = {}
if view_type == 'form':
if not view_id and context.get('invoice_type'):
if context.get('invoice_type') in ('out_invoice', 'out_refund'):
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form')
else:
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form')
result = result and result[1] or False
view_id = result
if not view_id and context.get('line_type'):
if context.get('line_type') == 'customer':
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_form')
else:
result = mod_obj.get_object_reference(cr, uid, 'account_voucher', 'view_vendor_payment_form')
result = result and result[1] or False
view_id = result
res = super(account_voucher, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
doc = etree.XML(res['arch'])
if context.get('type', 'sale') in ('purchase', 'payment'):
nodes = doc.xpath("//field[@name='partner_id']")
for node in nodes:
node.set('context', "{'default_customer': 0, 'search_default_supplier': 1, 'default_supplier': 1}")
if context.get('invoice_type','') in ('in_invoice', 'in_refund'):
node.set('string', _("Supplier"))
res['arch'] = etree.tostring(doc)
return res
def _compute_writeoff_amount(self, cr, uid, line_dr_ids, line_cr_ids, amount, type):
debit = credit = 0.0
sign = type == 'payment' and -1 or 1
for l in line_dr_ids:
if isinstance(l, dict):
debit += l['amount']
for l in line_cr_ids:
if isinstance(l, dict):
credit += l['amount']
return amount - sign * (credit - debit)
def onchange_line_ids(self, cr, uid, ids, line_dr_ids, line_cr_ids, amount, voucher_currency, type, context=None):
context = context or {}
if not line_dr_ids and not line_cr_ids:
return {'value':{'writeoff_amount': 0.0}}
# resolve lists of commands into lists of dicts
line_dr_ids = self.resolve_2many_commands(cr, uid, 'line_dr_ids', line_dr_ids, ['amount'], context)
line_cr_ids = self.resolve_2many_commands(cr, uid, 'line_cr_ids', line_cr_ids, ['amount'], context)
#compute the field is_multi_currency that is used to hide/display options linked to secondary currency on the voucher
is_multi_currency = False
#loop on the voucher lines to see if one of these has a secondary currency. If yes, we need to see the options
for voucher_line in line_dr_ids+line_cr_ids:
line_id = voucher_line.get('id') and self.pool.get('account.voucher.line').browse(cr, uid, voucher_line['id'], context=context).move_line_id.id or voucher_line.get('move_line_id')
if line_id and self.pool.get('account.move.line').browse(cr, uid, line_id, context=context).currency_id:
is_multi_currency = True
break
return {'value': {'writeoff_amount': self._compute_writeoff_amount(cr, uid, line_dr_ids, line_cr_ids, amount, type), 'is_multi_currency': is_multi_currency}}
def _get_journal_currency(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = voucher.journal_id.currency and voucher.journal_id.currency.id or voucher.company_id.currency_id.id
return res
def _get_writeoff_amount(self, cr, uid, ids, name, args, context=None):
if not ids: return {}
currency_obj = self.pool.get('res.currency')
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
debit = credit = 0.0
sign = voucher.type == 'payment' and -1 or 1
for l in voucher.line_dr_ids:
debit += l.amount
for l in voucher.line_cr_ids:
credit += l.amount
currency = voucher.currency_id or voucher.company_id.currency_id
res[voucher.id] = currency_obj.round(cr, uid, currency, voucher.amount - sign * (credit - debit))
return res
def _paid_amount_in_company_currency(self, cr, uid, ids, name, args, context=None):
if context is None:
context = {}
res = {}
ctx = context.copy()
for v in self.browse(cr, uid, ids, context=context):
ctx.update({'date': v.date})
#make a new call to browse in order to have the right date in the context, to get the right currency rate
voucher = self.browse(cr, uid, v.id, context=ctx)
ctx.update({
'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False,
'voucher_special_currency_rate': voucher.currency_id.rate * voucher.payment_rate,})
res[voucher.id] = self.pool.get('res.currency').compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, voucher.amount, context=ctx)
return res
def _get_currency_help_label(self, cr, uid, currency_id, payment_rate, payment_rate_currency_id, context=None):
"""
This function builds a string to help the users to understand the behavior of the payment rate fields they can specify on the voucher.
This string is only used to improve the usability in the voucher form view and has no other effect.
:param currency_id: the voucher currency
:type currency_id: integer
:param payment_rate: the value of the payment_rate field of the voucher
:type payment_rate: float
:param payment_rate_currency_id: the value of the payment_rate_currency_id field of the voucher
:type payment_rate_currency_id: integer
:return: translated string giving a tip on what's the effect of the current payment rate specified
:rtype: str
"""
rml_parser = report_sxw.rml_parse(cr, uid, 'currency_help_label', context=context)
currency_pool = self.pool.get('res.currency')
currency_str = payment_rate_str = ''
if currency_id:
currency_str = rml_parser.formatLang(1, currency_obj=currency_pool.browse(cr, uid, currency_id, context=context))
if payment_rate_currency_id:
payment_rate_str = rml_parser.formatLang(payment_rate, currency_obj=currency_pool.browse(cr, uid, payment_rate_currency_id, context=context))
currency_help_label = _('At the operation date, the exchange rate was\n%s = %s') % (currency_str, payment_rate_str)
return currency_help_label
def _fnct_currency_help_label(self, cr, uid, ids, name, args, context=None):
res = {}
for voucher in self.browse(cr, uid, ids, context=context):
res[voucher.id] = self._get_currency_help_label(cr, uid, voucher.currency_id.id, voucher.payment_rate, voucher.payment_rate_currency_id.id, context=context)
return res
_name = 'account.voucher'
_description = 'Accounting Voucher'
_inherit = ['mail.thread']
_order = "date desc, id desc"
# _rec_name = 'number'
_track = {
'state': {
'account_voucher.mt_voucher_state_change': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'type':fields.selection([
('sale','Sale'),
('purchase','Purchase'),
('payment','Payment'),
('receipt','Receipt'),
],'Default Type', readonly=True, states={'draft':[('readonly',False)]}),
'name':fields.char('Memo', readonly=True, states={'draft':[('readonly',False)]}),
'date':fields.date('Date', readonly=True, select=True, states={'draft':[('readonly',False)]},
help="Effective date for accounting entries", copy=False),
'journal_id':fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'account_id':fields.many2one('account.account', 'Account', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'line_ids':fields.one2many('account.voucher.line', 'voucher_id', 'Voucher Lines',
readonly=True, copy=True,
states={'draft':[('readonly',False)]}),
'line_cr_ids':fields.one2many('account.voucher.line','voucher_id','Credits',
domain=[('type','=','cr')], context={'default_type':'cr'}, readonly=True, states={'draft':[('readonly',False)]}),
'line_dr_ids':fields.one2many('account.voucher.line','voucher_id','Debits',
domain=[('type','=','dr')], context={'default_type':'dr'}, readonly=True, states={'draft':[('readonly',False)]}),
'period_id': fields.many2one('account.period', 'Period', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'narration':fields.text('Notes', readonly=True, states={'draft':[('readonly',False)]}),
'currency_id': fields.function(_get_journal_currency, type='many2one', relation='res.currency', string='Currency', readonly=True, required=True),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'state':fields.selection(
[('draft','Draft'),
('cancel','Cancelled'),
('proforma','Pro-forma'),
('posted','Posted')
], 'Status', readonly=True, track_visibility='onchange', copy=False,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed Voucher. \
\n* The \'Pro-forma\' when voucher is in Pro-forma status,voucher does not have an voucher number. \
\n* The \'Posted\' status is used when user create voucher,a voucher number is generated and voucher entries are created in account \
\n* The \'Cancelled\' status is used when user cancel voucher.'),
'amount': fields.float('Total', digits_compute=dp.get_precision('Account'), required=True, readonly=True, states={'draft':[('readonly',False)]}),
'tax_amount':fields.float('Tax Amount', digits_compute=dp.get_precision('Account'), readonly=True),
'reference': fields.char('Ref #', readonly=True, states={'draft':[('readonly',False)]},
help="Transaction reference number.", copy=False),
'number': fields.char('Number', readonly=True, copy=False),
'move_id':fields.many2one('account.move', 'Account Entry', copy=False),
'move_ids': fields.related('move_id','line_id', type='one2many', relation='account.move.line', string='Journal Items', readonly=True),
'partner_id':fields.many2one('res.partner', 'Partner', change_default=1, readonly=True, states={'draft':[('readonly',False)]}),
'audit': fields.related('move_id','to_check', type='boolean', help='Check this box if you are unsure of that journal entry and if you want to note it as \'to be reviewed\' by an accounting expert.', relation='account.move', string='To Review'),
'paid': fields.function(_check_paid, string='Paid', type='boolean', help="The Voucher has been totally paid."),
'pay_now':fields.selection([
('pay_now','Pay Directly'),
('pay_later','Pay Later or Group Funds'),
],'Payment', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'tax_id': fields.many2one('account.tax', 'Tax', readonly=True, states={'draft':[('readonly',False)]}, domain=[('price_include','=', False)], help="Only for tax excluded from price"),
'pre_line':fields.boolean('Previous Payments ?', required=False),
'date_due': fields.date('Due Date', readonly=True, select=True, states={'draft':[('readonly',False)]}),
'payment_option':fields.selection([
('without_writeoff', 'Keep Open'),
('with_writeoff', 'Reconcile Payment Balance'),
], 'Payment Difference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="This field helps you to choose what you want to do with the eventual difference between the paid amount and the sum of allocated amounts. You can either choose to keep open this difference on the partner's account, or reconcile it with the payment(s)"),
'writeoff_acc_id': fields.many2one('account.account', 'Counterpart Account', readonly=True, states={'draft': [('readonly', False)]}),
'comment': fields.char('Counterpart Comment', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'analytic_id': fields.many2one('account.analytic.account','Write-Off Analytic Account', readonly=True, states={'draft': [('readonly', False)]}),
'writeoff_amount': fields.function(_get_writeoff_amount, string='Difference Amount', type='float', readonly=True, help="Computed as the difference between the amount stated in the voucher and the sum of allocation on the voucher lines."),
'payment_rate_currency_id': fields.many2one('res.currency', 'Payment Rate Currency', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'payment_rate': fields.float('Exchange Rate', digits=(12,6), required=True, readonly=True, states={'draft': [('readonly', False)]},
help='The specific rate that will be used, in this voucher, between the selected currency (in \'Payment Rate Currency\' field) and the voucher currency.'),
'paid_amount_in_company_currency': fields.function(_paid_amount_in_company_currency, string='Paid Amount in Company Currency', type='float', readonly=True),
'is_multi_currency': fields.boolean('Multi Currency Voucher', help='Fields with internal purpose only that depicts if the voucher is a multi currency one or not'),
'currency_help_label': fields.function(_fnct_currency_help_label, type='text', string="Helping Sentence", help="This sentence helps you to know how to specify the payment rate by giving you the direct effect it has"),
}
_defaults = {
'period_id': _get_period,
'partner_id': _get_partner,
'journal_id':_get_journal,
'currency_id': _get_currency,
'reference': _get_reference,
'narration':_get_narration,
'amount': _get_amount,
'type':_get_type,
'state': 'draft',
'pay_now': 'pay_now',
'name': '',
'date': fields.date.context_today,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.voucher',context=c),
'tax_id': _get_tax,
'payment_option': 'without_writeoff',
'comment': _('Write-Off'),
'payment_rate': 1.0,
'payment_rate_currency_id': _get_payment_rate_currency,
}
    def compute_tax(self, cr, uid, ids, context=None):
        """Recompute ``amount`` and ``tax_amount`` of the given vouchers from
        their lines and the selected tax, writing the results back on the
        voucher and on its lines.

        For a price-excluded tax, the tax is computed per line and added on
        top of the sum of the line amounts. For a price-included tax, each
        line amount is rewritten to the tax-inclusive total returned by
        ``compute_all``. Vouchers without a tax get tax_amount 0.0.

        :return: True
        """
        tax_pool = self.pool.get('account.tax')
        partner_pool = self.pool.get('res.partner')
        position_pool = self.pool.get('account.fiscal.position')
        voucher_line_pool = self.pool.get('account.voucher.line')
        voucher_pool = self.pool.get('account.voucher')
        if context is None: context = {}
        for voucher in voucher_pool.browse(cr, uid, ids, context=context):
            voucher_amount = 0.0
            for line in voucher.line_ids:
                # untax_amount takes precedence over amount when set
                voucher_amount += line.untax_amount or line.amount
                line.amount = line.untax_amount or line.amount
                voucher_line_pool.write(cr, uid, [line.id], {'amount':line.amount, 'untax_amount':line.untax_amount})
            if not voucher.tax_id:
                # no tax selected: total is just the sum of the lines
                self.write(cr, uid, [voucher.id], {'amount':voucher_amount, 'tax_amount':0.0})
                continue
            # map the voucher tax through the partner's fiscal position
            tax = [tax_pool.browse(cr, uid, voucher.tax_id.id, context=context)]
            partner = partner_pool.browse(cr, uid, voucher.partner_id.id, context=context) or False
            taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax)
            tax = tax_pool.browse(cr, uid, taxes, context=context)
            total = voucher_amount
            total_tax = 0.0
            if not tax[0].price_include:
                # price-excluded tax: add the computed tax on top of the total
                for line in voucher.line_ids:
                    for tax_line in tax_pool.compute_all(cr, uid, tax, line.amount, 1).get('taxes', []):
                        total_tax += tax_line.get('amount', 0.0)
                total += total_tax
            else:
                # price-included tax: rewrite each line to its tax-inclusive
                # amount as returned by compute_all
                for line in voucher.line_ids:
                    line_total = 0.0
                    line_tax = 0.0
                    for tax_line in tax_pool.compute_all(cr, uid, tax, line.untax_amount or line.amount, 1).get('taxes', []):
                        line_tax += tax_line.get('amount', 0.0)
                        line_total += tax_line.get('price_unit')
                    total_tax += line_tax
                    untax_amount = line.untax_amount or line.amount
                    voucher_line_pool.write(cr, uid, [line.id], {'amount':line_total, 'untax_amount':untax_amount})
            self.write(cr, uid, [voucher.id], {'amount':total, 'tax_amount':total_tax})
        return True
    def onchange_price(self, cr, uid, ids, line_ids, tax_id, partner_id=False, context=None):
        """On-change handler recomputing the voucher total and tax amount
        when a line amount or the tax changes (without writing anything).

        The tax is mapped through the partner's fiscal position when a
        partner is given. Only price-excluded taxes add to the total here;
        price-included taxes leave the line amounts as entered.

        :param line_ids: one2many commands for the voucher lines
        :param tax_id: id of the selected tax (may be falsy)
        :return: {'value': {'amount': ..., 'tax_amount': ...}}
        """
        context = context or {}
        tax_pool = self.pool.get('account.tax')
        partner_pool = self.pool.get('res.partner')
        position_pool = self.pool.get('account.fiscal.position')
        if not line_ids:
            line_ids = []
        res = {
            'tax_amount': False,
            'amount': False,
        }
        voucher_total = 0.0

        # resolve the list of commands into a list of dicts
        line_ids = self.resolve_2many_commands(cr, uid, 'line_ids', line_ids, ['amount'], context)

        total_tax = 0.0
        for line in line_ids:
            line_amount = 0.0
            line_amount = line.get('amount',0.0)

            if tax_id:
                tax = [tax_pool.browse(cr, uid, tax_id, context=context)]
                if partner_id:
                    # apply the partner's fiscal position to the tax
                    partner = partner_pool.browse(cr, uid, partner_id, context=context) or False
                    taxes = position_pool.map_tax(cr, uid, partner and partner.property_account_position or False, tax)
                    tax = tax_pool.browse(cr, uid, taxes, context=context)

                if not tax[0].price_include:
                    for tax_line in tax_pool.compute_all(cr, uid, tax, line_amount, 1).get('taxes', []):
                        total_tax += tax_line.get('amount')

            voucher_total += line_amount
        total = voucher_total + total_tax

        res.update({
            'amount': total or voucher_total,
            'tax_amount': total_tax
        })
        return {
            'value': res
        }
def onchange_term_id(self, cr, uid, ids, term_id, amount):
term_pool = self.pool.get('account.payment.term')
terms = False
due_date = False
default = {'date_due':False}
if term_id and amount:
terms = term_pool.compute(cr, uid, term_id, amount)
if terms:
due_date = terms[-1][0]
default.update({
'date_due':due_date
})
return {'value':default}
    def onchange_journal_voucher(self, cr, uid, ids, line_ids=False, tax_id=False, price=0.0, partner_id=False, journal_id=False, ttype=False, company_id=False, context=None):
        """On-change of the journal on a sale/purchase voucher.

        Derives the counterpart account and the voucher type from the
        journal type (or from ``ttype`` for cash/bank journals), then
        delegates to :meth:`onchange_journal` for the rest of the values.

        :param partner_id: latest value from user input for field partner_id
        :param journal_id: id of the selected journal
        :param ttype: voucher type requested by the caller, if any
        :param context: context arguments, like lang, time zone
        :return: {'value': {...}} with account_id, type and the values
                 produced by onchange_journal
        """
        default = {
            'value':{},
        }

        if not partner_id or not journal_id:
            return default

        partner_pool = self.pool.get('res.partner')
        journal_pool = self.pool.get('account.journal')

        journal = journal_pool.browse(cr, uid, journal_id, context=context)
        partner = partner_pool.browse(cr, uid, partner_id, context=context)
        account_id = False
        tr_type = False
        if journal.type in ('sale','sale_refund'):
            account_id = partner.property_account_receivable.id
            tr_type = 'sale'
        elif journal.type in ('purchase', 'purchase_refund','expense'):
            account_id = partner.property_account_payable.id
            tr_type = 'purchase'
        else:
            # cash/bank journal: fall back on the journal default accounts
            if not journal.default_credit_account_id or not journal.default_debit_account_id:
                raise osv.except_osv(_('Error!'), _('Please define default credit/debit accounts on the journal "%s".') % (journal.name))
            if ttype in ('sale', 'receipt'):
                account_id = journal.default_debit_account_id.id
            elif ttype in ('purchase', 'payment'):
                account_id = journal.default_credit_account_id.id
            else:
                account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
            tr_type = 'receipt'

        default['value']['account_id'] = account_id
        default['value']['type'] = ttype or tr_type

        # complete the result with the journal-dependent values (currency,
        # period, taxes, ...) computed at today's date
        vals = self.onchange_journal(cr, uid, ids, journal_id, line_ids, tax_id, partner_id, time.strftime('%Y-%m-%d'), price, ttype, company_id, context)
        default['value'].update(vals.get('value'))

        return default
def onchange_rate(self, cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=None):
res = {'value': {'paid_amount_in_company_currency': amount, 'currency_help_label': self._get_currency_help_label(cr, uid, currency_id, rate, payment_rate_currency_id, context=context)}}
if rate and amount and currency_id:
company_currency = self.pool.get('res.company').browse(cr, uid, company_id, context=context).currency_id
#context should contain the date, the payment currency and the payment rate specified on the voucher
amount_in_company_currency = self.pool.get('res.currency').compute(cr, uid, currency_id, company_currency.id, amount, context=context)
res['value']['paid_amount_in_company_currency'] = amount_in_company_currency
return res
    def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):
        """On-change of the voucher amount: reallocate the amount over the
        open move lines and refresh the paid amount in company currency.

        The rate lookup is done at the voucher ``date`` and the special
        payment rate (``rate`` relative to the voucher currency rate) is
        pushed into the context for the line recomputation.
        """
        if context is None:
            context = {}
        ctx = context.copy()
        ctx.update({'date': date})
        #read the voucher rate with the right date in the context
        currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id
        voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
        ctx.update({
            'voucher_special_currency': payment_rate_currency_id,
            'voucher_special_currency_rate': rate * voucher_rate})
        res = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
        vals = self.onchange_rate(cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
        # merge the rate on-change result into the recomputed line values
        for key in vals.keys():
            res[key].update(vals[key])
        return res
    def recompute_payment_rate(self, cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=None):
        """Reset the default ``payment_rate`` and ``payment_rate_currency_id``
        after a journal change, then refresh the paid amount in company
        currency through :meth:`onchange_rate`.

        The payment rate currency defaults to the voucher currency; when one
        of the proposed voucher lines is in another currency, that currency
        of the first such line is used instead and the rate between the two
        currencies (at ``date``) becomes the default payment rate.

        :param vals: on-change result dict being built (mutated and returned)
        :return: ``vals`` with payment rate info merged in
        """
        if context is None:
            context = {}
        #on change of the journal, we need to set also the default value for payment_rate and payment_rate_currency_id
        currency_obj = self.pool.get('res.currency')
        journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
        company_id = journal.company_id.id
        payment_rate = 1.0
        currency_id = currency_id or journal.company_id.currency_id.id
        payment_rate_currency_id = currency_id
        ctx = context.copy()
        ctx.update({'date': date})
        o2m_to_loop = False
        # receipts allocate on credit lines, payments on debit lines
        if ttype == 'receipt':
            o2m_to_loop = 'line_cr_ids'
        elif ttype == 'payment':
            o2m_to_loop = 'line_dr_ids'
        if o2m_to_loop and 'value' in vals and o2m_to_loop in vals['value']:
            for voucher_line in vals['value'][o2m_to_loop]:
                if not isinstance(voucher_line, dict):
                    continue
                if voucher_line['currency_id'] != currency_id:
                    # we take as default value for the payment_rate_currency_id, the currency of the first invoice that
                    # is not in the voucher currency
                    payment_rate_currency_id = voucher_line['currency_id']
                    tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate
                    payment_rate = tmp / currency_obj.browse(cr, uid, currency_id, context=ctx).rate
                    break
        vals['value'].update({
            'payment_rate': payment_rate,
            'currency_id': currency_id,
            'payment_rate_currency_id': payment_rate_currency_id
        })
        #read the voucher rate with the right date in the context
        voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
        ctx.update({
            'voucher_special_currency_rate': payment_rate * voucher_rate,
            'voucher_special_currency': payment_rate_currency_id})
        res = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
        for key in res.keys():
            vals[key].update(res[key])
        return vals
def basic_onchange_partner(self, cr, uid, ids, partner_id, journal_id, ttype, context=None):
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
res = {'value': {'account_id': False}}
if not partner_id or not journal_id:
return res
journal = journal_pool.browse(cr, uid, journal_id, context=context)
partner = partner_pool.browse(cr, uid, partner_id, context=context)
account_id = False
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
elif ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id.id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
res['value']['account_id'] = account_id
return res
    def onchange_partner_id(self, cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=None):
        """On-change of the partner: propose the counterpart account and,
        for payment/receipt vouchers, rebuild the allocation lines and the
        payment rate defaults.

        For sale/purchase vouchers only the basic account proposal is
        returned; fields not shown in those views (pre_line, payment_rate,
        the opposite one2many) are stripped from the result to avoid client
        crashes.
        """
        if not journal_id:
            return {}
        if context is None:
            context = {}
        #TODO: comment me and use me directly in the sales/purchases views
        res = self.basic_onchange_partner(cr, uid, ids, partner_id, journal_id, ttype, context=context)
        if ttype in ['sale', 'purchase']:
            return res
        ctx = context.copy()
        # not passing the payment_rate currency and the payment_rate in the context but it's ok because they are reset in recompute_payment_rate
        ctx.update({'date': date})
        vals = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
        vals2 = self.recompute_payment_rate(cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=context)
        # merge both helper results (vals2 is vals with rate info merged in)
        for key in vals.keys():
            res[key].update(vals[key])
        for key in vals2.keys():
            res[key].update(vals2[key])
        #TODO: can probably be removed now
        #TODO: onchange_partner_id() should not returns [pre_line, line_dr_ids, payment_rate...] for type sale, and not
        # [pre_line, line_cr_ids, payment_rate...] for type purchase.
        # We should definitively split account.voucher object in two and make distinct on_change functions. In the
        # meanwhile, below lines must be there because the fields aren't present in the view, which crashes if the
        # onchange returns a value for them
        if ttype == 'sale':
            del(res['value']['line_dr_ids'])
            del(res['value']['pre_line'])
            del(res['value']['payment_rate'])
        elif ttype == 'purchase':
            del(res['value']['line_cr_ids'])
            del(res['value']['pre_line'])
            del(res['value']['payment_rate'])
        return res
    def recompute_voucher_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, ttype, date, context=None):
        """Rebuild the voucher allocation lines (line_cr_ids / line_dr_ids)
        from the partner's open move lines, splitting ``price`` over them
        oldest-first, or allocating it entirely to a line that exactly
        matches the amount (or the invoice given in context).

        :param partner_id: latest value from user input for field partner_id
        :param price: voucher amount to allocate over the lines
        :param context: may carry 'account_id', 'move_line_ids' and
                        'invoice_id' to restrict/steer the allocation
        :return: {'value': {'line_dr_ids': [...], 'line_cr_ids': [...],
                  'pre_line': ..., 'writeoff_amount': ...}}
        """
        def _remove_noise_in_o2m():
            """if the line is partially reconciled, then we must pay attention to display it only once and
                in the good o2m.
                This function returns True if the line is considered as noise and should not be displayed
            """
            if line.reconcile_partial_id:
                if currency_id == line.currency_id.id:
                    if line.amount_residual_currency <= 0:
                        return True
                else:
                    if line.amount_residual <= 0:
                        return True
            return False

        if context is None:
            context = {}
        context_multi_currency = context.copy()

        currency_pool = self.pool.get('res.currency')
        move_line_pool = self.pool.get('account.move.line')
        partner_pool = self.pool.get('res.partner')
        journal_pool = self.pool.get('account.journal')
        line_pool = self.pool.get('account.voucher.line')

        #set default values
        default = {
            'value': {'line_dr_ids': [], 'line_cr_ids': [], 'pre_line': False},
        }

        # drop existing lines
        line_ids = ids and line_pool.search(cr, uid, [('voucher_id', '=', ids[0])])
        for line in line_pool.browse(cr, uid, line_ids, context=context):
            if line.type == 'cr':
                default['value']['line_cr_ids'].append((2, line.id))
            else:
                default['value']['line_dr_ids'].append((2, line.id))

        if not partner_id or not journal_id:
            return default

        journal = journal_pool.browse(cr, uid, journal_id, context=context)
        partner = partner_pool.browse(cr, uid, partner_id, context=context)
        currency_id = currency_id or journal.company_id.currency_id.id

        total_credit = 0.0
        total_debit = 0.0
        account_type = None
        if context.get('account_id'):
            account_type = self.pool['account.account'].browse(cr, uid, context['account_id'], context=context).type
        if ttype == 'payment':
            if not account_type:
                account_type = 'payable'
            total_debit = price or 0.0
        else:
            total_credit = price or 0.0
            if not account_type:
                account_type = 'receivable'

        # candidate move lines: either the ones given in context, or all the
        # partner's open (unreconciled) lines on the relevant account type
        if not context.get('move_line_ids', False):
            ids = move_line_pool.search(cr, uid, [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner_id)], context=context)
        else:
            ids = context['move_line_ids']
        invoice_id = context.get('invoice_id', False)
        company_currency = journal.company_id.currency_id.id
        move_lines_found = []

        #order the lines by most old first
        ids.reverse()
        account_move_lines = move_line_pool.browse(cr, uid, ids, context=context)

        #compute the total debit/credit and look for a matching open amount or invoice
        for line in account_move_lines:
            if _remove_noise_in_o2m():
                continue

            if invoice_id:
                if line.invoice.id == invoice_id:
                    #if the invoice linked to the voucher line is equal to the invoice_id in context
                    #then we assign the amount on that line, whatever the other voucher lines
                    move_lines_found.append(line.id)
            elif currency_id == company_currency:
                #otherwise treatments is the same but with other field names
                if line.amount_residual == price:
                    #if the amount residual is equal the amount voucher, we assign it to that voucher
                    #line, whatever the other voucher lines
                    move_lines_found.append(line.id)
                    break
                #otherwise we will split the voucher amount on each line (by most old first)
                total_credit += line.credit or 0.0
                total_debit += line.debit or 0.0
            elif currency_id == line.currency_id.id:
                if line.amount_residual_currency == price:
                    move_lines_found.append(line.id)
                    break
                total_credit += line.credit and line.amount_currency or 0.0
                total_debit += line.debit and line.amount_currency or 0.0

        remaining_amount = price
        #voucher line creation
        for line in account_move_lines:

            if _remove_noise_in_o2m():
                continue

            if line.currency_id and currency_id == line.currency_id.id:
                amount_original = abs(line.amount_currency)
                amount_unreconciled = abs(line.amount_residual_currency)
            else:
                #always use the amount booked in the company currency as the basis of the conversion into the voucher currency
                amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or line.debit or 0.0, context=context_multi_currency)
                amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual), context=context_multi_currency)
            line_currency_id = line.currency_id and line.currency_id.id or company_currency
            rs = {
                'name':line.move_id.name,
                'type': line.credit and 'dr' or 'cr',
                'move_line_id':line.id,
                'account_id':line.account_id.id,
                'amount_original': amount_original,
                'amount': (line.id in move_lines_found) and min(abs(remaining_amount), amount_unreconciled) or 0.0,
                'date_original':line.date,
                'date_due':line.date_maturity,
                'amount_unreconciled': amount_unreconciled,
                'currency_id': line_currency_id,
            }
            remaining_amount -= rs['amount']
            #in case a corresponding move_line hasn't been found, we now try to assign the voucher amount
            #on existing invoices: we split voucher amount by most old first, but only for lines in the same currency
            if not move_lines_found:
                if currency_id == line_currency_id:
                    if line.credit:
                        amount = min(amount_unreconciled, abs(total_debit))
                        rs['amount'] = amount
                        total_debit -= amount
                    else:
                        amount = min(amount_unreconciled, abs(total_credit))
                        rs['amount'] = amount
                        total_credit -= amount

            # a fully allocated line is proposed as reconciled
            if rs['amount_unreconciled'] == rs['amount']:
                rs['reconcile'] = True

            if rs['type'] == 'cr':
                default['value']['line_cr_ids'].append(rs)
            else:
                default['value']['line_dr_ids'].append(rs)

            if len(default['value']['line_cr_ids']) > 0:
                default['value']['pre_line'] = 1
            elif len(default['value']['line_dr_ids']) > 0:
                default['value']['pre_line'] = 1
            default['value']['writeoff_amount'] = self._compute_writeoff_amount(cr, uid, default['value']['line_dr_ids'], default['value']['line_cr_ids'], price, ttype)
        return default
def onchange_payment_rate_currency(self, cr, uid, ids, currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=None):
if context is None:
context = {}
res = {'value': {}}
if currency_id:
#set the default payment rate of the voucher and compute the paid amount in company currency
ctx = context.copy()
ctx.update({'date': date})
#read the voucher rate with the right date in the context
voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
ctx.update({
'voucher_special_currency_rate': payment_rate * voucher_rate,
'voucher_special_currency': payment_rate_currency_id})
vals = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
for key in vals.keys():
res[key].update(vals[key])
return res
    def onchange_date(self, cr, uid, ids, date, currency_id, payment_rate_currency_id, amount, company_id, context=None):
        """On-change of the voucher date: select the matching accounting
        period and, when a special payment rate currency is set, recompute
        the payment rate (and paid amount) at the new date.

        :param date: latest value from user input for field date
        :param context: context arguments, like lang, time zone
        :return: {'value': {...}} with period_id, payment_rate and the
                 values produced by onchange_payment_rate_currency
        """
        if context is None:
            context ={}
        res = {'value': {}}
        #set the period of the voucher
        period_pool = self.pool.get('account.period')
        currency_obj = self.pool.get('res.currency')
        ctx = context.copy()
        ctx.update({'company_id': company_id, 'account_period_prefer_normal': True})
        voucher_currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id
        pids = period_pool.find(cr, uid, date, context=ctx)
        if pids:
            res['value'].update({'period_id':pids[0]})
        if payment_rate_currency_id:
            ctx.update({'date': date})
            payment_rate = 1.0
            if payment_rate_currency_id != currency_id:
                # rate between the payment currency and the voucher currency
                # at the new date
                tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate
                payment_rate = tmp / currency_obj.browse(cr, uid, voucher_currency_id, context=ctx).rate
            vals = self.onchange_payment_rate_currency(cr, uid, ids, voucher_currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=context)
            vals['value'].update({'payment_rate': payment_rate})
            for key in vals.keys():
                res[key].update(vals[key])
        return res
    def onchange_journal(self, cr, uid, ids, journal_id, line_ids, tax_id, partner_id, date, amount, ttype, company_id, context=None):
        """On-change of the journal: derive the default tax from the journal
        account, the voucher currency from the journal (or its company), the
        period from the date, and cascade into onchange_partner_id when a
        partner is set.

        :return: {'value': {...}} or False when no journal is given
        """
        if context is None:
            context = {}
        if not journal_id:
            return False
        journal_pool = self.pool.get('account.journal')
        journal = journal_pool.browse(cr, uid, journal_id, context=context)
        if ttype in ('sale', 'receipt'):
            account_id = journal.default_debit_account_id
        elif ttype in ('purchase', 'payment'):
            account_id = journal.default_credit_account_id
        else:
            account_id = journal.default_credit_account_id or journal.default_debit_account_id
        # default tax comes from the journal's default account
        tax_id = False
        if account_id and account_id.tax_ids:
            tax_id = account_id.tax_ids[0].id

        vals = {'value':{} }
        if ttype in ('sale', 'purchase'):
            vals = self.onchange_price(cr, uid, ids, line_ids, tax_id, partner_id, context)
            vals['value'].update({'tax_id':tax_id,'amount': amount})
        currency_id = False
        if journal.currency:
            currency_id = journal.currency.id
        else:
            currency_id = journal.company_id.currency_id.id

        period_ids = self.pool['account.period'].find(cr, uid, dt=date, context=dict(context, company_id=company_id))
        vals['value'].update({
            'currency_id': currency_id,
            'payment_rate_currency_id': currency_id,
            'period_id': period_ids and period_ids[0] or False
        })
        #in case we want to register the payment directly from an invoice, it's confusing to allow to switch the journal
        #without seeing that the amount is expressed in the journal currency, and not in the invoice currency. So to avoid
        #this common mistake, we simply reset the amount to 0 if the currency is not the invoice currency.
        if context.get('payment_expected_currency') and currency_id != context.get('payment_expected_currency'):
            vals['value']['amount'] = 0
            amount = 0
        if partner_id:
            res = self.onchange_partner_id(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context)
            for key in res.keys():
                vals[key].update(res[key])
        return vals
def onchange_company(self, cr, uid, ids, partner_id, journal_id, currency_id, company_id, context=None):
"""
If the company changes, check that the journal is in the right company.
If not, fetch a new journal.
"""
journal_pool = self.pool['account.journal']
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.company_id.id != company_id:
# can not guess type of journal, better remove it
return {'value': {'journal_id': False}}
return {}
    def button_proforma_voucher(self, cr, uid, ids, context=None):
        # Validate the voucher(s) via the workflow, then close the dialog.
        self.signal_workflow(cr, uid, ids, 'proforma_voucher')
        return {'type': 'ir.actions.act_window_close'}
    def proforma_voucher(self, cr, uid, ids, context=None):
        # Workflow action: post the voucher by creating its accounting entries.
        self.action_move_line_create(cr, uid, ids, context=context)
        return True
    def action_cancel_draft(self, cr, uid, ids, context=None):
        # Reset cancelled vouchers to draft: re-instantiate the workflow so
        # the records can be validated again, then set the state back.
        self.create_workflow(cr, uid, ids)
        self.write(cr, uid, ids, {'state':'draft'})
        return True
    def cancel_voucher(self, cr, uid, ids, context=None):
        """Cancel the vouchers: undo reconciliations created by the voucher
        moves (restoring partial reconciliations between the remaining
        lines), cancel and delete the voucher's account move, and set the
        state to 'cancel'.

        :return: True
        """
        reconcile_pool = self.pool.get('account.move.reconcile')
        move_pool = self.pool.get('account.move')
        move_line_pool = self.pool.get('account.move.line')
        for voucher in self.browse(cr, uid, ids, context=context):
            # refresh to make sure you don't unlink an already removed move
            voucher.refresh()
            for line in voucher.move_ids:
                # refresh to make sure you don't unreconcile an already unreconciled entry
                line.refresh()
                if line.reconcile_id:
                    move_lines = [move_line.id for move_line in line.reconcile_id.line_id]
                    move_lines.remove(line.id)
                    reconcile_pool.unlink(cr, uid, [line.reconcile_id.id])
                    if len(move_lines) >= 2:
                        # re-reconcile the other lines together (without ours)
                        move_line_pool.reconcile_partial(cr, uid, move_lines, 'auto',context=context)
            if voucher.move_id:
                move_pool.button_cancel(cr, uid, [voucher.move_id.id])
                move_pool.unlink(cr, uid, [voucher.move_id.id])
        res = {
            'state':'cancel',
            'move_id':False,
        }
        self.write(cr, uid, ids, res)
        return True
def unlink(self, cr, uid, ids, context=None):
for t in self.read(cr, uid, ids, ['state'], context=context):
if t['state'] not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete voucher(s) which are already opened or paid.'))
return super(account_voucher, self).unlink(cr, uid, ids, context=context)
def onchange_payment(self, cr, uid, ids, pay_now, journal_id, partner_id, ttype='sale'):
res = {}
if not partner_id:
return res
res = {}
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
if pay_now == 'pay_later':
partner = partner_pool.browse(cr, uid, partner_id)
journal = journal_pool.browse(cr, uid, journal_id)
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
elif ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id.id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
if account_id:
res['account_id'] = account_id
return {'value':res}
def _sel_context(self, cr, uid, voucher_id, context=None):
"""
Select the context to use accordingly if it needs to be multicurrency or not.
:param voucher_id: Id of the actual voucher
:return: The returned context will be the same as given in parameter if the voucher currency is the same
than the company currency, otherwise it's a copy of the parameter with an extra key 'date' containing
the date of the voucher.
:rtype: dict
"""
company_currency = self._get_company_currency(cr, uid, voucher_id, context)
current_currency = self._get_current_currency(cr, uid, voucher_id, context)
if current_currency <> company_currency:
context_multi_currency = context.copy()
voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context)
context_multi_currency.update({'date': voucher.date})
return context_multi_currency
return context
def first_move_line_get(self, cr, uid, voucher_id, move_id, company_currency, current_currency, context=None):
'''
Return a dict to be use to create the first account move line of given voucher.
:param voucher_id: Id of voucher what we are creating account_move.
:param move_id: Id of account move where this line will be added.
:param company_currency: id of currency of the company to which the voucher belong
:param current_currency: id of currency of the voucher
:return: mapping between fieldname and value of account move line to create
:rtype: dict
'''
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
debit = credit = 0.0
# TODO: is there any other alternative then the voucher type ??
# ANSWER: We can have payment and receipt "In Advance".
# TODO: Make this logic available.
# -for sale, purchase we have but for the payment and receipt we do not have as based on the bank/cash journal we can not know its payment or receipt
if voucher.type in ('purchase', 'payment'):
credit = voucher.paid_amount_in_company_currency
elif voucher.type in ('sale', 'receipt'):
debit = voucher.paid_amount_in_company_currency
if debit < 0: credit = -debit; debit = 0.0
if credit < 0: debit = -credit; credit = 0.0
sign = debit - credit < 0 and -1 or 1
#set the first line of the voucher
move_line = {
'name': voucher.name or '/',
'debit': debit,
'credit': credit,
'account_id': voucher.account_id.id,
'move_id': move_id,
'journal_id': voucher.journal_id.id,
'period_id': voucher.period_id.id,
'partner_id': voucher.partner_id.id,
'currency_id': company_currency <> current_currency and current_currency or False,
'amount_currency': (sign * abs(voucher.amount) # amount < 0 for refunds
if company_currency != current_currency else 0.0),
'date': voucher.date,
'date_maturity': voucher.date_due
}
return move_line
    def account_move_get(self, cr, uid, voucher_id, context=None):
        '''
        This method prepare the creation of the account move related to the given voucher.

        The move name is the voucher number when set, otherwise the next
        value of the journal sequence (evaluated in the voucher's fiscal
        year); a missing or inactive sequence raises a user error.

        :param voucher_id: Id of voucher for which we are creating account_move.
        :return: mapping between fieldname and value of account move to create
        :rtype: dict
        '''
        seq_obj = self.pool.get('ir.sequence')
        voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
        if voucher.number:
            name = voucher.number
        elif voucher.journal_id.sequence_id:
            if not voucher.journal_id.sequence_id.active:
                raise osv.except_osv(_('Configuration Error !'),
                    _('Please activate the sequence of selected journal !'))
            # evaluate the sequence in the voucher's fiscal year
            c = dict(context)
            c.update({'fiscalyear_id': voucher.period_id.fiscalyear_id.id})
            name = seq_obj.next_by_id(cr, uid, voucher.journal_id.sequence_id.id, context=c)
        else:
            raise osv.except_osv(_('Error!'),
                        _('Please define a sequence on the journal.'))
        # default reference: the move name without '/' separators
        if not voucher.reference:
            ref = name.replace('/','')
        else:
            ref = voucher.reference

        move = {
            'name': name,
            'journal_id': voucher.journal_id.id,
            'narration': voucher.narration,
            'date': voucher.date,
            'ref': ref,
            'period_id': voucher.period_id.id,
        }
        return move
    def _get_exchange_lines(self, cr, uid, line, move_id, amount_residual, company_currency, current_currency, context=None):
        '''
        Prepare the two lines in company currency due to currency rate difference.

        A positive residual books a loss (expense exchange account), a
        negative one a gain (income exchange account); a missing account
        configuration raises a RedirectWarning pointing at the accounts view.

        :param line: browse record of the voucher.line for which we want to create currency rate difference accounting
            entries
        :param move_id: Account move wher the move lines will be.
        :param amount_residual: Amount to be posted.
        :param company_currency: id of currency of the company to which the voucher belong
        :param current_currency: id of currency of the voucher
        :return: the account move line and its counterpart to create, depicted as mapping between fieldname and value
        :rtype: tuple of dict
        '''
        if amount_residual > 0:
            account_id = line.voucher_id.company_id.expense_currency_exchange_account_id
            if not account_id:
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form')
                msg = _("You should configure the 'Loss Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.")
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
        else:
            account_id = line.voucher_id.company_id.income_currency_exchange_account_id
            if not account_id:
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_form')
                msg = _("You should configure the 'Gain Exchange Rate Account' to manage automatically the booking of accounting entries related to differences between exchange rates.")
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
        # Even if the amount_currency is never filled, we need to pass the foreign currency because otherwise
        # the receivable/payable account may have a secondary currency, which render this field mandatory
        if line.account_id.currency_id:
            account_currency_id = line.account_id.currency_id.id
        else:
            account_currency_id = company_currency <> current_currency and current_currency or False
        # first line: balance the receivable/payable account
        move_line = {
            'journal_id': line.voucher_id.journal_id.id,
            'period_id': line.voucher_id.period_id.id,
            'name': _('change')+': '+(line.name or '/'),
            'account_id': line.account_id.id,
            'move_id': move_id,
            'partner_id': line.voucher_id.partner_id.id,
            'currency_id': account_currency_id,
            'amount_currency': 0.0,
            'quantity': 1,
            'credit': amount_residual > 0 and amount_residual or 0.0,
            'debit': amount_residual < 0 and -amount_residual or 0.0,
            'date': line.voucher_id.date,
        }
        # counterpart: book the gain/loss on the exchange rate account
        move_line_counterpart = {
            'journal_id': line.voucher_id.journal_id.id,
            'period_id': line.voucher_id.period_id.id,
            'name': _('change')+': '+(line.name or '/'),
            'account_id': account_id.id,
            'move_id': move_id,
            'amount_currency': 0.0,
            'partner_id': line.voucher_id.partner_id.id,
            'currency_id': account_currency_id,
            'quantity': 1,
            'debit': amount_residual > 0 and amount_residual or 0.0,
            'credit': amount_residual < 0 and -amount_residual or 0.0,
            'date': line.voucher_id.date,
        }
        return (move_line, move_line_counterpart)
def _convert_amount(self, cr, uid, amount, voucher_id, context=None):
'''
This function convert the amount given in company currency. It takes either the rate in the voucher (if the
payment_rate_currency_id is relevant) either the rate encoded in the system.
:param amount: float. The amount to convert
:param voucher: id of the voucher on which we want the conversion
:param context: to context to use for the conversion. It may contain the key 'date' set to the voucher date
field in order to select the good rate to use.
:return: the amount in the currency of the voucher's company
:rtype: float
'''
if context is None:
context = {}
currency_obj = self.pool.get('res.currency')
voucher = self.browse(cr, uid, voucher_id, context=context)
return currency_obj.compute(cr, uid, voucher.currency_id.id, voucher.company_id.currency_id.id, amount, context=context)
    def voucher_move_line_create(self, cr, uid, voucher_id, line_total, move_id, company_currency, current_currency, context=None):
        '''
        Create one account move line, on the given account move, per voucher line where amount is not 0.0.
        It returns Tuple with tot_line what is total of difference between debit and credit and
        a list of lists with ids to be reconciled with this format (total_deb_cred,list_of_lists).
        :param voucher_id: Voucher id what we are working with
        :param line_total: Amount of the first line, which correspond to the amount we should totally split among all voucher lines.
        :param move_id: Account move wher those lines will be joined.
        :param company_currency: id of currency of the company to which the voucher belong
        :param current_currency: id of currency of the voucher
        :return: Tuple build as (remaining amount not allocated on voucher lines, list of account_move_line created in this method)
        :rtype: tuple(float, list of int)
        '''
        if context is None:
            context = {}
        move_line_obj = self.pool.get('account.move.line')
        currency_obj = self.pool.get('res.currency')
        tax_obj = self.pool.get('account.tax')
        tot_line = line_total
        rec_lst_ids = []
        date = self.read(cr, uid, [voucher_id], ['date'], context=context)[0]['date']
        ctx = context.copy()
        ctx.update({'date': date})
        voucher = self.pool.get('account.voucher').browse(cr, uid, voucher_id, context=ctx)
        voucher_currency = voucher.journal_id.currency or voucher.company_id.currency_id
        # These two context keys let res.currency.compute() use the
        # voucher-specific payment rate instead of the system rate.
        ctx.update({
            'voucher_special_currency_rate': voucher_currency.rate * voucher.payment_rate ,
            'voucher_special_currency': voucher.payment_rate_currency_id and voucher.payment_rate_currency_id.id or False,})
        prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
        for line in voucher.line_ids:
            #create one move line per voucher line where amount is not 0.0
            # AND (second part of the clause) only if the original move line was not having debit = credit = 0 (which is a legal value)
            if not line.amount and not (line.move_line_id and not float_compare(line.move_line_id.debit, line.move_line_id.credit, precision_digits=prec) and not float_compare(line.move_line_id.debit, 0.0, precision_digits=prec)):
                continue
            # convert the amount set on the voucher line into the currency of the voucher's company
            # this calls res_curreny.compute() with the right context, so that it will take either the rate on the voucher if it is relevant or will use the default behaviour
            amount = self._convert_amount(cr, uid, line.untax_amount or line.amount, voucher.id, context=ctx)
            # if the amount encoded in voucher is equal to the amount unreconciled, we need to compute the
            # currency rate difference
            if line.amount == line.amount_unreconciled:
                if not line.move_line_id:
                    raise osv.except_osv(_('Wrong voucher line'),_("The invoice you are willing to pay is not valid anymore."))
                sign = line.type =='dr' and -1 or 1
                currency_rate_difference = sign * (line.move_line_id.amount_residual - amount)
            else:
                currency_rate_difference = 0.0
            move_line = {
                'journal_id': voucher.journal_id.id,
                'period_id': voucher.period_id.id,
                'name': line.name or '/',
                'account_id': line.account_id.id,
                'move_id': move_id,
                'partner_id': voucher.partner_id.id,
                'currency_id': line.move_line_id and (company_currency <> line.move_line_id.currency_id.id and line.move_line_id.currency_id.id) or False,
                'analytic_account_id': line.account_analytic_id and line.account_analytic_id.id or False,
                'quantity': 1,
                'credit': 0.0,
                'debit': 0.0,
                'date': voucher.date
            }
            # A negative amount flips the dr/cr side of the line.
            # NOTE(review): this assigns line.type on the browse record --
            # presumably an in-memory change only; confirm it is not persisted.
            if amount < 0:
                amount = -amount
                if line.type == 'dr':
                    line.type = 'cr'
                else:
                    line.type = 'dr'
            if (line.type=='dr'):
                tot_line += amount
                move_line['debit'] = amount
            else:
                tot_line -= amount
                move_line['credit'] = amount
            if voucher.tax_id and voucher.type in ('sale', 'purchase'):
                move_line.update({
                    'account_tax_id': voucher.tax_id.id,
                })
            # compute the amount in foreign currency
            foreign_currency_diff = 0.0
            amount_currency = False
            if line.move_line_id:
                # We want to set it on the account move line as soon as the original line had a foreign currency
                if line.move_line_id.currency_id and line.move_line_id.currency_id.id != company_currency:
                    # we compute the amount in that foreign currency.
                    if line.move_line_id.currency_id.id == current_currency:
                        # if the voucher and the voucher line share the same currency, there is no computation to do
                        sign = (move_line['debit'] - move_line['credit']) < 0 and -1 or 1
                        amount_currency = sign * (line.amount)
                    else:
                        # if the rate is specified on the voucher, it will be used thanks to the special keys in the context
                        # otherwise we use the rates of the system
                        amount_currency = currency_obj.compute(cr, uid, company_currency, line.move_line_id.currency_id.id, move_line['debit']-move_line['credit'], context=ctx)
                if line.amount == line.amount_unreconciled:
                    foreign_currency_diff = line.move_line_id.amount_residual_currency - abs(amount_currency)
            move_line['amount_currency'] = amount_currency
            voucher_line = move_line_obj.create(cr, uid, move_line)
            rec_ids = [voucher_line, line.move_line_id.id]
            if not currency_obj.is_zero(cr, uid, voucher.company_id.currency_id, currency_rate_difference):
                # Change difference entry in company currency
                exch_lines = self._get_exchange_lines(cr, uid, line, move_id, currency_rate_difference, company_currency, current_currency, context=context)
                new_id = move_line_obj.create(cr, uid, exch_lines[0],context)
                move_line_obj.create(cr, uid, exch_lines[1], context)
                rec_ids.append(new_id)
            if line.move_line_id and line.move_line_id.currency_id and not currency_obj.is_zero(cr, uid, line.move_line_id.currency_id, foreign_currency_diff):
                # Change difference entry in voucher currency
                move_line_foreign_currency = {
                    'journal_id': line.voucher_id.journal_id.id,
                    'period_id': line.voucher_id.period_id.id,
                    'name': _('change')+': '+(line.name or '/'),
                    'account_id': line.account_id.id,
                    'move_id': move_id,
                    'partner_id': line.voucher_id.partner_id.id,
                    'currency_id': line.move_line_id.currency_id.id,
                    'amount_currency': -1 * foreign_currency_diff,
                    'quantity': 1,
                    'credit': 0.0,
                    'debit': 0.0,
                    'date': line.voucher_id.date,
                }
                new_id = move_line_obj.create(cr, uid, move_line_foreign_currency, context=context)
                rec_ids.append(new_id)
            # Only lines tied to an existing journal item are queued for reconciliation.
            if line.move_line_id.id:
                rec_lst_ids.append(rec_ids)
        return (tot_line, rec_lst_ids)
def writeoff_move_line_get(self, cr, uid, voucher_id, line_total, move_id, name, company_currency, current_currency, context=None):
'''
Set a dict to be use to create the writeoff move line.
:param voucher_id: Id of voucher what we are creating account_move.
:param line_total: Amount remaining to be allocated on lines.
:param move_id: Id of account move where this line will be added.
:param name: Description of account move line.
:param company_currency: id of currency of the company to which the voucher belong
:param current_currency: id of currency of the voucher
:return: mapping between fieldname and value of account move line to create
:rtype: dict
'''
currency_obj = self.pool.get('res.currency')
move_line = {}
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
current_currency_obj = voucher.currency_id or voucher.journal_id.company_id.currency_id
if not currency_obj.is_zero(cr, uid, current_currency_obj, line_total):
diff = line_total
account_id = False
write_off_name = ''
if voucher.payment_option == 'with_writeoff':
account_id = voucher.writeoff_acc_id.id
write_off_name = voucher.comment
elif voucher.partner_id:
if voucher.type in ('sale', 'receipt'):
account_id = voucher.partner_id.property_account_receivable.id
else:
account_id = voucher.partner_id.property_account_payable.id
else:
# fallback on account of voucher
account_id = voucher.account_id.id
sign = voucher.type == 'payment' and -1 or 1
move_line = {
'name': write_off_name or name,
'account_id': account_id,
'move_id': move_id,
'partner_id': voucher.partner_id.id,
'date': voucher.date,
'credit': diff > 0 and diff or 0.0,
'debit': diff < 0 and -diff or 0.0,
'amount_currency': company_currency <> current_currency and (sign * -1 * voucher.writeoff_amount) or 0.0,
'currency_id': company_currency <> current_currency and current_currency or False,
'analytic_account_id': voucher.analytic_id and voucher.analytic_id.id or False,
}
return move_line
def _get_company_currency(self, cr, uid, voucher_id, context=None):
'''
Get the currency of the actual company.
:param voucher_id: Id of the voucher what i want to obtain company currency.
:return: currency id of the company of the voucher
:rtype: int
'''
return self.pool.get('account.voucher').browse(cr,uid,voucher_id,context).journal_id.company_id.currency_id.id
def _get_current_currency(self, cr, uid, voucher_id, context=None):
'''
Get the currency of the voucher.
:param voucher_id: Id of the voucher what i want to obtain current currency.
:return: currency id of the voucher
:rtype: int
'''
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
return voucher.currency_id.id or self._get_company_currency(cr,uid,voucher.id,context)
    def action_move_line_create(self, cr, uid, ids, context=None):
        '''
        Confirm the vouchers given in ids and create the journal entries for each of them.
        For every voucher without an existing move: create the account move, its
        first line, one line per voucher line, an optional write-off line, then
        mark the voucher posted and partially reconcile the created lines.
        :param ids: ids of the vouchers to confirm
        :return: True
        '''
        if context is None:
            context = {}
        move_pool = self.pool.get('account.move')
        move_line_pool = self.pool.get('account.move.line')
        for voucher in self.browse(cr, uid, ids, context=context):
            local_context = dict(context, force_company=voucher.journal_id.company_id.id)
            # Skip vouchers that already have their journal entry.
            if voucher.move_id:
                continue
            company_currency = self._get_company_currency(cr, uid, voucher.id, context)
            current_currency = self._get_current_currency(cr, uid, voucher.id, context)
            # we select the context to use accordingly if it's a multicurrency case or not
            # NOTE(review): this rebinds `context` inside the loop, so the
            # selected context leaks into subsequent iterations -- confirm
            # this is intentional.
            context = self._sel_context(cr, uid, voucher.id, context)
            # But for the operations made by _convert_amount, we always need to give the date in the context
            ctx = context.copy()
            ctx.update({'date': voucher.date})
            # Create the account move record.
            move_id = move_pool.create(cr, uid, self.account_move_get(cr, uid, voucher.id, context=context), context=context)
            # Get the name of the account_move just created
            name = move_pool.browse(cr, uid, move_id, context=context).name
            # Create the first line of the voucher
            move_line_id = move_line_pool.create(cr, uid, self.first_move_line_get(cr,uid,voucher.id, move_id, company_currency, current_currency, local_context), local_context)
            move_line_brw = move_line_pool.browse(cr, uid, move_line_id, context=context)
            line_total = move_line_brw.debit - move_line_brw.credit
            rec_list_ids = []
            # Taxes reduce (sale) or increase (purchase) the amount left to allocate.
            if voucher.type == 'sale':
                line_total = line_total - self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
            elif voucher.type == 'purchase':
                line_total = line_total + self._convert_amount(cr, uid, voucher.tax_amount, voucher.id, context=ctx)
            # Create one move line per voucher line where amount is not 0.0
            line_total, rec_list_ids = self.voucher_move_line_create(cr, uid, voucher.id, line_total, move_id, company_currency, current_currency, context)
            # Create the writeoff line if needed
            ml_writeoff = self.writeoff_move_line_get(cr, uid, voucher.id, line_total, move_id, name, company_currency, current_currency, local_context)
            if ml_writeoff:
                move_line_pool.create(cr, uid, ml_writeoff, local_context)
            # We post the voucher.
            self.write(cr, uid, [voucher.id], {
                'move_id': move_id,
                'state': 'posted',
                'number': name,
            })
            if voucher.journal_id.entry_posted:
                move_pool.post(cr, uid, [move_id], context={})
            # We automatically reconcile the account move lines.
            # NOTE(review): `reconcile` holds only the last call's result and
            # is never read afterwards.
            reconcile = False
            for rec_ids in rec_list_ids:
                if len(rec_ids) >= 2:
                    reconcile = move_line_pool.reconcile_partial(cr, uid, rec_ids, writeoff_acc_id=voucher.writeoff_acc_id.id, writeoff_period_id=voucher.period_id.id, writeoff_journal_id=voucher.journal_id.id)
        return True
class account_voucher_line(osv.osv):
    """Line of an account voucher: links a voucher to an (optional) journal
    item and tracks the original and still-unreconciled amounts expressed
    in the voucher's currency."""
    _name = 'account.voucher.line'
    _description = 'Voucher Lines'
    _order = "move_line_id"
    # If the payment is in the same currency than the invoice, we keep the same amount
    # Otherwise, we compute from invoice currency to payment currency
    def _compute_balance(self, cr, uid, ids, name, args, context=None):
        """Function-field getter computing, per line, 'amount_original' and
        'amount_unreconciled' in the voucher currency.
        :return: {line_id: {'amount_original': float, 'amount_unreconciled': float}}
        """
        currency_pool = self.pool.get('res.currency')
        rs_data = {}
        for line in self.browse(cr, uid, ids, context=context):
            # NOTE(review): context.copy() raises if context is None -- this
            # getter presumably is always called with a dict; confirm.
            ctx = context.copy()
            ctx.update({'date': line.voucher_id.date})
            voucher_rate = self.pool.get('res.currency').read(cr, uid, line.voucher_id.currency_id.id, ['rate'], context=ctx)['rate']
            # Special context keys so currency conversion uses the
            # voucher-specific payment rate when one is defined.
            ctx.update({
                'voucher_special_currency': line.voucher_id.payment_rate_currency_id and line.voucher_id.payment_rate_currency_id.id or False,
                'voucher_special_currency_rate': line.voucher_id.payment_rate * voucher_rate})
            res = {}
            company_currency = line.voucher_id.journal_id.company_id.currency_id.id
            voucher_currency = line.voucher_id.currency_id and line.voucher_id.currency_id.id or company_currency
            move_line = line.move_line_id or False
            if not move_line:
                # No journal item linked: nothing to pay/reconcile.
                res['amount_original'] = 0.0
                res['amount_unreconciled'] = 0.0
            elif move_line.currency_id and voucher_currency==move_line.currency_id.id:
                # Same foreign currency on both sides: reuse the booked amounts.
                res['amount_original'] = abs(move_line.amount_currency)
                res['amount_unreconciled'] = abs(move_line.amount_residual_currency)
            else:
                #always use the amount booked in the company currency as the basis of the conversion into the voucher currency
                res['amount_original'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, move_line.credit or move_line.debit or 0.0, context=ctx)
                res['amount_unreconciled'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, abs(move_line.amount_residual), context=ctx)
            rs_data[line.id] = res
        return rs_data
    def _currency_id(self, cr, uid, ids, name, args, context=None):
        '''
        This function returns the currency id of a voucher line. It's either the currency of the
        associated move line (if any) or the currency of the voucher or the company currency.
        '''
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            move_line = line.move_line_id
            if move_line:
                res[line.id] = move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id
            else:
                res[line.id] = line.voucher_id.currency_id and line.voucher_id.currency_id.id or line.voucher_id.company_id.currency_id.id
        return res
    _columns = {
        'voucher_id':fields.many2one('account.voucher', 'Voucher', required=1, ondelete='cascade'),
        'name':fields.char('Description',),
        'account_id':fields.many2one('account.account','Account', required=True),
        'partner_id':fields.related('voucher_id', 'partner_id', type='many2one', relation='res.partner', string='Partner'),
        'untax_amount':fields.float('Untax Amount'),
        'amount':fields.float('Amount', digits_compute=dp.get_precision('Account')),
        'reconcile': fields.boolean('Full Reconcile'),
        'type':fields.selection([('dr','Debit'),('cr','Credit')], 'Dr/Cr'),
        'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
        'move_line_id': fields.many2one('account.move.line', 'Journal Item', copy=False),
        'date_original': fields.related('move_line_id','date', type='date', relation='account.move.line', string='Date', readonly=1),
        'date_due': fields.related('move_line_id','date_maturity', type='date', relation='account.move.line', string='Due Date', readonly=1),
        'amount_original': fields.function(_compute_balance, multi='dc', type='float', string='Original Amount', store=True, digits_compute=dp.get_precision('Account')),
        'amount_unreconciled': fields.function(_compute_balance, multi='dc', type='float', string='Open Balance', store=True, digits_compute=dp.get_precision('Account')),
        'company_id': fields.related('voucher_id','company_id', relation='res.company', type='many2one', string='Company', store=True, readonly=True),
        'currency_id': fields.function(_currency_id, string='Currency', type='many2one', relation='res.currency', readonly=True),
    }
    _defaults = {
        'name': '',
    }
    def onchange_reconcile(self, cr, uid, ids, reconcile, amount, amount_unreconciled, context=None):
        """When 'Full Reconcile' is ticked, fill the amount with the full
        open balance; when unticked, reset it to 0.0."""
        vals = {'amount': 0.0}
        if reconcile:
            vals = { 'amount': amount_unreconciled}
        return {'value': vals}
    def onchange_amount(self, cr, uid, ids, amount, amount_unreconciled, context=None):
        """Tick 'Full Reconcile' automatically when the entered amount covers
        the whole open balance; leave it untouched when amount is falsy."""
        vals = {}
        if amount:
            vals['reconcile'] = (amount == amount_unreconciled)
        return {'value': vals}
    def onchange_move_line_id(self, cr, user, ids, move_line_id, context=None):
        """
        Returns a dict that contains new values and context
        @param move_line_id: latest value from user input for field move_line_id
        @param args: other arguments
        @param context: context arguments, like lang, time zone
        @return: Returns a dict which contains new values, and context
        """
        res = {}
        move_line_pool = self.pool.get('account.move.line')
        if move_line_id:
            move_line = move_line_pool.browse(cr, user, move_line_id, context=context)
            # A credit on the journal item is paid with a debit line, and vice versa.
            if move_line.credit:
                ttype = 'dr'
            else:
                ttype = 'cr'
            res.update({
                'account_id': move_line.account_id.id,
                'type': ttype,
                'currency_id': move_line.currency_id and move_line.currency_id.id or move_line.company_id.currency_id.id,
            })
        return {
            'value':res,
        }
    def default_get(self, cr, user, fields_list, context=None):
        """
        Returns default values for fields
        @param fields_list: list of fields, for which default values are required to be read
        @param context: context arguments, like lang, time zone
        @return: Returns a dict that contains default values for fields
        """
        if context is None:
            context = {}
        journal_id = context.get('journal_id', False)
        partner_id = context.get('partner_id', False)
        journal_pool = self.pool.get('account.journal')
        partner_pool = self.pool.get('res.partner')
        values = super(account_voucher_line, self).default_get(cr, user, fields_list, context=context)
        # Only compute account/type defaults when a journal is known and the
        # caller actually asked for 'account_id'.
        if (not journal_id) or ('account_id' not in fields_list):
            return values
        journal = journal_pool.browse(cr, user, journal_id, context=context)
        account_id = False
        ttype = 'cr'
        if journal.type in ('sale', 'sale_refund'):
            account_id = journal.default_credit_account_id and journal.default_credit_account_id.id or False
            ttype = 'cr'
        elif journal.type in ('purchase', 'expense', 'purchase_refund'):
            account_id = journal.default_debit_account_id and journal.default_debit_account_id.id or False
            ttype = 'dr'
        elif partner_id:
            # Fall back to the partner's payable/receivable account depending
            # on the voucher type carried in the context.
            partner = partner_pool.browse(cr, user, partner_id, context=context)
            if context.get('type') == 'payment':
                ttype = 'dr'
                account_id = partner.property_account_payable.id
            elif context.get('type') == 'receipt':
                account_id = partner.property_account_receivable.id
        values.update({
            'account_id':account_id,
            'type':ttype
        })
        return values
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
'''
Parser dispatcher mix-in class.
Copyright (c) 2010, Patrick Maupin. All rights reserved.
See http://code.google.com/p/rson/source/browse/trunk/license.txt
'''
def _alter_attributes(cls, attrs):
''' Return a new class with altered attributes.
But throw an exception unless altered attributes
already exist in class.
'''
if not attrs:
return cls
class Altered(cls):
pass
extra = cls.allowed_extra_attributes
for name, value in attrs.items():
if not hasattr(cls, name) and name not in extra:
raise AttributeError('Class %s has no attribute %s'
% (cls.__name__, name))
if value is not None:
setattr(Altered, name, staticmethod(value))
return Altered
class Dispatcher(object):
    ''' Assumes that this is mixed-in to a class with an
        appropriate parser_factory() method.
        The design of RSON allows for many things to be replaced
        at run-time. To support this without sacrificing too much
        efficiency, closures are used inside the classes.
        All the closures are invoked from inside the parser_factory
        method. This class has a dispatcher_factory that decides
        when to invoke the closures based on whether the particular
        variant has been cached or not.
    '''
    # Extra attribute names that _alter_attributes() may set even though
    # they do not already exist on the class.
    allowed_extra_attributes = ()
    @classmethod
    def dispatcher_factory(cls, tuple=tuple, sorted=sorted, **kw):
        # Returns a `loads` callable. Keyword arguments customize the class
        # once up front; further per-call keyword overrides are compiled
        # lazily and memoized in `parsercache`.
        def loads(s, **kw):
            if not kw:
                # Fast path: no overrides, use the prebuilt default parser.
                return default_loads(s)
            # Deterministic cache key for this particular set of overrides.
            key = tuple(sorted(kw.items()))
            func = cached(key)
            if func is None:
                # First time this combination is seen: build and memoize it.
                func = _alter_attributes(cls, kw)().parser_factory()
                parsercache[key] = func
            return func(s)
        # Note: `cls` is rebound to the altered subclass from here on.
        cls = _alter_attributes(cls, kw)
        default_loads = cls().parser_factory()
        parsercache = {}
        cached = parsercache.get
        # Allow callers to derive further specialised loaders from this one.
        loads.customize = cls.dispatcher_factory
        return loads
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory.config;
import java.io.Serializable;
import jakarta.inject.Provider;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.util.Assert;
/**
* A {@link org.springframework.beans.factory.FactoryBean} implementation that
* returns a value which is a JSR-330 {@link jakarta.inject.Provider} that in turn
* returns a bean sourced from a {@link org.springframework.beans.factory.BeanFactory}.
*
* <p>This is basically a JSR-330 compliant variant of Spring's good old
* {@link ObjectFactoryCreatingFactoryBean}. It can be used for traditional
* external dependency injection configuration that targets a property or
* constructor argument of type {@code jakarta.inject.Provider}, as an
* alternative to JSR-330's {@code @Inject} annotation-driven approach.
*
* @author Juergen Hoeller
* @since 3.0.2
* @see jakarta.inject.Provider
* @see ObjectFactoryCreatingFactoryBean
*/
public class ProviderCreatingFactoryBean extends AbstractFactoryBean<Provider<Object>> {
	// Name of the bean the produced Provider will resolve; must be set before use.
	private @Nullable String targetBeanName;
	/**
	 * Set the name of the target bean.
	 * <p>The target does not <i>have</i> to be a non-singleton bean, but realistically
	 * always will be (because if the target bean were a singleton, then said singleton
	 * bean could simply be injected straight into the dependent object, thus obviating
	 * the need for the extra level of indirection afforded by this factory approach).
	 */
	public void setTargetBeanName(String targetBeanName) {
		this.targetBeanName = targetBeanName;
	}
	@Override
	public void afterPropertiesSet() throws Exception {
		// Fail fast at initialization time if the mandatory bean name is missing.
		Assert.hasText(this.targetBeanName, "Property 'targetBeanName' is required");
		super.afterPropertiesSet();
	}
	@Override
	public Class<?> getObjectType() {
		return Provider.class;
	}
	@Override
	protected Provider<Object> createInstance() {
		BeanFactory beanFactory = getBeanFactory();
		Assert.state(beanFactory != null, "No BeanFactory available");
		Assert.state(this.targetBeanName != null, "No target bean name specified");
		// Only the factory and the bean name are captured here; resolution
		// happens lazily on each Provider.get() call.
		return new TargetBeanProvider(beanFactory, this.targetBeanName);
	}
	/**
	 * Independent inner class - for serialization purposes.
	 */
	@SuppressWarnings("serial")
	private static class TargetBeanProvider implements Provider<Object>, Serializable {
		private final BeanFactory beanFactory;
		private final String targetBeanName;
		public TargetBeanProvider(BeanFactory beanFactory, String targetBeanName) {
			this.beanFactory = beanFactory;
			this.targetBeanName = targetBeanName;
		}
		@Override
		public Object get() throws BeansException {
			// Each call delegates to getBean(), i.e. a fresh container lookup.
			return this.beanFactory.getBean(this.targetBeanName);
		}
	}
}
#!/bin/bash
# Dispatch to the macOS build and/or test scripts based on BUILD_ENVIRONMENT.
# An unset BUILD_ENVIRONMENT runs both steps; otherwise the step runs only
# when the environment name contains the matching "-build"/"-test" substring.
if [ -z "${BUILD_ENVIRONMENT}" ] || [[ "${BUILD_ENVIRONMENT}" == *-build* ]]; then
  # shellcheck source=./macos-build.sh
  source "$(dirname "${BASH_SOURCE[0]}")/macos-build.sh"
fi
if [ -z "${BUILD_ENVIRONMENT}" ] || [[ "${BUILD_ENVIRONMENT}" == *-test* ]]; then
  # shellcheck source=./macos-test.sh
  source "$(dirname "${BASH_SOURCE[0]}")/macos-test.sh"
fi
def average_Even(n):
    """Return the average of the even numbers 2, 4, ..., n.

    :param n: even upper bound (inclusive); must be >= 2 for a numeric result
    :return: integer (floor) average of the even numbers up to ``n``, or the
        historical sentinel string "Invalid Input" when ``n`` is odd
    """
    if n % 2 != 0:
        # Kept as a string sentinel (instead of raising) for backward
        # compatibility with existing callers.  The original code also had
        # an unreachable `return -1` after this line, now removed.
        return "Invalid Input"
    total = 0
    count = 0
    value = n
    while value >= 2:
        count += 1
        total += value
        value -= 2
    # NOTE: n < 2 (e.g. 0) still raises ZeroDivisionError, as before.
    return total // count
---
title: "Getting Started"
lesson: 1
approx_time: 10 mins
---
The first thing you need is a working installation of Ruby. Install from [the official website](https://www.ruby-lang.org/en/documentation/installation/). | unknown | github | https://github.com/jekyll/jekyll | test/source/_tutorials/getting-started.md |
import os
import unittest
from conans.test.utils.tools import TestClient
from conans.paths import CONANFILE
conanfile = """
from conans import ConanFile, tools
class AConan(ConanFile):
name = "Hello0"
version = "0.1"
def build(self):
self.output.warn("build() IN LOCAL CACHE=> %s" % str(self.in_local_cache))
def package(self):
self.output.warn("package() IN LOCAL CACHE=> %s" % str(self.in_local_cache))
"""
class InLocalCacheTest(unittest.TestCase):
    """Verify that ``in_local_cache`` is True when a recipe runs inside the
    local cache (export + install --build) and False for local development
    flows (install/build/package in a user folder)."""
    def test_in_local_cache_flag(self):
        # Cache flow: exported recipe built in the cache => flag is True.
        client = TestClient()
        client.save({CONANFILE: conanfile})
        client.run("export . lasote/stable")
        client.run("install Hello0/0.1@lasote/stable --build missing")
        self.assertIn("build() IN LOCAL CACHE=> True", client.user_io.out)
        self.assertIn("package() IN LOCAL CACHE=> True", client.user_io.out)
        # Local development flow: same recipe driven from a user folder.
        client = TestClient()
        client.save({CONANFILE: conanfile})
        client.run("install .")
        client.run("build .")
        self.assertIn("build() IN LOCAL CACHE=> False", client.user_io.out)
        pack_folder = os.path.join(client.current_folder, "package")
        os.mkdir(pack_folder)
        client.current_folder = pack_folder
        client.run("package .. --build-folder ..")
        self.assertIn("package() IN LOCAL CACHE=> False", client.user_io.out)
        # Confirm that we have the flag depending on the recipe too
        client = TestClient()
        client.save({CONANFILE: conanfile})
        client.run("export . lasote/stable")
        conanfile_reuse = """
from conans import ConanFile, tools
class OtherConan(ConanFile):
    name = "Hello1"
    version = "0.1"
    requires = "Hello0/0.1@lasote/stable"
    def build(self):
        pass
"""
        # Building the consumer locally still builds the Hello0 dependency in
        # the cache, so the dependency reports True.
        client.save({CONANFILE: conanfile_reuse}, clean_first=True)
        client.run("install . --build")
        self.assertIn("build() IN LOCAL CACHE=> True", client.user_io.out)
        self.assertIn("package() IN LOCAL CACHE=> True", client.user_io.out)
        client.run("export . lasote/stable")
        client.run("install Hello1/0.1@lasote/stable --build")
        self.assertIn("build() IN LOCAL CACHE=> True", client.user_io.out)
        self.assertIn("package() IN LOCAL CACHE=> True", client.user_io.out)
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.conventions.precommit;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.plugins.JavaBasePlugin;
import org.gradle.api.plugins.JavaPluginExtension;
import org.gradle.api.tasks.TaskProvider;
import org.gradle.api.tasks.testing.Test;
import org.gradle.language.base.plugins.LifecycleBasePlugin;
/**
 * Registers an aggregate {@code precommit} lifecycle task that runs all
 * non-test checks, wires it into the conventional {@code check} task,
 * makes it depend on compilation of every source set, and forces test
 * tasks to run after it.
 */
public class PrecommitTaskPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        // Aggregate task other precommit checks attach themselves to.
        TaskProvider<Task> precommit = project.getTasks().register(PrecommitPlugin.PRECOMMIT_TASK_NAME, t -> {
            t.setGroup(JavaBasePlugin.VERIFICATION_GROUP);
            t.setDescription("Runs all non-test checks");
        });
        // Hook precommit into the standard `check` lifecycle task when the
        // lifecycle-base plugin is applied.
        project.getPluginManager()
            .withPlugin(
                "lifecycle-base",
                p -> project.getTasks().named(LifecycleBasePlugin.CHECK_TASK_NAME).configure(t -> t.dependsOn(precommit))
            );
        project.getPluginManager().withPlugin("java-base", p -> {
            // run compilation as part of precommit
            project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets().all(sourceSet ->
                precommit.configure(t -> t.dependsOn(sourceSet.getClassesTaskName()))
            );
            // make sure tests run after all precommit tasks
            project.getTasks().withType(Test.class).configureEach(t -> t.mustRunAfter(precommit));
        });
    }
}
{
"name": "react-forgive",
"displayName": "React Analyzer",
"description": "React LSP",
"license": "MIT",
"version": "0.0.0",
"repository": {
"type": "git",
"url": "git+https://github.com/facebook/react.git",
"directory": "compiler/packages/react-forgive"
},
"categories": [
"Programming Languages"
],
"keywords": [
"react",
"react analyzer",
"react compiler"
],
"publisher": "Meta",
"engines": {
"vscode": "^1.96.0"
},
"activationEvents": [
"onLanguage:javascriptreact",
"onLanguage:typescriptreact"
],
"main": "./dist/extension.js",
"contributes": {
"commands": [
{
"command": "react-forgive.toggleAll",
"title": "React Analyzer: Toggle on/off"
}
]
},
"scripts": {
"build": "yarn run compile",
"build:compiler": "yarn workspace babel-plugin-react-compiler build --dts",
"compile": "rimraf dist && concurrently -n server,client \"scripts/build.mjs -t server\" \"scripts/build.mjs -t client\"",
"dev": "yarn run package && yarn run install-ext",
"install-ext": "code --install-extension react-forgive-0.0.0.vsix",
"lint": "echo 'no tests'",
"package": "rm -f react-forgive-0.0.0.vsix && vsce package --yarn",
"postinstall": "cd client && yarn install && cd ../server && yarn install && cd ..",
"pretest": "yarn run build:compiler && yarn run compile && yarn run lint",
"test": "vscode-test",
"vscode:prepublish": "yarn run compile",
"watch": "scripts/build.mjs --watch"
},
"devDependencies": {
"@eslint/js": "^9.13.0",
"@types/mocha": "^10.0.10",
"@types/node": "^20",
"@types/vscode": "^1.96.0",
"@vscode/test-cli": "^0.0.10",
"@vscode/test-electron": "^2.4.1",
"eslint": "^9.13.0",
"mocha": "^11.0.1",
"typescript-eslint": "^8.16.0",
"yargs": "^17.7.2"
}
} | json | github | https://github.com/facebook/react | compiler/packages/react-forgive/package.json |
# Mapping from the single-byte IBM861 (DOS Icelandic) code page to Unicode.
# Each entry is a two-element array:
#   [IBM861 byte value as an uppercase hex string, Unicode code point].
# Only the non-ASCII range (0x80-0xFF) is listed; entries are ordered by
# ascending Unicode code point.
IBM861_TO_UCS_TBL = [
  ["FF",0xA0],
  ["AD",0xA1],
  ["9C",0xA3],
  ["AE",0xAB],
  ["AA",0xAC],
  ["F8",0xB0],
  ["F1",0xB1],
  ["FD",0xB2],
  ["E6",0xB5],
  ["FA",0xB7],
  ["AF",0xBB],
  ["AC",0xBC],
  ["AB",0xBD],
  ["A8",0xBF],
  ["A4",0xC1],
  ["8E",0xC4],
  ["8F",0xC5],
  ["92",0xC6],
  ["80",0xC7],
  ["90",0xC9],
  ["A5",0xCD],
  ["8B",0xD0],
  ["A6",0xD3],
  ["99",0xD6],
  ["9D",0xD8],
  ["A7",0xDA],
  ["9A",0xDC],
  ["97",0xDD],
  ["8D",0xDE],
  ["E1",0xDF],
  ["85",0xE0],
  ["A0",0xE1],
  ["83",0xE2],
  ["84",0xE4],
  ["86",0xE5],
  ["91",0xE6],
  ["87",0xE7],
  ["8A",0xE8],
  ["82",0xE9],
  ["88",0xEA],
  ["89",0xEB],
  ["A1",0xED],
  ["8C",0xF0],
  ["A2",0xF3],
  ["93",0xF4],
  ["94",0xF6],
  ["F6",0xF7],
  ["9B",0xF8],
  ["A3",0xFA],
  ["96",0xFB],
  ["81",0xFC],
  ["98",0xFD],
  ["95",0xFE],
  ["9F",0x192],
  ["E2",0x393],
  ["E9",0x398],
  ["E4",0x3A3],
  ["E8",0x3A6],
  ["EA",0x3A9],
  ["E0",0x3B1],
  ["EB",0x3B4],
  ["EE",0x3B5],
  ["E3",0x3C0],
  ["E5",0x3C3],
  ["E7",0x3C4],
  ["ED",0x3C6],
  ["FC",0x207F],
  ["9E",0x20A7],
  ["F9",0x2219],
  ["FB",0x221A],
  ["EC",0x221E],
  ["EF",0x2229],
  ["F7",0x2248],
  ["F0",0x2261],
  ["F3",0x2264],
  ["F2",0x2265],
  ["A9",0x2310],
  ["F4",0x2320],
  ["F5",0x2321],
  ["C4",0x2500],
  ["B3",0x2502],
  ["DA",0x250C],
  ["BF",0x2510],
  ["C0",0x2514],
  ["D9",0x2518],
  ["C3",0x251C],
  ["B4",0x2524],
  ["C2",0x252C],
  ["C1",0x2534],
  ["C5",0x253C],
  ["CD",0x2550],
  ["BA",0x2551],
  ["D5",0x2552],
  ["D6",0x2553],
  ["C9",0x2554],
  ["B8",0x2555],
  ["B7",0x2556],
  ["BB",0x2557],
  ["D4",0x2558],
  ["D3",0x2559],
  ["C8",0x255A],
  ["BE",0x255B],
  ["BD",0x255C],
  ["BC",0x255D],
  ["C6",0x255E],
  ["C7",0x255F],
  ["CC",0x2560],
  ["B5",0x2561],
  ["B6",0x2562],
  ["B9",0x2563],
  ["D1",0x2564],
  ["D2",0x2565],
  ["CB",0x2566],
  ["CF",0x2567],
  ["D0",0x2568],
  ["CA",0x2569],
  ["D8",0x256A],
  ["D7",0x256B],
  ["CE",0x256C],
  ["DF",0x2580],
  ["DC",0x2584],
  ["DB",0x2588],
  ["DD",0x258C],
  ["DE",0x2590],
  ["B0",0x2591],
  ["B1",0x2592],
  ["B2",0x2593],
  ["FE",0x25A0],
]
import json
import logging
from lxml import etree
from xmodule.capa_module import ComplexEncoder
from xmodule.progress import Progress
from xmodule.stringify import stringify_children
import openendedchild
from .combined_open_ended_rubric import CombinedOpenEndedRubric
log = logging.getLogger("edx.courseware")
class SelfAssessmentModule(openendedchild.OpenEndedChild):
    """
    A Self Assessment module that allows students to write open-ended responses,
    submit, then see a rubric and rate themselves. Persists student supplied
    hints, answers, and assessment judgment (currently only correct/incorrect).
    Parses xml definition file--see below for exact format.

    Sample XML format:
    <selfassessment>
        <hintprompt>
            What hint about this problem would you give to someone?
        </hintprompt>
        <submitmessage>
            Save Successful. Thanks for participating!
        </submitmessage>
    </selfassessment>
    """
    # Directory (under the template root) containing this module's templates.
    TEMPLATE_DIR = "combinedopenended/selfassessment"

    # Values for self.child_state -- the per-submission state machine.
    INITIAL = 'initial'
    ASSESSING = 'assessing'
    REQUEST_HINT = 'request_hint'
    DONE = 'done'

    def setup_response(self, system, location, definition, descriptor):
        """
        Sets up the module
        @param system: Modulesystem
        @param location: location, to let the module know where it is.
        @param definition: XML definition of the module.
        @param descriptor: SelfAssessmentDescriptor
        @return: None
        """
        # Normalize the prompt/rubric (possibly XML subtrees) to plain strings.
        self.child_prompt = stringify_children(self.child_prompt)
        self.child_rubric = stringify_children(self.child_rubric)

    def get_html(self, system):
        """
        Gets context and renders HTML that represents the module
        @param system: Modulesystem
        @return: Rendered HTML
        """
        # set context variables and render template
        previous_answer = self.get_display_answer()
        # Use the module name as a unique id to pass to the template.
        try:
            module_id = self.system.location.name
        except AttributeError:
            # In cases where we don't have a system or a location, use a fallback.
            module_id = "self_assessment"
        context = {
            'prompt': self.child_prompt,
            'previous_answer': previous_answer,
            'ajax_url': system.ajax_url,
            'initial_rubric': self.get_rubric_html(system),
            'state': self.child_state,
            'allow_reset': self._allow_reset(),
            'child_type': 'selfassessment',
            'accept_file_upload': self.accept_file_upload,
            'module_id': module_id,
        }
        html = system.render_template('{0}/self_assessment_prompt.html'.format(self.TEMPLATE_DIR), context)
        return html

    def handle_ajax(self, dispatch, data, system):
        """
        This is called by courseware.module_render, to handle an AJAX call.
        "data" is request.POST.

        Returns a json dictionary:
        { 'progress_changed' : True/False,
          'progress': 'none'/'in_progress'/'done',
          <other request-specific values here > }
        """
        # Dispatch table: AJAX action name -> bound handler method.
        handlers = {
            'save_answer': self.save_answer,
            'save_assessment': self.save_assessment,
            'save_post_assessment': self.save_hint,
            'store_answer': self.store_answer,
        }
        if dispatch not in handlers:
            # This is a dev_facing_error
            log.error("Cannot find {0} in handlers in handle_ajax function for open_ended_module.py".format(dispatch))
            # This is a dev_facing_error
            return json.dumps({'error': 'Error handling action. Please try again.', 'success': False})
        # Report whether the handler advanced the student's progress.
        before = self.get_progress()
        d = handlers[dispatch](data, system)
        after = self.get_progress()
        d.update({
            'progress_changed': after != before,
            'progress_status': Progress.to_js_status_str(after),
        })
        return json.dumps(d, cls=ComplexEncoder)

    def get_rubric_html(self, system):
        """
        Return the appropriate version of the rubric, based on the state.
        """
        if self.child_state == self.INITIAL:
            return ''
        rubric_renderer = CombinedOpenEndedRubric(system.render_template, False)
        rubric_dict = rubric_renderer.render_rubric(self.child_rubric)
        success = rubric_dict['success']
        rubric_html = rubric_dict['html']
        # we'll render it
        context = {
            'rubric': rubric_html,
            'max_score': self._max_score,
        }
        # The rubric is editable only while the student is still assessing.
        if self.child_state == self.ASSESSING:
            context['read_only'] = False
        elif self.child_state in (self.POST_ASSESSMENT, self.DONE):
            context['read_only'] = True
        else:
            # This is a dev_facing_error
            raise ValueError("Self assessment module is in an illegal state '{0}'".format(self.child_state))
        return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context)

    def get_hint_html(self, system):
        """
        Return the appropriate version of the hint view, based on state.
        """
        if self.child_state in (self.INITIAL, self.ASSESSING):
            return ''
        if self.child_state == self.DONE:
            # display the previous hint
            latest = self.latest_post_assessment(system)
            hint = latest if latest is not None else ''
        else:
            hint = ''
        context = {'hint': hint}
        if self.child_state == self.POST_ASSESSMENT:
            context['read_only'] = False
        elif self.child_state == self.DONE:
            context['read_only'] = True
        else:
            # This is a dev_facing_error
            raise ValueError("Self Assessment module is in an illegal state '{0}'".format(self.child_state))
        return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)

    def save_answer(self, data, system):
        """
        After the answer is submitted, show the rubric.

        Args:
            data: the request dictionary passed to the ajax request. Should contain
                a key 'student_answer'

        Returns:
            Dictionary with keys 'success' and either 'error' (if not success),
            or 'rubric_html' (if success).
        """
        # Check to see if this problem is closed
        # NOTE(review): check_if_closed appears to return (bool, error-dict)
        # and we pass the dict straight back -- confirm against the parent class.
        closed, msg = self.check_if_closed()
        if closed:
            return msg
        if self.child_state != self.INITIAL:
            return self.out_of_sync_error(data)
        error_message = ""
        # add new history element with answer and empty score and hint.
        success, error_message, data = self.append_file_link_to_student_answer(data)
        if success:
            data['student_answer'] = SelfAssessmentModule.sanitize_html(data['student_answer'])
            self.new_history_entry(data['student_answer'])
            self.change_state(self.ASSESSING)
        return {
            'success': success,
            'rubric_html': self.get_rubric_html(system),
            'error': error_message,
            'student_response': data['student_answer'].replace("\n", "<br/>"),
        }

    def save_assessment(self, data, _system):
        """
        Save the assessment.  If the student said they're right, don't ask for a
        hint, and go straight to the done state.  Otherwise, do ask for a hint.

        Returns a dict { 'success': bool, 'state': state,
        'hint_html': hint_html OR 'message_html': html and 'allow_reset',
        'error': error-msg},
        with 'error' only present if 'success' is False, and 'hint_html' or
        'message_html' only if success is true
        :param data: A `webob.multidict.MultiDict` containing the keys
            asasssment: The sum of assessment scores
            score_list[]: A multivalue key containing all the individual scores
        """
        closed, msg = self.check_if_closed()
        if closed:
            return msg
        if self.child_state != self.ASSESSING:
            return self.out_of_sync_error(data)
        try:
            score = int(data.get('assessment'))
            score_list = [int(x) for x in data.getall('score_list[]')]
        except (ValueError, TypeError):
            # This is a dev_facing_error
            log.error("Non-integer score value passed to save_assessment, or no score list present.")
            # This is a student_facing_error
            _ = self.system.service(self, "i18n").ugettext
            return {
                'success': False,
                'error': _("Error saving your score.  Please notify course staff.")
            }
        # Record score as assessment and rubric scores as post assessment
        self.record_latest_score(score)
        self.record_latest_post_assessment(json.dumps(score_list))
        d = {'success': True, }
        self.change_state(self.DONE)
        d['allow_reset'] = self._allow_reset()
        d['state'] = self.child_state
        return d

    def save_hint(self, data, _system):
        '''
        Not used currently, as hints have been removed from the system.
        Save the hint.
        Returns a dict { 'success': bool,
                         'message_html': message_html,
                         'error': error-msg,
                         'allow_reset': bool},
        with the error key only present if success is False and message_html
        only if True.
        '''
        if self.child_state != self.POST_ASSESSMENT:
            # Note: because we only ask for hints on wrong answers, may not have
            # the same number of hints and answers.
            return self.out_of_sync_error(data)
        self.record_latest_post_assessment(data['hint'])
        self.change_state(self.DONE)
        return {
            'success': True,
            'message_html': '',
            'allow_reset': self._allow_reset(),
        }

    def latest_post_assessment(self, system):
        """
        Return the most recently stored rubric scores, wrapped in a list.

        The parent class stores the post-assessment as a JSON-encoded list of
        per-category scores; decode it, falling back to an empty list when
        nothing has been stored yet or the stored value is not valid JSON.
        """
        latest_post_assessment = super(SelfAssessmentModule, self).latest_post_assessment(system)
        try:
            rubric_scores = json.loads(latest_post_assessment)
        except (ValueError, TypeError):
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to the decode failures json.loads
            # actually raises (ValueError for bad JSON, TypeError for None).
            rubric_scores = []
        return [rubric_scores]
class SelfAssessmentDescriptor(object):
    """
    Module for adding self assessment questions to courses
    """
    # Template used by Studio to edit this module's raw XML.
    mako_template = "widgets/html-edit.html"
    module_class = SelfAssessmentModule
    filename_extension = "xml"
    has_score = True

    def __init__(self, system):
        self.system = system

    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """
        Pull out the rubric, prompt, and submitmessage into a dictionary.

        Returns:
        {
        'submitmessage': 'some-html'
        'hintprompt': 'some-html'
        }
        """
        # No child tags are currently required; the validation loop is kept so
        # tags can be re-enabled by listing them in expected_children.
        # (A dead nested `parse` helper that was never called was removed.)
        expected_children = []
        for child in expected_children:
            if len(xml_object.xpath(child)) != 1:
                # This is a staff_facing_error
                raise ValueError(
                    u"Self assessment definition must include exactly one '{0}' tag. Contact the learning sciences group for assistance.".format(
                        child))
        return {}

    def definition_to_xml(self, resource_fs):
        '''Return an xml element representing this definition.'''
        elt = etree.Element('selfassessment')

        def add_child(k):
            # Serialize attribute `k` of this descriptor as a child element.
            child_str = u'<{tag}>{body}</{tag}>'.format(tag=k, body=getattr(self, k))
            child_node = etree.fromstring(child_str)
            elt.append(child_node)

        # Currently no children are serialized; the hook remains for parity
        # with definition_from_xml.
        for child in []:
            add_child(child)
        return elt
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from . import github
import json
import os
import requests
import six.moves.configparser
import urllib.request, urllib.error, urllib.parse
class IssueHandler(object):
    '''
    This plugin facilitates sending issues to github, when
    an item is prefixed with '@issue' or '@bug'

    It will also write items to the issues stream, as well
    as reporting it to github
    '''

    # GitHub REST endpoint for creating issues: formatted with owner, repo.
    URL = 'https://api.github.com/repos/{}/{}/issues'
    # Maximum issue-title length; overflowing words are moved to the body.
    CHARACTER_LIMIT = 70
    CONFIG_FILE = '~/.github-issue-bot/github-issue-bot.conf'
    REPO_NAME = ''
    REPO_OWNER = ''

    def __init__(self):
        # gets token from config file
        # Token at CONFIG_FILE address
        config = six.moves.configparser.ConfigParser()
        config.read([os.path.expanduser(self.CONFIG_FILE)])
        self.REPO_NAME = config.get('github', 'github_repo')
        self.REPO_OWNER = config.get('github', 'github_repo_owner')

    def usage(self):
        return '''
            This plugin will allow users to flag messages
            as being issues with Zulip by using te prefix '@issue'

            Before running this, make sure to create a stream
            called "issues" that your API user can send to.

            Also, make sure that the credentials of the github bot have
            been typed in correctly, that there is a personal access token
            with access to public repositories ONLY,
            and that the repository name is entered correctly.

            Check ~/.github-issue-bot/github-issue-bot.conf, and make sure there are
            github_repo   (The name of the repo to post to)
            github_repo_owner   (The owner of the repo to post to)
            github_username   (The username of the github bot)
            github_token    (The personal access token for the github bot)
            '''

    def triage_message(self, message, client):
        # return True if we want to (possibly) respond to this message
        original_content = message['content']
        # This next line of code is defensive, as we
        # never want to get into an infinite loop of posting follow
        # ups for own follow ups!
        if message['display_recipient'] == 'issue':
            return False
        is_issue = original_content.startswith('@issue')
        return is_issue

    def handle_message(self, message, client, state_handler):
        original_content = message['content']
        original_sender = message['sender_email']
        new_content = original_content.replace('@issue', 'by {}:'.format(original_sender,))
        # gets the repo url
        url_new = self.URL.format(self.REPO_OWNER, self.REPO_NAME)
        # signs into github using the provided username and password
        session = github.auth()
        # Gets rid of the @issue in the issue title
        issue_title = message['content'].replace('@issue', '').strip()
        # Split the title at CHARACTER_LIMIT; everything after goes to the body.
        issue_content = ''
        new_issue_title = ''
        for part_of_title in issue_title.split():
            if len(new_issue_title) < self.CHARACTER_LIMIT:
                new_issue_title += '{} '.format(part_of_title)
            else:
                issue_content += '{} '.format(part_of_title)
        new_issue_title = new_issue_title.strip()
        issue_content = issue_content.strip()
        if issue_content:
            # BUG FIX: '...' used to be appended unconditionally, so even short,
            # untruncated titles ended in an ellipsis. Only mark real truncation.
            new_issue_title += '...'
        # Creates the issue json, that is transmitted to the github api servers
        issue = {
            'title': new_issue_title,
            'body': '{} **Sent by [{}](https://chat.zulip.org/#) from zulip**'.format(issue_content, original_sender),
            'assignee': '',
            'milestone': 'none',
            'labels': [''],
        }
        # Sends the HTTP post request
        r = session.post(url_new, json.dumps(issue))
        if r.ok:
            # sends the message onto the 'issues' stream so it can be seen by zulip users
            client.send_message(dict(
                type='stream',
                to='issues',
                subject=message['sender_email'],
                # Adds a check mark so that the user can verify if it has been sent
                content='{} :heavy_check_mark:'.format(new_content),
            ))
            return
        # This means that the issue has not been sent
        # sends the message onto the 'issues' stream so it can be seen by zulip users
        client.send_message(dict(
            type='stream',
            to='issues',
            subject=message['sender_email'],
            # Adds a cross so that the user can see that it has failed, and provides a link to a
            # google search that can (hopefully) direct them to the error
            content='{} :x: Code: [{}](https://www.google.com/search?q=Github HTTP {} Error {})'
                    .format(new_content, r.status_code, r.status_code, r.content),
        ))
handler_class = IssueHandler
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.typeInfoProvider
import org.jetbrains.kotlin.analysis.test.framework.base.AbstractAnalysisApiBasedTest
import org.jetbrains.kotlin.analysis.test.framework.projectStructure.KtTestModule
import org.jetbrains.kotlin.analysis.test.framework.services.expressionMarkerProvider
import org.jetbrains.kotlin.analysis.test.framework.utils.executeOnPooledThreadInReadAction
import org.jetbrains.kotlin.psi.KtExpression
import org.jetbrains.kotlin.psi.KtFile
import org.jetbrains.kotlin.test.services.TestServices
import org.jetbrains.kotlin.test.services.assertions
import org.jetbrains.kotlin.types.Variance
/**
 * Base test that, for the expression marked by the caret in the main test
 * file, queries the Analysis API for the expression's expected type, renders
 * it, and records its function-type kind, comparing the combined dump against
 * the expected test output file.
 */
abstract class AbstractFunctionClassKindTest : AbstractAnalysisApiBasedTest() {
    override fun doTestByMainFile(mainFile: KtFile, mainModule: KtTestModule, testServices: TestServices) {
        // The marked expression whose expected type is under inspection.
        val expressionAtCaret = testServices.expressionMarkerProvider.getBottommostElementOfTypeAtCaret(mainFile) as KtExpression
        // Analysis is executed on a pooled thread inside a read action.
        val (type, functionClassKind) = executeOnPooledThreadInReadAction {
            copyAwareAnalyzeForTest(expressionAtCaret) { contextExpression ->
                val functionType = contextExpression.expectedType
                // Pair of (rendered type text, function type kind) -- both null-safe.
                functionType?.render(position = Variance.INVARIANT) to functionType?.functionTypeKind
            }
        }
        val actual = buildString {
            appendLine("expression: ${expressionAtCaret.text}")
            appendLine("expected type: $type")
            appendLine("functionClassKind: $functionClassKind")
        }
        testServices.assertions.assertEqualsToTestOutputFile(actual)
    }
}
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package getproviders
import "testing"
func TestParsePlatform(t *testing.T) {
tests := []struct {
Input string
Want Platform
Err bool
}{
{
"",
Platform{},
true,
},
{
"too_many_notes",
Platform{},
true,
},
{
"extra _ whitespaces ",
Platform{},
true,
},
{
"arbitrary_os",
Platform{OS: "arbitrary", Arch: "os"},
false,
},
}
for _, test := range tests {
got, err := ParsePlatform(test.Input)
if err != nil {
if test.Err == false {
t.Errorf("unexpected error: %s", err.Error())
}
} else {
if test.Err {
t.Errorf("wrong result: expected error, got none")
}
}
if got != test.Want {
t.Errorf("wrong\n got: %q\nwant: %q", got, test.Want)
}
}
} | go | github | https://github.com/hashicorp/terraform | internal/getproviders/types_test.go |
/******************************************************************************
* Copyright (c) 2024, Tri Dao.
******************************************************************************/
#pragma once
#include <ATen/TensorIndexing.h>
#include <ATen/core/Tensor.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPGraphsUtils.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/narrow.h>
#include <ATen/ops/pad.h>
#include <ATen/ops/reshape.h>
#include <ATen/ops/scalar_tensor.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/zeros.h>
#endif
#ifdef OLD_GENERATOR_PATH
#include <ATen/CUDAGeneratorImpl.h>
#else
#include <ATen/hip/HIPGeneratorImpl.h>
#endif
#include <ATen/native/transformers/hip/flash_attn/flash_api.h>
#define CHECK_DEVICE(x) TORCH_CHECK(x.is_cuda(), #x " must be on CUDA")
#define CHECK_SHAPE(x, ...) \
TORCH_CHECK( \
x.sizes() == at::IntArrayRef({__VA_ARGS__}), \
#x " must have shape (" #__VA_ARGS__ ")")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
namespace flash {

// Single-purpose kernel that unpacks an at::PhiloxCudaState into a
// two-element {seed, offset} buffer. When the state was captured (as part of
// a CUDA/HIP graph), seed and offset live behind device pointers and the
// intra-graph offset must be added; otherwise both are stored by value.
inline __global__ void ParsePhiloxCudaState(
    at::PhiloxCudaState arg,
    uint64_t* rng_state) {
  // Imitate from PyTorch
  // https://github.com/pytorch/pytorch/blob/8b61daaf7349e9102117e1aeefaa51666d887547/aten/src/ATen/cuda/detail/UnpackRaw.cuh#L17
  if (arg.captured_) {
    // Graph-captured state: dereference device pointers at kernel run time.
    rng_state[0] = static_cast<uint64_t>(*arg.seed_.ptr);
    rng_state[1] =
        static_cast<uint64_t>(*(arg.offset_.ptr) + arg.offset_intragraph_);
  } else {
    rng_state[0] = arg.seed_.val;
    rng_state[1] = arg.offset_.val;
  }
}

} // namespace flash
{
"format_version": "1.0",
"values": {
"root_module": {
"resources": [
{
"address": "tfcoremock_list.list",
"mode": "managed",
"name": "list",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"schema_version": 0,
"sensitive_values": {
"list": [
{},
{},
{}
]
},
"type": "tfcoremock_list",
"values": {
"id": "F40F2AB4-100C-4AE8-BFD0-BF332A158415",
"list": [
{
"id": "3BFC1A84-023F-44FA-A8EE-EFD88E18B8F7"
},
{
"id": "07F887E2-FDFF-4B2E-9BFB-B6AA4A05EDB9"
},
{
"id": "4B7178A8-AB9D-4FF4-8B3D-48B754DE537B"
}
]
}
}
]
}
}
} | json | github | https://github.com/hashicorp/terraform | testing/equivalence-tests/outputs/replace_within_list/state.json |
"""Tests for ExtensionDtype Table Schema integration."""
from collections import OrderedDict
import datetime as dt
import decimal
from io import StringIO
import json
import pytest
from pandas import (
NA,
DataFrame,
Index,
array,
read_json,
)
import pandas._testing as tm
from pandas.core.arrays.integer import Int64Dtype
from pandas.core.arrays.string_ import StringDtype
from pandas.core.series import Series
from pandas.tests.extension.date import (
DateArray,
DateDtype,
)
from pandas.tests.extension.decimal.array import (
DecimalArray,
DecimalDtype,
)
from pandas.io.json._table_schema import (
as_json_table_type,
build_table_schema,
)
class TestBuildSchema:
    """Table Schema generation for DataFrames with extension-dtype columns."""

    def test_build_table_schema(self):
        # One column per extension dtype under test.
        frame = DataFrame(
            {
                "A": DateArray([dt.date(2021, 10, 10)]),
                "B": DecimalArray([decimal.Decimal(10)]),
                "C": array(["pandas"], dtype="string"),
                "D": array([10], dtype="Int64"),
            }
        )
        expected = {
            "fields": [
                {"name": "index", "type": "integer"},
                {"name": "A", "type": "any", "extDtype": "DateDtype"},
                {"name": "B", "type": "number", "extDtype": "decimal"},
                {"name": "C", "type": "string", "extDtype": "string"},
                {"name": "D", "type": "integer", "extDtype": "Int64"},
            ],
            "primaryKey": ["index"],
        }
        assert build_table_schema(frame, version=False) == expected
        # With the default version=True, a pandas_version key is included.
        assert "pandas_version" in build_table_schema(frame)
class TestTableSchemaType:
    """as_json_table_type results for extension arrays and their dtypes."""

    @pytest.mark.parametrize("box", [lambda x: x, Series])
    def test_as_json_table_type_ext_date_array_dtype(self, box):
        data = box(DateArray([dt.date(2021, 10, 10)]))
        assert as_json_table_type(data.dtype) == "any"

    def test_as_json_table_type_ext_date_dtype(self):
        assert as_json_table_type(DateDtype()) == "any"

    @pytest.mark.parametrize("box", [lambda x: x, Series])
    def test_as_json_table_type_ext_decimal_array_dtype(self, box):
        data = box(DecimalArray([decimal.Decimal(10)]))
        assert as_json_table_type(data.dtype) == "number"

    def test_as_json_table_type_ext_decimal_dtype(self):
        assert as_json_table_type(DecimalDtype()) == "number"

    @pytest.mark.parametrize("box", [lambda x: x, Series])
    def test_as_json_table_type_ext_string_array_dtype(self, box):
        data = box(array(["pandas"], dtype="string"))
        assert as_json_table_type(data.dtype) == "string"

    def test_as_json_table_type_ext_string_dtype(self):
        assert as_json_table_type(StringDtype()) == "string"

    @pytest.mark.parametrize("box", [lambda x: x, Series])
    def test_as_json_table_type_ext_integer_array_dtype(self, box):
        data = box(array([10], dtype="Int64"))
        assert as_json_table_type(data.dtype) == "integer"

    def test_as_json_table_type_ext_integer_dtype(self):
        assert as_json_table_type(Int64Dtype()) == "integer"
class TestTableOrient:
    """Round-trips of extension-dtype data through to_json(orient="table")."""

    @pytest.fixture
    def da(self):
        """Fixture for creating a DateArray."""
        return DateArray([dt.date(2021, 10, 10)])

    @pytest.fixture
    def dc(self):
        """Fixture for creating a DecimalArray."""
        return DecimalArray([decimal.Decimal(10)])

    @pytest.fixture
    def sa(self):
        """Fixture for creating a StringDtype array."""
        return array(["pandas"], dtype="string")

    @pytest.fixture
    def ia(self):
        """Fixture for creating an Int64Dtype array."""
        return array([10], dtype="Int64")

    def test_build_date_series(self, da):
        # Series of DateDtype serializes as "any" with extDtype metadata.
        s = Series(da, name="a")
        s.index.name = "id"
        result = s.to_json(orient="table", date_format="iso")
        result = json.loads(result, object_pairs_hook=OrderedDict)
        assert "pandas_version" in result["schema"]
        # Drop the version key so the comparison is version-independent.
        result["schema"].pop("pandas_version")
        fields = [
            {"name": "id", "type": "integer"},
            {"name": "a", "type": "any", "extDtype": "DateDtype"},
        ]
        schema = {"fields": fields, "primaryKey": ["id"]}
        expected = OrderedDict(
            [
                ("schema", schema),
                ("data", [OrderedDict([("id", 0), ("a", "2021-10-10T00:00:00.000")])]),
            ]
        )
        assert result == expected

    def test_build_decimal_series(self, dc):
        # Series of DecimalDtype serializes as "number"; value becomes a string.
        s = Series(dc, name="a")
        s.index.name = "id"
        result = s.to_json(orient="table", date_format="iso")
        result = json.loads(result, object_pairs_hook=OrderedDict)
        assert "pandas_version" in result["schema"]
        result["schema"].pop("pandas_version")
        fields = [
            {"name": "id", "type": "integer"},
            {"name": "a", "type": "number", "extDtype": "decimal"},
        ]
        schema = {"fields": fields, "primaryKey": ["id"]}
        expected = OrderedDict(
            [
                ("schema", schema),
                ("data", [OrderedDict([("id", 0), ("a", "10")])]),
            ]
        )
        assert result == expected

    def test_build_string_series(self, sa):
        # Series of StringDtype serializes as "string".
        s = Series(sa, name="a")
        s.index.name = "id"
        result = s.to_json(orient="table", date_format="iso")
        result = json.loads(result, object_pairs_hook=OrderedDict)
        assert "pandas_version" in result["schema"]
        result["schema"].pop("pandas_version")
        fields = [
            {"name": "id", "type": "integer"},
            {"name": "a", "type": "string", "extDtype": "string"},
        ]
        schema = {"fields": fields, "primaryKey": ["id"]}
        expected = OrderedDict(
            [
                ("schema", schema),
                ("data", [OrderedDict([("id", 0), ("a", "pandas")])]),
            ]
        )
        assert result == expected

    def test_build_int64_series(self, ia):
        # Series of Int64 serializes as "integer" with a native JSON number.
        s = Series(ia, name="a")
        s.index.name = "id"
        result = s.to_json(orient="table", date_format="iso")
        result = json.loads(result, object_pairs_hook=OrderedDict)
        assert "pandas_version" in result["schema"]
        result["schema"].pop("pandas_version")
        fields = [
            {"name": "id", "type": "integer"},
            {"name": "a", "type": "integer", "extDtype": "Int64"},
        ]
        schema = {"fields": fields, "primaryKey": ["id"]}
        expected = OrderedDict(
            [
                ("schema", schema),
                ("data", [OrderedDict([("id", 0), ("a", 10)])]),
            ]
        )
        assert result == expected

    def test_to_json(self, da, dc, sa, ia):
        # All four extension dtypes combined in one DataFrame.
        df = DataFrame(
            {
                "A": da,
                "B": dc,
                "C": sa,
                "D": ia,
            }
        )
        df.index.name = "idx"
        result = df.to_json(orient="table", date_format="iso")
        result = json.loads(result, object_pairs_hook=OrderedDict)
        assert "pandas_version" in result["schema"]
        result["schema"].pop("pandas_version")
        fields = [
            OrderedDict({"name": "idx", "type": "integer"}),
            OrderedDict({"name": "A", "type": "any", "extDtype": "DateDtype"}),
            OrderedDict({"name": "B", "type": "number", "extDtype": "decimal"}),
            OrderedDict({"name": "C", "type": "string", "extDtype": "string"}),
            OrderedDict({"name": "D", "type": "integer", "extDtype": "Int64"}),
        ]
        schema = OrderedDict({"fields": fields, "primaryKey": ["idx"]})
        data = [
            OrderedDict(
                [
                    ("idx", 0),
                    ("A", "2021-10-10T00:00:00.000"),
                    ("B", "10"),
                    ("C", "pandas"),
                    ("D", 10),
                ]
            )
        ]
        expected = OrderedDict([("schema", schema), ("data", data)])
        assert result == expected

    def test_json_ext_dtype_reading_roundtrip(self):
        # GH#40255
        # Nullable dtypes (including NA values and a nullable index) must
        # survive a to_json/read_json round trip unchanged.
        df = DataFrame(
            {
                "a": Series([2, NA], dtype="Int64"),
                "b": Series([1.5, NA], dtype="Float64"),
                "c": Series([True, NA], dtype="boolean"),
            },
            index=Index([1, NA], dtype="Int64"),
        )
        expected = df.copy()
        data_json = df.to_json(orient="table", indent=4)
        result = read_json(StringIO(data_json), orient="table")
        tm.assert_frame_equal(result, expected)

    def test_json_ext_dtype_reading(self):
        # GH#40255
        # Hand-written table-schema payload: extDtype must drive the dtype and
        # null must map to NA.
        # NOTE(review): the embedded JSON carries a trailing comma after the
        # "fields" array; pandas' reader accepts it, but strict JSON would
        # not -- confirm this is intentional.
        data_json = """{
            "schema":{
                "fields":[
                    {
                        "name":"a",
                        "type":"integer",
                        "extDtype":"Int64"
                    }
                ],
            },
            "data":[
                {
                    "a":2
                },
                {
                    "a":null
                }
            ]
        }"""
        result = read_json(StringIO(data_json), orient="table")
        expected = DataFrame({"a": Series([2, NA], dtype="Int64")})
        tm.assert_frame_equal(result, expected)
# frozen_string_literal: true
require "securerandom"
require "concurrent/scheduled_task"
require "concurrent/executor/thread_pool_executor"
require "concurrent/utility/processor_counter"
module ActiveJob
  module QueueAdapters
    # = Active Job Async adapter
    #
    # The Async adapter runs jobs with an in-process thread pool.
    #
    # This is the default queue adapter. It's well-suited for dev/test since
    # it doesn't need an external infrastructure, but it's a poor fit for
    # production since it drops pending jobs on restart.
    #
    # To use this adapter, set queue adapter to +:async+:
    #
    #   config.active_job.queue_adapter = :async
    #
    # To configure the adapter's thread pool, instantiate the adapter and
    # pass your own config:
    #
    #   config.active_job.queue_adapter = ActiveJob::QueueAdapters::AsyncAdapter.new \
    #     min_threads: 1,
    #     max_threads: 2 * Concurrent.processor_count,
    #     idletime: 600.seconds
    #
    # The adapter uses a {Concurrent Ruby}[https://github.com/ruby-concurrency/concurrent-ruby] thread pool to schedule and execute
    # jobs. Since jobs share a single thread pool, long-running jobs will block
    # short-lived jobs. Fine for dev/test; bad for production.
    class AsyncAdapter < AbstractAdapter
      # See {Concurrent::ThreadPoolExecutor}[https://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/ThreadPoolExecutor.html] for executor options.
      def initialize(**executor_options)
        @scheduler = Scheduler.new(**executor_options)
      end

      def enqueue(job) # :nodoc:
        @scheduler.enqueue JobWrapper.new(job), queue_name: job.queue_name
      end

      def enqueue_at(job, timestamp) # :nodoc:
        @scheduler.enqueue_at JobWrapper.new(job), timestamp, queue_name: job.queue_name
      end

      # Gracefully stop processing jobs. Finishes in-progress work and handles
      # any new jobs following the executor's fallback policy (`caller_runs`).
      # Waits for termination by default. Pass `wait: false` to continue.
      def shutdown(wait: true) # :nodoc:
        @scheduler.shutdown wait: wait
      end

      # Used for our test suite.
      def immediate=(immediate) # :nodoc:
        @scheduler.immediate = immediate
      end

      # Note that we don't actually need to serialize the jobs since we're
      # performing them in-process, but we do so anyway for parity with other
      # adapters and deployment environments. Otherwise, serialization bugs
      # may creep in undetected.
      class JobWrapper # :nodoc:
        def initialize(job)
          # A synthetic provider id, assigned before serialization so it is
          # visible to the caller.
          job.provider_job_id = SecureRandom.uuid
          @job_data = job.serialize
        end

        def perform
          Base.execute @job_data
        end
      end

      class Scheduler # :nodoc:
        DEFAULT_EXECUTOR_OPTIONS = {
          min_threads:     0,
          max_threads:     ENV.fetch("RAILS_MAX_THREADS", 5).to_i,
          auto_terminate:  true,
          idletime:        60, # 1 minute
          max_queue:       0, # unlimited
          fallback_policy: :caller_runs # shouldn't matter -- 0 max queue
        }.freeze

        # When true, jobs run inline on the calling thread (test mode).
        attr_accessor :immediate

        def initialize(**options)
          self.immediate = false
          @immediate_executor = Concurrent::ImmediateExecutor.new
          @async_executor = Concurrent::ThreadPoolExecutor.new(
            name: "ActiveJob-async-scheduler",
            **DEFAULT_EXECUTOR_OPTIONS,
            **options
          )
        end

        # All queues share the single pool; queue_name is accepted only for
        # interface parity with other adapters.
        def enqueue(job, queue_name:)
          executor.post(job, &:perform)
        end

        # Schedule for the future unless in immediate mode or the timestamp
        # has already passed, in which case run via the regular path.
        def enqueue_at(job, timestamp, queue_name:)
          delay = timestamp - Time.current.to_f
          if !immediate && delay > 0
            Concurrent::ScheduledTask.execute(delay, args: [job], executor: executor, &:perform)
          else
            enqueue(job, queue_name: queue_name)
          end
        end

        def shutdown(wait: true)
          @async_executor.shutdown
          @async_executor.wait_for_termination if wait
        end

        # Immediate mode (tests) bypasses the thread pool entirely.
        def executor
          immediate ? @immediate_executor : @async_executor
        end
      end
    end
  end
end
from __future__ import annotations
from datetime import datetime
import re
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
@pytest.fixture
def mix_ab() -> dict[str, list[int | str]]:
    # Literal spelling of list(range(4)) / list("ab..").
    return {"a": [0, 1, 2, 3], "b": ["a", "b", ".", "."]}
@pytest.fixture
def mix_abc() -> dict[str, list[float | str]]:
    # Literal spelling of list(range(4)) / list("ab.."), plus a NaN column.
    return {"a": [0, 1, 2, 3], "b": ["a", "b", ".", "."], "c": ["a", "b", np.nan, "d"]}
class TestDataFrameReplace:
    # NaN -> 0 with inplace=True returns the same frame object and matches fillna(0).
    def test_replace_inplace(self, datetime_frame, float_string_frame):
        datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan
        datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan
        tsframe = datetime_frame.copy()
        result = tsframe.replace(np.nan, 0, inplace=True)
        # in this pandas version an inplace replace returns the frame itself
        assert result is tsframe
        tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))
        # mixed type
        mf = float_string_frame
        mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan
        mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan
        result = float_string_frame.replace(np.nan, 0)
        expected = float_string_frame.copy()
        expected["foo"] = expected["foo"].astype(object)
        expected = expected.fillna(value=0)
        tm.assert_frame_equal(result, expected)
        tsframe = datetime_frame.copy()
        result = tsframe.replace([np.nan], [0], inplace=True)
        assert result is tsframe
        tm.assert_frame_equal(tsframe, datetime_frame.fillna(0))
    @pytest.mark.parametrize(
        "to_replace,values,expected",
        [
            # lists of regexes and values
            # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
            (
                [r"\s*\.\s*", r"e|f|g"],
                [np.nan, "crap"],
                {
                    "a": ["a", "b", np.nan, np.nan],
                    "b": ["crap"] * 3 + ["h"],
                    "c": ["h", "crap", "l", "o"],
                },
            ),
            # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
            (
                [r"\s*(\.)\s*", r"(e|f|g)"],
                [r"\1\1", r"\1_crap"],
                {
                    "a": ["a", "b", "..", ".."],
                    "b": ["e_crap", "f_crap", "g_crap", "h"],
                    "c": ["h", "e_crap", "l", "o"],
                },
            ),
            # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
            # or vN)]
            (
                [r"\s*(\.)\s*", r"e"],
                [r"\1\1", r"crap"],
                {
                    "a": ["a", "b", "..", ".."],
                    "b": ["crap", "f", "g", "h"],
                    "c": ["h", "crap", "l", "o"],
                },
            ),
        ],
    )
    @pytest.mark.parametrize("inplace", [True, False])
    @pytest.mark.parametrize("use_value_regex_args", [True, False])
    # List-of-regexes -> list-of-replacements on an all-object frame, exercising
    # both the positional (to_replace, value, regex=True) and keyword
    # (regex=..., value=...) calling conventions, with and without inplace.
    def test_regex_replace_list_obj(
        self, to_replace, values, expected, inplace, use_value_regex_args
    ):
        df = DataFrame({"a": list("ab.."), "b": list("efgh"), "c": list("helo")})
        if use_value_regex_args:
            result = df.replace(value=values, regex=to_replace, inplace=inplace)
        else:
            result = df.replace(to_replace, values, regex=True, inplace=inplace)
        if inplace:
            assert result is df
        expected = DataFrame(expected)
        tm.assert_frame_equal(result, expected)
    # Same list-of-regexes replacements, but on a mixed int/object frame so the
    # numeric column must pass through untouched.
    def test_regex_replace_list_mixed(self, mix_ab):
        # mixed frame to make sure this doesn't break things
        dfmix = DataFrame(mix_ab)
        # lists of regexes and values
        # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
        to_replace_res = [r"\s*\.\s*", r"a"]
        values = [np.nan, "crap"]
        mix2 = {"a": list(range(4)), "b": list("ab.."), "c": list("halo")}
        dfmix2 = DataFrame(mix2)
        res = dfmix2.replace(to_replace_res, values, regex=True)
        expec = DataFrame(
            {
                "a": mix2["a"],
                "b": ["crap", "b", np.nan, np.nan],
                "c": ["h", "crap", "l", "o"],
            }
        )
        tm.assert_frame_equal(res, expec)
        # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
        to_replace_res = [r"\s*(\.)\s*", r"(a|b)"]
        values = [r"\1\1", r"\1_crap"]
        res = dfmix.replace(to_replace_res, values, regex=True)
        expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]})
        tm.assert_frame_equal(res, expec)
        # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
        # or vN)]
        to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
        values = [r"\1\1", r"crap", r"\1_crap"]
        res = dfmix.replace(to_replace_res, values, regex=True)
        expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
        tm.assert_frame_equal(res, expec)
        to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
        values = [r"\1\1", r"crap", r"\1_crap"]
        res = dfmix.replace(regex=to_replace_res, value=values)
        expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
        tm.assert_frame_equal(res, expec)
    # Inplace variant of test_regex_replace_list_mixed; each call must return
    # the mutated frame itself.
    def test_regex_replace_list_mixed_inplace(self, mix_ab):
        dfmix = DataFrame(mix_ab)
        # the same inplace
        # lists of regexes and values
        # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
        to_replace_res = [r"\s*\.\s*", r"a"]
        values = [np.nan, "crap"]
        res = dfmix.copy()
        result = res.replace(to_replace_res, values, inplace=True, regex=True)
        assert result is res
        expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b", np.nan, np.nan]})
        tm.assert_frame_equal(res, expec)
        # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
        to_replace_res = [r"\s*(\.)\s*", r"(a|b)"]
        values = [r"\1\1", r"\1_crap"]
        res = dfmix.copy()
        result = res.replace(to_replace_res, values, inplace=True, regex=True)
        assert result is res
        expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]})
        tm.assert_frame_equal(res, expec)
        # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
        # or vN)]
        to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
        values = [r"\1\1", r"crap", r"\1_crap"]
        res = dfmix.copy()
        result = res.replace(to_replace_res, values, inplace=True, regex=True)
        assert result is res
        expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
        tm.assert_frame_equal(res, expec)
        to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"]
        values = [r"\1\1", r"crap", r"\1_crap"]
        res = dfmix.copy()
        result = res.replace(regex=to_replace_res, value=values, inplace=True)
        assert result is res
        expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]})
        tm.assert_frame_equal(res, expec)
    # Column-keyed dicts of regexes/values ({"b": pattern} -> {"b": value}),
    # including capture-group backreferences and scalar-regex -> dict value.
    def test_regex_replace_dict_mixed(self, mix_abc):
        dfmix = DataFrame(mix_abc)
        # dicts
        # single dict {re1: v1}, search the whole frame
        # need test for this...
        # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
        # frame
        res = dfmix.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True)
        res2 = dfmix.copy()
        result = res2.replace(
            {"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True
        )
        assert result is res2
        expec = DataFrame(
            {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]}
        )
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
        # list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
        # whole frame
        res = dfmix.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True)
        res2 = dfmix.copy()
        result = res2.replace(
            {"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True
        )
        assert result is res2
        expec = DataFrame(
            {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]}
        )
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
        res = dfmix.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"})
        res2 = dfmix.copy()
        result = res2.replace(
            regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True
        )
        assert result is res2
        expec = DataFrame(
            {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]}
        )
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
        # scalar -> dict
        # to_replace regex, {value: value}
        expec = DataFrame(
            {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]}
        )
        res = dfmix.replace("a", {"b": np.nan}, regex=True)
        res2 = dfmix.copy()
        result = res2.replace("a", {"b": np.nan}, regex=True, inplace=True)
        assert result is res2
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
        res = dfmix.replace("a", {"b": np.nan}, regex=True)
        res2 = dfmix.copy()
        result = res2.replace(regex="a", value={"b": np.nan}, inplace=True)
        assert result is res2
        expec = DataFrame(
            {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]}
        )
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
    # Nested dict form {"col": {pattern: value}} — per-column regex mapping,
    # both via to_replace and via the regex= keyword, with/without inplace.
    def test_regex_replace_dict_nested(self, mix_abc):
        # nested dicts will not work until this is implemented for Series
        dfmix = DataFrame(mix_abc)
        res = dfmix.replace({"b": {r"\s*\.\s*": np.nan}}, regex=True)
        res2 = dfmix.copy()
        res4 = dfmix.copy()
        result = res2.replace({"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True)
        assert result is res2
        res3 = dfmix.replace(regex={"b": {r"\s*\.\s*": np.nan}})
        result = res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True)
        assert result is res4
        expec = DataFrame(
            {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]}
        )
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
        tm.assert_frame_equal(res3, expec)
        tm.assert_frame_equal(res4, expec)
    # Regex replace must match anywhere in the string, not just at position 0.
    def test_regex_replace_dict_nested_non_first_character(
        self, any_string_dtype, using_infer_string
    ):
        # GH 25259
        dtype = any_string_dtype
        df = DataFrame({"first": ["abc", "bca", "cab"]}, dtype=dtype)
        result = df.replace({"a": "."}, regex=True)
        expected = DataFrame({"first": [".bc", "bc.", "c.b"]}, dtype=dtype)
        tm.assert_frame_equal(result, expected)
    # GH 4115: nested dict replace on an object column alongside an int column.
    def test_regex_replace_dict_nested_gh4115(self):
        df = DataFrame(
            {"Type": Series(["Q", "T", "Q", "Q", "T"], dtype=object), "tmp": 2}
        )
        expected = DataFrame({"Type": Series([0, 1, 0, 0, 1], dtype=object), "tmp": 2})
        result = df.replace({"Type": {"Q": 0, "T": 1}})
        tm.assert_frame_equal(result, expected)
    # A list of regexes all mapped to a single scalar replacement (NaN).
    def test_regex_replace_list_to_scalar(self, mix_abc):
        df = DataFrame(mix_abc)
        expec = DataFrame(
            {
                "a": mix_abc["a"],
                "b": Series([np.nan] * 4, dtype="str"),
                "c": [np.nan, np.nan, np.nan, "d"],
            }
        )
        res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True)
        res2 = df.copy()
        res3 = df.copy()
        result = res2.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True)
        assert result is res2
        result = res3.replace(regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True)
        assert result is res3
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
        tm.assert_frame_equal(res3, expec)
    # Regex match replaced by a numeric value inside object columns.
    def test_regex_replace_str_to_numeric(self, mix_abc):
        # what happens when you try to replace a numeric value with a regex?
        df = DataFrame(mix_abc)
        res = df.replace(r"\s*\.\s*", 0, regex=True)
        res2 = df.copy()
        result = res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True)
        assert result is res2
        res3 = df.copy()
        result = res3.replace(regex=r"\s*\.\s*", value=0, inplace=True)
        assert result is res3
        expec = DataFrame({"a": mix_abc["a"], "b": ["a", "b", 0, 0], "c": mix_abc["c"]})
        expec["c"] = expec["c"].astype(object)
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
        tm.assert_frame_equal(res3, expec)
    # A list of regexes mapped to a single numeric replacement.
    def test_regex_replace_regex_list_to_numeric(self, mix_abc):
        df = DataFrame(mix_abc)
        res = df.replace([r"\s*\.\s*", "b"], 0, regex=True)
        res2 = df.copy()
        result = res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True)
        assert result is res2
        res3 = df.copy()
        result = res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True)
        assert result is res3
        expec = DataFrame(
            {"a": mix_abc["a"], "b": ["a", 0, 0, 0], "c": ["a", 0, np.nan, "d"]}
        )
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
        tm.assert_frame_equal(res3, expec)
    # Series-valued to_replace/value pairs behave like the equivalent dicts.
    def test_regex_replace_series_of_regexes(self, mix_abc):
        df = DataFrame(mix_abc)
        s1 = Series({"b": r"\s*\.\s*"})
        s2 = Series({"b": np.nan})
        res = df.replace(s1, s2, regex=True)
        res2 = df.copy()
        result = res2.replace(s1, s2, inplace=True, regex=True)
        assert result is res2
        res3 = df.copy()
        result = res3.replace(regex=s1, value=s2, inplace=True)
        assert result is res3
        expec = DataFrame(
            {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]}
        )
        tm.assert_frame_equal(res, expec)
        tm.assert_frame_equal(res2, expec)
        tm.assert_frame_equal(res3, expec)
    # Replacing an int with a string upcasts the numeric column to object.
    def test_regex_replace_numeric_to_object_conversion(self, mix_abc):
        df = DataFrame(mix_abc)
        expec = DataFrame({"a": ["a", 1, 2, 3], "b": mix_abc["b"], "c": mix_abc["c"]})
        res = df.replace(0, "a")
        tm.assert_frame_equal(res, expec)
        assert res.a.dtype == np.object_
    @pytest.mark.parametrize(
        "to_replace", [{"": np.nan, ",": ""}, {",": "", "": np.nan}]
    )
    # GH-39338: a single regex= dict mixing plain substitutions with an
    # empty-string -> NaN mapping, in either key order.
    def test_joint_simple_replace_and_regex_replace(self, to_replace):
        # GH-39338
        df = DataFrame(
            {
                "col1": ["1,000", "a", "3"],
                "col2": ["a", "", "b"],
                "col3": ["a", "b", "c"],
            }
        )
        result = df.replace(regex=to_replace)
        expected = DataFrame(
            {
                "col1": ["1000", "a", "3"],
                "col2": ["a", np.nan, "b"],
                "col3": ["a", "b", "c"],
            }
        )
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("metachar", ["[]", "()", r"\d", r"\w", r"\s"])
    # Without regex=True, regex metacharacters are treated as literal strings.
    def test_replace_regex_metachar(self, metachar):
        df = DataFrame({"a": [metachar, "else"]})
        result = df.replace({"a": {metachar: "paren"}})
        expected = DataFrame({"a": ["paren", "else"]})
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "data,to_replace,expected",
        [
            (["xax", "xbx"], {"a": "c", "b": "d"}, ["xcx", "xdx"]),
            (["d", "", ""], {r"^\s*$": pd.NA}, ["d", pd.NA, pd.NA]),
        ],
    )
    # GH-41333, GH-35977: regex replace preserves the string dtype for both
    # DataFrame and Series across all string dtype variants.
    def test_regex_replace_string_types(
        self,
        data,
        to_replace,
        expected,
        frame_or_series,
        any_string_dtype,
        using_infer_string,
        request,
    ):
        # GH-41333, GH-35977
        dtype = any_string_dtype
        obj = frame_or_series(data, dtype=dtype)
        result = obj.replace(to_replace, regex=True)
        expected = frame_or_series(expected, dtype=dtype)
        tm.assert_equal(result, expected)
    # Basic scalar replace round-trip, empty frame no-op, and mixed
    # string/datetime columns (GH 11698).
    def test_replace(self, datetime_frame):
        datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan
        datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan
        zero_filled = datetime_frame.replace(np.nan, -1e8)
        tm.assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8))
        tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), datetime_frame)
        datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan
        datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan
        datetime_frame.loc[datetime_frame.index[:5], "B"] = -1e8
        # empty
        df = DataFrame(index=["a", "b"])
        tm.assert_frame_equal(df, df.replace(5, 7))
        # GH 11698
        # test for mixed data types.
        df = DataFrame(
            [("-", pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))]
        )
        df1 = df.replace("-", np.nan)
        expected_df = DataFrame(
            [(np.nan, pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))]
        )
        tm.assert_frame_equal(df1, expected_df)
def test_replace_list(self):
obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r".", r"e"]
values = [np.nan, "crap"]
res = dfobj.replace(to_replace_res, values)
expec = DataFrame(
{
"a": ["a", "b", np.nan, np.nan],
"b": ["crap", "f", "g", "h"],
"c": ["h", "crap", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r".", r"f"]
values = [r"..", r"crap"]
res = dfobj.replace(to_replace_res, values)
expec = DataFrame(
{
"a": ["a", "b", "..", ".."],
"b": ["e", "crap", "g", "h"],
"c": ["h", "e", "l", "o"],
}
)
tm.assert_frame_equal(res, expec)
    # GH 21977 / GH 19266: an empty to_replace list is a no-op, while
    # replacing NaN *with* a list raises because lengths cannot match.
    def test_replace_with_empty_list(self, frame_or_series):
        # GH 21977
        ser = Series([["a", "b"], [], np.nan, [1]])
        obj = DataFrame({"col": ser})
        obj = tm.get_obj(obj, frame_or_series)
        expected = obj
        result = obj.replace([], np.nan)
        tm.assert_equal(result, expected)
        # GH 19266
        msg = (
            "NumPy boolean array indexing assignment cannot assign {size} "
            "input values to the 1 output values where the mask is true"
        )
        with pytest.raises(ValueError, match=msg.format(size=0)):
            obj.replace({np.nan: []})
        with pytest.raises(ValueError, match=msg.format(size=2)):
            obj.replace({np.nan: ["dummy", "alt"]})
    # GH 3064: scalar to_replace with dict/Series value selects the
    # replacement per column.
    def test_replace_series_dict(self):
        # from GH 3064
        df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}})
        result = df.replace(0, {"zero": 0.5, "one": 1.0})
        expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 2.0, "b": 1.0}})
        tm.assert_frame_equal(result, expected)
        result = df.replace(0, df.mean())
        tm.assert_frame_equal(result, expected)
        # series to series/dict
        df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}})
        s = Series({"zero": 0.0, "one": 2.0})
        result = df.replace(s, {"zero": 0.5, "one": 1.0})
        expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 1.0, "b": 0.0}})
        tm.assert_frame_equal(result, expected)
        result = df.replace(s, df.mean())
        tm.assert_frame_equal(result, expected)
    # GH 3907: replacing strings with ints on a string-dtype frame yields
    # object dtype (pandas >= 3.0 no longer re-infers dtypes).
    def test_replace_convert(self, any_string_dtype):
        # gh 3907 (pandas >= 3.0 no longer converts dtypes)
        df = DataFrame(
            [["foo", "bar", "bah"], ["bar", "foo", "bah"]], dtype=any_string_dtype
        )
        m = {"foo": 1, "bar": 2, "bah": 3}
        rep = df.replace(m)
        assert (rep.dtypes == object).all()
    # NaN -> numeric scalar on a mixed float/string frame matches fillna,
    # and the replacement round-trips back to NaN.
    def test_replace_mixed(self, float_string_frame):
        mf = float_string_frame
        mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan
        mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan
        result = float_string_frame.replace(np.nan, -18)
        expected = float_string_frame.copy()
        expected["foo"] = expected["foo"].astype(object)
        expected = expected.fillna(value=-18)
        tm.assert_frame_equal(result, expected)
        expected2 = float_string_frame.copy()
        expected2["foo"] = expected2["foo"].astype(object)
        tm.assert_frame_equal(result.replace(-18, np.nan), expected2)
        result = float_string_frame.replace(np.nan, -1e8)
        expected = float_string_frame.copy()
        expected["foo"] = expected["foo"].astype(object)
        expected = expected.fillna(value=-1e8)
        tm.assert_frame_equal(result, expected)
        expected2 = float_string_frame.copy()
        expected2["foo"] = expected2["foo"].astype(object)
        tm.assert_frame_equal(result.replace(-1e8, np.nan), expected2)
    # Replacing an int with a float upcasts the int64 column to float64,
    # both out-of-place and inplace.
    def test_replace_mixed_int_block_upcasting(self):
        # int block upcasting
        df = DataFrame(
            {
                "A": Series([1.0, 2.0], dtype="float64"),
                "B": Series([0, 1], dtype="int64"),
            }
        )
        expected = DataFrame(
            {
                "A": Series([1.0, 2.0], dtype="float64"),
                "B": Series([0.5, 1], dtype="float64"),
            }
        )
        result = df.replace(0, 0.5)
        tm.assert_frame_equal(result, expected)
        result = df.replace(0, 0.5, inplace=True)
        assert result is df
        tm.assert_frame_equal(df, expected)
    # Only the int column containing the match upcasts; the other int
    # column keeps its dtype (block splitting).
    def test_replace_mixed_int_block_splitting(self):
        # int block splitting
        df = DataFrame(
            {
                "A": Series([1.0, 2.0], dtype="float64"),
                "B": Series([0, 1], dtype="int64"),
                "C": Series([1, 2], dtype="int64"),
            }
        )
        expected = DataFrame(
            {
                "A": Series([1.0, 2.0], dtype="float64"),
                "B": Series([0.5, 1], dtype="float64"),
                "C": Series([1, 2], dtype="int64"),
            }
        )
        result = df.replace(0, 0.5)
        tm.assert_frame_equal(result, expected)
    # Numeric -> string replacement upcasts affected columns to object.
    def test_replace_mixed2(self):
        # to object block upcasting
        df = DataFrame(
            {
                "A": Series([1.0, 2.0], dtype="float64"),
                "B": Series([0, 1], dtype="int64"),
            }
        )
        expected = DataFrame(
            {
                "A": Series([1, "foo"], dtype="object"),
                "B": Series([0, 1], dtype="int64"),
            }
        )
        result = df.replace(2, "foo")
        tm.assert_frame_equal(result, expected)
        expected = DataFrame(
            {
                "A": Series(["foo", "bar"], dtype="object"),
                "B": Series([0, "foo"], dtype="object"),
            }
        )
        result = df.replace([1, 2], ["foo", "bar"])
        tm.assert_frame_equal(result, expected)
    # Scalar to_replace with a per-column dict of float values upcasts
    # int columns to float64.
    def test_replace_mixed3(self):
        # test case from
        df = DataFrame(
            {"A": Series([3, 0], dtype="int64"), "B": Series([0, 3], dtype="int64")}
        )
        result = df.replace(3, df.mean().to_dict())
        expected = df.copy().astype("float64")
        m = df.mean()
        expected.iloc[0, 0] = m.iloc[0]
        expected.iloc[1, 1] = m.iloc[1]
        tm.assert_frame_equal(result, expected)
    # GH#25438: a no-match string replace must not cast an Int64 column.
    def test_replace_nullable_int_with_string_doesnt_cast(self):
        # GH#25438 don't cast df['a'] to float64
        df = DataFrame({"a": [1, 2, 3, pd.NA], "b": ["some", "strings", "here", "he"]})
        df["a"] = df["a"].astype("Int64")
        res = df.replace("", np.nan)
        tm.assert_series_equal(res["a"], df["a"])
    @pytest.mark.parametrize("dtype", ["boolean", "Int64", "Float64"])
    # GH-44499: replacing in a sibling object column leaves a nullable
    # extension column untouched.
    def test_replace_with_nullable_column(self, dtype):
        # GH-44499
        nullable_ser = Series([1, 0, 1], dtype=dtype)
        df = DataFrame({"A": ["A", "B", "x"], "B": nullable_ser})
        result = df.replace("x", "X")
        expected = DataFrame({"A": ["A", "B", "X"], "B": nullable_ser})
        tm.assert_frame_equal(result, expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({"col": range(1, 5)})
expected = DataFrame({"col": ["a", 2, 3, "b"]})
result = df.replace({"col": {1: "a", 4: "b"}})
tm.assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: "a", 4: "b"})
tm.assert_frame_equal(expected, result)
    # Keys absent from the column are silently ignored, flat or nested.
    def test_replace_simple_nested_dict_with_nonexistent_value(self):
        df = DataFrame({"col": range(1, 5)})
        expected = DataFrame({"col": ["a", 2, 3, "b"]})
        result = df.replace({-1: "-", 1: "a", 4: "b"})
        tm.assert_frame_equal(expected, result)
        result = df.replace({"col": {-1: "-", 1: "a", 4: "b"}})
        tm.assert_frame_equal(expected, result)
    # gh-45601: pd.NA -> None converts the Int64 column to object.
    def test_replace_NA_with_None(self):
        # gh-45601
        df = DataFrame({"value": [42, pd.NA]}, dtype="Int64")
        result = df.replace({pd.NA: None})
        expected = DataFrame({"value": [42, None]}, dtype=object)
        tm.assert_frame_equal(result, expected)
    # gh-45836: NaT -> None on a datetime-like frame.
    def test_replace_NAT_with_None(self):
        # gh-45836
        df = DataFrame([pd.NaT, pd.NaT])
        result = df.replace({pd.NaT: None, np.nan: None})
        expected = DataFrame([None, None])
        tm.assert_frame_equal(result, expected)
    # gh-46634: replacing with None must not drop the categorical dtype of
    # an unaffected column.
    def test_replace_with_None_keeps_categorical(self):
        # gh-46634
        cat_series = Series(["b", "b", "b", "d"], dtype="category")
        df = DataFrame(
            {
                "id": Series([5, 4, 3, 2], dtype="float64"),
                "col": cat_series,
            }
        )
        result = df.replace({3: None})
        expected = DataFrame(
            {
                "id": Series([5.0, 4.0, None, 2.0], dtype="object"),
                "col": cat_series,
            }
        )
        tm.assert_frame_equal(result, expected)
    # GH#60688: regex replace must not fail on a column that is entirely NA.
    def test_replace_all_NA(self):
        # GH#60688
        df = DataFrame({"ticker": ["#1234#"], "name": [None]})
        result = df.replace({col: {r"^#": "$"} for col in df.columns}, regex=True)
        expected = DataFrame({"ticker": ["$1234#"], "name": [None]})
        tm.assert_frame_equal(result, expected)
    # Dict to_replace with value=None (dict carries the replacements);
    # original fixture values are restored at the end since the fixture is mutated.
    def test_replace_value_is_none(self, datetime_frame):
        orig_value = datetime_frame.iloc[0, 0]
        orig2 = datetime_frame.iloc[1, 0]
        datetime_frame.iloc[0, 0] = np.nan
        datetime_frame.iloc[1, 0] = 1
        result = datetime_frame.replace(to_replace={np.nan: 0})
        expected = datetime_frame.T.replace(to_replace={np.nan: 0}).T
        tm.assert_frame_equal(result, expected)
        result = datetime_frame.replace(to_replace={np.nan: 0, 1: -1e8})
        tsframe = datetime_frame.copy()
        tsframe.iloc[0, 0] = 0
        tsframe.iloc[1, 0] = -1e8
        expected = tsframe
        tm.assert_frame_equal(expected, result)
        datetime_frame.iloc[0, 0] = orig_value
        datetime_frame.iloc[1, 0] = orig2
    # Same NaN round-trip as test_replace but on a float32 frame.
    def test_replace_for_new_dtypes(self, datetime_frame):
        # dtypes
        tsframe = datetime_frame.copy().astype(np.float32)
        tsframe.loc[tsframe.index[:5], "A"] = np.nan
        tsframe.loc[tsframe.index[-5:], "A"] = np.nan
        zero_filled = tsframe.replace(np.nan, -1e8)
        tm.assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
        tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe)
        tsframe.loc[tsframe.index[:5], "A"] = np.nan
        tsframe.loc[tsframe.index[-5:], "A"] = np.nan
        tsframe.loc[tsframe.index[:5], "B"] = np.nan
    @pytest.mark.parametrize(
        "frame, to_replace, value, expected",
        [
            (DataFrame({"ints": [1, 2, 3]}), 1, 0, DataFrame({"ints": [0, 2, 3]})),
            (
                DataFrame({"ints": [1, 2, 3]}, dtype=np.int32),
                1,
                0,
                DataFrame({"ints": [0, 2, 3]}, dtype=np.int32),
            ),
            (
                DataFrame({"ints": [1, 2, 3]}, dtype=np.int16),
                1,
                0,
                DataFrame({"ints": [0, 2, 3]}, dtype=np.int16),
            ),
            (
                DataFrame({"bools": [True, False, True]}),
                False,
                True,
                DataFrame({"bools": [True, True, True]}),
            ),
            (
                DataFrame({"complex": [1j, 2j, 3j]}),
                1j,
                0,
                DataFrame({"complex": [0j, 2j, 3j]}),
            ),
            (
                DataFrame(
                    {
                        "datetime64": Index(
                            [
                                datetime(2018, 5, 28),
                                datetime(2018, 7, 28),
                                datetime(2018, 5, 28),
                            ]
                        )
                    }
                ),
                datetime(2018, 5, 28),
                datetime(2018, 7, 28),
                DataFrame({"datetime64": Index([datetime(2018, 7, 28)] * 3)}),
            ),
            # GH 20380
            (
                DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["foo"]}),
                "foo",
                "bar",
                DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["bar"]}),
            ),
            (
                DataFrame(
                    {
                        "A": date_range(
                            "20130101", periods=3, tz="US/Eastern", unit="ns"
                        ),
                        "B": [0, np.nan, 2],
                    }
                ),
                Timestamp("20130102", tz="US/Eastern"),
                Timestamp("20130104", tz="US/Eastern"),
                DataFrame(
                    {
                        "A": pd.DatetimeIndex(
                            [
                                Timestamp("20130101", tz="US/Eastern"),
                                Timestamp("20130104", tz="US/Eastern"),
                                Timestamp("20130103", tz="US/Eastern"),
                            ]
                        ).as_unit("ns"),
                        "B": [0, np.nan, 2],
                    }
                ),
            ),
            # GH 35376
            (
                DataFrame([[1, 1.0], [2, 2.0]]),
                1.0,
                5,
                DataFrame([[5, 5.0], [2, 2.0]]),
            ),
            (
                DataFrame([[1, 1.0], [2, 2.0]]),
                1,
                5,
                DataFrame([[5, 5.0], [2, 2.0]]),
            ),
            (
                DataFrame([[1, 1.0], [2, 2.0]]),
                1.0,
                5.0,
                DataFrame([[5, 5.0], [2, 2.0]]),
            ),
            (
                DataFrame([[1, 1.0], [2, 2.0]]),
                1,
                5.0,
                DataFrame([[5, 5.0], [2, 2.0]]),
            ),
        ],
    )
    # Dtype preservation matrix for scalar replace across int sizes, bool,
    # complex, datetime64 (naive and tz-aware), and mixed int/float frames.
    def test_replace_dtypes(self, frame, to_replace, value, expected):
        result = frame.replace(to_replace, value)
        tm.assert_frame_equal(result, expected)
def test_replace_input_formats_listlike(self):
# both dicts
to_rep = {"A": np.nan, "B": 0, "C": ""}
values = {"A": 0, "B": -1, "C": "missing"}
df = DataFrame(
{"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}
)
filled = df.replace(to_rep, values)
expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()}
tm.assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame(
{"A": [np.nan, 5, np.inf], "B": [5, 2, 0], "C": ["", "asdf", "fd"]}
)
tm.assert_frame_equal(result, expected)
# scalar to dict
values = {"A": 0, "B": -1, "C": "missing"}
df = DataFrame(
{"A": [np.nan, 0, np.nan], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}
)
filled = df.replace(np.nan, values)
expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()}
tm.assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, ""]
values = [-2, -1, "missing"]
result = df.replace(to_rep, values)
expected = df.copy()
for rep, value in zip(to_rep, values, strict=True):
result = expected.replace(rep, value, inplace=True)
assert result is expected
tm.assert_frame_equal(result, expected)
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
df.replace(to_rep, values[1:])
def test_replace_input_formats_scalar(self):
df = DataFrame(
{"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}
)
# dict to scalar
to_rep = {"A": np.nan, "B": 0, "C": ""}
filled = df.replace(to_rep, 0)
expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()}
tm.assert_frame_equal(filled, DataFrame(expected))
msg = "value argument must be scalar, dict, or Series"
with pytest.raises(TypeError, match=msg):
df.replace(to_rep, [np.nan, 0, ""])
# list to scalar
to_rep = [np.nan, 0, ""]
result = df.replace(to_rep, -1)
expected = df.copy()
for rep in to_rep:
result = expected.replace(rep, -1, inplace=True)
assert result is expected
tm.assert_frame_equal(result, expected)
    # Placeholder: behavior of the `limit` argument is not covered yet.
    def test_replace_limit(self):
        # TODO
        pass
    # Dict of string->int replacements on a string-dtype Series yields
    # object dtype; no regex involved.
    def test_replace_dict_no_regex(self, any_string_dtype):
        answer = Series(
            {
                0: "Strongly Agree",
                1: "Agree",
                2: "Neutral",
                3: "Disagree",
                4: "Strongly Disagree",
            },
            dtype=any_string_dtype,
        )
        weights = {
            "Agree": 4,
            "Disagree": 2,
            "Neutral": 3,
            "Strongly Agree": 5,
            "Strongly Disagree": 1,
        }
        expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}, dtype=object)
        result = answer.replace(weights)
        tm.assert_series_equal(result, expected)
    # Same as test_replace_dict_no_regex but the mapping is a Series.
    def test_replace_series_no_regex(self, any_string_dtype):
        answer = Series(
            {
                0: "Strongly Agree",
                1: "Agree",
                2: "Neutral",
                3: "Disagree",
                4: "Strongly Disagree",
            },
            dtype=any_string_dtype,
        )
        weights = Series(
            {
                "Agree": 4,
                "Disagree": 2,
                "Neutral": 3,
                "Strongly Agree": 5,
                "Strongly Disagree": 1,
            }
        )
        expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}, dtype=object)
        result = answer.replace(weights)
        tm.assert_series_equal(result, expected)
    # dict, tuple, and list to_replace forms must produce the same result.
    def test_replace_dict_tuple_list_ordering_remains_the_same(self):
        df = DataFrame({"A": [np.nan, 1]})
        res1 = df.replace(to_replace={np.nan: 0, 1: -1e8})
        res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0])
        res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0])
        expected = DataFrame({"A": [0, -1e8]})
        tm.assert_frame_equal(res1, res2)
        tm.assert_frame_equal(res2, res3)
        tm.assert_frame_equal(res3, expected)
    # Without regex=True a regex-looking key is treated literally, so
    # nothing matches and the frame is unchanged.
    def test_replace_doesnt_replace_without_regex(self):
        df = DataFrame(
            {
                "fol": [1, 2, 2, 3],
                "T_opp": ["0", "vr", "0", "0"],
                "T_Dir": ["0", "0", "0", "bt"],
                "T_Enh": ["vo", "0", "0", "0"],
            }
        )
        res = df.replace({r"\D": 1})
        tm.assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({"a": [True, False], "b": list("ab")})
result = df.replace(True, "a")
expected = DataFrame({"a": ["a", False], "b": df.b})
tm.assert_frame_equal(result, expected)
    # Replacing a string absent from a pure-bool frame is a no-op.
    def test_replace_pure_bool_with_string_no_op(self):
        df = DataFrame(np.random.default_rng(2).random((2, 2)) > 0.5)
        result = df.replace("asdf", "fdsa")
        tm.assert_frame_equal(df, result)
    # bool -> bool replacement keeps the bool dtype.
    def test_replace_bool_with_bool(self):
        df = DataFrame(np.random.default_rng(2).random((2, 2)) > 0.5)
        result = df.replace(False, True)
        expected = DataFrame(np.ones((2, 2), dtype=bool))
        tm.assert_frame_equal(result, expected)
    # A dict mixing an unmatched string key with a bool key applies only
    # the bool mapping.
    def test_replace_with_dict_with_bool_keys(self):
        df = DataFrame({0: [True, False], 1: [False, True]})
        result = df.replace({"asdf": "asdb", True: "yes"})
        expected = DataFrame({0: ["yes", False], 1: [False, "yes"]})
        tm.assert_frame_equal(result, expected)
    # GH#34789: string keys never match int data, for frame and column.
    def test_replace_dict_strings_vs_ints(self):
        # GH#34789
        df = DataFrame({"Y0": [1, 2], "Y1": [3, 4]})
        result = df.replace({"replace_string": "test"})
        tm.assert_frame_equal(result, df)
        result = df["Y0"].replace({"replace_string": "test"})
        tm.assert_series_equal(result, df["Y0"])
    # Replacing +/-inf in a bool frame is a no-op (no coercion of True).
    def test_replace_truthy(self):
        df = DataFrame({"a": [True, True]})
        r = df.replace([np.inf, -np.inf], np.nan)
        e = df
        tm.assert_frame_equal(r, e)
    # GH 27660: nested {"a": mapping} and flat mapping with overlapping
    # int keys must behave identically (no chained replacements).
    def test_nested_dict_overlapping_keys_replace_int(self):
        # GH 27660 keep behaviour consistent for simple dictionary and
        # nested dictionary replacement
        df = DataFrame({"a": list(range(1, 5))})
        result = df.replace({"a": dict(zip(range(1, 5), range(2, 6), strict=True))})
        expected = df.replace(dict(zip(range(1, 5), range(2, 6), strict=True)))
        tm.assert_frame_equal(result, expected)
    # GH 27660: same overlap consistency check with string keys.
    def test_nested_dict_overlapping_keys_replace_str(self):
        # GH 27660
        a = np.arange(1, 5)
        astr = a.astype(str)
        bstr = np.arange(2, 6).astype(str)
        df = DataFrame({"a": astr})
        result = df.replace(dict(zip(astr, bstr, strict=True)))
        expected = df.replace({"a": dict(zip(astr, bstr, strict=True))})
        tm.assert_frame_equal(result, expected)
def test_replace_swapping_bug(self):
df = DataFrame({"a": [True, False, True]})
res = df.replace({"a": {True: "Y", False: "N"}})
expect = DataFrame({"a": ["Y", "N", "Y"]}, dtype=object)
tm.assert_frame_equal(res, expect)
df = DataFrame({"a": [0, 1, 0]})
res = df.replace({"a": {0: "Y", 1: "N"}})
expect = DataFrame({"a": ["Y", "N", "Y"]}, dtype=object)
tm.assert_frame_equal(res, expect)
def test_replace_datetimetz(self):
# GH 11326
# behaving poorly when presented with a datetime64[ns, tz]
df = DataFrame(
{
"A": date_range("20130101", periods=3, tz="US/Eastern", unit="ns"),
"B": [0, np.nan, 2],
}
)
result = df.replace(np.nan, 1)
expected = DataFrame(
{
"A": date_range("20130101", periods=3, tz="US/Eastern", unit="ns"),
"B": Series([0, 1, 2], dtype="float64"),
}
)
tm.assert_frame_equal(result, expected)
result = df.fillna(1)
tm.assert_frame_equal(result, expected)
result = df.replace(0, np.nan)
expected = DataFrame(
{
"A": date_range("20130101", periods=3, tz="US/Eastern", unit="ns"),
"B": [np.nan, np.nan, 2],
}
)
tm.assert_frame_equal(result, expected)
result = df.replace(
Timestamp("20130102", tz="US/Eastern"),
Timestamp("20130104", tz="US/Eastern"),
)
expected = DataFrame(
{
"A": [
Timestamp("20130101", tz="US/Eastern"),
Timestamp("20130104", tz="US/Eastern"),
Timestamp("20130103", tz="US/Eastern"),
],
"B": [0, np.nan, 2],
}
)
expected["A"] = expected["A"].dt.as_unit("ns")
tm.assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Eastern"))
tm.assert_frame_equal(result, expected)
# pre-2.0 this would coerce to object with mismatched tzs
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific"))
expected = DataFrame(
{
"A": [
Timestamp("20130101", tz="US/Eastern"),
Timestamp("20130104", tz="US/Pacific").tz_convert("US/Eastern"),
Timestamp("20130103", tz="US/Eastern"),
],
"B": [0, np.nan, 2],
}
)
expected["A"] = expected["A"].dt.as_unit("ns")
tm.assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({"A": np.nan}, Timestamp("20130104"))
expected = DataFrame(
{
"A": [
Timestamp("20130101", tz="US/Eastern"),
Timestamp("20130104"),
Timestamp("20130103", tz="US/Eastern"),
],
"B": [0, np.nan, 2],
}
)
tm.assert_frame_equal(result, expected)
def test_replace_with_empty_dictlike(self, mix_abc):
# GH 15289
df = DataFrame(mix_abc)
tm.assert_frame_equal(df, df.replace({}))
tm.assert_frame_equal(df, df.replace(Series([], dtype=object)))
tm.assert_frame_equal(df, df.replace({"b": {}}))
tm.assert_frame_equal(df, df.replace(Series({"b": {}})))
@pytest.mark.parametrize(
"df, to_replace, exp",
[
(
{"col1": [1, 2, 3], "col2": [4, 5, 6]},
{4: 5, 5: 6, 6: 7},
{"col1": [1, 2, 3], "col2": [5, 6, 7]},
),
(
{"col1": [1, 2, 3], "col2": ["4", "5", "6"]},
{"4": "5", "5": "6", "6": "7"},
{"col1": [1, 2, 3], "col2": ["5", "6", "7"]},
),
],
)
def test_replace_commutative(self, df, to_replace, exp):
# GH 16051
# DataFrame.replace() overwrites when values are non-numeric
# also added to data frame whilst issue was for series
df = DataFrame(df)
expected = DataFrame(exp)
result = df.replace(to_replace)
tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "replacer",
        [
            Timestamp("20170827"),
            np.int8(1),
            np.int16(1),
            np.float32(1),
            np.float64(1),
        ],
    )
    def test_replace_replacer_dtype(self, replacer):
        # GH26632
        # the replacement value's dtype must not upcast an object column
        df = DataFrame(["a"], dtype=object)
        result = df.replace({"a": replacer, "b": replacer})
        expected = DataFrame([replacer], dtype=object)
        tm.assert_frame_equal(result, expected)
    def test_replace_after_convert_dtypes(self):
        # GH31517
        # replacing inside a nullable Int64 column must preserve the dtype
        df = DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64")
        result = df.replace(1, 10)
        expected = DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64")
        tm.assert_frame_equal(result, expected)
    def test_replace_invalid_to_replace(self):
        # GH 18634
        # API: replace() should raise an exception if invalid argument is given
        df = DataFrame({"one": ["a", "b ", "c"], "two": ["d ", "e ", "f "]})
        msg = (
            r"Expecting 'to_replace' to be either a scalar, array-like, "
            r"dict or None, got invalid type.*"
        )
        with pytest.raises(TypeError, match=msg):
            df.replace(lambda x: x.strip())
    @pytest.mark.parametrize("dtype", ["float", "float64", "int64", "Int64", "boolean"])
    @pytest.mark.parametrize("value", [np.nan, pd.NA])
    def test_replace_no_replacement_dtypes(self, dtype, value):
        # https://github.com/pandas-dev/pandas/issues/32988
        # replacing values absent from the frame must not change the dtype
        df = DataFrame(np.eye(2), dtype=dtype)
        result = df.replace(to_replace=[None, -np.inf, np.inf], value=value)
        tm.assert_frame_equal(result, df)
    @pytest.mark.parametrize("replacement", [np.nan, 5])
    def test_replace_with_duplicate_columns(self, replacement):
        # GH 24798
        # replacing through a single column must not touch the duplicated "A"s
        result = DataFrame({"A": [1, 2, 3], "A1": [4, 5, 6], "B": [7, 8, 9]})
        result.columns = list("AAB")
        expected = DataFrame(
            {"A": [1, 2, 3], "A1": [4, 5, 6], "B": [replacement, 8, 9]}
        )
        expected.columns = list("AAB")
        result["B"] = result["B"].replace(7, replacement)
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("value", [pd.Period("2020-01"), pd.Interval(0, 5)])
    def test_replace_ea_ignore_float(self, frame_or_series, value):
        # GH#34871
        # replacing a float in a Period/Interval (EA) column must be a no-op
        obj = DataFrame({"Per": [value] * 3})
        obj = tm.get_obj(obj, frame_or_series)
        expected = obj.copy()
        result = obj.replace(1.0, 0.0)
        tm.assert_equal(expected, result)
    @pytest.mark.parametrize(
        "replace_dict, final_data",
        [({"a": 1, "b": 1}, [[2, 2], [2, 2]]), ({"a": 1, "b": 2}, [[2, 1], [2, 2]])],
    )
    def test_categorical_replace_with_dict(self, replace_dict, final_data):
        # GH 26988
        # replacement on categorical columns must keep the category dtype
        df = DataFrame([[1, 1], [2, 2]], columns=["a", "b"], dtype="category")
        final_data = np.array(final_data)
        a = pd.Categorical(final_data[:, 0], categories=[1, 2])
        b = pd.Categorical(final_data[:, 1], categories=[1, 2])
        expected = DataFrame({"a": a, "b": b})
        result = df.replace(replace_dict, 2)
        tm.assert_frame_equal(result, expected)
        msg = r"DataFrame.iloc\[:, 0\] \(column name=\"a\"\) are " "different"
        with pytest.raises(AssertionError, match=msg):
            # ensure non-inplace call does not affect original
            tm.assert_frame_equal(df, expected)
        # NOTE(review): this build expects inplace=True to return the frame itself
        result = df.replace(replace_dict, 2, inplace=True)
        assert result is df
        tm.assert_frame_equal(df, expected)
    def test_replace_value_category_type(self):
        """
        Test for #23305: to ensure category dtypes are maintained
        after replace with direct values
        """
        # create input data
        input_dict = {
            "col1": [1, 2, 3, 4],
            "col2": ["a", "b", "c", "d"],
            "col3": [1.5, 2.5, 3.5, 4.5],
            "col4": ["cat1", "cat2", "cat3", "cat4"],
            "col5": ["obj1", "obj2", "obj3", "obj4"],
        }
        # explicitly cast columns as category and order them
        input_df = DataFrame(data=input_dict).astype(
            {"col2": "category", "col4": "category"}
        )
        input_df["col2"] = input_df["col2"].cat.reorder_categories(
            ["a", "b", "c", "d"], ordered=True
        )
        input_df["col4"] = input_df["col4"].cat.reorder_categories(
            ["cat1", "cat2", "cat3", "cat4"], ordered=True
        )
        # create expected dataframe
        expected_dict = {
            "col1": [1, 2, 3, 4],
            "col2": ["a", "b", "c", "z"],
            "col3": [1.5, 2.5, 3.5, 4.5],
            "col4": ["cat1", "catX", "cat3", "cat4"],
            "col5": ["obj9", "obj2", "obj3", "obj4"],
        }
        # explicitly cast columns as category and order them
        expected = DataFrame(data=expected_dict).astype(
            {"col2": "category", "col4": "category"}
        )
        expected["col2"] = expected["col2"].cat.reorder_categories(
            ["a", "b", "c", "z"], ordered=True
        )
        expected["col4"] = expected["col4"].cat.reorder_categories(
            ["cat1", "catX", "cat3", "cat4"], ordered=True
        )
        # replace values in input dataframe
        # NOTE(review): replacement is done via Categorical.rename_categories
        # so the category dtype/ordering is preserved; each apply() first casts
        # every column to category
        input_df = input_df.apply(
            lambda x: x.astype("category").cat.rename_categories({"d": "z"})
        )
        input_df = input_df.apply(
            lambda x: x.astype("category").cat.rename_categories({"obj1": "obj9"})
        )
        result = input_df.apply(
            lambda x: x.astype("category").cat.rename_categories({"cat2": "catX"})
        )
        # restore the originally non-categorical columns to their dtypes
        result = result.astype({"col1": "int64", "col3": "float64", "col5": "str"})
        tm.assert_frame_equal(result, expected)
    def test_replace_dict_category_type(self):
        """
        Test to ensure category dtypes are maintained
        after replace with dict values
        """
        # GH#35268, GH#44940
        # create input dataframe
        input_dict = {"col1": ["a"], "col2": ["obj1"], "col3": ["cat1"]}
        # explicitly cast columns as category
        input_df = DataFrame(data=input_dict).astype(
            {"col1": "category", "col2": "category", "col3": "category"}
        )
        # create expected dataframe
        expected_dict = {"col1": ["z"], "col2": ["obj9"], "col3": ["catX"]}
        # explicitly cast columns as category
        expected = DataFrame(data=expected_dict).astype(
            {"col1": "category", "col2": "category", "col3": "category"}
        )
        # replace values in input dataframe using a dict
        result = input_df.apply(
            lambda x: x.cat.rename_categories(
                {"a": "z", "obj1": "obj9", "cat1": "catX"}
            )
        )
        tm.assert_frame_equal(result, expected)
    def test_replace_with_compiled_regex(self):
        # https://github.com/pandas-dev/pandas/issues/35680
        # a pre-compiled re.Pattern must work as a dict key with regex=True
        df = DataFrame(["a", "b", "c"])
        regex = re.compile("^a$")
        result = df.replace({regex: "z"}, regex=True)
        expected = DataFrame(["z", "b", "c"])
        tm.assert_frame_equal(result, expected)
    def test_replace_intervals(self):
        # https://github.com/pandas-dev/pandas/issues/35931
        # an Interval scalar must be matchable inside a nested replace dict
        df = DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]})
        result = df.replace({"a": {pd.Interval(0, 1): "x"}})
        expected = DataFrame({"a": ["x", "x"]}, dtype=object)
        tm.assert_frame_equal(result, expected)
    def test_replace_unicode(self):
        # GH: 16784
        # non-ASCII keys that match nothing in a float column: a no-op,
        # must not raise
        columns_values_map = {"positive": {"正面": 1, "中立": 1, "负面": 0}}
        df1 = DataFrame({"positive": np.ones(3)})
        result = df1.replace(columns_values_map)
        expected = DataFrame({"positive": np.ones(3)})
        tm.assert_frame_equal(result, expected)
    def test_replace_bytes(self, frame_or_series):
        # GH#38900
        # replacing None in a bytes ("|S") dtype object must be a no-op
        obj = frame_or_series(["o"]).astype("|S")
        expected = obj.copy()
        obj = obj.replace({None: np.nan})
        tm.assert_equal(obj, expected)
    @pytest.mark.parametrize(
        "data, to_replace, value, expected",
        [
            ([1], [1.0], [0], [0]),
            ([1], [1], [0], [0]),
            ([1.0], [1.0], [0], [0.0]),
            ([1.0], [1], [0], [0.0]),
        ],
    )
    @pytest.mark.parametrize("box", [list, tuple, np.array])
    def test_replace_list_with_mixed_type(
        self, data, to_replace, value, expected, box, frame_or_series
    ):
        # GH#40371
        # int/float equivalence must hold regardless of the list-like container
        obj = frame_or_series(data)
        expected = frame_or_series(expected)
        result = obj.replace(box(to_replace), value)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize("val", [2, np.nan, 2.0])
    def test_replace_value_none_dtype_numeric(self, val):
        # GH#48231
        # replacing with value=None must upcast the numeric column to object
        df = DataFrame({"a": [1, val]})
        result = df.replace(val, None)
        expected = DataFrame({"a": [1, None]}, dtype=object)
        tm.assert_frame_equal(result, expected)
        df = DataFrame({"a": [1, val]})
        result = df.replace({val: None})
        tm.assert_frame_equal(result, expected)
    def test_replace_with_nil_na(self):
        # GH 32075
        # the literal string "nil" must not be treated as a missing value
        ser = DataFrame({"a": ["nil", pd.NA]})
        expected = DataFrame({"a": ["anything else", pd.NA]}, index=[0, 1])
        result = ser.replace("nil", "anything else")
        tm.assert_frame_equal(expected, result)
    @pytest.mark.parametrize(
        "dtype",
        [
            "Float64",
            pytest.param("float64[pyarrow]", marks=td.skip_if_no("pyarrow")),
        ],
    )
    def test_replace_na_to_nan_nullable_floats(self, dtype, using_nan_is_na):
        # GH#55127
        # NOTE(review): under nan_is_na, pd.NA -> np.nan is a no-op, so the
        # result itself doubles as the expected frame
        df = DataFrame({0: [1, np.nan, 1], 1: Series([0, pd.NA, 1], dtype=dtype)})
        result = df.replace(pd.NA, np.nan)
        if using_nan_is_na:
            expected = result
        else:
            expected = DataFrame(
                {0: [1, np.nan, 1], 1: Series([0, np.nan, 1], dtype=dtype)}
            )
            # sanity check that construction really produced NaN, not NA
            assert np.isnan(expected.loc[1, 1])
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "dtype",
        [
            "Int64",
            pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow")),
        ],
    )
    def test_replace_nan_nullable_ints(self, dtype, using_nan_is_na):
        # GH#51237 with nan_is_na=False, replacing NaN should be a no-op here
        # NOTE(review): exercises a Series despite living in the frame test
        # module -- presumably kept next to the related GH#55127 test above
        ser = Series([1, 2, None], dtype=dtype)
        result = ser.replace(np.nan, -1)
        if using_nan_is_na:
            # np.nan is equivalent to pd.NA here
            expected = Series([1, 2, -1], dtype=dtype)
        else:
            expected = ser
        tm.assert_series_equal(result, expected)
class TestDataFrameReplaceRegex:
    # Regex-based DataFrame.replace(): the pattern may be given as a string or
    # a pre-compiled re.Pattern, positionally or through the regex= keyword.
    @pytest.mark.parametrize(
        "data",
        [
            {"a": list("ab.."), "b": list("efgh")},
            {"a": list("ab.."), "b": list(range(4))},
        ],
    )
    @pytest.mark.parametrize(
        "to_replace,value", [(r"\s*\.\s*", np.nan), (r"\s*(\.)\s*", r"\1\1\1")]
    )
    @pytest.mark.parametrize("compile_regex", [True, False])
    @pytest.mark.parametrize("regex_kwarg", [True, False])
    @pytest.mark.parametrize("inplace", [True, False])
    def test_regex_replace_scalar(
        self, data, to_replace, value, compile_regex, regex_kwarg, inplace
    ):
        df = DataFrame(data)
        expected = df.copy()
        if compile_regex:
            to_replace = re.compile(to_replace)
        if regex_kwarg:
            # pass the pattern via regex= instead of positionally
            regex = to_replace
            to_replace = None
        else:
            regex = True
        result = df.replace(to_replace, value, inplace=inplace, regex=regex)
        if inplace:
            # NOTE(review): this build expects inplace=True to return the frame
            assert result is df
            result = df
        if value is np.nan:
            expected_replace_val = np.nan
        else:
            # the group-reference value r"\1\1\1" expands "." to "..."
            expected_replace_val = "..."
        expected.loc[expected["a"] == ".", "a"] = expected_replace_val
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("regex", [False, True])
    @pytest.mark.parametrize("value", [1, "1"])
    def test_replace_regex_dtype_frame(self, regex, value):
        # GH-48644
        df1 = DataFrame({"A": ["0"], "B": ["0"]})
        # When value is an integer, coerce result to object.
        # When value is a string, infer the correct string dtype.
        dtype = object if value == 1 else None
        expected_df1 = DataFrame({"A": [value], "B": [value]}, dtype=dtype)
        result_df1 = df1.replace(to_replace="0", value=value, regex=regex)
        tm.assert_frame_equal(result_df1, expected_df1)
        df2 = DataFrame({"A": ["0"], "B": ["1"]})
        if regex:
            expected_df2 = DataFrame({"A": [value], "B": ["1"]}, dtype=dtype)
        else:
            expected_df2 = DataFrame({"A": Series([value], dtype=dtype), "B": ["1"]})
        result_df2 = df2.replace(to_replace="0", value=value, regex=regex)
        tm.assert_frame_equal(result_df2, expected_df2)
    def test_replace_with_value_also_being_replaced(self):
        # GH46306
        # 0 -> 1 and 1 -> NaN must not cascade (0 must not end up as NaN)
        df = DataFrame({"A": [0, 1, 2], "B": [1, 0, 2]})
        result = df.replace({0: 1, 1: np.nan})
        expected = DataFrame({"A": [1, np.nan, 2], "B": [np.nan, 1, 2]})
        tm.assert_frame_equal(result, expected)
    def test_replace_categorical_no_replacement(self):
        # GH#46672
        # list-like to_replace matching nothing must keep the category dtype
        df = DataFrame(
            {
                "a": ["one", "two", None, "three"],
                "b": ["one", None, "two", "three"],
            },
            dtype="category",
        )
        expected = df.copy()
        result = df.replace(to_replace=[".", "def"], value=["_", None])
        tm.assert_frame_equal(result, expected)
    def test_replace_object_splitting(self, using_infer_string):
        # GH#53977
        # regex replace must not split the object block into per-column blocks
        df = DataFrame({"a": ["a"], "b": "b"})
        if using_infer_string:
            assert len(df._mgr.blocks) == 2
        else:
            assert len(df._mgr.blocks) == 1
        df.replace(to_replace=r"^\s*$", value="", inplace=True, regex=True)
        if using_infer_string:
            assert len(df._mgr.blocks) == 2
        else:
            assert len(df._mgr.blocks) == 1
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.build.artifacts;
import org.gradle.api.Project;
import org.gradle.testfixtures.ProjectBuilder;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Tests for {@link ArtifactRelease}.
*
* @author Andy Wilkinson
* @author Scott Frederick
*/
class ArtifactReleaseTests {
	// Version suffix determines the artifact type: -SNAPSHOT -> "snapshot",
	// -M* and -RC* both -> "milestone", plain versions -> "release".
	@Test
	void whenProjectVersionIsSnapshotThenTypeIsSnapshot() {
		Project project = ProjectBuilder.builder().build();
		project.setVersion("1.2.3-SNAPSHOT");
		assertThat(ArtifactRelease.forProject(project).getType()).isEqualTo("snapshot");
	}
	@Test
	void whenProjectVersionIsMilestoneThenTypeIsMilestone() {
		Project project = ProjectBuilder.builder().build();
		project.setVersion("1.2.3-M1");
		assertThat(ArtifactRelease.forProject(project).getType()).isEqualTo("milestone");
	}
	@Test
	void whenProjectVersionIsReleaseCandidateThenTypeIsMilestone() {
		// release candidates intentionally share the "milestone" type
		Project project = ProjectBuilder.builder().build();
		project.setVersion("1.2.3-RC1");
		assertThat(ArtifactRelease.forProject(project).getType()).isEqualTo("milestone");
	}
	@Test
	void whenProjectVersionIsReleaseThenTypeIsRelease() {
		Project project = ProjectBuilder.builder().build();
		project.setVersion("1.2.3");
		assertThat(ArtifactRelease.forProject(project).getType()).isEqualTo("release");
	}
	// Download repository: only snapshots come from repo.spring.io;
	// milestones, RCs and releases are all served from Maven Central.
	@Test
	void whenProjectVersionIsSnapshotThenRepositoryIsArtifactorySnapshot() {
		Project project = ProjectBuilder.builder().build();
		project.setVersion("1.2.3-SNAPSHOT");
		assertThat(ArtifactRelease.forProject(project).getDownloadRepo()).contains("repo.spring.io/snapshot");
	}
	@Test
	void whenProjectVersionIsMilestoneThenRepositoryIsMavenCentral() {
		Project project = ProjectBuilder.builder().build();
		project.setVersion("4.0.0-M1");
		assertThat(ArtifactRelease.forProject(project).getDownloadRepo())
			.contains("https://repo.maven.apache.org/maven2");
	}
	@Test
	void whenProjectVersionIsReleaseCandidateThenRepositoryIsMavenCentral() {
		Project project = ProjectBuilder.builder().build();
		project.setVersion("4.0.0-RC1");
		assertThat(ArtifactRelease.forProject(project).getDownloadRepo())
			.contains("https://repo.maven.apache.org/maven2");
	}
	@Test
	void whenProjectVersionIsReleaseThenRepositoryIsMavenCentral() {
		Project project = ProjectBuilder.builder().build();
		project.setVersion("1.2.3");
		assertThat(ArtifactRelease.forProject(project).getDownloadRepo())
			.contains("https://repo.maven.apache.org/maven2");
	}
}
from collections import Iterable, Mapping
from uuid import uuid4
import six
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args, \
convert_to_datetime
class Job(object):
    """
    Contains the options given when scheduling callables and its current schedule and other state.
    This class should never be instantiated by the user.

    :var str id: the unique identifier of this job
    :var str name: the description of this job
    :var func: the callable to execute
    :var tuple|list args: positional arguments to the callable
    :var dict kwargs: keyword arguments to the callable
    :var bool coalesce: whether to only run the job once when several run times are due
    :var trigger: the trigger object that controls the schedule of this job
    :var str executor: the name of the executor that will run this job
    :var int misfire_grace_time: the time (in seconds) how much this job's execution is allowed to be late
    :var int max_instances: the maximum number of concurrently executing instances allowed for this job
    :var datetime.datetime next_run_time: the next scheduled run time of this job
    """

    __slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref', 'args', 'kwargs',
                 'name', 'misfire_grace_time', 'coalesce', 'max_instances', 'next_run_time')

    def __init__(self, scheduler, id=None, **kwargs):
        super(Job, self).__init__()
        self._scheduler = scheduler
        self._jobstore_alias = None
        # An explicit id wins; otherwise a random hex UUID is generated
        self._modify(id=id or uuid4().hex, **kwargs)

    def modify(self, **changes):
        """
        Makes the given changes to this job and saves it in the associated job store.
        Accepted keyword arguments are the same as the variables on this class.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job`
        """
        self._scheduler.modify_job(self.id, self._jobstore_alias, **changes)

    def reschedule(self, trigger, **trigger_args):
        """
        Shortcut for switching the trigger on this job.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job`
        """
        self._scheduler.reschedule_job(self.id, self._jobstore_alias, trigger, **trigger_args)

    def pause(self):
        """
        Temporarily suspend the execution of this job.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job`
        """
        self._scheduler.pause_job(self.id, self._jobstore_alias)

    def resume(self):
        """
        Resume the schedule of this job if previously paused.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job`
        """
        self._scheduler.resume_job(self.id, self._jobstore_alias)

    def remove(self):
        """
        Unschedules this job and removes it from its associated job store.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job`
        """
        self._scheduler.remove_job(self.id, self._jobstore_alias)

    @property
    def pending(self):
        """Returns ``True`` if the referenced job is still waiting to be added to its designated job store."""
        return self._jobstore_alias is None

    #
    # Private API
    #

    def _get_run_times(self, now):
        """
        Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive).

        :type now: datetime.datetime
        :rtype: list[datetime.datetime]
        """
        run_times = []
        next_run_time = self.next_run_time
        while next_run_time and next_run_time <= now:
            run_times.append(next_run_time)
            next_run_time = self.trigger.get_next_fire_time(next_run_time, now)
        return run_times

    def _modify(self, **changes):
        """Validates the changes to the Job and makes the modifications if and only if all of them validate."""
        # Collect validated values here; attributes are only assigned at the
        # end so a failed validation leaves the job completely untouched.
        approved = {}

        if 'id' in changes:
            value = changes.pop('id')
            # BUG FIX: the empty string previously passed this check although
            # the error message promises a *nonempty* string; mirror the
            # validation used for 'name' below
            if not value or not isinstance(value, six.string_types):
                raise TypeError("id must be a nonempty string")
            if hasattr(self, 'id'):
                raise ValueError('The job ID may not be changed')
            approved['id'] = value

        if 'func' in changes or 'args' in changes or 'kwargs' in changes:
            func = changes.pop('func') if 'func' in changes else self.func
            args = changes.pop('args') if 'args' in changes else self.args
            kwargs = changes.pop('kwargs') if 'kwargs' in changes else self.kwargs

            # BUG FIX: accept unicode textual references on Python 2 as well
            # (previously only ``str`` was accepted, so a u'' reference fell
            # through to the callable branch and raised TypeError)
            if isinstance(func, six.string_types):
                func_ref = func
                func = ref_to_obj(func)
            elif callable(func):
                try:
                    func_ref = obj_to_ref(func)
                except ValueError:
                    # If this happens, this Job won't be serializable
                    func_ref = None
            else:
                raise TypeError('func must be a callable or a textual reference to one')

            # Default the job name to the callable's name if none was given
            if not hasattr(self, 'name') and changes.get('name', None) is None:
                changes['name'] = get_callable_name(func)

            if isinstance(args, six.string_types) or not isinstance(args, Iterable):
                raise TypeError('args must be a non-string iterable')
            if isinstance(kwargs, six.string_types) or not isinstance(kwargs, Mapping):
                raise TypeError('kwargs must be a dict-like object')

            check_callable_args(func, args, kwargs)

            approved['func'] = func
            approved['func_ref'] = func_ref
            approved['args'] = args
            approved['kwargs'] = kwargs

        if 'name' in changes:
            value = changes.pop('name')
            if not value or not isinstance(value, six.string_types):
                raise TypeError("name must be a nonempty string")
            approved['name'] = value

        if 'misfire_grace_time' in changes:
            value = changes.pop('misfire_grace_time')
            if value is not None and (not isinstance(value, six.integer_types) or value <= 0):
                raise TypeError('misfire_grace_time must be either None or a positive integer')
            approved['misfire_grace_time'] = value

        if 'coalesce' in changes:
            value = bool(changes.pop('coalesce'))
            approved['coalesce'] = value

        if 'max_instances' in changes:
            value = changes.pop('max_instances')
            if not isinstance(value, six.integer_types) or value <= 0:
                raise TypeError('max_instances must be a positive integer')
            approved['max_instances'] = value

        if 'trigger' in changes:
            trigger = changes.pop('trigger')
            if not isinstance(trigger, BaseTrigger):
                raise TypeError('Expected a trigger instance, got %s instead' % trigger.__class__.__name__)
            approved['trigger'] = trigger

        if 'executor' in changes:
            value = changes.pop('executor')
            if not isinstance(value, six.string_types):
                raise TypeError('executor must be a string')
            approved['executor'] = value

        if 'next_run_time' in changes:
            value = changes.pop('next_run_time')
            approved['next_run_time'] = convert_to_datetime(value, self._scheduler.timezone, 'next_run_time')

        # Anything left over was not a recognized attribute
        if changes:
            raise AttributeError('The following are not modifiable attributes of Job: %s' % ', '.join(changes))

        # All changes validated -- apply them atomically
        for key, value in six.iteritems(approved):
            setattr(self, key, value)

    def __getstate__(self):
        # Don't allow this Job to be serialized if the function reference could not be determined
        if not self.func_ref:
            raise ValueError('This Job cannot be serialized since the reference to its callable (%r) could not be '
                             'determined. Consider giving a textual reference (module:function name) instead.' %
                             (self.func,))

        # 'version' allows the pickle format to evolve; see __setstate__
        return {
            'version': 1,
            'id': self.id,
            'func': self.func_ref,
            'trigger': self.trigger,
            'executor': self.executor,
            'args': self.args,
            'kwargs': self.kwargs,
            'name': self.name,
            'misfire_grace_time': self.misfire_grace_time,
            'coalesce': self.coalesce,
            'max_instances': self.max_instances,
            'next_run_time': self.next_run_time
        }

    def __setstate__(self, state):
        if state.get('version', 1) > 1:
            raise ValueError('Job has version %s, but only version 1 can be handled' % state['version'])

        self.id = state['id']
        self.func_ref = state['func']
        self.func = ref_to_obj(self.func_ref)
        self.trigger = state['trigger']
        self.executor = state['executor']
        self.args = state['args']
        self.kwargs = state['kwargs']
        self.name = state['name']
        self.misfire_grace_time = state['misfire_grace_time']
        self.coalesce = state['coalesce']
        self.max_instances = state['max_instances']
        self.next_run_time = state['next_run_time']

    def __eq__(self, other):
        # Jobs are compared by id only; other attributes are ignored
        if isinstance(other, Job):
            return self.id == other.id
        return NotImplemented

    def __repr__(self):
        return '<Job (id=%s name=%s)>' % (repr_escape(self.id), repr_escape(self.name))

    def __str__(self):
        return '%s (trigger: %s, next run at: %s)' % (repr_escape(self.name), repr_escape(str(self.trigger)),
                                                      datetime_repr(self.next_run_time))

    def __unicode__(self):
        return six.u('%s (trigger: %s, next run at: %s)') % (self.name, self.trigger, datetime_repr(self.next_run_time))
"""Plugin for NOS: Nederlandse Omroep Stichting
Supports:
MP$: http://nos.nl/uitzending/nieuwsuur.html
Live: http://www.nos.nl/livestream/*
Tour: http://nos.nl/tour/live
"""
import re
import json
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http
from livestreamer.plugin.api.utils import parse_json
from livestreamer.stream import HTTPStream, HLSStream
# Matches any http(s) URL on a nos.nl (sub)domain; used by can_handle_url()
_url_re = re.compile("http(s)?://(\w+\.)?nos.nl/")
# Extracts the argument of a JSONP-style wrapper: callback( ... )
_js_re = re.compile('\((.*)\)')
# Extracts the data-stream="..." attribute from the live stream page
_data_stream_re = re.compile('data-stream="(.*?)"', re.DOTALL | re.IGNORECASE)
# <source ...> tags and their src/type attributes for on-demand videos
_source_re = re.compile("<source(?P<source>[^>]+)>", re.IGNORECASE)
_source_src_re = re.compile("src=\"(?P<src>[^\"]+)\"", re.IGNORECASE)
_source_type_re = re.compile("type=\"(?P<type>[^\"]+)\"", re.IGNORECASE)
class NOS(Plugin):
    @classmethod
    def can_handle_url(cls, url):
        # Handles any URL on a nos.nl domain (see _url_re)
        return _url_re.match(url)
    def _resolve_stream(self):
        """Resolve a live stream: scrape the data-stream attribute from the
        page, POST it to the resolve endpoint, then parse the JSONP payload
        of the returned URL to obtain the HLS variant playlist."""
        res = http.get(self.url)
        match = _data_stream_re.search(res.text)
        if not match:
            return
        data_stream = match.group(1)
        resolve_data = {
            'stream': data_stream
        }
        res = http.post(
            'http://www-ipv4.nos.nl/livestream/resolve/',
            data=json.dumps(resolve_data)
        )
        data = http.json(res)
        res = http.get(data['url'])
        match = _js_re.search(res.text)
        if not match:
            return
        stream_url = parse_json(match.group(1))
        return HLSStream.parse_variant_playlist(self.session, stream_url)
    def _get_source_streams(self):
        """Collect on-demand streams from <source> tags on the page, keyed by
        the tag's type attribute (presumably a resolution label)."""
        res = http.get(self.url)
        streams = {}
        sources = _source_re.findall(res.text)
        for source in sources:
            src = _source_src_re.search(source).group("src")
            pixels = _source_type_re.search(source).group("type")
            streams[pixels] = HTTPStream(self.session, src)
        return streams
    def _get_streams(self):
        # Live and Tour pages go through the resolve endpoint; everything
        # else is treated as an on-demand page with <source> tags.
        # NOTE(review): urlparts[-3] raises IndexError for URLs with fewer
        # than three path segments -- confirm all handled URLs are long enough
        urlparts = self.url.split('/')
        if urlparts[-2] == 'livestream' or urlparts[-3] == 'tour':
            return self._resolve_stream()
        else:
            return self._get_source_streams()
# Entry point looked up by livestreamer's plugin loader
__plugin__ = NOS
# -*- coding: utf-8 -*-
"""
***************************************************************************
NumberInputPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
import math
import warnings
from qgis.PyQt import uic
from qgis.PyQt import sip
from qgis.PyQt.QtCore import pyqtSignal, QSize
from qgis.PyQt.QtWidgets import QDialog, QLabel, QComboBox
from qgis.core import (QgsApplication,
QgsExpression,
QgsProperty,
QgsUnitTypes,
QgsMapLayer,
QgsCoordinateReferenceSystem,
QgsProcessingParameterNumber,
QgsProcessingOutputNumber,
QgsProcessingParameterDefinition,
QgsProcessingModelChildParameterSource,
QgsProcessingFeatureSourceDefinition,
QgsProcessingUtils)
from qgis.gui import QgsExpressionBuilderDialog
from processing.tools.dataobjects import createExpressionContext, createContext
# Root of the processing plugin (one level above this module's directory)
pluginPath = os.path.split(os.path.dirname(__file__))[0]
# uic.loadUiType emits DeprecationWarnings on newer SIP/PyQt; suppress them
# while generating the widget base classes from the .ui files
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    NUMBER_WIDGET, NUMBER_BASE = uic.loadUiType(
        os.path.join(pluginPath, 'ui', 'widgetNumberSelector.ui'))
    WIDGET, BASE = uic.loadUiType(
        os.path.join(pluginPath, 'ui', 'widgetBaseSelector.ui'))
class ModelerNumberInputPanel(BASE, WIDGET):
    """
    Number input panel for use inside the modeler - this input panel
    is based off the base input panel and includes a text based line input
    for entering values. This allows expressions and other non-numeric
    values to be set, which are later evalauted to numbers when the model
    is run.
    """
    # Emitted whenever the text value changes
    hasChanged = pyqtSignal()
    def __init__(self, param, modelParametersDialog):
        super().__init__(None)
        self.setupUi(self)
        self.param = param
        self.modelParametersDialog = modelParametersDialog
        if param.defaultValue():
            self.setValue(param.defaultValue())
        self.btnSelect.clicked.connect(self.showExpressionsBuilder)
        self.leText.textChanged.connect(lambda: self.hasChanged.emit())
    def showExpressionsBuilder(self):
        # Open the QGIS expression builder pre-seeded with the current text
        # and the child algorithm's variables highlighted
        context = createExpressionContext()
        processing_context = createContext()
        scope = self.modelParametersDialog.model.createExpressionContextScopeForChildAlgorithm(self.modelParametersDialog.childId, processing_context)
        context.appendScope(scope)
        highlighted = scope.variableNames()
        context.setHighlightedVariables(highlighted)
        dlg = QgsExpressionBuilderDialog(None, str(self.leText.text()), self, 'generic', context)
        dlg.setWindowTitle(self.tr('Expression Based Input'))
        if dlg.exec_() == QDialog.Accepted:
            exp = QgsExpression(dlg.expressionText())
            # silently keep the old value if the expression does not parse
            if not exp.hasParserError():
                self.setValue(dlg.expressionText())
    def getValue(self):
        # Interpret the text, in priority order, as: a "@name" reference to a
        # model parameter, a "@child_output" reference to a child algorithm
        # output, a plain float, or finally a QGIS expression
        value = self.leText.text()
        for param in self.modelParametersDialog.model.parameterDefinitions():
            if isinstance(param, QgsProcessingParameterNumber):
                if "@" + param.name() == value.strip():
                    return QgsProcessingModelChildParameterSource.fromModelParameter(param.name())
        for alg in list(self.modelParametersDialog.model.childAlgorithms().values()):
            for out in alg.algorithm().outputDefinitions():
                if isinstance(out, QgsProcessingOutputNumber) and "@%s_%s" % (alg.childId(), out.name()) == value.strip():
                    return QgsProcessingModelChildParameterSource.fromChildOutput(alg.childId(), out.outputName())
        try:
            return float(value.strip())
        except:
            return QgsProcessingModelChildParameterSource.fromExpression(self.leText.text())
    def setValue(self, value):
        # Render a child-parameter source back into its textual form
        if isinstance(value, QgsProcessingModelChildParameterSource):
            if value.source() == QgsProcessingModelChildParameterSource.ModelParameter:
                self.leText.setText('@' + value.parameterName())
            elif value.source() == QgsProcessingModelChildParameterSource.ChildOutput:
                # NOTE(review): no '@' prefix here, yet getValue() matches
                # child outputs against "@child_output" -- the round trip
                # looks asymmetric; confirm intended
                name = "%s_%s" % (value.outputChildId(), value.outputName())
                self.leText.setText(name)
            elif value.source() == QgsProcessingModelChildParameterSource.Expression:
                self.leText.setText(value.expression())
            else:
                self.leText.setText(str(value.staticValue()))
        else:
            self.leText.setText(str(value))
class NumberInputPanel(NUMBER_BASE, NUMBER_WIDGET):
    """
    Number input panel for use outside the modeler - this input panel
    contains a user friendly spin box for entering values.
    """
    # Emitted whenever the spin box value changes
    hasChanged = pyqtSignal()
    def __init__(self, param):
        super(NumberInputPanel, self).__init__(None)
        self.setupUi(self)
        self.layer = None
        self.spnValue.setExpressionsEnabled(True)
        self.param = param
        if self.param.dataType() == QgsProcessingParameterNumber.Integer:
            self.spnValue.setDecimals(0)
        else:
            # Guess reasonable step value
            if self.param.maximum() is not None and self.param.minimum() is not None:
                try:
                    self.spnValue.setSingleStep(self.calculateStep(float(self.param.minimum()), float(self.param.maximum())))
                except:
                    pass
        if self.param.maximum() is not None:
            self.spnValue.setMaximum(self.param.maximum())
        else:
            self.spnValue.setMaximum(999999999)
        if self.param.minimum() is not None:
            self.spnValue.setMinimum(self.param.minimum())
        else:
            self.spnValue.setMinimum(-999999999)
        # when True, (minimum - 1) acts as a sentinel meaning "value not set"
        self.allowing_null = False
        # set default value
        if param.flags() & QgsProcessingParameterDefinition.FlagOptional:
            self.spnValue.setShowClearButton(True)
            # extend the range by one below the minimum and use that extra
            # value as the "Not set" sentinel shown via setSpecialValueText
            min = self.spnValue.minimum() - 1
            self.spnValue.setMinimum(min)
            self.spnValue.setValue(min)
            self.spnValue.setSpecialValueText(self.tr('Not set'))
            self.allowing_null = True
        if param.defaultValue() is not None:
            self.setValue(param.defaultValue())
            if not self.allowing_null:
                try:
                    self.spnValue.setClearValue(float(param.defaultValue()))
                except:
                    pass
        elif self.param.minimum() is not None and not self.allowing_null:
            try:
                self.setValue(float(self.param.minimum()))
                if not self.allowing_null:
                    self.spnValue.setClearValue(float(self.param.minimum()))
            except:
                pass
        elif not self.allowing_null:
            self.setValue(0)
            self.spnValue.setClearValue(0)
        # we don't show the expression button outside of modeler
        self.layout().removeWidget(self.btnSelect)
        sip.delete(self.btnSelect)
        self.btnSelect = None
        if not self.param.isDynamic():
            # only show data defined button for dynamic properties
            self.layout().removeWidget(self.btnDataDefined)
            sip.delete(self.btnDataDefined)
            self.btnDataDefined = None
        else:
            self.btnDataDefined.init(0, QgsProperty(), self.param.dynamicPropertyDefinition())
            self.btnDataDefined.registerEnabledWidget(self.spnValue, False)
        self.spnValue.valueChanged.connect(lambda: self.hasChanged.emit())
    def setDynamicLayer(self, layer):
        # Best-effort: point the data-defined button at the layer backing
        # the dynamic property; failures are silently ignored
        try:
            self.layer = self.getLayerFromValue(layer)
            self.btnDataDefined.setVectorLayer(self.layer)
        except:
            pass
    def getLayerFromValue(self, value):
        # Resolve a parameter value (source definition, layer id/uri string,
        # or layer object) into a QgsMapLayer, taking ownership if needed
        context = createContext()
        if isinstance(value, QgsProcessingFeatureSourceDefinition):
            value, ok = value.source.valueAsString(context.expressionContext())
        if isinstance(value, str):
            value = QgsProcessingUtils.mapLayerFromString(value, context)
        if value is None or not isinstance(value, QgsMapLayer):
            return None
        # need to return layer with ownership - otherwise layer may be deleted when context
        # goes out of scope
        new_layer = context.takeResultLayer(value.id())
        # if we got ownership, return that - otherwise just return the layer (which may be owned by the project)
        return new_layer if new_layer is not None else value
    def getValue(self):
        # Data-defined property wins; the sentinel minimum means "not set"
        if self.btnDataDefined is not None and self.btnDataDefined.isActive():
            return self.btnDataDefined.toProperty()
        elif self.allowing_null and self.spnValue.value() == self.spnValue.minimum():
            return None
        else:
            return self.spnValue.value()
    def setValue(self, value):
        # Non-numeric values are silently ignored
        try:
            self.spnValue.setValue(float(value))
        except:
            return
    def calculateStep(self, minimum, maximum):
        # For ranges <= 1.0, use a tenth of the range rounded to one
        # significant figure; otherwise step by whole units
        value_range = maximum - minimum
        if value_range <= 1.0:
            step = value_range / 10.0
            # round to 1 significant figrue
            return round(step, -int(math.floor(math.log10(step))))
        else:
            return 1.0
class DistanceInputPanel(NumberInputPanel):
    """
    Distance input panel for use outside the modeler - this input panel
    contains a label showing the distance unit.

    Shows either a fixed unit label (for unknown/geographic unit types) or
    a combo box letting the user pick a display unit, plus a warning icon
    when distances are measured in geographic degrees.
    """
    def __init__(self, param):
        super().__init__(param)
        # Fixed unit label (shown for non-standard units) and the
        # user-selectable display-unit combo (shown for standard units).
        self.label = QLabel('')
        self.units_combo = QComboBox()
        # Unit of the underlying parameter value; combo selections are
        # converted back to this unit in getValue().
        self.base_units = QgsUnitTypes.DistanceUnknownUnit
        for u in (QgsUnitTypes.DistanceMeters,
                  QgsUnitTypes.DistanceKilometers,
                  QgsUnitTypes.DistanceFeet,
                  QgsUnitTypes.DistanceMiles,
                  QgsUnitTypes.DistanceYards):
            self.units_combo.addItem(QgsUnitTypes.toString(u), u)
        # Half an 'X' width of spacing on each side of the unit widgets.
        # NOTE(review): insert indices below assume the layout built by
        # NumberInputPanel.__init__ — order of inserts is significant.
        label_margin = self.fontMetrics().width('X')
        self.layout().insertSpacing(1, label_margin / 2)
        self.layout().insertWidget(2, self.label)
        self.layout().insertWidget(3, self.units_combo)
        self.layout().insertSpacing(4, label_margin / 2)
        # Warning icon shown only while units are geographic degrees.
        self.warning_label = QLabel()
        icon = QgsApplication.getThemeIcon('mIconWarning.svg')
        size = max(24, self.spnValue.height() * 0.5)
        self.warning_label.setPixmap(icon.pixmap(icon.actualSize(QSize(size, size))))
        self.warning_label.setToolTip(self.tr('Distance is in geographic degrees. Consider reprojecting to a projected local coordinate system for accurate results.'))
        self.layout().insertWidget(4, self.warning_label)
        self.layout().insertSpacing(5, label_margin)
        self.setUnits(QgsUnitTypes.DistanceUnknownUnit)
    def setUnits(self, units):
        """Set the base distance unit and update label/combo visibility."""
        self.label.setText(QgsUnitTypes.toString(units))
        if QgsUnitTypes.unitType(units) != QgsUnitTypes.Standard:
            # Non-standard unit type (unknown/geographic): no unit choice,
            # show the fixed label only.
            self.units_combo.hide()
            self.label.show()
        else:
            self.units_combo.setCurrentIndex(self.units_combo.findData(units))
            self.units_combo.show()
            self.label.hide()
        self.warning_label.setVisible(units == QgsUnitTypes.DistanceDegrees)
        self.base_units = units
    def setUnitParameterValue(self, value):
        """Derive units from a layer, CRS object or CRS string, then apply."""
        units = QgsUnitTypes.DistanceUnknownUnit
        layer = self.getLayerFromValue(value)
        if isinstance(layer, QgsMapLayer):
            units = layer.crs().mapUnits()
        elif isinstance(value, QgsCoordinateReferenceSystem):
            units = value.mapUnits()
        elif isinstance(value, str):
            crs = QgsCoordinateReferenceSystem(value)
            if crs.isValid():
                units = crs.mapUnits()
        self.setUnits(units)
    def getValue(self):
        """Return the value converted from the display unit to base units."""
        val = super().getValue()
        if isinstance(val, float) and self.units_combo.isVisible():
            display_unit = self.units_combo.currentData()
            # Convert the user-entered value back to the parameter's unit.
            return val * QgsUnitTypes.fromUnitToUnitFactor(display_unit, self.base_units)
        return val
    def setValue(self, value):
        """Set the spin box value; non-numeric values are silently ignored."""
        try:
            self.spnValue.setValue(float(value))
        except:
            return
/*
* Copyright 2014-2020 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.tests.server.jetty
import io.ktor.server.jetty.*
import io.ktor.server.testing.suites.*
/**
 * Runs the shared client-certificate test suite against the Jetty engine.
 */
class JettyClientCertTest : ClientCertTestSuite<JettyApplicationEngine, JettyApplicationEngineBase.Configuration>(Jetty)
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base Cells Communication Driver
"""
class BaseCellsDriver(object):
    """Abstract interface for cells communication drivers.

    One instance of this class is created for every neighbor cell found
    in the DB and associated with that cell's CellState; the cells
    manager creates one additional instance for setting up consumers.
    Concrete drivers must override every method below.
    """
    def start_consumers(self, msg_runner):
        """Start any consumers the driver may need."""
        raise NotImplementedError
    def stop_consumers(self):
        """Stop consuming messages."""
        raise NotImplementedError
    def send_message_to_cell(self, cell_state, message):
        """Send a message to a cell."""
        raise NotImplementedError
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class hr_payslip_employees(osv.osv_memory):
    """Payslip batch wizard: carry the payslip run's journal into context."""
    _inherit = 'hr.payslip.employees'
    def compute_sheet(self, cr, uid, ids, context=None):
        """Compute payslips; when launched from a payslip run, inject that
        run's journal_id into the context handed to the parent method."""
        run_pool = self.pool.get('hr.payslip.run')
        if context is None:
            context = {}
        active_id = context.get('active_id')
        if active_id:
            run_data = run_pool.read(cr, uid, active_id, ['journal_id'])
            # read() returns a (id, name) pair (or False) for many2one fields.
            journal = run_data.get('journal_id')
            journal = journal and journal[0] or False
            if journal:
                context = dict(context, journal_id=journal)
        return super(hr_payslip_employees, self).compute_sheet(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
from __future__ import absolute_import
import re
import pytz
import time
from datetime import datetime
from django.utils import timezone
from werkzeug.utils import secure_filename as werkzeug_secure_filename
def iso8601format(dt):
    """Render ``dt`` as an ISO-8601 timestamp string; '' for falsy input."""
    if not dt:
        return ''
    return dt.strftime('%Y-%m-%dT%H:%M:%SZ')
def secure_filename(filename):
    """Return a secure version of a filename.

    Uses ``werkzeug.utils.secure_filename``, but explicitly preserves any
    leading underscores that werkzeug would strip.

    :param filename str: A filename to sanitize
    :return: Secure version of filename
    """
    secure = werkzeug_secure_filename(filename)
    # re.match anchors at the start, so this finds leading underscores only.
    leading = re.match('_+', filename)
    if leading is not None:
        secure = leading.group() + secure
    return secure
def get_timestamp():
    """Return the current Unix epoch time as an int (whole seconds)."""
    return int(time.time())
def throttle_period_expired(timestamp, throttle):
    """True when more than ``throttle`` seconds have passed since ``timestamp``.

    ``timestamp`` may be falsy (always expired), a datetime (naive values
    are treated as UTC), or a Unix epoch number.
    """
    if not timestamp:
        return True
    if isinstance(timestamp, datetime):
        aware = timestamp if timestamp.tzinfo else timestamp.replace(tzinfo=pytz.utc)
        return (timezone.now() - aware).total_seconds() > throttle
    return (get_timestamp() - timestamp) > throttle
# -*- coding: utf-8 -*-
"""
sphinx.util.docstrings
~~~~~~~~~~~~~~~~~~~~~~
Utilities for docstring processing.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
def prepare_docstring(s, ignore=1):
    """Convert a docstring into lines of parseable reST. Remove common leading
    indentation, where the indentation of a given number of lines (usually just
    one) is ignored.

    Return the docstring as a list of lines usable for inserting into a docutils
    ViewList (used as argument of nested_parse().) An empty line is added to
    act as a separator between this docstring and following content.
    """
    lines = s.expandtabs().splitlines()
    # Find minimum indentation of any non-blank lines after ignored lines.
    # sys.maxsize (available on Python 2.6+ and 3.x) replaces the
    # Python-2-only sys.maxint as the "no indentation found" sentinel.
    margin = sys.maxsize
    for line in lines[ignore:]:
        content = len(line.lstrip())
        if content:
            indent = len(line) - content
            margin = min(margin, indent)
    # Remove indentation from ignored lines.
    for i in range(ignore):
        if i < len(lines):
            lines[i] = lines[i].lstrip()
    if margin < sys.maxsize:
        for i in range(ignore, len(lines)):
            lines[i] = lines[i][margin:]
    # Remove any leading blank lines.
    while lines and not lines[0]:
        lines.pop(0)
    # make sure there is an empty line at the end
    if lines and lines[-1]:
        lines.append('')
    return lines
def prepare_commentdoc(s):
    """Extract documentation comment lines (starting with #:) and return them
    as a list of lines. Returns an empty list if there is no documentation.
    """
    result = []
    for raw in s.expandtabs().splitlines():
        stripped = raw.strip()
        if not stripped.startswith('#:'):
            continue
        text = stripped[2:]
        # the first space after the comment marker is ignored
        if text.startswith(' '):
            text = text[1:]
        result.append(text)
    # Terminate with a blank separator line when any docs were found.
    if result and result[-1]:
        result.append('')
    return result
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
// NOTE(review): generated file — the comments below document the current
// shape only; any edits here will be discarded on the next generation run.
/** Trigger that evaluates the rule on a fixed interval. */
export interface IntervalTrigger {
  interval: PromDuration;
}
export const defaultIntervalTrigger = (): IntervalTrigger => ({
  interval: defaultPromDuration(),
});
/** Prometheus-style duration string, e.g. "5m". */
export type PromDuration = string;
export const defaultPromDuration = (): PromDuration => ("");
export type TemplateString = string;
export const defaultTemplateString = (): TemplateString => ("");
// TODO: validate that only one can specify source=true
// & struct.MinFields(1) This doesn't work in Cue <v0.12.0 as per
export type ExpressionMap = Record<string, Expression>;
export const defaultExpressionMap = (): ExpressionMap => ({});
/** One node in a rule's expression graph, keyed by ExpressionMap. */
export interface Expression {
  // The type of query if this is a query expression
  queryType?: string;
  relativeTimeRange?: RelativeTimeRange;
  // The UID of the datasource to run this expression against. If omitted, the expression will be run against the `__expr__` datasource
  datasourceUID?: DatasourceUID;
  model: any;
  // Used to mark the expression to be used as the final source for the rule evaluation
  // Only one expression in a rule can be marked as the source
  // For AlertRules, this is the expression that will be evaluated against the alerting condition
  // For RecordingRules, this is the expression that will be recorded
  source?: boolean;
}
export const defaultExpression = (): Expression => ({
  model: {},
});
/** Time window expressed relative to the evaluation time. */
export interface RelativeTimeRange {
  from: PromDurationWMillis;
  to: PromDurationWMillis;
}
export const defaultRelativeTimeRange = (): RelativeTimeRange => ({
  from: defaultPromDurationWMillis(),
  to: defaultPromDurationWMillis(),
});
export type PromDurationWMillis = string;
export const defaultPromDurationWMillis = (): PromDurationWMillis => ("");
export type DatasourceUID = string;
export const defaultDatasourceUID = (): DatasourceUID => ("");
/** Recording rule specification. */
export interface Spec {
  title: string;
  paused?: boolean;
  trigger: IntervalTrigger;
  labels?: Record<string, TemplateString>;
  metric: string;
  expressions: ExpressionMap;
  targetDatasourceUID: string;
}
export const defaultSpec = (): Spec => ({
  title: "",
  trigger: defaultIntervalTrigger(),
  metric: "",
  expressions: defaultExpressionMap(),
  targetDatasourceUID: "",
});
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Mapping from IP protocol version to the matching iptables binary name.
BINS = {
    'ipv4': 'iptables',
    'ipv6': 'ip6tables',
}
DOCUMENTATION = '''
---
module: iptables
short_description: Modify the systems iptables
requirements: []
version_added: "2.0"
author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
description:
- Iptables is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel. This module does not handle the saving
and/or loading of rules, but rather only manipulates the current rules
that are present in memory. This is the same as the behaviour of the
"iptables" and "ip6tables" command which this module uses internally.
notes:
- This module just deals with individual rules. If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table which the command
should operate on. If the kernel is configured with automatic module
loading, an attempt will be made to load the appropriate module for
that table if it is not already there.
required: false
default: filter
choices: [ "filter", "nat", "mangle", "raw", "security" ]
state:
description:
- Whether the rule should be absent or present.
required: false
default: present
choices: [ "present", "absent" ]
action:
version_added: "2.2"
description:
- Whether the rule should be appended at the bottom or inserted at the
top. If the rule already exists the chain won't be modified.
required: false
default: append
choices: [ "append", "insert" ]
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
required: false
default: ipv4
choices: [ "ipv4", "ipv6" ]
chain:
description:
- "Chain to operate on. This option can either be the name of a user
defined chain or any of the builtin chains: 'INPUT', 'FORWARD',
'OUTPUT', 'PREROUTING', 'POSTROUTING', 'SECMARK', 'CONNSECMARK'."
required: false
protocol:
description:
- The protocol of the rule or of the packet to check. The specified
protocol can be one of tcp, udp, udplite, icmp, esp, ah, sctp or the
special keyword "all", or it can be a numeric value, representing one
of these protocols or a different one. A protocol name from
/etc/protocols is also allowed. A "!" argument before the protocol
inverts the test. The number zero is equivalent to all. "all" will
match with all protocols and is taken as default when this option is
omitted.
required: false
default: null
source:
description:
- Source specification. Address can be either a network name,
a hostname, a network IP address (with /mask), or a plain IP address.
Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea. The mask can be
either a network mask or a plain number, specifying the number of 1's
at the left side of the network mask. Thus, a mask of 24 is equivalent
to 255.255.255.0. A "!" argument before the address specification
inverts the sense of the address.
required: false
default: null
destination:
description:
- Destination specification. Address can be either a network name,
a hostname, a network IP address (with /mask), or a plain IP address.
Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea. The mask can be
either a network mask or a plain number, specifying the number of 1's
at the left side of the network mask. Thus, a mask of 24 is equivalent
to 255.255.255.0. A "!" argument before the address specification
inverts the sense of the address.
required: false
default: null
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property. The set of matches make up the condition under
which a target is invoked. Matches are evaluated first to last if
specified as an array and work in short-circuit fashion, i.e. if one
extension yields false, evaluation will stop.
required: false
default: []
jump:
description:
- This specifies the target of the rule; i.e., what to do if the packet
matches it. The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets which decide the
fate of the packet immediately, or an extension (see EXTENSIONS
        below). If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
required: false
default: null
goto:
description:
- This specifies that the processing should continue in a user specified
chain. Unlike the jump argument return will not continue processing in
this chain but instead in the chain that called us via jump.
required: false
default: null
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the INPUT, FORWARD and PREROUTING chains). When the "!"
argument is used before the interface name, the sense is inverted. If
the interface name ends in a "+", then any interface which begins with
this name will match. If this option is omitted, any interface name
will match.
required: false
default: null
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the FORWARD, OUTPUT and POSTROUTING chains). When the
"!" argument is used before the interface name, the sense is inverted.
If the interface name ends in a "+", then any interface which begins
with this name will match. If this option is omitted, any interface
name will match.
required: false
default: null
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets. Since there is no way to tell the source or
destination ports of such a packet (or ICMP type), such a packet will
not match any rules which specify them. When the "!" argument precedes
fragment argument, the rule will only match head fragments, or
unfragmented packets.
required: false
default: null
set_counters:
description:
- This enables the administrator to initialize the packet and byte
counters of a rule (during INSERT, APPEND, REPLACE operations).
required: false
default: null
source_port:
description:
- "Source port or port range specification. This can either be a service
name or a port number. An inclusive range can also be specified, using
the format first:last. If the first port is omitted, '0' is assumed;
if the last is omitted, '65535' is assumed. If the first port is
greater than the second one they will be swapped."
required: false
default: null
destination_port:
description:
- "Destination port or port range specification. This can either be
a service name or a port number. An inclusive range can also be
specified, using the format first:last. If the first port is omitted,
'0' is assumed; if the last is omitted, '65535' is assumed. If the
first port is greater than the second one they will be swapped."
required: false
default: null
to_ports:
description:
- "This specifies a destination port or range of ports to use: without
this, the destination port is never altered. This is only valid if the
rule also specifies one of the following protocols: tcp, udp, dccp or
sctp."
required: false
default: null
to_destination:
version_added: "2.1"
description:
- "This specifies a destination address to use with DNAT: without
this, the destination address is never altered."
required: false
default: null
to_source:
version_added: "2.2"
description:
- "This specifies a source address to use with SNAT: without
this, the source address is never altered."
required: false
default: null
set_dscp_mark:
version_added: "2.1"
description:
- "This allows specifying a DSCP mark to be added to packets.
It takes either an integer or hex value. Mutually exclusive with
C(set_dscp_mark_class)."
required: false
default: null
set_dscp_mark_class:
version_added: "2.1"
description:
- "This allows specifying a predefined DiffServ class which will be
translated to the corresponding DSCP mark. Mutually exclusive with
C(set_dscp_mark)."
required: false
default: null
comment:
description:
- "This specifies a comment that will be added to the rule"
required: false
default: null
ctstate:
description:
- "ctstate is a list of the connection states to match in the conntrack
module.
Possible states are: 'INVALID', 'NEW', 'ESTABLISHED', 'RELATED',
'UNTRACKED', 'SNAT', 'DNAT'"
required: false
default: []
limit:
description:
- "Specifies the maximum average number of matches to allow per second.
The number can specify units explicitly, using `/second', `/minute',
`/hour' or `/day', or parts of them (so `5/second' is the same as
`5/s')."
required: false
default: null
limit_burst:
version_added: "2.1"
description:
- "Specifies the maximum burst before the above limit kicks in."
required: false
default: null
uid_owner:
version_added: "2.1"
description:
- "Specifies the UID or username to use in match by owner rule."
required: false
reject_with:
version_added: "2.1"
description:
- "Specifies the error packet type to return while rejecting."
required: false
icmp_type:
version_added: "2.2"
description:
- "This allows specification of the ICMP type, which can be a numeric
ICMP type, type/code pair, or one of the ICMP type names shown by the
command 'iptables -p icmp -h'"
required: false
flush:
version_added: "2.2"
description:
- "Flushes the specified table and chain of all rules. If no chain is
specified then the entire table is purged. Ignores all other
parameters."
required: false
policy:
version_added: "2.2"
description:
- "Set the policy for the chain to the given target. Valid targets are
ACCEPT, DROP, QUEUE, RETURN. Only built in chains can have policies.
This parameter requires the chain parameter. Ignores all other
parameters."
'''
EXAMPLES = '''
# Block specific IP
- iptables: chain=INPUT source=8.8.8.8 jump=DROP
become: yes
# Forward port 80 to 8600
- iptables: table=nat chain=PREROUTING in_interface=eth0 protocol=tcp match=tcp destination_port=80 jump=REDIRECT to_ports=8600 comment="Redirect web traffic to port 8600"
become: yes
# Allow related and established connections
- iptables: chain=INPUT ctstate=ESTABLISHED,RELATED jump=ACCEPT
become: yes
# Tag all outbound tcp packets with DSCP mark 8
- iptables: chain=OUTPUT jump=DSCP table=mangle set_dscp_mark=8 protocol=tcp
# Tag all outbound tcp packets with DSCP DiffServ class CS1
- iptables: chain=OUTPUT jump=DSCP table=mangle set_dscp_mark_class=CS1 protocol=tcp
'''
def append_param(rule, param, flag, is_list):
    """Append ``flag param`` to ``rule``; a list expands to one flag/value
    pair per item. ``None`` values append nothing."""
    if is_list:
        for item in param:
            append_param(rule, item, flag, False)
    elif param is not None:
        rule.extend([flag, param])
def append_csv(rule, param, flag):
    """Append ``flag v1,v2,...`` to ``rule`` when ``param`` is non-empty."""
    if param:
        rule.append(flag)
        rule.append(','.join(param))
def append_match(rule, param, match):
    """Load a match extension (``-m NAME``) when ``param`` is truthy."""
    if param:
        rule += ['-m', match]
def append_jump(rule, param, jump):
    """Add a jump target (``-j NAME``) when ``param`` is truthy."""
    if param:
        rule += ['-j', jump]
def construct_rule(params):
    """Translate module params into the iptables rule-spec argument list.

    NOTE: the emission order below mirrors the iptables command-line
    conventions and must match what `iptables -C` compares against —
    do not reorder these calls.
    """
    rule = []
    append_param(rule, params['protocol'], '-p', False)
    append_param(rule, params['source'], '-s', False)
    append_param(rule, params['destination'], '-d', False)
    append_param(rule, params['match'], '-m', True)
    append_param(rule, params['jump'], '-j', False)
    append_param(rule, params['to_destination'], '--to-destination', False)
    append_param(rule, params['to_source'], '--to-source', False)
    append_param(rule, params['goto'], '-g', False)
    append_param(rule, params['in_interface'], '-i', False)
    append_param(rule, params['out_interface'], '-o', False)
    append_param(rule, params['fragment'], '-f', False)
    append_param(rule, params['set_counters'], '-c', False)
    append_param(rule, params['source_port'], '--source-port', False)
    append_param(rule, params['destination_port'], '--destination-port', False)
    append_param(rule, params['to_ports'], '--to-ports', False)
    append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
    append_param(
        rule,
        params['set_dscp_mark_class'],
        '--set-dscp-class',
        False)
    # Each match extension must be loaded (-m NAME) before its options.
    append_match(rule, params['comment'], 'comment')
    append_param(rule, params['comment'], '--comment', False)
    append_match(rule, params['ctstate'], 'state')
    append_csv(rule, params['ctstate'], '--state')
    # The limit module is needed if either limit option was given.
    append_match(rule, params['limit'] or params['limit_burst'], 'limit')
    append_param(rule, params['limit'], '--limit', False)
    append_param(rule, params['limit_burst'], '--limit-burst', False)
    append_match(rule, params['uid_owner'], 'owner')
    append_param(rule, params['uid_owner'], '--uid-owner', False)
    # reject_with implies the REJECT jump target.
    append_jump(rule, params['reject_with'], 'REJECT')
    append_param(rule, params['reject_with'], '--reject-with', False)
    append_param(rule, params['icmp_type'], '--icmp-type', False)
    return rule
def push_arguments(iptables_path, action, params, make_rule=True):
    """Build the iptables command line: binary, table, action and chain,
    followed (unless ``make_rule`` is False) by the full rule spec."""
    cmd = [iptables_path, '-t', params['table'], action, params['chain']]
    if make_rule:
        cmd.extend(construct_rule(params))
    return cmd
def check_present(iptables_path, module, params):
    """Return True when the rule already exists (``iptables -C`` exits 0)."""
    cmd = push_arguments(iptables_path, '-C', params)
    rc, _, __ = module.run_command(cmd, check_rc=False)
    return (rc == 0)
def append_rule(iptables_path, module, params):
    """Append the rule at the end of the chain (``iptables -A``)."""
    cmd = push_arguments(iptables_path, '-A', params)
    module.run_command(cmd, check_rc=True)
def insert_rule(iptables_path, module, params):
    """Insert the rule at the top of the chain (``iptables -I``)."""
    cmd = push_arguments(iptables_path, '-I', params)
    module.run_command(cmd, check_rc=True)
def remove_rule(iptables_path, module, params):
    """Delete the matching rule from the chain (``iptables -D``)."""
    cmd = push_arguments(iptables_path, '-D', params)
    module.run_command(cmd, check_rc=True)
def flush_table(iptables_path, module, params):
    """Flush the chain, or the whole table when no chain (``iptables -F``).

    NOTE(review): with no chain, params['chain'] is None and ends up in
    cmd — verify module.run_command tolerates/filters None arguments.
    """
    cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
    module.run_command(cmd, check_rc=True)
def set_chain_policy(iptables_path, module, params):
    """Set a built-in chain's default policy (``iptables -P POLICY``)."""
    cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
    cmd.append(params['policy'])
    module.run_command(cmd, check_rc=True)
def main():
    """Ansible entry point.

    Handles three mutually exclusive operations: flush a chain/table,
    set a built-in chain policy, or converge a single rule to the
    requested present/absent state (append or insert).
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            table=dict(
                required=False,
                default='filter',
                choices=['filter', 'nat', 'mangle', 'raw', 'security']),
            state=dict(
                required=False,
                default='present',
                choices=['present', 'absent']),
            action=dict(
                required=False,
                default='append',
                type='str',
                choices=['append', 'insert']),
            ip_version=dict(
                required=False,
                default='ipv4',
                choices=['ipv4', 'ipv6']),
            chain=dict(required=False, default=None, type='str'),
            protocol=dict(required=False, default=None, type='str'),
            source=dict(required=False, default=None, type='str'),
            to_source=dict(required=False, default=None, type='str'),
            destination=dict(required=False, default=None, type='str'),
            to_destination=dict(required=False, default=None, type='str'),
            match=dict(required=False, default=[], type='list'),
            jump=dict(required=False, default=None, type='str'),
            goto=dict(required=False, default=None, type='str'),
            in_interface=dict(required=False, default=None, type='str'),
            out_interface=dict(required=False, default=None, type='str'),
            fragment=dict(required=False, default=None, type='str'),
            set_counters=dict(required=False, default=None, type='str'),
            source_port=dict(required=False, default=None, type='str'),
            destination_port=dict(required=False, default=None, type='str'),
            to_ports=dict(required=False, default=None, type='str'),
            set_dscp_mark=dict(required=False, default=None, type='str'),
            set_dscp_mark_class=dict(required=False, default=None, type='str'),
            comment=dict(required=False, default=None, type='str'),
            ctstate=dict(required=False, default=[], type='list'),
            limit=dict(required=False, default=None, type='str'),
            limit_burst=dict(required=False, default=None, type='str'),
            uid_owner=dict(required=False, default=None, type='str'),
            reject_with=dict(required=False, default=None, type='str'),
            icmp_type=dict(required=False, default=None, type='str'),
            flush=dict(required=False, default=False, type='bool'),
            policy=dict(
                required=False,
                default=None,
                type='str',
                choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
        ),
        mutually_exclusive=(
            ['set_dscp_mark', 'set_dscp_mark_class'],
            ['flush', 'policy'],
        ),
    )
    # Result payload returned on every exit path.
    args = dict(
        changed=False,
        failed=False,
        ip_version=module.params['ip_version'],
        table=module.params['table'],
        chain=module.params['chain'],
        flush=module.params['flush'],
        rule=' '.join(construct_rule(module.params)),
        state=module.params['state'],
    )
    ip_version = module.params['ip_version']
    iptables_path = module.get_bin_path(BINS[ip_version], True)
    # Rule/policy operations need a chain; only flush may target a whole table.
    if args['flush'] is False and args['chain'] is None:
        module.fail_json(
            msg="Either chain or flush parameter must be specified.")
    # Flush the chain/table. Flushing always mutates the ruleset, so it is
    # reported as a change and skipped in check mode (the original code
    # always reported changed=False and ran even under --check).
    if args['flush'] is True:
        args['changed'] = True
        if not module.check_mode:
            flush_table(iptables_path, module, module.params)
        module.exit_json(**args)
    # Set the chain policy. No idempotence probe is available here, so the
    # policy is (re)applied and reported as a change; honored in check mode.
    if module.params['policy']:
        args['changed'] = True
        if not module.check_mode:
            set_chain_policy(iptables_path, module, module.params)
        module.exit_json(**args)
    insert = (module.params['action'] == 'insert')
    rule_is_present = check_present(iptables_path, module, module.params)
    should_be_present = (args['state'] == 'present')
    # Check if target is up to date
    args['changed'] = (rule_is_present != should_be_present)
    # Check only; don't modify
    if module.check_mode:
        module.exit_json(changed=args['changed'])
    # Target is already up to date
    if args['changed'] is False:
        module.exit_json(**args)
    if should_be_present:
        if insert:
            insert_rule(iptables_path, module, module.params)
        else:
            append_rule(iptables_path, module, module.params)
    else:
        remove_rule(iptables_path, module, module.params)
    module.exit_json(**args)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
import unittest
import sys
from test import test_support
class G:
    """Sequence exposing only __getitem__ (classic iteration protocol)."""
    def __init__(self, seqn):
        self.seqn = seqn
    def __getitem__(self, i):
        return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
    """Iterable whose __iter__ is a generator over the wrapped sequence."""
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def __iter__(self):
        for item in self.seqn:
            yield item
class X:
    """Has a next() method but neither __getitem__ nor __iter__, so the
    object itself is not iterable."""
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def next(self):
        if self.i >= len(self.seqn):
            raise StopIteration
        value = self.seqn[self.i]
        self.i += 1
        return value
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class EnumerateTestCase(unittest.TestCase):
    """Core behavioural tests for enumerate().

    Python-2-specific: enumerate objects expose a .next method, and the
    tuple-reuse test below depends on CPython's exact refcount behaviour
    for these expression forms — keep the code as written.
    """
    # The callable under test; subclasses override to test subclasses.
    enum = enumerate
    # Canonical input and its expected enumeration.
    seq, res = 'abc', [(0,'a'), (1,'b'), (2,'c')]
    def test_basicfunction(self):
        # enumerate() is its own iterator type.
        self.assertEqual(type(self.enum(self.seq)), self.enum)
        e = self.enum(self.seq)
        self.assertEqual(iter(e), e)
        self.assertEqual(list(self.enum(self.seq)), self.res)
        # Accessing __doc__ must not raise.
        self.enum.__doc__
    def test_getitemseqn(self):
        # Works with classic __getitem__-based sequences.
        self.assertEqual(list(self.enum(G(self.seq))), self.res)
        e = self.enum(G(''))
        self.assertRaises(StopIteration, e.next)
    def test_iteratorseqn(self):
        self.assertEqual(list(self.enum(I(self.seq))), self.res)
        e = self.enum(I(''))
        self.assertRaises(StopIteration, e.next)
    def test_iteratorgenerator(self):
        self.assertEqual(list(self.enum(Ig(self.seq))), self.res)
        e = self.enum(Ig(''))
        self.assertRaises(StopIteration, e.next)
    def test_noniterable(self):
        self.assertRaises(TypeError, self.enum, X(self.seq))
    def test_illformediterable(self):
        self.assertRaises(TypeError, list, self.enum(N(self.seq)))
    def test_exception_propagation(self):
        self.assertRaises(ZeroDivisionError, list, self.enum(E(self.seq)))
    def test_argumentcheck(self):
        self.assertRaises(TypeError, self.enum) # no arguments
        self.assertRaises(TypeError, self.enum, 1) # wrong type (not iterable)
        self.assertRaises(TypeError, self.enum, 'abc', 'a') # wrong type
        self.assertRaises(TypeError, self.enum, 'abc', 2, 3) # too many arguments
    def test_tuple_reuse(self):
        # Tests an implementation detail where tuple is reused
        # whenever nothing else holds a reference to it
        self.assertEqual(len(set(map(id, list(enumerate(self.seq))))), len(self.seq))
        self.assertEqual(len(set(map(id, enumerate(self.seq)))), min(1,len(self.seq)))
class MyEnum(enumerate):
pass
class SubclassTestCase(EnumerateTestCase):
enum = MyEnum
class TestEmpty(EnumerateTestCase):
seq, res = '', []
class TestBig(EnumerateTestCase):
seq = range(10,20000,2)
res = zip(range(20000), seq)
class TestReversed(unittest.TestCase):
def test_simple(self):
class A:
def __getitem__(self, i):
if i < 5:
return str(i)
raise StopIteration
def __len__(self):
return 5
for data in 'abc', range(5), tuple(enumerate('abc')), A(), xrange(1,17,5):
self.assertEqual(list(data)[::-1], list(reversed(data)))
self.assertRaises(TypeError, reversed, {})
# don't allow keyword arguments
self.assertRaises(TypeError, reversed, [], a=1)
def test_xrange_optimization(self):
x = xrange(1)
self.assertEqual(type(reversed(x)), type(iter(x)))
def test_len(self):
# This is an implementation detail, not an interface requirement
from test.test_iterlen import len
for s in ('hello', tuple('hello'), list('hello'), xrange(5)):
self.assertEqual(len(reversed(s)), len(s))
r = reversed(s)
list(r)
self.assertEqual(len(r), 0)
class SeqWithWeirdLen:
called = False
def __len__(self):
if not self.called:
self.called = True
return 10
raise ZeroDivisionError
def __getitem__(self, index):
return index
r = reversed(SeqWithWeirdLen())
self.assertRaises(ZeroDivisionError, len, r)
def test_gc(self):
class Seq:
def __len__(self):
return 10
def __getitem__(self, index):
return index
s = Seq()
r = reversed(s)
s.r = r
def test_args(self):
self.assertRaises(TypeError, reversed)
self.assertRaises(TypeError, reversed, [], 'extra')
def test_bug1229429(self):
# this bug was never in reversed, it was in
# PyObject_CallMethod, and reversed_new calls that sometimes.
if not hasattr(sys, "getrefcount"):
return
def f():
pass
r = f.__reversed__ = object()
rc = sys.getrefcount(r)
for i in range(10):
try:
reversed(f)
except TypeError:
pass
else:
self.fail("non-callable __reversed__ didn't raise!")
self.assertEqual(rc, sys.getrefcount(r))
class TestStart(EnumerateTestCase):
enum = lambda i: enumerate(i, start=11)
seq, res = 'abc', [(1, 'a'), (2, 'b'), (3, 'c')]
class TestLongStart(EnumerateTestCase):
enum = lambda i: enumerate(i, start=sys.maxint+1)
seq, res = 'abc', [(sys.maxint+1,'a'), (sys.maxint+2,'b'),
(sys.maxint+3,'c')]
def test_main(verbose=None):
testclasses = (EnumerateTestCase, SubclassTestCase, TestEmpty, TestBig,
TestReversed)
test_support.run_unittest(*testclasses)
# verify reference counting
import sys
if verbose and hasattr(sys, "gettotalrefcount"):
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*testclasses)
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True) | unknown | codeparrot/codeparrot-clean | ||
"""Implement the auth feature from Hass.io for Add-ons."""
from ipaddress import ip_address
import logging
import os
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound, HTTPUnauthorized
import voluptuous as vol
from homeassistant.auth.models import User
from homeassistant.auth.providers import homeassistant as auth_ha
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.const import KEY_HASS_USER
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import HTTP_OK
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import ATTR_ADDON, ATTR_PASSWORD, ATTR_USERNAME
_LOGGER = logging.getLogger(__name__)
@callback
def async_setup_auth_view(hass: HomeAssistantType, user: User):
"""Auth setup."""
hassio_auth = HassIOAuth(hass, user)
hassio_password_reset = HassIOPasswordReset(hass, user)
hass.http.register_view(hassio_auth)
hass.http.register_view(hassio_password_reset)
class HassIOBaseAuth(HomeAssistantView):
"""Hass.io view to handle auth requests."""
def __init__(self, hass: HomeAssistantType, user: User):
"""Initialize WebView."""
self.hass = hass
self.user = user
def _check_access(self, request: web.Request):
"""Check if this call is from Supervisor."""
# Check caller IP
hassio_ip = os.environ["HASSIO"].split(":")[0]
if ip_address(request.transport.get_extra_info("peername")[0]) != ip_address(
hassio_ip
):
_LOGGER.error("Invalid auth request from %s", request.remote)
raise HTTPUnauthorized()
# Check caller token
if request[KEY_HASS_USER].id != self.user.id:
_LOGGER.error("Invalid auth request from %s", request[KEY_HASS_USER].name)
raise HTTPUnauthorized()
class HassIOAuth(HassIOBaseAuth):
"""Hass.io view to handle auth requests."""
name = "api:hassio:auth"
url = "/api/hassio_auth"
@RequestDataValidator(
vol.Schema(
{
vol.Required(ATTR_USERNAME): cv.string,
vol.Required(ATTR_PASSWORD): cv.string,
vol.Required(ATTR_ADDON): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
)
async def post(self, request, data):
"""Handle auth requests."""
self._check_access(request)
provider = auth_ha.async_get_provider(request.app["hass"])
try:
await provider.async_validate_login(
data[ATTR_USERNAME], data[ATTR_PASSWORD]
)
except auth_ha.InvalidAuth:
raise HTTPNotFound() from None
return web.Response(status=HTTP_OK)
class HassIOPasswordReset(HassIOBaseAuth):
"""Hass.io view to handle password reset requests."""
name = "api:hassio:auth:password:reset"
url = "/api/hassio_auth/password_reset"
@RequestDataValidator(
vol.Schema(
{
vol.Required(ATTR_USERNAME): cv.string,
vol.Required(ATTR_PASSWORD): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
)
async def post(self, request, data):
"""Handle password reset requests."""
self._check_access(request)
provider = auth_ha.async_get_provider(request.app["hass"])
try:
await provider.async_change_password(
data[ATTR_USERNAME], data[ATTR_PASSWORD]
)
except auth_ha.InvalidUser as err:
raise HTTPNotFound() from err
return web.Response(status=HTTP_OK) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from openfisca_core.tests import dummy_country
from openfisca_core.variables import Variable
from openfisca_core.columns import IntCol
from openfisca_core.tests.dummy_country import Individus
class input(Variable):
column = IntCol
entity_class = Individus
label = u"Input variable"
class intermediate(Variable):
column = IntCol
entity_class = Individus
label = u"Intermediate result that don't need to be cached"
def function(self, simulation, period):
return period, simulation.calculate('input', period)
class output(Variable):
column = IntCol
entity_class = Individus
label = u'Output variable'
def function(self, simulation, period):
return period, simulation.calculate('intermediate', period)
def get_filled_tbs():
tax_benefit_system = dummy_country.DummyTaxBenefitSystem()
tax_benefit_system.add_variables(input, intermediate, output)
return tax_benefit_system
# TaxBenefitSystem instance declared after formulas
tax_benefit_system = get_filled_tbs()
tax_benefit_system.cache_blacklist = set(['intermediate'])
scenario = tax_benefit_system.new_scenario().init_from_attributes(
period = 2016,
input_variables = {
'input': 1,
},
)
def test_without_cache_opt_out():
simulation = scenario.new_simulation(debug = True)
simulation.calculate('output')
intermediate_cache = simulation.get_or_new_holder('intermediate')
assert(len(intermediate_cache._array_by_period) > 0)
def test_with_cache_opt_out():
simulation = scenario.new_simulation(debug = True, opt_out_cache = True)
simulation.calculate('output')
intermediate_cache = simulation.get_or_new_holder('intermediate')
assert(intermediate_cache._array_by_period is None)
tax_benefit_system2 = get_filled_tbs()
scenario2 = tax_benefit_system2.new_scenario().init_from_attributes(
period = 2016,
input_variables = {
'input': 1,
},
)
def test_with_no_blacklist():
simulation = scenario2.new_simulation(debug = True, opt_out_cache = True)
simulation.calculate('output')
intermediate_cache = simulation.get_or_new_holder('intermediate')
assert(len(intermediate_cache._array_by_period) > 0) | unknown | codeparrot/codeparrot-clean | ||
#!/bin/env python3
#
# Copyright (c) 2013 Mobilinkd LLC. All Rights Reserved.
# Released under the Apache License 2.0.
from IntelHexRecord import IntelHexRecord
from Avr109 import Avr109
import time
import sys
import os
import traceback
import binascii
class FirmwareSegment(object):
def __init__(self, memory_type, address, data):
self.memory_type = memory_type
self.address = address
self.data = data
def __len__(self):
return len(self.data)
def __repr__(self):
return "%s: type(%s), len(%d)" % \
(self.__class__.__name__, self.memory_type, len(self.data))
class Firmware(object):
def __init__(self, filename):
self.filename = filename
self.segments = []
self.load()
def load(self):
address = 0
data = []
for line in open(self.filename):
record = IntelHexRecord(line.strip())
if record.recordType == 1:
break
if record.address != address:
if len(data) > 0:
segment = FirmwareSegment('F', address - len(data), data)
self.segments.append(segment)
data = []
address = record.address
data += record.data
address += record.byteCount
segment = FirmwareSegment('F', address - len(data), data)
self.segments.append(segment)
def __len__(self):
size = 0
for segment in self.segments:
size += len(segment)
return size
def __iter__(self):
for segment in self.segments:
yield segment
class BootLoader(object):
def __init__(self, reader, writer, filename, gui = None):
self.avr109 = None
self.reader = reader
self.firmware = Firmware(filename)
self.gui = gui
self.avr109 = Avr109(reader, writer)
self.initialize()
self.block = []
self.address = 0
if self.gui is not None:
self.gui.firmware_set_steps(len(self.firmware) / self.block_size)
def __del__(self):
if self.avr109 is not None:
self.exit()
def initialize(self):
self.loader = self.avr109.get_bootloader_signature()
self.programmer_type = self.avr109.get_programmer_type()
self.sw_version = self.avr109.get_software_version()
self.auto_increment = self.avr109.supports_auto_increment()
self.block_size = self.avr109.get_block_size()
self.device_list = self.avr109.get_device_list()
self.signature = self.avr109.get_device_signature()
# print " Found programmer: Id = '%s'; type = '%s'" % (self.loader, self.programmer_type)
# print "Programmer Version: %s" % self.sw_version
# print "Has auto-increment: %s" % (str(self.auto_increment))
# print " Has block-mode: %s (size = %d)" % (str(self.block_size > 0), self.block_size)
# print " Device Signature: %02x %02x %02x" % (ord(self.signature[0]),ord(self.signature[1]),ord(self.signature[2]))
if self.signature != bytes(b'\x0f\x95\x1e') and self.signature != bytes(b'\x16\x95\x1e'):
self.avr109.exit_bootloader()
raise ValueError("Bad device signature. Not an AVR ATmega 328P. {}".format(self.signature))
if not self.auto_increment:
self.avr109.exit_bootloader()
raise ValueError("Bootloader does not support auto-increment")
if self.block_size != 128:
self.avr109.exit_bootloader()
raise ValueError("Unexpected block size")
def chip_erase(self):
self.avr109.enter_program_mode()
self.avr109.chip_erase()
self.avr109.leave_program_mode()
def set_address(self, address):
# print("Setting address %x" % address)
self.avr109.send_address(address)
def load(self):
if self.gui is not None:
self.gui.firmware_writing()
try:
self.avr109.enter_program_mode()
for segment in self.firmware:
self.set_address(segment.address)
pos = 0
size = len(segment)
while pos < size:
tmp = segment.data[pos:pos + self.block_size]
# print("sending %04x" % (pos + segment.address))
self.avr109.send_block(b'F', tmp)
pos += self.block_size
if self.gui is not None:
self.gui.firmware_pulse()
except Exception as e:
traceback.print_exc()
# app.exception(e)
self.avr109.chip_erase()
finally:
self.avr109.leave_program_mode()
def verify(self):
if self.gui is not None:
self.gui.firmware_verifying()
try:
for segment in self.firmware:
self.set_address(segment.address)
pos = 0
size = len(segment)
while pos < size:
tmp = bytearray(segment.data[pos:pos + self.block_size])
# print("reading %04x" % (pos + segment.address))
block = self.avr109.read_block(b'F', len(tmp))
if tmp != block:
print(binascii.hexlify(tmp))
print(binascii.hexlify(block))
raise IOError(
"verify failed at %04X" % (pos + segment.address))
pos += self.block_size
if self.gui is not None:
self.gui.firmware_pulse()
except Exception as e:
traceback.print_exc()
# app.exception(e)
self.chip_erase()
return False
return True
def exit(self):
self.avr109.exit_bootloader()
self.avr109 = None
if __name__ == '__main__':
import sys, os
import serial
if len(sys.argv) < 3:
print("Usage: %s <device> <intel hex image file>")
sys.exit(1)
device = sys.argv[1]
if not os.path.exists(device):
print("%s does not exist")
sys.exit(1)
filename = sys.argv[2]
if not os.path.exists(filename):
print("%s does not exist")
sys.exit(1)
ser = serial.Serial(device, 115200, timeout=.1)
loader = BootLoader(ser, ser, filename)
loader.load()
verified = loader.verify()
if not verified:
loader.chip_erase()
pass | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.data;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.data.Schema.Type;
import org.apache.kafka.connect.data.Values.Parser;
import org.apache.kafka.connect.errors.DataException;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
public class ValuesTest {
    // Whitespace padding used to verify that parsing trims surrounding whitespace.
    private static final String WHITESPACE = "\n \t \t\n";
    // Milliseconds in one day; used by date/time conversion tests elsewhere in this class.
    private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;
    // Shared fixtures: a string->string map and its Connect schema.
    private static final Map<String, String> STRING_MAP = new LinkedHashMap<>();
    private static final Schema STRING_MAP_SCHEMA = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA).schema();
    // Shared fixtures: a string->int16 map and its Connect schema.
    private static final Map<String, Short> STRING_SHORT_MAP = new LinkedHashMap<>();
    private static final Schema STRING_SHORT_MAP_SCHEMA = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT16_SCHEMA).schema();
    // Shared fixtures: a string->int32 map and its Connect schema.
    private static final Map<String, Integer> STRING_INT_MAP = new LinkedHashMap<>();
    private static final Schema STRING_INT_MAP_SCHEMA = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA).schema();
    // Shared fixtures: int32 and string lists with their array schemas.
    private static final List<Integer> INT_LIST = new ArrayList<>();
    private static final Schema INT_LIST_SCHEMA = SchemaBuilder.array(Schema.INT32_SCHEMA).schema();
    private static final List<String> STRING_LIST = new ArrayList<>();
    private static final Schema STRING_LIST_SCHEMA = SchemaBuilder.array(Schema.STRING_SCHEMA).schema();
    // Populate the shared map/list fixtures once; LinkedHashMap preserves insertion order
    // so string renderings of these maps are deterministic across test runs.
    static {
        STRING_MAP.put("foo", "123");
        STRING_MAP.put("bar", "baz");
        STRING_SHORT_MAP.put("foo", (short) 12345);
        STRING_SHORT_MAP.put("bar", (short) 0);
        STRING_SHORT_MAP.put("baz", (short) -4321);
        STRING_INT_MAP.put("foo", 1234567890);
        STRING_INT_MAP.put("bar", 0);
        STRING_INT_MAP.put("baz", -987654321);
        STRING_LIST.add("foo");
        STRING_LIST.add("bar");
        INT_LIST.add(1234567890);
        INT_LIST.add(-987654321);
    }
@Test
public void shouldParseNullString() {
SchemaAndValue schemaAndValue = Values.parseString(null);
assertNull(schemaAndValue.schema());
assertNull(schemaAndValue.value());
}
@Test
public void shouldParseEmptyString() {
SchemaAndValue schemaAndValue = Values.parseString("");
assertEquals(Schema.STRING_SCHEMA, schemaAndValue.schema());
assertEquals("", schemaAndValue.value());
}
@Test
@Timeout(5)
public void shouldNotEncounterInfiniteLoop() {
// This byte sequence gets parsed as CharacterIterator.DONE and can cause issues if
// comparisons to that character are done to check if the end of a string has been reached.
// For more information, see https://issues.apache.org/jira/browse/KAFKA-10574
byte[] bytes = new byte[] {-17, -65, -65};
String str = new String(bytes, StandardCharsets.UTF_8);
SchemaAndValue schemaAndValue = Values.parseString(str);
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals(str, schemaAndValue.value());
}
@Test
public void shouldNotParseUnquotedEmbeddedMapKeysAsStrings() {
SchemaAndValue schemaAndValue = Values.parseString("{foo: 3}");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("{foo: 3}", schemaAndValue.value());
}
@Test
public void shouldNotParseUnquotedEmbeddedMapValuesAsStrings() {
SchemaAndValue schemaAndValue = Values.parseString("{3: foo}");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("{3: foo}", schemaAndValue.value());
}
@Test
public void shouldNotParseUnquotedArrayElementsAsStrings() {
SchemaAndValue schemaAndValue = Values.parseString("[foo]");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("[foo]", schemaAndValue.value());
}
@Test
public void shouldNotParseStringsBeginningWithNullAsStrings() {
SchemaAndValue schemaAndValue = Values.parseString("null=");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("null=", schemaAndValue.value());
}
@Test
public void shouldParseStringsBeginningWithTrueAsStrings() {
SchemaAndValue schemaAndValue = Values.parseString("true}");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("true}", schemaAndValue.value());
}
@Test
public void shouldParseStringsBeginningWithFalseAsStrings() {
SchemaAndValue schemaAndValue = Values.parseString("false]");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("false]", schemaAndValue.value());
}
@Test
public void shouldParseTrueAsBooleanIfSurroundedByWhitespace() {
SchemaAndValue schemaAndValue = Values.parseString(WHITESPACE + "true" + WHITESPACE);
assertEquals(Type.BOOLEAN, schemaAndValue.schema().type());
assertEquals(true, schemaAndValue.value());
}
@Test
public void shouldParseFalseAsBooleanIfSurroundedByWhitespace() {
SchemaAndValue schemaAndValue = Values.parseString(WHITESPACE + "false" + WHITESPACE);
assertEquals(Type.BOOLEAN, schemaAndValue.schema().type());
assertEquals(false, schemaAndValue.value());
}
@Test
public void shouldParseNullAsNullIfSurroundedByWhitespace() {
SchemaAndValue schemaAndValue = Values.parseString(WHITESPACE + "null" + WHITESPACE);
assertNull(schemaAndValue);
}
@Test
public void shouldParseBooleanLiteralsEmbeddedInArray() {
SchemaAndValue schemaAndValue = Values.parseString("[true, false]");
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
assertEquals(Type.BOOLEAN, schemaAndValue.schema().valueSchema().type());
assertEquals(List.of(true, false), schemaAndValue.value());
}
@Test
public void shouldParseBooleanLiteralsEmbeddedInMap() {
SchemaAndValue schemaAndValue = Values.parseString("{true: false, false: true}");
assertEquals(Type.MAP, schemaAndValue.schema().type());
assertEquals(Type.BOOLEAN, schemaAndValue.schema().keySchema().type());
assertEquals(Type.BOOLEAN, schemaAndValue.schema().valueSchema().type());
Map<Boolean, Boolean> expectedValue = new HashMap<>();
expectedValue.put(true, false);
expectedValue.put(false, true);
assertEquals(expectedValue, schemaAndValue.value());
}
@Test
public void shouldNotParseAsMapWithoutCommas() {
SchemaAndValue schemaAndValue = Values.parseString("{6:9 4:20}");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("{6:9 4:20}", schemaAndValue.value());
}
@Test
public void shouldNotParseAsArrayWithoutCommas() {
SchemaAndValue schemaAndValue = Values.parseString("[0 1 2]");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("[0 1 2]", schemaAndValue.value());
}
@Test
public void shouldParseEmptyMap() {
SchemaAndValue schemaAndValue = Values.parseString("{}");
assertEquals(Type.MAP, schemaAndValue.schema().type());
assertEquals(Map.of(), schemaAndValue.value());
}
@Test
public void shouldParseEmptyArray() {
SchemaAndValue schemaAndValue = Values.parseString("[]");
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
assertEquals(List.of(), schemaAndValue.value());
}
@Test
public void shouldNotParseAsMapWithNullKeys() {
SchemaAndValue schemaAndValue = Values.parseString("{null: 3}");
assertEquals(Type.STRING, schemaAndValue.schema().type());
assertEquals("{null: 3}", schemaAndValue.value());
}
@Test
public void shouldParseNull() {
SchemaAndValue schemaAndValue = Values.parseString("null");
assertNull(schemaAndValue);
}
    /** The literal text "null" should round-trip unchanged when targeting a STRING schema. */
    @Test
    public void shouldConvertStringOfNull() {
        assertRoundTrip(Schema.STRING_SCHEMA, "null");
    }
@Test
public void shouldParseNullMapValues() {
SchemaAndValue schemaAndValue = Values.parseString("{3: null}");
assertEquals(Type.MAP, schemaAndValue.schema().type());
assertEquals(Type.INT8, schemaAndValue.schema().keySchema().type());
assertEquals(Collections.singletonMap((byte) 3, null), schemaAndValue.value());
}
@Test
public void shouldParseNullArrayElements() {
SchemaAndValue schemaAndValue = Values.parseString("[null]");
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
assertEquals(Collections.singletonList(null), schemaAndValue.value());
}
@Test
public void shouldEscapeStringsWithEmbeddedQuotesAndBackslashes() {
String original = "three\"blind\\\"mice";
String expected = "three\\\"blind\\\\\\\"mice";
assertEquals(expected, Values.escape(original));
}
@Test
public void shouldConvertNullValue() {
assertRoundTrip(Schema.INT8_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.OPTIONAL_INT8_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.INT16_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.OPTIONAL_INT16_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.OPTIONAL_INT32_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.INT64_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.OPTIONAL_INT64_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.FLOAT32_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.OPTIONAL_FLOAT32_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.FLOAT64_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.OPTIONAL_FLOAT64_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.BOOLEAN_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.OPTIONAL_BOOLEAN_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA, null);
assertRoundTrip(Schema.OPTIONAL_STRING_SCHEMA, Schema.STRING_SCHEMA, null);
}
@Test
public void shouldConvertBooleanValues() {
assertRoundTrip(Schema.BOOLEAN_SCHEMA, Schema.BOOLEAN_SCHEMA, Boolean.FALSE);
assertShortCircuit(Schema.BOOLEAN_SCHEMA, Boolean.FALSE);
SchemaAndValue resultFalse = roundTrip(Schema.BOOLEAN_SCHEMA, "false");
assertEquals(Schema.BOOLEAN_SCHEMA, resultFalse.schema());
assertEquals(Boolean.FALSE, resultFalse.value());
resultFalse = roundTrip(Schema.BOOLEAN_SCHEMA, "0");
assertEquals(Schema.BOOLEAN_SCHEMA, resultFalse.schema());
assertEquals(Boolean.FALSE, resultFalse.value());
assertRoundTrip(Schema.BOOLEAN_SCHEMA, Schema.BOOLEAN_SCHEMA, Boolean.TRUE);
assertShortCircuit(Schema.BOOLEAN_SCHEMA, Boolean.TRUE);
SchemaAndValue resultTrue = roundTrip(Schema.BOOLEAN_SCHEMA, "true");
assertEquals(Schema.BOOLEAN_SCHEMA, resultTrue.schema());
assertEquals(Boolean.TRUE, resultTrue.value());
resultTrue = roundTrip(Schema.BOOLEAN_SCHEMA, "1");
assertEquals(Schema.BOOLEAN_SCHEMA, resultTrue.schema());
assertEquals(Boolean.TRUE, resultTrue.value());
}
    /** A quoted non-boolean token must be rejected when converting to a boolean. */
    @Test
    public void shouldFailToParseInvalidBooleanValueString() {
        assertThrows(DataException.class, () -> Values.convertToBoolean(Schema.STRING_SCHEMA, "\"green\""));
    }
@Test
public void shouldConvertInt8() {
assertRoundTrip(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA, (byte) 0);
assertRoundTrip(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA, (byte) 1);
}
@Test
public void shouldConvertInt64() {
assertRoundTrip(Schema.INT64_SCHEMA, Schema.INT64_SCHEMA, (long) 1);
assertShortCircuit(Schema.INT64_SCHEMA, (long) 1);
}
@Test
public void shouldConvertFloat32() {
assertRoundTrip(Schema.FLOAT32_SCHEMA, Schema.FLOAT32_SCHEMA, (float) 1);
assertShortCircuit(Schema.FLOAT32_SCHEMA, (float) 1);
}
@Test
public void shouldConvertFloat64() {
assertRoundTrip(Schema.FLOAT64_SCHEMA, Schema.FLOAT64_SCHEMA, (double) 1);
assertShortCircuit(Schema.FLOAT64_SCHEMA, (double) 1);
}
@Test
public void shouldConvertEmptyStruct() {
Struct struct = new Struct(SchemaBuilder.struct().build());
assertThrows(DataException.class, () -> Values.convertToStruct(struct.schema(), null));
assertThrows(DataException.class, () -> Values.convertToStruct(struct.schema(), ""));
Values.convertToStruct(struct.schema(), struct);
}
    /** A simple word should round-trip through a STRING schema unchanged. */
    @Test
    public void shouldConvertSimpleString() {
        assertRoundTrip(Schema.STRING_SCHEMA, "simple");
    }
    /** An empty string should round-trip through a STRING schema unchanged. */
    @Test
    public void shouldConvertEmptyString() {
        assertRoundTrip(Schema.STRING_SCHEMA, "");
    }
@Test
public void shouldConvertStringWithQuotesAndOtherDelimiterCharacters() {
assertRoundTrip(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA, "three\"blind\\\"mice");
assertRoundTrip(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA, "string with delimiters: <>?,./\\=+-!@#$%^&*(){}[]|;':");
}
    /** The shared string->string map fixture should round-trip with its map schema. */
    @Test
    public void shouldConvertMapWithStringKeys() {
        assertRoundTrip(STRING_MAP_SCHEMA, STRING_MAP_SCHEMA, STRING_MAP);
    }
@Test
public void shouldParseStringOfMapWithStringValuesWithoutWhitespaceAsMap() {
SchemaAndValue result = roundTrip(STRING_MAP_SCHEMA, "{\"foo\":\"123\",\"bar\":\"baz\"}");
assertEquals(STRING_MAP_SCHEMA, result.schema());
assertEquals(STRING_MAP, result.value());
}
@Test
public void shouldParseStringOfMapWithStringValuesWithWhitespaceAsMap() {
SchemaAndValue result = roundTrip(STRING_MAP_SCHEMA, "{ \"foo\" : \"123\", \n\"bar\" : \"baz\" } ");
assertEquals(STRING_MAP_SCHEMA, result.schema());
assertEquals(STRING_MAP, result.value());
}
    /** The shared string->int16 map fixture should round-trip with its map schema. */
    @Test
    public void shouldConvertMapWithStringKeysAndShortValues() {
        assertRoundTrip(STRING_SHORT_MAP_SCHEMA, STRING_SHORT_MAP_SCHEMA, STRING_SHORT_MAP);
    }
@Test
public void shouldParseStringOfMapWithShortValuesWithoutWhitespaceAsMap() {
SchemaAndValue result = roundTrip(STRING_SHORT_MAP_SCHEMA, "{\"foo\":12345,\"bar\":0,\"baz\":-4321}");
assertEquals(STRING_SHORT_MAP_SCHEMA, result.schema());
assertEquals(STRING_SHORT_MAP, result.value());
}
@Test
public void shouldParseStringOfMapWithShortValuesWithWhitespaceAsMap() {
SchemaAndValue result = roundTrip(STRING_SHORT_MAP_SCHEMA, " { \"foo\" : 12345 , \"bar\" : 0, \"baz\" : -4321 } ");
assertEquals(STRING_SHORT_MAP_SCHEMA, result.schema());
assertEquals(STRING_SHORT_MAP, result.value());
}
    /** The shared string->int32 map fixture should round-trip with its map schema. */
    @Test
    public void shouldConvertMapWithStringKeysAndIntegerValues() {
        assertRoundTrip(STRING_INT_MAP_SCHEMA, STRING_INT_MAP_SCHEMA, STRING_INT_MAP);
    }
@Test
public void shouldParseStringOfMapWithIntValuesWithoutWhitespaceAsMap() {
SchemaAndValue result = roundTrip(STRING_INT_MAP_SCHEMA, "{\"foo\":1234567890,\"bar\":0,\"baz\":-987654321}");
assertEquals(STRING_INT_MAP_SCHEMA, result.schema());
assertEquals(STRING_INT_MAP, result.value());
}
@Test
public void shouldParseStringOfMapWithIntValuesWithWhitespaceAsMap() {
SchemaAndValue result = roundTrip(STRING_INT_MAP_SCHEMA, " { \"foo\" : 1234567890 , \"bar\" : 0, \"baz\" : -987654321 } ");
assertEquals(STRING_INT_MAP_SCHEMA, result.schema());
assertEquals(STRING_INT_MAP, result.value());
}
    /** The shared string list fixture should round-trip with its array schema. */
    @Test
    public void shouldConvertListWithStringValues() {
        assertRoundTrip(STRING_LIST_SCHEMA, STRING_LIST_SCHEMA, STRING_LIST);
    }
    /** The shared integer list fixture should round-trip with its array schema. */
    @Test
    public void shouldConvertListWithIntegerValues() {
        assertRoundTrip(INT_LIST_SCHEMA, INT_LIST_SCHEMA, INT_LIST);
    }
/**
* The parsed array has byte values and one int value, so we should return list with single unified type of integers.
*/
@Test
public void shouldConvertStringOfListWithOnlyNumericElementTypesIntoListOfLargestNumericType() {
int thirdValue = Short.MAX_VALUE + 1;
List<?> list = Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, " + thirdValue + "]");
assertEquals(3, list.size());
assertEquals(1, ((Number) list.get(0)).intValue());
assertEquals(2, ((Number) list.get(1)).intValue());
assertEquals(thirdValue, list.get(2));
}
@Test
public void shouldConvertIntegralTypesToFloat() {
float thirdValue = Float.MAX_VALUE;
List<?> list = Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, " + thirdValue + "]");
assertEquals(3, list.size());
assertEquals(1, ((Number) list.get(0)).intValue());
assertEquals(2, ((Number) list.get(1)).intValue());
assertEquals(thirdValue, list.get(2));
}
@Test
public void shouldConvertIntegralTypesToDouble() {
double thirdValue = Double.MAX_VALUE;
List<?> list = Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, " + thirdValue + "]");
assertEquals(3, list.size());
assertEquals(1, ((Number) list.get(0)).intValue());
assertEquals(2, ((Number) list.get(1)).intValue());
assertEquals(thirdValue, list.get(2));
}
/**
* We parse into different element types, but cannot infer a common element schema.
* This behavior should be independent of the order that the elements appear in the string
*/
@Test
public void shouldParseStringListWithMultipleElementTypes() {
assertParseStringArrayWithNoSchema(
List.of((byte) 1, (byte) 2, (short) 300, "four"),
"[1, 2, 300, \"four\"]");
assertParseStringArrayWithNoSchema(
List.of((byte) 2, (short) 300, "four", (byte) 1),
"[2, 300, \"four\", 1]");
assertParseStringArrayWithNoSchema(
List.of((short) 300, "four", (byte) 1, (byte) 2),
"[300, \"four\", 1, 2]");
assertParseStringArrayWithNoSchema(
List.of("four", (byte) 1, (byte) 2, (short) 300),
"[\"four\", 1, 2, 300]");
}
private void assertParseStringArrayWithNoSchema(List<Object> expected, String str) {
SchemaAndValue result = Values.parseString(str);
assertEquals(Type.ARRAY, result.schema().type());
assertNull(result.schema().valueSchema());
List<?> list = (List<?>) result.value();
assertEquals(expected, list);
}
/**
* Maps with an inconsistent key type don't find a common type for the keys or the values
* This behavior should be independent of the order that the pairs appear in the string
*/
@Test
public void shouldParseStringMapWithMultipleKeyTypes() {
Map<Object, Object> expected = new HashMap<>();
expected.put((byte) 1, (byte) 1);
expected.put((byte) 2, (byte) 1);
expected.put((short) 300, (short) 300);
expected.put("four", (byte) 1);
assertParseStringMapWithNoSchema(expected, "{1:1, 2:1, 300:300, \"four\":1}");
assertParseStringMapWithNoSchema(expected, "{2:1, 300:300, \"four\":1, 1:1}");
assertParseStringMapWithNoSchema(expected, "{300:300, \"four\":1, 1:1, 2:1}");
assertParseStringMapWithNoSchema(expected, "{\"four\":1, 1:1, 2:1, 300:300}");
}
    /**
     * Maps with a consistent key type may still not have a common type for the values
     * This behavior should be independent of the order that the pairs appear in the string
     */
    @Test
    public void shouldParseStringMapWithMultipleValueTypes() {
        // All keys widen to short, but values mix byte, short, and String — so the
        // value schema cannot be inferred.
        Map<Object, Object> expected = new HashMap<>();
        expected.put((short) 1, (byte) 1);
        expected.put((short) 2, (byte) 1);
        expected.put((short) 300, (short) 300);
        expected.put((short) 4, "four");
        // The same four entries in every rotation must parse identically.
        assertParseStringMapWithNoSchema(expected, "{1:1, 2:1, 300:300, 4:\"four\"}");
        assertParseStringMapWithNoSchema(expected, "{2:1, 300:300, 4:\"four\", 1:1}");
        assertParseStringMapWithNoSchema(expected, "{300:300, 4:\"four\", 1:1, 2:1}");
        assertParseStringMapWithNoSchema(expected, "{4:\"four\", 1:1, 2:1, 300:300}");
    }
private void assertParseStringMapWithNoSchema(Map<Object, Object> expected, String str) {
SchemaAndValue result = Values.parseString(str);
assertEquals(Type.MAP, result.schema().type());
assertNull(result.schema().valueSchema());
Map<?, ?> list = (Map<?, ?>) result.value();
assertEquals(expected, list);
}
@Test
public void shouldParseNestedArray() {
SchemaAndValue schemaAndValue = Values.parseString("[[]]");
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
assertEquals(Type.ARRAY, schemaAndValue.schema().valueSchema().type());
}
@Test
public void shouldParseArrayContainingMap() {
SchemaAndValue schemaAndValue = Values.parseString("[{}]");
assertEquals(Type.ARRAY, schemaAndValue.schema().type());
assertEquals(Type.MAP, schemaAndValue.schema().valueSchema().type());
}
@Test
public void shouldParseNestedMap() {
SchemaAndValue schemaAndValue = Values.parseString("{\"a\":{}}");
assertEquals(Type.MAP, schemaAndValue.schema().type());
assertEquals(Type.MAP, schemaAndValue.schema().valueSchema().type());
}
@Test
public void shouldParseMapContainingArray() {
SchemaAndValue schemaAndValue = Values.parseString("{\"a\":[]}");
assertEquals(Type.MAP, schemaAndValue.schema().type());
assertEquals(Type.ARRAY, schemaAndValue.schema().valueSchema().type());
}
/**
* We can't infer or successfully parse into a different type, so this returns the same string.
*/
@Test
public void shouldParseStringListWithExtraDelimitersAndReturnString() {
String str = "[1, 2, 3,,,]";
SchemaAndValue result = Values.parseString(str);
assertEquals(Type.STRING, result.schema().type());
assertEquals(str, result.value());
}
@Test
public void shouldParseStringListWithNullLastAsString() {
String str = "[1, null]";
SchemaAndValue result = Values.parseString(str);
assertEquals(Type.STRING, result.schema().type());
assertEquals(str, result.value());
}
@Test
public void shouldParseStringListWithNullFirstAsString() {
String str = "[null, 1]";
SchemaAndValue result = Values.parseString(str);
assertEquals(Type.STRING, result.schema().type());
assertEquals(str, result.value());
}
@Test
public void shouldParseTimestampStringAsTimestamp() throws Exception {
String str = "2019-08-23T14:34:54.346Z";
SchemaAndValue result = Values.parseString(str);
assertEquals(Type.INT64, result.schema().type());
assertEquals(Timestamp.LOGICAL_NAME, result.schema().name());
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(str);
assertEquals(expected, result.value());
}
@Test
public void shouldParseDateStringAsDate() throws Exception {
String str = "2019-08-23";
SchemaAndValue result = Values.parseString(str);
assertEquals(Type.INT32, result.schema().type());
assertEquals(Date.LOGICAL_NAME, result.schema().name());
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_DATE_FORMAT_PATTERN).parse(str);
assertEquals(expected, result.value());
}
@Test
public void shouldParseTimeStringAsDate() throws Exception {
String str = "14:34:54.346Z";
SchemaAndValue result = Values.parseString(str);
assertEquals(Type.INT32, result.schema().type());
assertEquals(Time.LOGICAL_NAME, result.schema().name());
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(str);
assertEquals(expected, result.value());
}
    @Test
    public void shouldParseTimestampStringWithEscapedColonsAsTimestamp() throws Exception {
        // Escaped colons ("\\:") must be unescaped by the parser before the value
        // is recognized as an ISO-8601 timestamp.
        String str = "2019-08-23T14\\:34\\:54.346Z";
        SchemaAndValue result = Values.parseString(str);
        assertEquals(Type.INT64, result.schema().type());
        assertEquals(Timestamp.LOGICAL_NAME, result.schema().name());
        // Expected value is computed from the unescaped form of the same instant.
        String expectedStr = "2019-08-23T14:34:54.346Z";
        java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(expectedStr);
        assertEquals(expected, result.value());
    }
    @Test
    public void shouldParseTimeStringWithEscapedColonsAsDate() throws Exception {
        // Escaped colons ("\\:") must be unescaped by the parser before the value
        // is recognized as an ISO-8601 time.
        String str = "14\\:34\\:54.346Z";
        SchemaAndValue result = Values.parseString(str);
        assertEquals(Type.INT32, result.schema().type());
        assertEquals(Time.LOGICAL_NAME, result.schema().name());
        // Expected value is computed from the unescaped form of the same time.
        String expectedStr = "14:34:54.346Z";
        java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(expectedStr);
        assertEquals(expected, result.value());
    }
    @Test
    public void shouldParseDateStringAsDateInArray() throws Exception {
        // A lone date string inside an array makes the array's element schema the
        // Date logical type.
        String dateStr = "2019-08-23";
        String arrayStr = "[" + dateStr + "]";
        SchemaAndValue result = Values.parseString(arrayStr);
        assertEquals(Type.ARRAY, result.schema().type());
        Schema elementSchema = result.schema().valueSchema();
        assertEquals(Type.INT32, elementSchema.type());
        assertEquals(Date.LOGICAL_NAME, elementSchema.name());
        java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_DATE_FORMAT_PATTERN).parse(dateStr);
        assertEquals(List.of(expected), result.value());
    }
    @Test
    public void shouldParseTimeStringAsTimeInArray() throws Exception {
        // A lone time string inside an array makes the array's element schema the
        // Time logical type.
        String timeStr = "14:34:54.346Z";
        String arrayStr = "[" + timeStr + "]";
        SchemaAndValue result = Values.parseString(arrayStr);
        assertEquals(Type.ARRAY, result.schema().type());
        Schema elementSchema = result.schema().valueSchema();
        assertEquals(Type.INT32, elementSchema.type());
        assertEquals(Time.LOGICAL_NAME, elementSchema.name());
        java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr);
        assertEquals(List.of(expected), result.value());
    }
    @Test
    public void shouldParseTimestampStringAsTimestampInArray() throws Exception {
        // A lone timestamp string inside an array makes the array's element schema
        // the Timestamp logical type.
        String tsStr = "2019-08-23T14:34:54.346Z";
        String arrayStr = "[" + tsStr + "]";
        SchemaAndValue result = Values.parseString(arrayStr);
        assertEquals(Type.ARRAY, result.schema().type());
        Schema elementSchema = result.schema().valueSchema();
        assertEquals(Type.INT64, elementSchema.type());
        assertEquals(Timestamp.LOGICAL_NAME, elementSchema.name());
        java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr);
        assertEquals(List.of(expected), result.value());
    }
    @Test
    public void shouldParseMultipleTimestampStringAsTimestampInArray() throws Exception {
        // Several timestamp strings (with and without a space after the comma) share a
        // common Timestamp element schema and preserve their order.
        String tsStr1 = "2019-08-23T14:34:54.346Z";
        String tsStr2 = "2019-01-23T15:12:34.567Z";
        String tsStr3 = "2019-04-23T19:12:34.567Z";
        String arrayStr = "[" + tsStr1 + "," + tsStr2 + ", " + tsStr3 + "]";
        SchemaAndValue result = Values.parseString(arrayStr);
        assertEquals(Type.ARRAY, result.schema().type());
        Schema elementSchema = result.schema().valueSchema();
        assertEquals(Type.INT64, elementSchema.type());
        assertEquals(Timestamp.LOGICAL_NAME, elementSchema.name());
        java.util.Date expected1 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr1);
        java.util.Date expected2 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr2);
        java.util.Date expected3 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr3);
        assertEquals(List.of(expected1, expected2, expected3), result.value());
    }
    @Test
    public void shouldParseQuotedTimeStringAsTimeInMap() throws Exception {
        // Even when quoted, a time-formatted map value is parsed as the Time logical
        // type, with a STRING key schema.
        String keyStr = "k1";
        String timeStr = "14:34:54.346Z";
        String mapStr = "{\"" + keyStr + "\":\"" + timeStr + "\"}";
        SchemaAndValue result = Values.parseString(mapStr);
        assertEquals(Type.MAP, result.schema().type());
        Schema keySchema = result.schema().keySchema();
        Schema valueSchema = result.schema().valueSchema();
        assertEquals(Type.STRING, keySchema.type());
        assertEquals(Type.INT32, valueSchema.type());
        assertEquals(Time.LOGICAL_NAME, valueSchema.name());
        java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr);
        assertEquals(Map.of(keyStr, expected), result.value());
    }
    @Test
    public void shouldParseTimeStringAsTimeInMap() throws Exception {
        // Same as the quoted variant, but the time value is unquoted in the map literal.
        String keyStr = "k1";
        String timeStr = "14:34:54.346Z";
        String mapStr = "{\"" + keyStr + "\":" + timeStr + "}";
        SchemaAndValue result = Values.parseString(mapStr);
        assertEquals(Type.MAP, result.schema().type());
        Schema keySchema = result.schema().keySchema();
        Schema valueSchema = result.schema().valueSchema();
        assertEquals(Type.STRING, keySchema.type());
        assertEquals(Type.INT32, valueSchema.type());
        assertEquals(Time.LOGICAL_NAME, valueSchema.name());
        java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr);
        assertEquals(Map.of(keyStr, expected), result.value());
    }
    @Test
    public void shouldFailToConvertNullTime() {
        // Null input is not convertible to any of the time-based logical types.
        assertThrows(DataException.class, () -> Values.convertToTime(null, null));
        assertThrows(DataException.class, () -> Values.convertToDate(null, null));
        assertThrows(DataException.class, () -> Values.convertToTimestamp(null, null));
    }
/**
* This is technically invalid JSON, and we don't want to simply ignore the blank elements.
*/
@Test
public void shouldFailToConvertToListFromStringWithExtraDelimiters() {
assertThrows(DataException.class, () -> Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, 3,,,]"));
}
/**
* Schema of type ARRAY requires a schema for the values, but Connect has no union or "any" schema type.
* Therefore, we can't represent this.
*/
@Test
public void shouldFailToConvertToListFromStringWithNonCommonElementTypeAndBlankElement() {
assertThrows(DataException.class, () -> Values.convertToList(Schema.STRING_SCHEMA, "[1, 2, 3, \"four\",,,]"));
}
/**
* This is technically invalid JSON, and we don't want to simply ignore the blank entry.
*/
@Test
public void shouldFailToParseStringOfMapWithIntValuesWithBlankEntry() {
assertThrows(DataException.class,
() -> Values.convertToMap(Schema.STRING_SCHEMA, " { \"foo\" : 1234567890 ,, \"bar\" : 0, \"baz\" : -987654321 } "));
}
/**
* This is technically invalid JSON, and we don't want to simply ignore the malformed entry.
*/
@Test
public void shouldFailToParseStringOfMalformedMap() {
assertThrows(DataException.class,
() -> Values.convertToMap(Schema.STRING_SCHEMA, " { \"foo\" : 1234567890 , \"a\", \"bar\" : 0, \"baz\" : -987654321 } "));
}
/**
* This is technically invalid JSON, and we don't want to simply ignore the blank entries.
*/
@Test
public void shouldFailToParseStringOfMapWithIntValuesWithOnlyBlankEntries() {
assertThrows(DataException.class, () -> Values.convertToMap(Schema.STRING_SCHEMA, " { ,, , , } "));
}
/**
* This is technically invalid JSON, and we don't want to simply ignore the blank entry.
*/
@Test
public void shouldFailToParseStringOfMapWithIntValuesWithBlankEntries() {
assertThrows(DataException.class,
() -> Values.convertToMap(Schema.STRING_SCHEMA, " { \"foo\" : \"1234567890\" ,, \"bar\" : \"0\", \"baz\" : \"boz\" } "));
}
    @Test
    public void shouldConsumeMultipleTokens() {
        // Exercises Parser.next(int): consuming N tokens at once, then mixing with
        // single-token next(). The call order is significant — the parser is stateful.
        String value = "a:b:c:d:e:f:g:h";
        Parser parser = new Parser(value);
        String firstFive = parser.next(5);
        assertEquals("a:b:c", firstFive);
        assertEquals(":", parser.next());
        assertEquals("d", parser.next());
        assertEquals(":", parser.next());
        // Requesting more tokens than remain returns null without consuming anything.
        String lastEight = parser.next(8); // only 7 remain
        assertNull(lastEight);
        assertEquals("e", parser.next());
    }
    @Test
    public void shouldParseStringsWithoutDelimiters() {
        // Inputs with no delimiter characters must tokenize as a single token
        // identical to the input (see single-argument assertParsed).
        //assertParsed("");
        assertParsed(" ");
        assertParsed("simple");
        assertParsed("simple string");
        assertParsed("simple \n\t\bstring");
        assertParsed("'simple' string");
        assertParsed("si\\mple");
        assertParsed("si\\\\mple");
    }
    @Test
    public void shouldParseStringsWithEscapedDelimiters() {
        // Backslash-escaped delimiter characters do not split the token.
        assertParsed("si\\\"mple");
        assertParsed("si\\{mple");
        assertParsed("si\\}mple");
        assertParsed("si\\]mple");
        assertParsed("si\\[mple");
        assertParsed("si\\:mple");
        assertParsed("si\\,mple");
    }
    @Test
    public void shouldParseStringsWithSingleDelimiter() {
        // Each delimiter character yields its own token, whether it appears in the
        // middle, at the start, or alone.
        assertParsed("a{b", "a", "{", "b");
        assertParsed("a}b", "a", "}", "b");
        assertParsed("a[b", "a", "[", "b");
        assertParsed("a]b", "a", "]", "b");
        assertParsed("a:b", "a", ":", "b");
        assertParsed("a,b", "a", ",", "b");
        assertParsed("a\"b", "a", "\"", "b");
        assertParsed("{b", "{", "b");
        assertParsed("}b", "}", "b");
        assertParsed("[b", "[", "b");
        assertParsed("]b", "]", "b");
        assertParsed(":b", ":", "b");
        assertParsed(",b", ",", "b");
        assertParsed("\"b", "\"", "b");
        assertParsed("{", "{");
        assertParsed("}", "}");
        assertParsed("[", "[");
        assertParsed("]", "]");
        assertParsed(":", ":");
        assertParsed(",", ",");
        assertParsed("\"", "\"");
    }
    @Test
    public void shouldParseStringsWithMultipleDelimiters() {
        // Whitespace around delimiters stays attached to the neighboring tokens.
        assertParsed("\"simple\" string", "\"", "simple", "\"", " string");
        assertParsed("a{bc}d", "a", "{", "bc", "}", "d");
        assertParsed("a { b c } d", "a ", "{", " b c ", "}", " d");
        // NOTE(review): the line below duplicates the one above — presumably a
        // copy-paste leftover; confirm and remove.
        assertParsed("a { b c } d", "a ", "{", " b c ", "}", " d");
    }
    /**
     * Values should convert many representations to a Time value: another Date, a
     * full Timestamp (date discarded), an ISO-8601 string, and day-milliseconds as
     * either a string or a long.
     */
    @Test
    public void shouldConvertTimeValues() {
        LocalDateTime localTime = LocalDateTime.now();
        LocalTime localTimeTruncated = localTime.toLocalTime().truncatedTo(ChronoUnit.MILLIS);
        ZoneOffset zoneOffset = ZoneId.systemDefault().getRules().getOffset(localTime);
        // 'current' is 'now' truncated to whole seconds (epoch-seconds * 1000).
        java.util.Date current = new java.util.Date(localTime.toEpochSecond(zoneOffset) * 1000);
        long currentMillis = current.getTime() % MILLIS_PER_DAY;
        // java.util.Date - just copy
        java.util.Date t1 = Values.convertToTime(Time.SCHEMA, current);
        assertEquals(current, t1);
        // java.util.Date as a Timestamp - discard the date and keep just day's milliseconds
        java.util.Date t2 = Values.convertToTime(Timestamp.SCHEMA, current);
        assertEquals(new java.util.Date(currentMillis), t2);
        // ISO8601 strings - accept a string matching pattern "HH:mm:ss.SSS'Z'"
        java.util.Date t3 = Values.convertToTime(Time.SCHEMA, localTime.format(DateTimeFormatter.ofPattern(Values.ISO_8601_TIME_FORMAT_PATTERN)));
        LocalTime time3 = LocalDateTime.ofInstant(Instant.ofEpochMilli(t3.getTime()), ZoneId.systemDefault()).toLocalTime();
        assertEquals(localTimeTruncated, time3);
        // Millis as string
        java.util.Date t4 = Values.convertToTime(Time.SCHEMA, Long.toString(currentMillis));
        assertEquals(currentMillis, t4.getTime());
        // Millis as long
        java.util.Date t5 = Values.convertToTime(Time.SCHEMA, currentMillis);
        assertEquals(currentMillis, t5.getTime());
    }
    /**
     * Values should convert many representations to a Date value: another Date, a
     * full Timestamp (time-of-day discarded), an ISO-8601 date string, and
     * epoch-days as either a string or a long.
     */
    @Test
    public void shouldConvertDateValues() {
        LocalDateTime localTime = LocalDateTime.now();
        ZoneOffset zoneOffset = ZoneId.systemDefault().getRules().getOffset(localTime);
        // 'current' is 'now' truncated to whole seconds (epoch-seconds * 1000).
        java.util.Date current = new java.util.Date(localTime.toEpochSecond(zoneOffset) * 1000);
        long currentMillis = current.getTime() % MILLIS_PER_DAY;
        long days = current.getTime() / MILLIS_PER_DAY;
        // java.util.Date - just copy
        java.util.Date d1 = Values.convertToDate(Date.SCHEMA, current);
        assertEquals(current, d1);
        // java.util.Date as a Timestamp - discard the day's milliseconds and keep the date
        java.util.Date currentDate = new java.util.Date(current.getTime() - currentMillis);
        java.util.Date d2 = Values.convertToDate(Timestamp.SCHEMA, currentDate);
        assertEquals(currentDate, d2);
        // ISO8601 strings - accept a string matching pattern "yyyy-MM-dd"
        LocalDateTime localTimeTruncated = localTime.truncatedTo(ChronoUnit.DAYS);
        java.util.Date d3 = Values.convertToDate(Date.SCHEMA, localTime.format(DateTimeFormatter.ISO_LOCAL_DATE));
        LocalDateTime date3 = LocalDateTime.ofInstant(Instant.ofEpochMilli(d3.getTime()), ZoneId.systemDefault());
        assertEquals(localTimeTruncated, date3);
        // Days as string
        java.util.Date d4 = Values.convertToDate(Date.SCHEMA, Long.toString(days));
        assertEquals(currentDate, d4);
        // Days as long
        java.util.Date d5 = Values.convertToDate(Date.SCHEMA, days);
        assertEquals(currentDate, d5);
    }
@Test
public void shouldConvertTimestampValues() {
LocalDateTime localTime = LocalDateTime.now();
LocalDateTime localTimeTruncated = localTime.truncatedTo(ChronoUnit.MILLIS);
ZoneOffset zoneOffset = ZoneId.systemDefault().getRules().getOffset(localTime);
java.util.Date current = new java.util.Date(localTime.toEpochSecond(zoneOffset) * 1000);
long currentMillis = current.getTime() % MILLIS_PER_DAY;
// java.util.Date - just copy
java.util.Date ts1 = Values.convertToTimestamp(Timestamp.SCHEMA, current);
assertEquals(current, ts1);
// java.util.Date as a Timestamp - discard the day's milliseconds and keep the date
java.util.Date currentDate = new java.util.Date(current.getTime() - currentMillis);
ts1 = Values.convertToTimestamp(Date.SCHEMA, currentDate);
assertEquals(currentDate, ts1);
// java.util.Date as a Time - discard the date and keep the day's milliseconds
java.util.Date ts2 = Values.convertToTimestamp(Time.SCHEMA, currentMillis);
assertEquals(new java.util.Date(currentMillis), ts2);
// ISO8601 strings - accept a string matching pattern "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"
java.util.Date ts3 = Values.convertToTime(Time.SCHEMA, localTime.format(DateTimeFormatter.ofPattern(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN)));
LocalDateTime time3 = LocalDateTime.ofInstant(Instant.ofEpochMilli(ts3.getTime()), ZoneId.systemDefault());
assertEquals(localTimeTruncated, time3);
// Millis as string
java.util.Date ts4 = Values.convertToTimestamp(Timestamp.SCHEMA, Long.toString(current.getTime()));
assertEquals(current, ts4);
// Millis as long
java.util.Date ts5 = Values.convertToTimestamp(Timestamp.SCHEMA, current.getTime());
assertEquals(current, ts5);
}
    @Test
    public void shouldConvertDecimalValues() {
        // Various forms of the same number should all be parsed to the same BigDecimal
        Number number = 1.0f;
        String string = number.toString();
        BigDecimal value = new BigDecimal(string);
        // Serialized (logical Decimal) byte forms must also convert back.
        byte[] bytes = Decimal.fromLogical(Decimal.schema(1), value);
        ByteBuffer buffer = ByteBuffer.wrap(bytes);
        assertEquals(value, Values.convertToDecimal(null, number, 1));
        assertEquals(value, Values.convertToDecimal(null, string, 1));
        assertEquals(value, Values.convertToDecimal(null, value, 1));
        assertEquals(value, Values.convertToDecimal(null, bytes, 1));
        assertEquals(value, Values.convertToDecimal(null, buffer, 1));
    }
    @Test
    public void shouldFailToConvertNullToDecimal() {
        // Null input is not convertible to the Decimal logical type.
        assertThrows(DataException.class, () -> Values.convertToDecimal(null, null, 1));
    }
@Test
public void shouldInferByteSchema() {
byte[] bytes = new byte[1];
Schema byteSchema = Values.inferSchema(bytes);
assertEquals(Schema.BYTES_SCHEMA, byteSchema);
Schema byteBufferSchema = Values.inferSchema(ByteBuffer.wrap(bytes));
assertEquals(Schema.BYTES_SCHEMA, byteBufferSchema);
}
@Test
public void shouldInferStructSchema() {
Struct struct = new Struct(SchemaBuilder.struct().build());
Schema structSchema = Values.inferSchema(struct);
assertEquals(struct.schema(), structSchema);
}
@Test
public void shouldInferNoSchemaForEmptyList() {
Schema listSchema = Values.inferSchema(List.of());
assertNull(listSchema);
}
@Test
public void shouldInferNoSchemaForListContainingObject() {
Schema listSchema = Values.inferSchema(List.of(new Object()));
assertNull(listSchema);
}
@Test
public void shouldInferNoSchemaForEmptyMap() {
Schema listSchema = Values.inferSchema(Map.of());
assertNull(listSchema);
}
@Test
public void shouldInferNoSchemaForMapContainingObject() {
Schema listSchema = Values.inferSchema(Map.of(new Object(), new Object()));
assertNull(listSchema);
}
    /**
     * Test parsing distinct number-like types (strings containing numbers, and logical Decimals) in the same list
     * The parser does not convert Numbers to Decimals, or Strings containing numbers to Numbers automatically.
     */
    @Test
    public void shouldNotConvertArrayValuesToDecimal() {
        List<Object> decimals = List.of("\"1.0\"", BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE),
                BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE), (byte) 1, (byte) 1);
        List<Object> expected = new ArrayList<>(decimals); // most values are directly reproduced with the same type
        expected.set(0, "1.0"); // The quotes are parsed away, but the value remains a string
        SchemaAndValue schemaAndValue = Values.parseString(decimals.toString());
        Schema schema = schemaAndValue.schema();
        assertEquals(Type.ARRAY, schema.type());
        // Mixed String/BigDecimal/byte elements have no common schema.
        assertNull(schema.valueSchema());
        assertEquals(expected, schemaAndValue.value());
    }
@Test
public void shouldParseArrayOfOnlyDecimals() {
List<Object> decimals = List.of(BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE),
BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE));
SchemaAndValue schemaAndValue = Values.parseString(decimals.toString());
Schema schema = schemaAndValue.schema();
assertEquals(Type.ARRAY, schema.type());
assertEquals(Decimal.schema(0), schema.valueSchema());
assertEquals(decimals, schemaAndValue.value());
}
    @Test
    public void canConsume() {
        // TODO(review): empty test body — presumably a placeholder for Parser.canConsume
        // coverage (currently exercised indirectly via assertConsumable); fill in or remove.
    }
@Test
public void shouldParseBigIntegerAsDecimalWithZeroScale() {
BigInteger value = BigInteger.valueOf(Long.MAX_VALUE).add(new BigInteger("1"));
SchemaAndValue schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Decimal.schema(0), schemaAndValue.schema());
assertInstanceOf(BigDecimal.class, schemaAndValue.value());
assertEquals(value, ((BigDecimal) schemaAndValue.value()).unscaledValue());
value = BigInteger.valueOf(Long.MIN_VALUE).subtract(new BigInteger("1"));
schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Decimal.schema(0), schemaAndValue.schema());
assertInstanceOf(BigDecimal.class, schemaAndValue.value());
assertEquals(value, ((BigDecimal) schemaAndValue.value()).unscaledValue());
}
@Test
public void shouldParseByteAsInt8() {
Byte value = Byte.MAX_VALUE;
SchemaAndValue schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT8_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Byte.class, schemaAndValue.value());
assertEquals(value.byteValue(), ((Byte) schemaAndValue.value()).byteValue());
value = Byte.MIN_VALUE;
schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT8_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Byte.class, schemaAndValue.value());
assertEquals(value.byteValue(), ((Byte) schemaAndValue.value()).byteValue());
}
@Test
public void shouldParseShortAsInt16() {
Short value = Short.MAX_VALUE;
SchemaAndValue schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT16_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Short.class, schemaAndValue.value());
assertEquals(value.shortValue(), ((Short) schemaAndValue.value()).shortValue());
value = Short.MIN_VALUE;
schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT16_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Short.class, schemaAndValue.value());
assertEquals(value.shortValue(), ((Short) schemaAndValue.value()).shortValue());
}
@Test
public void shouldParseIntegerAsInt32() {
Integer value = Integer.MAX_VALUE;
SchemaAndValue schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT32_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Integer.class, schemaAndValue.value());
assertEquals(value.intValue(), ((Integer) schemaAndValue.value()).intValue());
value = Integer.MIN_VALUE;
schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT32_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Integer.class, schemaAndValue.value());
assertEquals(value.intValue(), ((Integer) schemaAndValue.value()).intValue());
}
@Test
public void shouldParseLongAsInt64() {
Long value = Long.MAX_VALUE;
SchemaAndValue schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT64_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Long.class, schemaAndValue.value());
assertEquals(value.longValue(), ((Long) schemaAndValue.value()).longValue());
value = Long.MIN_VALUE;
schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT64_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Long.class, schemaAndValue.value());
assertEquals(value.longValue(), ((Long) schemaAndValue.value()).longValue());
}
@Test
public void shouldParseFloatAsFloat32() {
Float value = Float.MAX_VALUE;
SchemaAndValue schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.FLOAT32_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Float.class, schemaAndValue.value());
assertEquals(value, (Float) schemaAndValue.value(), 0);
value = -Float.MAX_VALUE;
schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.FLOAT32_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Float.class, schemaAndValue.value());
assertEquals(value, (Float) schemaAndValue.value(), 0);
}
@Test
public void shouldParseDoubleAsFloat64() {
Double value = Double.MAX_VALUE;
SchemaAndValue schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.FLOAT64_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Double.class, schemaAndValue.value());
assertEquals(value, (Double) schemaAndValue.value(), 0);
value = -Double.MAX_VALUE;
schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.FLOAT64_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Double.class, schemaAndValue.value());
assertEquals(value, (Double) schemaAndValue.value(), 0);
}
    @Test
    public void shouldParseFractionalPartsAsIntegerWhenNoFractionalPart() {
        // A ".0" fractional part collapses to the narrowest integer schema that
        // fits the value; any non-zero fraction stays FLOAT32.
        assertEquals(new SchemaAndValue(Schema.INT8_SCHEMA, (byte) 1), Values.parseString("1.0"));
        assertEquals(new SchemaAndValue(Schema.FLOAT32_SCHEMA, 1.1f), Values.parseString("1.1"));
        assertEquals(new SchemaAndValue(Schema.INT16_SCHEMA, (short) 300), Values.parseString("300.0"));
        assertEquals(new SchemaAndValue(Schema.FLOAT32_SCHEMA, 300.01f), Values.parseString("300.01"));
        assertEquals(new SchemaAndValue(Schema.INT32_SCHEMA, 66000), Values.parseString("66000.0"));
        assertEquals(new SchemaAndValue(Schema.FLOAT32_SCHEMA, 66000.0008f), Values.parseString("66000.0008"));
    }
    @Test
    public void avoidCpuAndMemoryIssuesConvertingExtremeBigDecimals() {
        // Guard against pathological BigDecimal handling for extreme exponents:
        // normalizing these with setScale would take minutes and gigabytes.
        String parsingBig = "1e+100000000"; // new BigDecimal().setScale(0, RoundingMode.FLOOR) takes around two minutes and uses 3GB;
        BigDecimal valueBig = new BigDecimal(parsingBig);
        assertEquals(new SchemaAndValue(Decimal.schema(-100000000), valueBig), Values.parseString(parsingBig), "parsing number that's too big");
        String parsingSmall = "1e-100000000";
        BigDecimal valueSmall = new BigDecimal(parsingSmall);
        // The tiny value underflows to a float zero rather than a huge-scale Decimal.
        assertEquals(new SchemaAndValue(Schema.FLOAT32_SCHEMA, (float) valueSmall.doubleValue()), Values.parseString(parsingSmall), "parsing number that's too big, strictly this should return a bigdecimal");
    }
    // Convenience overload: an input with no delimiters tokenizes to itself.
    protected void assertParsed(String input) {
        assertParsed(input, input);
    }
    // Tokenizes 'input' and verifies next/previous/mark/rewindTo all agree with the
    // expected token sequence; the order of operations here is significant because
    // the Parser is stateful.
    protected void assertParsed(String input, String... expectedTokens) {
        Parser parser = new Parser(input);
        if (!parser.hasNext()) {
            // An un-tokenizable input is only valid when a single empty token was expected.
            assertEquals(1, expectedTokens.length);
            assertTrue(expectedTokens[0].isEmpty());
            return;
        }
        for (String expectedToken : expectedTokens) {
            assertTrue(parser.hasNext());
            int position = parser.mark();
            assertEquals(expectedToken, parser.next());
            assertEquals(position + expectedToken.length(), parser.position());
            assertEquals(expectedToken, parser.previous());
            // Rewinding and re-consuming must yield the same token and positions.
            parser.rewindTo(position);
            assertEquals(position, parser.position());
            assertEquals(expectedToken, parser.next());
            int newPosition = parser.mark();
            assertEquals(position + expectedToken.length(), newPosition);
            assertEquals(expectedToken, parser.previous());
        }
        assertFalse(parser.hasNext());
        // Rewind and try consuming expected tokens ...
        parser.rewindTo(0);
        assertConsumable(parser, expectedTokens);
        // Parse again and try consuming expected tokens ...
        parser = new Parser(input);
        assertConsumable(parser, expectedTokens);
    }
    // Verifies Parser.canConsume accepts each non-blank expected token in all three
    // call forms, rewinding between attempts so each starts from the same position.
    protected void assertConsumable(Parser parser, String... expectedTokens) {
        for (String expectedToken : expectedTokens) {
            if (!Utils.isBlank(expectedToken)) {
                int position = parser.mark();
                assertTrue(parser.canConsume(expectedToken.trim()));
                parser.rewindTo(position);
                assertTrue(parser.canConsume(expectedToken.trim(), true));
                parser.rewindTo(position);
                // With ignoreLeadingAndTrailingWhitespace=false the untrimmed token must match.
                assertTrue(parser.canConsume(expectedToken, false));
            }
        }
    }
    // Convenience overload: round-trips a raw string as a STRING-schema value.
    protected SchemaAndValue roundTrip(Schema desiredSchema, String currentValue) {
        return roundTrip(desiredSchema, new SchemaAndValue(Schema.STRING_SCHEMA, currentValue));
    }
    // Serializes 'input' to its string form and converts it back to 'desiredSchema'
    // (inferring the schema from the input when none is given).
    protected SchemaAndValue roundTrip(Schema desiredSchema, SchemaAndValue input) {
        String serialized = input != null ? Values.convertToString(input.schema(), input.value()) : null;
        if (input != null && input.value() != null) {
            // A non-null value must always serialize to a non-null string.
            assertNotNull(serialized);
        }
        if (desiredSchema == null) {
            desiredSchema = Values.inferSchema(input);
            assertNotNull(desiredSchema);
        }
        return convertTo(desiredSchema, serialized);
    }
    // Dispatches to the Values.convertToXxx method matching 'desiredSchema' (always
    // treating the input as a STRING-schema value) and pairs the converted value
    // with its re-inferred schema. STRUCT and BYTES are not convertible from strings.
    protected SchemaAndValue convertTo(Schema desiredSchema, Object value) {
        Object newValue = null;
        switch (desiredSchema.type()) {
            case STRING:
                newValue = Values.convertToString(Schema.STRING_SCHEMA, value);
                break;
            case INT8:
                newValue = Values.convertToByte(Schema.STRING_SCHEMA, value);
                break;
            case INT16:
                newValue = Values.convertToShort(Schema.STRING_SCHEMA, value);
                break;
            case INT32:
                newValue = Values.convertToInteger(Schema.STRING_SCHEMA, value);
                break;
            case INT64:
                newValue = Values.convertToLong(Schema.STRING_SCHEMA, value);
                break;
            case FLOAT32:
                newValue = Values.convertToFloat(Schema.STRING_SCHEMA, value);
                break;
            case FLOAT64:
                newValue = Values.convertToDouble(Schema.STRING_SCHEMA, value);
                break;
            case BOOLEAN:
                newValue = Values.convertToBoolean(Schema.STRING_SCHEMA, value);
                break;
            case ARRAY:
                newValue = Values.convertToList(Schema.STRING_SCHEMA, value);
                break;
            case MAP:
                newValue = Values.convertToMap(Schema.STRING_SCHEMA, value);
                break;
            case STRUCT:
            case BYTES:
                fail("unexpected schema type");
                break;
        }
        Schema newSchema = Values.inferSchema(newValue);
        return new SchemaAndValue(newSchema, newValue);
    }
    // Convenience overload: round-trips a string value under the STRING schema.
    protected void assertRoundTrip(Schema schema, String value) {
        assertRoundTrip(schema, Schema.STRING_SCHEMA, value);
    }
    // Round-trips 'value' through string serialization twice and asserts both the
    // value and its schema survive unchanged. Null values round-trip to null.
    protected void assertRoundTrip(Schema schema, Schema currentSchema, Object value) {
        SchemaAndValue result = roundTrip(schema, new SchemaAndValue(currentSchema, value));
        if (value == null) {
            assertNull(result.schema());
            assertNull(result.value());
        } else {
            assertEquals(value, result.value());
            assertEquals(schema, result.schema());
            // A second round trip must be a fixed point.
            SchemaAndValue result2 = roundTrip(result.schema(), result);
            assertEquals(schema, result2.schema());
            assertEquals(value, result2.value());
            assertEquals(result, result2);
        }
    }
    // Converts 'value' directly (no string serialization step) and asserts the
    // value and schema are preserved. Null values convert to null.
    protected void assertShortCircuit(Schema schema, Object value) {
        SchemaAndValue result = convertTo(schema, value);
        if (value == null) {
            assertNull(result.schema());
            assertNull(result.value());
        } else {
            assertEquals(value, result.value());
            assertEquals(schema, result.schema());
        }
    }
} | java | github | https://github.com/apache/kafka | connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java |
from django.conf.urls import patterns, url
from radpress.views import (
    ArticleArchiveView, ArticleDetailView, ArticleListView, PreviewView,
    PageDetailView, SearchView, ZenModeView, ZenModeUpdateView)
from radpress.feeds import ArticleFeed

# URL routes for the radpress blog application. Every pattern is anchored with
# '^' so resolution is unambiguous.
urlpatterns = patterns(
    '',
    url(r'^$',
        view=ArticleListView.as_view(),
        name='radpress-article-list'),
    url(r'^archives/$',
        view=ArticleArchiveView.as_view(),
        name='radpress-article-archive'),
    url(r'^detail/(?P<slug>[-\w]+)/$',
        view=ArticleDetailView.as_view(),
        name='radpress-article-detail'),
    url(r'^p/(?P<slug>[-\w]+)/$',
        view=PageDetailView.as_view(),
        name='radpress-page-detail'),
    url(r'^preview/$',
        view=PreviewView.as_view(),
        name='radpress-preview'),
    url(r'^search/$',
        view=SearchView.as_view(),
        name='radpress-search'),
    url(r'^zen/$',
        view=ZenModeView.as_view(),
        name='radpress-zen-mode'),
    # Fixed: this pattern was missing the leading '^' anchor that every other
    # route has, which let it match any URL path merely containing 'zen/<pk>/'.
    url(r'^zen/(?P<pk>\d+)/$',
        view=ZenModeUpdateView.as_view(),
        name='radpress-zen-mode-update'),
    url(r'^rss/$',
        view=ArticleFeed(),
        name='radpress-rss'),
    # Same feed, optionally filtered by one or more tags in the path.
    url(r'^rss/(?P<tags>[-/\w]+)/$',
        view=ArticleFeed(),
        name='radpress-rss')
)
import pytest
from tempfile import NamedTemporaryFile
from django.core.urlresolvers import reverse
from .. import factories as f
from taiga.base.utils import json
from taiga.users import models
from taiga.auth.tokens import get_token_for_user
from taiga.permissions.permissions import MEMBERS_PERMISSIONS, ANON_PERMISSIONS, USER_PERMISSIONS
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db
def test_users_create_through_standard_api(client):
    """POSTing to the users list endpoint is rejected (405), even for superusers."""
    superuser = f.UserFactory.create(is_superuser=True)
    endpoint = reverse('users-list')
    payload = json.dumps({})
    # Anonymous request.
    response = client.post(endpoint, payload, content_type="application/json")
    assert response.status_code == 405
    # Authenticated superuser request.
    client.login(superuser)
    response = client.post(endpoint, payload, content_type="application/json")
    assert response.status_code == 405
def test_update_user_with_same_email(client):
    """Patching a user with the email they already have yields a duplicate error."""
    member = f.UserFactory.create(email="same@email.com")
    client.login(member)
    endpoint = reverse('users-detail', kwargs={"pk": member.pk})
    payload = json.dumps({"email": "same@email.com"})
    response = client.patch(endpoint, payload, content_type="application/json")
    assert response.status_code == 400
    assert response.data['_error_message'] == 'Duplicated email'
def test_update_user_with_duplicated_email(client):
    """Patching a user with an email already owned by another user fails."""
    f.UserFactory.create(email="one@email.com")
    member = f.UserFactory.create(email="two@email.com")
    client.login(member)
    endpoint = reverse('users-detail', kwargs={"pk": member.pk})
    payload = json.dumps({"email": "one@email.com"})
    response = client.patch(endpoint, payload, content_type="application/json")
    assert response.status_code == 400
    assert response.data['_error_message'] == 'Duplicated email'
def test_update_user_with_invalid_email(client):
    """Patching a user with a syntactically invalid email fails validation."""
    member = f.UserFactory.create(email="my@email.com")
    client.login(member)
    endpoint = reverse('users-detail', kwargs={"pk": member.pk})
    payload = json.dumps({"email": "my@email"})
    response = client.patch(endpoint, payload, content_type="application/json")
    assert response.status_code == 400
    assert response.data['_error_message'] == 'Not valid email'
def test_update_user_with_valid_email(client):
    """A valid new email is stored as pending and a confirmation token is issued."""
    member = f.UserFactory.create(email="old@email.com")
    client.login(member)
    endpoint = reverse('users-detail', kwargs={"pk": member.pk})
    payload = json.dumps({"email": "new@email.com"})
    response = client.patch(endpoint, payload, content_type="application/json")
    assert response.status_code == 200
    # The change is not applied directly: it is staged for confirmation.
    member = models.User.objects.get(pk=member.id)
    assert member.email_token is not None
    assert member.new_email == "new@email.com"
def test_validate_requested_email_change(client):
    """Posting the correct token finalises a pending email change."""
    member = f.UserFactory.create(email_token="change_email_token", new_email="new@email.com")
    client.login(member)
    response = client.post(reverse('users-change-email'),
                           json.dumps({"email_token": "change_email_token"}),
                           content_type="application/json")
    assert response.status_code == 204
    # Token and staging fields are cleared; the address is now active.
    member = models.User.objects.get(pk=member.id)
    assert member.email_token is None
    assert member.new_email is None
    assert member.email == "new@email.com"
def test_validate_requested_email_change_for_anonymous_user(client):
    """The email-change token also works without an authenticated session."""
    member = f.UserFactory.create(email_token="change_email_token", new_email="new@email.com")
    response = client.post(reverse('users-change-email'),
                           json.dumps({"email_token": "change_email_token"}),
                           content_type="application/json")
    assert response.status_code == 204
    member = models.User.objects.get(pk=member.id)
    assert member.email_token is None
    assert member.new_email is None
    assert member.email == "new@email.com"
def test_validate_requested_email_change_without_token(client):
    """Confirming an email change without any token is a 400."""
    member = f.UserFactory.create(email_token="change_email_token", new_email="new@email.com")
    client.login(member)
    response = client.post(reverse('users-change-email'), json.dumps({}),
                           content_type="application/json")
    assert response.status_code == 400
def test_validate_requested_email_change_with_invalid_token(client):
    """Confirming an email change with a wrong token is a 400."""
    member = f.UserFactory.create(email_token="change_email_token", new_email="new@email.com")
    client.login(member)
    response = client.post(reverse('users-change-email'),
                           json.dumps({"email_token": "invalid_email_token"}),
                           content_type="application/json")
    assert response.status_code == 400
def test_delete_self_user(client):
    """Deleting your own account anonymises it rather than removing the row."""
    member = f.UserFactory.create()
    client.login(member)
    response = client.delete(reverse('users-detail', kwargs={"pk": member.pk}))
    assert response.status_code == 204
    member = models.User.objects.get(pk=member.id)
    assert member.full_name == "Deleted user"
def test_cancel_self_user_with_valid_token(client):
    """A valid cancel-account token anonymises the account."""
    member = f.UserFactory.create()
    client.login(member)
    payload = json.dumps({"cancel_token": get_token_for_user(member, "cancel_account")})
    response = client.post(reverse('users-cancel'), payload,
                           content_type="application/json")
    assert response.status_code == 204
    member = models.User.objects.get(pk=member.id)
    assert member.full_name == "Deleted user"
def test_cancel_self_user_with_invalid_token(client):
    """An invalid cancel-account token is rejected with a 400."""
    member = f.UserFactory.create()
    client.login(member)
    response = client.post(reverse('users-cancel'),
                           json.dumps({"cancel_token": "invalid_cancel_token"}),
                           content_type="application/json")
    assert response.status_code == 400
# Minimal well-formed 1x1 24-bit BMP file, used as a valid avatar upload below.
DUMMY_BMP_DATA = b'BM:\x00\x00\x00\x00\x00\x00\x006\x00\x00\x00(\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01\x00\x18\x00\x00\x00\x00\x00\x04\x00\x00\x00\x13\x0b\x00\x00\x13\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def test_change_avatar(client):
    """Avatar upload: missing file and malformed file fail; a valid BMP succeeds.

    The same temporary file handle is reused across requests, so the
    write/seek sequence before the last POST is significant.
    """
    url = reverse('users-change-avatar')
    user = f.UserFactory()
    client.login(user)
    with NamedTemporaryFile() as avatar:
        # Test no avatar send
        post_data = {}
        response = client.post(url, post_data)
        assert response.status_code == 400
        # Test invalid file send
        post_data = {
            'avatar': avatar
        }
        response = client.post(url, post_data)
        assert response.status_code == 400
        # Test empty valid avatar send
        avatar.write(DUMMY_BMP_DATA)
        avatar.seek(0)
        response = client.post(url, post_data)
        assert response.status_code == 200
def test_list_contacts_private_projects(client):
    """Contacts from a private project are visible only to its members.

    Anonymous users get an empty list; a member sees the other member.
    """
    project = f.ProjectFactory.create()
    user_1 = f.UserFactory.create()
    user_2 = f.UserFactory.create()
    role = f.RoleFactory(project=project, permissions=["view_project"])
    # The membership objects themselves are unused; creating them is the
    # side effect under test (previously bound to unused locals).
    f.MembershipFactory.create(project=project, user=user_1, role=role)
    f.MembershipFactory.create(project=project, user=user_2, role=role)
    url = reverse('users-contacts', kwargs={"pk": user_1.pk})
    # Anonymous request: private-project contacts must be hidden.
    response = client.get(url, content_type="application/json")
    assert response.status_code == 200
    assert len(response.data) == 0
    # Member request: the other project member is listed.
    client.login(user_1)
    response = client.get(url, content_type="application/json")
    assert response.status_code == 200
    response_content = response.data
    assert len(response_content) == 1
    assert response_content[0]["id"] == user_2.id
def test_list_contacts_no_projects(client):
    """Members of unrelated projects are not contacts of each other."""
    user_1 = f.UserFactory.create()
    user_2 = f.UserFactory.create()
    role_1 = f.RoleFactory(permissions=["view_project"])
    role_2 = f.RoleFactory(permissions=["view_project"])
    # Memberships are created for their side effect only (previously bound
    # to unused locals).
    f.MembershipFactory.create(project=role_1.project, user=user_1, role=role_1)
    f.MembershipFactory.create(project=role_2.project, user=user_2, role=role_2)
    client.login(user_1)
    url = reverse('users-contacts', kwargs={"pk": user_1.pk})
    response = client.get(url, content_type="application/json")
    assert response.status_code == 200
    assert len(response.data) == 0
def test_list_contacts_public_projects(client):
    """Contacts from a public project are visible even to anonymous users."""
    project = f.ProjectFactory.create(is_private=False,
                                      anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                      public_permissions=list(map(lambda x: x[0], USER_PERMISSIONS)))
    user_1 = f.UserFactory.create()
    user_2 = f.UserFactory.create()
    role = f.RoleFactory(project=project)
    # Memberships are created for their side effect only (previously bound
    # to unused locals).
    f.MembershipFactory.create(project=project, user=user_1, role=role)
    f.MembershipFactory.create(project=project, user=user_2, role=role)
    url = reverse('users-contacts', kwargs={"pk": user_1.pk})
    # No login: the anonymous user may still see public-project contacts.
    response = client.get(url, content_type="application/json")
    assert response.status_code == 200
    response_content = response.data
    assert len(response_content) == 1
    assert response_content[0]["id"] == user_2.id
"""
Standard resistor values.
Commonly used for verifying electronic components in circuit classes are
standard values, or conversely, for generating realistic component
values in parameterized problems. For details, see:
http://en.wikipedia.org/wiki/Electronic_color_code
"""
# pylint: disable=invalid-name
# r is standard name for a resistor. We would like to use it as such.
import math
import numbers
# Standard EIA preferred-value series.  E6/E12/E24 carry two significant
# figures; E48/E96/E192 carry three.  Ordering follows the original source.
E6 = [10, 15, 22, 33, 47, 68]
E12 = [10, 12, 15, 18, 22, 27, 33, 39, 47, 56, 68, 82]
E24 = [10, 12, 15, 18, 22, 27, 33, 39, 47, 56, 68, 82, 11, 13, 16, 20,
       24, 30, 36, 43, 51, 62, 75, 91]
E48 = [100, 121, 147, 178, 215, 261, 316, 383, 464, 562, 681, 825, 105,
       127, 154, 187, 226, 274, 332, 402, 487, 590, 715, 866, 110, 133,
       162, 196, 237, 287, 348, 422, 511, 619, 750, 909, 115, 140, 169,
       205, 249, 301, 365, 442, 536, 649, 787, 953]
E96 = [100, 121, 147, 178, 215, 261, 316, 383, 464, 562, 681, 825, 102,
       124, 150, 182, 221, 267, 324, 392, 475, 576, 698, 845, 105, 127,
       154, 187, 226, 274, 332, 402, 487, 590, 715, 866, 107, 130, 158,
       191, 232, 280, 340, 412, 499, 604, 732, 887, 110, 133, 162, 196,
       237, 287, 348, 422, 511, 619, 750, 909, 113, 137, 165, 200, 243,
       294, 357, 432, 523, 634, 768, 931, 115, 140, 169, 205, 249, 301,
       365, 442, 536, 649, 787, 953, 118, 143, 174, 210, 255, 309, 374,
       453, 549, 665, 806, 976]
E192 = [100, 121, 147, 178, 215, 261, 316, 383, 464, 562, 681, 825, 101,
        123, 149, 180, 218, 264, 320, 388, 470, 569, 690, 835, 102, 124,
        150, 182, 221, 267, 324, 392, 475, 576, 698, 845, 104, 126, 152,
        184, 223, 271, 328, 397, 481, 583, 706, 856, 105, 127, 154, 187,
        226, 274, 332, 402, 487, 590, 715, 866, 106, 129, 156, 189, 229,
        277, 336, 407, 493, 597, 723, 876, 107, 130, 158, 191, 232, 280,
        340, 412, 499, 604, 732, 887, 109, 132, 160, 193, 234, 284, 344,
        417, 505, 612, 741, 898, 110, 133, 162, 196, 237, 287, 348, 422,
        511, 619, 750, 909, 111, 135, 164, 198, 240, 291, 352, 427, 517,
        626, 759, 920, 113, 137, 165, 200, 243, 294, 357, 432, 523, 634,
        768, 931, 114, 138, 167, 203, 246, 298, 361, 437, 530, 642, 777,
        942, 115, 140, 169, 205, 249, 301, 365, 442, 536, 649, 787, 953,
        117, 142, 172, 208, 252, 305, 370, 448, 542, 657, 796, 965, 118,
        143, 174, 210, 255, 309, 374, 453, 549, 665, 806, 976, 120, 145,
        176, 213, 258, 312, 379, 459, 556, 673, 816, 988]


def iseia(r, valid_types=(E6, E12, E24)):
    """Return True if ``r`` is a standard EIA component value.

    By default the 5% series (E6/E12/E24) are checked; pass other series
    via ``valid_types`` for tighter tolerances.
    """
    # Reject anything that is not a finite, non-negative real number.
    if not isinstance(r, numbers.Number):
        return False
    if r < 0 or math.isnan(r) or math.isinf(r):
        return False
    # 0 ohms (a jumper) is considered a valid resistor.
    if r == 0:
        return True
    # Scale into the canonical [100, 1000) decade.
    while r < 100:
        r = r * 10
    while r >= 1000:
        r = r / 10
    # Values more than 0.01 away from an integer cannot be EIA
    # (tolerating floating-point scaling error).
    if abs(r - round(r)) > 0.01:
        return False
    r = int(round(r))
    # Match against the requested series; a trailing zero lets two-digit
    # series entries (e.g. 22) match their scaled form (220).
    for series in valid_types:
        if r in series or ((r % 10) == 0 and int(r / 10.) in series):
            return True
    return False
if __name__ == '__main__':
    # Self-test: every line should print True.
    # The original used Python-2 print statements; parenthesising the single
    # argument keeps identical output on Python 2 while also being valid
    # Python 3.
    print(iseia(100))      # 100 ohm resistor is EIA
    print(not iseia(101))  # 101 is not
    print(not iseia(100.3))  # Floating point close to EIA is not EIA
    print(iseia(100.001))  # But within floating point error is
    print(iseia(1e5))      # We handle big numbers well
    print(iseia(2200))     # We handle middle-of-the-list well
    # We can handle 1% components correctly; 2.2k is EIA24, but not EIA48.
    print(not iseia(2200, (E48, E96, E192)))
    print(iseia(5490e2, (E48, E96, E192)))
    print(iseia(2200))
    print(not iseia(5490e2))
    print(iseia(1e-5))     # We handle little numbers well
    print(not iseia("Hello"))  # Junk handled okay
    print(not iseia(float('NaN')))
    print(not iseia(-1))
    print(not iseia(iseia))
    print(not iseia(float('Inf')))
    print(iseia(0))        # Corner case. 0 is a standard resistor value.
# (C) Wegener Center for Climate and Global Change, University of Graz, 2015
#
# This file is part of pyCAT.
#
# pyCAT is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by the
# Free Software Foundation.
#
# pyCAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyCAT. If not, see <http://www.gnu.org/licenses/>.
import datetime
import iris
import numpy as np
import numpy.ma as ma
from dateutil import parser
from dateutil.relativedelta import relativedelta
from iris.coords import DimCoord
try:
from cf_units import Unit
except:
# for iris<=1.9
from iris.unit import Unit
def _get_max_true_block_length(up_down):
    """
    Calculate the maximum length of True blocks in an array.
    The True blocks in the array are defined by up/down changes from False
    to True and vice-versa.
    Args:
    * up_down (tuple of numpy.arrays):
        information about the up/down changes
    Returns:
        numpy.array of same dimension of up_down[0] holding the number
        of maximum of the True block lengths in the array
    """
    # One result cell per position, dropping the (last) change axis.
    out_shape = up_down[0].shape[:-1]
    # Start fully masked with fill value -1; cells stay masked unless set.
    ret = ma.zeros(out_shape) - 1
    ret.fill_value = -1
    ret.mask = True
    for index in np.ndindex(out_shape):
        # NOTE(review): a cell is skipped (left masked) as soon as *any*
        # element along its change axis is masked -- confirm this
        # all-or-nothing treatment is intended.
        if not up_down[0][index].mask.any():
            start_idxs = ma.where(up_down[0][index])
            stop_idxs = ma.where(up_down[1][index])
            try:
                # Block length = stop index - start index + 1.
                ret[index] = np.max(stop_idxs[-1] - start_idxs[-1] + 1)
            except ValueError:
                # np.max over an empty sequence: no True block at all.
                ret[index] = 0
    return ret
def _get_len_true_block_length(up_down, length):
    """
    Calculate the len of True blocks in an array that succeed the given length.
    The True blocks in the array are defined by up/down changes from False
    to True and vice-versa.
    Args:
    * up_down (tuple of numpy.arrays):
        information about the up/down changes
    * length (int or float):
        threshold for the length of blocks to be accounted for
    Returns:
        numpy.array of same dimension of up_down[0] holding the number
        of block lengths succeeding the given length
    """
    # One result cell per position, dropping the (last) change axis.
    out_shape = up_down[0].shape[:-1]
    # Start fully masked with fill value -1; cells stay masked unless set.
    ret = ma.zeros(out_shape) - 1
    ret.fill_value = -1
    ret.mask = True
    for index in np.ndindex(out_shape):
        # NOTE(review): as in _get_max_true_block_length, any masked step
        # leaves the whole cell masked -- confirm intended.
        if not up_down[0][index].mask.any():
            start_idxs = ma.where(up_down[0][index])
            stop_idxs = ma.where(up_down[1][index])
            try:
                # Lengths of all True blocks, then count strictly-greater ones.
                dry_blocks = stop_idxs[-1] - start_idxs[-1] + 1
                ret[index] = np.where(dry_blocks > length)[0].shape[0]
            except ValueError:
                # No True block at all in this cell.
                ret[index] = 0
    return ret
def _get_true_block_lengths(array, axis=-1):
"""
calculate the lengths of True blocks in an array over the given axis
Args:
* array (numpy.array):
a boolean numpy.array in any dimension
* axis (int):
the axis over which the True blocks are calculated
Returns:
tuple of numpy.arrays holding the indices of the array from
False to True and True to False, respectively
"""
# roll the considered axis to the end
a = np.rollaxis(array, axis, array.ndim)
up = np.concatenate(
(np.resize(a[..., 0], a.shape[:-1] + (1,)),
np.logical_and(np.logical_not(a[..., :-1]), a[..., 1:])
), axis=a.ndim - 1)
down = np.concatenate(
(np.logical_and(a[..., :-1], np.logical_not(a[..., 1:])),
np.resize(a[..., -1], a.shape[:-1] + (1,))
), axis=a.ndim - 1)
if isinstance(a, ma.core.MaskedArray):
up.mask = a.mask
else:
up = ma.masked_array(up, False)
return up, down
def _make_time_dimension(start_date, end_date, period='year', align='center'):
    """
    create a temporal iris.coords.DimCoord
    Args:
    * start_date (string or datetime):
        the start date of the vector
    * end_date (string or datetime):
        the end date of the vector
    Kwargs:
    * period (string):
        create annual ('year'), seasonal ('season') or monthly ('month')
        intervals in days
    * align (string):
        put the datetime point at the first, center (default) or
        last of the period
    Returns:
        iris.coords.DimCoord with standard_name 'time' using a gregorian calendar
    """
    if not isinstance(start_date, datetime.datetime):
        start_date = parser.parse(start_date)
    if not isinstance(end_date, datetime.datetime):
        end_date = parser.parse(end_date)
    first_day_of_year = datetime.datetime(start_date.year, 1, 1)
    units = Unit('days since {:%F}'.format(first_day_of_year),
                 calendar='gregorian'
                 )
    if period == 'year':
        increment = relativedelta(years=1)
        start = datetime.datetime(start_date.year, 1, 1)
    elif period == 'season':
        increment = relativedelta(months=3)
        year = start_date.year
        # BUG FIX: use floor division.  Under Python 3 the original
        # 'month / 3 * 3' produces a float, and datetime() below would
        # raise TypeError; '//' gives the same result Python 2 gave.
        month = start_date.month // 3 * 3
        if month in (0, 12):
            # Jan/Feb (and Dec) belong to the DJF season that starts the
            # previous December.
            month = 12
            year -= 1
        start = datetime.datetime(year, month, 1)
        # add one month to the end_date in order to catch the december
        # of the last djf within the period
        end_date += relativedelta(months=1)
    elif period == 'month':
        increment = relativedelta(months=1)
        start = datetime.datetime(start_date.year, start_date.month, 1)
    else:
        raise ValueError("period must be one of year, season, month or day")
    if align == 'center':
        # Shift to the middle of the (inclusive) period.
        start = start + \
            ((start + increment - relativedelta(days=1)) - start) / 2
    elif align == 'last':
        start = start + increment - relativedelta(days=1)
    # Collect day offsets relative to Jan 1 of the start year.
    data = []
    while start < end_date:
        data.append((start - first_day_of_year).days)
        start += increment
    return DimCoord(np.array(data),
                    var_name='time',
                    standard_name='time',
                    units=units
                    )
def _create_cube(long_name='', var_name='', units='1',
                 dim_coords_and_dims=[], fill_value=-1):
    """
    Create an 'empty' (fully masked) iris.cube.Cube with the given dimensions.

    Kwargs:
    * long_name (string):
        Long description of the variable
    * var_name (string):
        Variable name
    * units (iris.unit.Unit or string):
        the unit of the variable
    * dim_coords_and_dims (list of iris.coords.DimCoord):
        the dimension of the variable
    * fill_value:
        fill value of the masked payload array
    Returns:
        An 'empty' iris.cube.Cube
    """
    # Shape is taken from the first element of each (coord, dim) pair.
    dim_lengths = [pair[0].shape[0] for pair in dim_coords_and_dims]
    payload = ma.ones(dim_lengths) * fill_value
    payload.mask = True
    payload.fill_value = fill_value
    unit_obj = Unit(units) if isinstance(units, str) else units
    return iris.cube.Cube(payload, long_name=long_name, var_name=var_name,
                          units=unit_obj,
                          dim_coords_and_dims=dim_coords_and_dims)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import unittest
import hashlib
import os
class ExternalDataCoreTest(unittest.TestCase):
    """Sanity checks for the CMake external-data download mechanism."""

    def testExternalDataFile(self):
        """Asserts that the external data file was correctly downloaded."""
        data_dir = os.environ['GIRDER_TEST_DATA_PREFIX']
        filepath = os.path.join(data_dir, 'test_file.txt')
        self.assertTrue(
            os.path.exists(filepath),
            'The test file does not exist.'
        )
        # Verify content integrity via its known MD5 digest.
        digest = hashlib.md5()
        with open(filepath, 'r') as f:
            digest.update(f.read().encode('utf-8'))
        self.assertEqual(
            digest.hexdigest(),
            '169293f7c9138e4b50ebcab4358dc509',
            'Invalid test file content.'
        )
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os,unittest
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle, LongTable
from reportlab.platypus.doctemplate import PageAccumulator
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch, cm
from reportlab.lib.utils import simpleSplit
from reportlab.lib import colors
styleSheet = getSampleStyleSheet()
class MyPageAccumulator(PageAccumulator):
    """Accumulator that footnotes each page with the multiples it saw."""

    def pageEndAction(self, canv, doc):
        # self.data holds (value, ...) tuples collected while drawing the page.
        multiples_of_42 = [x[0] for x in self.data if not x[0] % 42]
        multiples_of_13 = [x[0] for x in self.data if not x[0] % 13]
        if multiples_of_13 and multiples_of_42:
            s = 'Saw multiples of 13 and 42'
        elif multiples_of_13:
            s = 'Saw multiples of 13'
        elif multiples_of_42:
            s = 'Saw multiples of 42'
        else:
            # Nothing interesting on this page: draw no footer.
            return
        canv.saveState()
        canv.setFillColor(colors.purple)
        canv.setFont("Helvetica", 6)
        canv.drawString(1 * inch, 1 * inch, s)
        canv.restoreState()
# Module-level accumulator shared by both demo documents below.
PA = MyPageAccumulator('_42_divides')
class MyDocTemplate(SimpleDocTemplate):
    # Hook the shared accumulator into every page template before building.
    def beforeDocument(self):
        for pt in self.pageTemplates:
            PA.attachToPageTemplate(pt)
def textAccum2():
    """Build a 500-row table PDF, tagging rows via PA.onDrawStr and
    pre-wrapping cell text with simpleSplit instead of Paragraph flowables."""
    doc = MyDocTemplate(outputfile('test_platypus_accum2.pdf'),
            pagesize=(8.5*inch, 11*inch), showBoundary=1)
    story=[]
    story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
    sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
            ('BOX',(0,0),(-1,-1),2,colors.red),
            ('FONTNAME',(0,0),(-1,-1),'Helvetica'),
            ('FONTSIZE',(0,0),(-1,-1),10),
            ]
    # Wrap a string to the given width by inserting hard newlines.
    def myCV(s,fontName='Helvetica',fontSize=10,maxWidth=72):
        return '\n'.join(simpleSplit(s,fontName,fontSize,maxWidth))
    data = [[PA.onDrawStr(str(i+1),i+1),
            myCV("xx "* (i%10),maxWidth=100-12),
            myCV("blah "*(i%40),maxWidth=200-12)]
            for i in range(500)]
    t=LongTable(data, style=sty, colWidths = [50,100,200])
    story.append(t)
    doc.build(story)
def textAccum1():
    """Build a 500-row table PDF, tagging rows via PA.onDrawText embedded
    in Paragraph flowables."""
    doc = MyDocTemplate(outputfile('test_platypus_accum1.pdf'),
            pagesize=(8.5*inch, 11*inch), showBoundary=1)
    story=[]
    story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
    sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
            ('BOX',(0,0),(-1,-1),2,colors.red),
            ]
    data = [[str(i+1), Paragraph("xx "* (i%10),
            styleSheet["BodyText"]),
            Paragraph(("blah "*(i%40))+PA.onDrawText(i+1), styleSheet["BodyText"])]
            for i in range(500)]
    t=LongTable(data, style=sty, colWidths = [50,100,200])
    story.append(t)
    doc.build(story)
class TablesTestCase(unittest.TestCase):
    """Build the two accumulator demo documents."""

    def test1(self):
        textAccum1()

    def test2(self):
        textAccum2()
def makeSuite():
    # Standard reportlab test-harness hook.
    return makeSuiteForClasses(TablesTestCase)
#noruntests
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Internal utilities for parsing Python subset to TIR"""
import ast
import inspect
import logging
import sys
import numpy
import tvm.runtime
from tvm._ffi.base import numeric_types
from tvm.ir.container import Array
from tvm.tir import expr as _expr
from tvm.tir import stmt as _stmt
from tvm.te.tensor import Tensor
# pylint: disable=invalid-name
# Accepted argument types from the numpy world (scalars and ndarrays).
np_arg_types = tuple(list(numeric_types) + [numpy.ndarray])
# Accepted argument types from the TVM world.
tvm_arg_types = (Tensor, Array, _expr.Var, _expr.ConstExpr)
# Immediate (constant) expression node types.
halide_imm_types = (_expr.IntImm, _expr.FloatImm)
def _internal_assert(cond, err):
"""Simplify the code segment like if not XXX then raise an error"""
if not cond:
raise ValueError(err)
# Useful constants. In avoid of runtime dependences, we use function calls to return them.
def make_nop():
    """Returns a 'no operation' node in HalideIR."""
    # Evaluating a constant has no effect, so it serves as a placeholder stmt.
    return _stmt.Evaluate(tvm.runtime.const(0, dtype="int32"))
def is_docstring(node):
    """Checks if a Python AST node is a docstring (a bare string expression)."""
    if not isinstance(node, ast.Expr):
        return False
    return isinstance(node.value, ast.Str)
def _pruned_source(func):
    """Prune source code's extra leading spaces"""
    try:
        lines = inspect.getsource(func).split("\n")
        # Use the first line's indentation as the common prefix to strip, so
        # functions defined inside classes/functions dedent cleanly.
        leading_space = len(lines[0]) - len(lines[0].lstrip(" "))
        lines = [line[leading_space:] for line in lines]
        return "\n".join(lines)
    except IOError as err:
        # Python 2 raises IOError here for some callables; surface a clear
        # message before re-raising.
        if sys.version_info[0] == 2 and str(err) == "could not get source code":
            logging.log(
                logging.CRITICAL,
                "This module is not fully operated under Python2... " "Please move to Python3!",
            )
        raise err
def replace_io(body, rmap):
    """Replacing tensors usage according to the dict given"""
    # pylint: disable=import-outside-toplevel
    from tvm.tir import stmt_functor

    def replace(op):
        # Rewrite loads/stores whose producer op appears in the mapping;
        # returning None tells ir_transform to keep the node unchanged.
        if isinstance(op, _stmt.ProducerStore) and op.producer.op in rmap.keys():
            buf = rmap[op.producer.op]
            return _stmt.ProducerStore(buf, op.value, op.indices)
        if isinstance(op, _expr.ProducerLoad) and op.producer.op in rmap.keys():
            buf = rmap[op.producer.op]
            return _expr.ProducerLoad(buf, op.indices)
        return None

    return stmt_functor.ir_transform(body, None, replace, ["tir.ProducerStore", "tir.ProducerLoad"])
def _is_tvm_arg_types(args):
    """Determine a list of element is either a list of tvm arguments of a list of numpy arguments.
    If neither is true, raise a value error."""
    # The first element decides which world we are in; every remaining
    # element must then belong to the same world.
    if isinstance(args[0], tvm_arg_types):
        for elem in args[1:]:
            _internal_assert(
                isinstance(elem, tvm_arg_types),
                "Expecting a Var, Tensor or ConstExpr instance but %s get!" % str(type(elem)),
            )
        return True
    _internal_assert(
        isinstance(args[0], np_arg_types), "Expect a numpy type but %s get!" % str(type(args[0]))
    )
    for elem in args[1:]:
        _internal_assert(
            isinstance(elem, np_arg_types), "Expect a numpy type but %s get!" % str(type(elem))
        )
    return False
//// [tests/cases/compiler/APISample_WatchWithOwnWatchHost.ts] ////
//// [package.json]
{
"name": "typescript",
"types": "/.ts/typescript.d.ts"
}
//// [APISample_WatchWithOwnWatchHost.ts]
/*
* Note: This test is a public API sample. This sample verifies creating abstract builder to watch list of root files
* Please log a "breaking change" issue for any API breaking change affecting this issue
*/
declare var console: any;
import ts = require("typescript");
function watchMain() {
    // get list of files and compiler options somehow
    const files: string[] = [];
    const options: ts.CompilerOptions = {};
    // Non-null assertions on watchFile/watchDirectory: ts.sys provides them
    // on hosts that support file watching.
    const host: ts.WatchCompilerHostOfFilesAndCompilerOptions<ts.BuilderProgram> = {
        rootFiles: files,
        options,
        useCaseSensitiveFileNames: () => ts.sys.useCaseSensitiveFileNames,
        getNewLine: () => ts.sys.newLine,
        getCurrentDirectory: ts.sys.getCurrentDirectory,
        getDefaultLibFileName: options => ts.getDefaultLibFilePath(options),
        fileExists: ts.sys.fileExists,
        readFile: ts.sys.readFile,
        directoryExists: ts.sys.directoryExists,
        getDirectories: ts.sys.getDirectories,
        readDirectory: ts.sys.readDirectory,
        realpath: ts.sys.realpath,
        watchFile: ts.sys.watchFile!,
        watchDirectory: ts.sys.watchDirectory!,
        createProgram: ts.createAbstractBuilder
    };
    // You can technically override any given hook on the host, though you probably don't need to.
    // Note that we're assuming `origCreateProgram` and `origPostProgramCreate` doesn't use `this` at all.
    const origCreateProgram = host.createProgram;
    host.createProgram = (rootNames, options, host, oldProgram) => {
        console.log("** We're about to create the program! **");
        return origCreateProgram(rootNames, options, host, oldProgram);
    }
    const origPostProgramCreate = host.afterProgramCreate;
    host.afterProgramCreate = program => {
        console.log("** We finished making the program! **");
        origPostProgramCreate!(program);
    };
    // `createWatchProgram` creates an initial program, watches files, and updates the program over time.
    ts.createWatchProgram(host);
}
watchMain();
//// [APISample_WatchWithOwnWatchHost.js]
"use strict";
/*
* Note: This test is a public API sample. This sample verifies creating abstract builder to watch list of root files
* Please log a "breaking change" issue for any API breaking change affecting this issue
*/
Object.defineProperty(exports, "__esModule", { value: true });
const ts = require("typescript");
function watchMain() {
    // get list of files and compiler options somehow
    const files = [];
    const options = {};
    // Host wiring mirrors ts.sys; see the TypeScript source section above.
    const host = {
        rootFiles: files,
        options,
        useCaseSensitiveFileNames: () => ts.sys.useCaseSensitiveFileNames,
        getNewLine: () => ts.sys.newLine,
        getCurrentDirectory: ts.sys.getCurrentDirectory,
        getDefaultLibFileName: options => ts.getDefaultLibFilePath(options),
        fileExists: ts.sys.fileExists,
        readFile: ts.sys.readFile,
        directoryExists: ts.sys.directoryExists,
        getDirectories: ts.sys.getDirectories,
        readDirectory: ts.sys.readDirectory,
        realpath: ts.sys.realpath,
        watchFile: ts.sys.watchFile,
        watchDirectory: ts.sys.watchDirectory,
        createProgram: ts.createAbstractBuilder
    };
    // You can technically override any given hook on the host, though you probably don't need to.
    // Note that we're assuming `origCreateProgram` and `origPostProgramCreate` doesn't use `this` at all.
    const origCreateProgram = host.createProgram;
    host.createProgram = (rootNames, options, host, oldProgram) => {
        console.log("** We're about to create the program! **");
        return origCreateProgram(rootNames, options, host, oldProgram);
    };
    const origPostProgramCreate = host.afterProgramCreate;
    host.afterProgramCreate = program => {
        console.log("** We finished making the program! **");
        origPostProgramCreate(program);
    };
    // `createWatchProgram` creates an initial program, watches files, and updates the program over time.
    ts.createWatchProgram(host);
}
watchMain();
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This tool creates a docker image from a list of layers."""
# This is the main program to create a docker image. It expect to be run with:
# join_layers --output=output_file \
# --layer=layer1 [--layer=layer2 ... --layer=layerN] \
# --id=@identifier \
# --name=myname --repository=repositoryName
# See the gflags declaration about the flags argument details.
import os.path
import sys
from tools.build_defs.pkg import archive
from third_party.py import gflags
# Command-line interface; see the module docstring for expected usage.
gflags.DEFINE_string('output', None, 'The output file, mandatory')
gflags.MarkFlagAsRequired('output')
gflags.DEFINE_multistring('layer', [], 'The tar files for layers to join.')
gflags.DEFINE_string(
    'id', None, 'The hex identifier of the top layer (hexstring or @filename).')
gflags.DEFINE_string(
    'repository', None,
    'The name of the repository to add this image (use with --id and --name).')
# BUG FIX: corrected the user-visible typo '--repsoitory' -> '--repository'
# in the help text below.
gflags.DEFINE_string(
    'name', None,
    'The symbolic name of this image (use with --id and --repository).')
FLAGS = gflags.FLAGS
def _layer_filter(name):
"""Ignore files 'top' and 'repositories' when merging layers."""
basename = os.path.basename(name)
return basename not in ('top', 'repositories')
def create_image(output, layers, identifier=None,
                 name=None, repository=None):
    """Creates a Docker image from a list of layers.
    Args:
      output: the name of the docker image file to create.
      layers: the layers (tar files) to join to the image.
      identifier: the identifier of the top layer for this image.
      name: symbolic name for this docker image.
      repository: repository name for this docker image.
    """
    tar = archive.TarFileWriter(output)
    # NOTE(review): the writer is never explicitly closed here -- confirm
    # that TarFileWriter flushes/finalizes the archive on its own.
    for layer in layers:
        # Merge each layer, dropping its per-layer 'top'/'repositories' files.
        tar.add_tar(layer, name_filter=_layer_filter)
    # In addition to N layers of the form described above, there might be
    # a single file at the top of the image called repositories.
    # This file contains a JSON blob of the form:
    # {
    #   'repo':{
    #     'tag-name': 'top-most layer hex',
    #     ...
    #   },
    #   ...
    # }
    if identifier:
        # If the identifier is not provided, then the resulted layer will be
        # created without a 'top' file. Docker doesn't needs that file nor
        # the repository to load the image and for intermediate layer,
        # docker_build store the name of the layer in a separate artifact so
        # this 'top' file is not needed.
        tar.add_file('top', content=identifier)
        if repository and name:
            tar.add_file('repositories',
                         content='\n'.join([
                             '{', ' "%s": {' % repository, ' "%s": "%s"' % (
                                 name, identifier), ' }', '}'
                         ]))
def main(unused_argv):
  """Resolves the --id flag (plain hex or '@file') and builds the image."""
  identifier = FLAGS.id
  if identifier and identifier.startswith('@'):
    # '@path' means: read the actual hex identifier from that file.
    with open(identifier[1:], 'r') as id_file:
      identifier = id_file.read()
  create_image(FLAGS.output, FLAGS.layer, identifier, FLAGS.name,
               FLAGS.repository)


if __name__ == '__main__':
  main(FLAGS(sys.argv))
import os
import pkg_resources
from digicampipe.io.event_stream import event_stream
from digicampipe.io.zfits import count_number_events
from digicampipe.io.zfits import zfits_event_source
# Path to a bundled example zfits file, resolved relative to the installed
# digicampipe package. The file holds 100 camera events (see constants below).
example_file_path = pkg_resources.resource_filename(
    'digicampipe',
    os.path.join(
        'tests',
        'resources',
        'example_100_evts.000.fits.fz'
    )
)
# Camera event numbers of the first and last events in the example file.
FIRST_EVENT_ID = 97750287
LAST_EVENT_ID = 97750386
# Total number of events stored in the example file.
EVENTS_IN_EXAMPLE_FILE = 100
def test_and_benchmark_event_source(benchmark):
    """Benchmark a full pass over every event in the example file."""
    @benchmark
    def iterate_all_events():
        # Exhaust the event source; only iteration speed matters here.
        for _event in zfits_event_source(example_file_path):
            pass
def test_count_number_event():
    """count_number_events() must sum the events over a list of files."""
    n_files = 10
    file_list = [example_file_path] * n_files
    expected = n_files * EVENTS_IN_EXAMPLE_FILE
    assert count_number_events(file_list) == expected
def _first_event_number(event_iterable, tel_id=1):
    """Return the camera event number of the first event from an iterable.

    Args:
        event_iterable: iterator of event containers exposing `data.r0`.
        tel_id: telescope id whose event number is read.

    Raises:
        AssertionError: if the iterable yields no events. (The previous
        inline loops would have failed with an unhelpful NameError on an
        unbound `number` in that case.)
    """
    for data in event_iterable:
        return data.r0.tel[tel_id].camera_event_number
    raise AssertionError('event source yielded no events')


def test_event_id():
    """Both readers must position the stream on the requested event_id."""
    event_id = LAST_EVENT_ID - 3
    # The low-level zfits reader and the high-level event_stream wrapper
    # must behave identically; the duplicated loops are now one helper.
    assert _first_event_number(
        zfits_event_source(example_file_path, event_id=event_id)) == event_id
    assert _first_event_number(
        event_stream(example_file_path, event_id=event_id)) == event_id
if __name__ == '__main__':
    # Allow running this regression check directly, without pytest.
    test_event_id()
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgssinglesymbolrenderer.py
---------------------
Date : December 2015
Copyright : (C) 2015 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize
from qgis.core import (QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsSingleSymbolRenderer,
QgsFillSymbol,
QgsFeatureRequest
)
from qgis.testing import unittest
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
# Directory holding the shared QGIS unit-test fixture data (shapefiles etc.).
TEST_DATA_DIR = unitTestDataPath()
class TestQgsSingleSymbolRenderer(unittest.TestCase):

    def setUp(self):
        """Build a polygon layer rendered with a single fill symbol."""
        self.iface = get_iface()
        shapefile = os.path.join(TEST_DATA_DIR, 'polys_overlapping.shp')
        layer = QgsVectorLayer(shapefile, 'Polys', 'ogr')
        QgsProject.instance().addMapLayer(layer)
        # Single-symbol renderer with a plain fill symbol.
        fill_symbol = QgsFillSymbol.createSimple(
            {'color': '#fdbf6f', 'outline_color': 'black'})
        self.renderer = QgsSingleSymbolRenderer(fill_symbol)
        layer.setRenderer(self.renderer)
        # Fixed-size map settings for deterministic render comparisons.
        self.mapsettings = self.iface.mapCanvas().mapSettings()
        self.mapsettings.setOutputSize(QSize(400, 400))
        self.mapsettings.setOutputDpi(96)
        self.mapsettings.setExtent(QgsRectangle(-163, 22, -70, 52))
        self.mapsettings.setLayers([layer])

    def testOrderBy(self):
        """Rendering honors an order-by clause, and ignores it when disabled."""
        order_by = QgsFeatureRequest.OrderBy(
            [QgsFeatureRequest.OrderByClause('Value', False)])
        self.renderer.setOrderBy(order_by)
        self.renderer.setOrderByEnabled(True)
        checker = QgsMultiRenderChecker()
        checker.setMapSettings(self.mapsettings)
        checker.setControlName('expected_singlesymbol_orderby')
        self.assertTrue(checker.runTest('singlesymbol_orderby'))
        # With ordering disabled, the plain single-symbol rendering returns.
        self.renderer.setOrderByEnabled(False)
        self.assertTrue(checker.runTest('single'))
if __name__ == '__main__':
    # Run this test module standalone via the unittest CLI.
    unittest.main()
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Bindings to the `tee` and `splice` system calls
'''
import os
import operator
import six
import ctypes
import ctypes.util
# Public API of this module: the two callable singletons bound below.
__all__ = ['tee', 'splice']
# NOTE(review): c_long matches loff_t only on LP64 platforms; on 32-bit
# systems loff_t is 64-bit while c_long is 32-bit — confirm intended targets.
c_loff_t = ctypes.c_long
# python 2.6 doesn't have c_ssize_t
c_ssize_t = getattr(ctypes, 'c_ssize_t', ctypes.c_long)
class Tee(object):
    '''Binding to `tee`'''

    __slots__ = ('_c_tee',)

    def __init__(self):
        libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
        try:
            tee_fn = libc.tee
        except AttributeError:
            # This libc does not export `tee`; mark the binding unavailable.
            self._c_tee = None
            return

        tee_fn.argtypes = [
            ctypes.c_int,
            ctypes.c_int,
            ctypes.c_size_t,
            ctypes.c_uint
        ]
        tee_fn.restype = c_ssize_t

        def errcheck(result, func, arguments):
            if result != -1:
                return result
            # Retrieve errno (and reset it for the next call), then raise.
            errno = ctypes.set_errno(0)
            raise IOError(errno, 'tee: %s' % os.strerror(errno))

        tee_fn.errcheck = errcheck
        self._c_tee = tee_fn

    def __call__(self, fd_in, fd_out, len_, flags):
        '''See `man 2 tee`

        File-descriptors can be file-like objects with a `fileno` method, or
        integers.

        Flags can be an integer value, or a list of flags (exposed on
        `splice`).

        Returns the number of bytes transferred, i.e. the actual result of
        the underlying `tee` call.

        Upon other errors, an `IOError` is raised with the proper `errno`
        set.
        '''
        if not self.available:
            raise EnvironmentError('tee not available')

        if isinstance(flags, six.integer_types):
            c_flags = flags
        else:
            # A sequence of flags: OR them together.
            c_flags = six.moves.reduce(operator.or_, flags, 0)

        c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
        c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()

        return self._c_tee(c_fd_in, c_fd_out, len_, c_flags)

    @property
    def available(self):
        '''Whether `tee` could be resolved from libc'''
        return self._c_tee is not None


tee = Tee()
del Tee
class Splice(object):
    '''Binding to `splice`'''

    # Flag values from `bits/fcntl-linux.h`
    SPLICE_F_MOVE = 1
    SPLICE_F_NONBLOCK = 2
    SPLICE_F_MORE = 4
    SPLICE_F_GIFT = 8

    __slots__ = ('_c_splice',)

    def __init__(self):
        libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
        try:
            splice_fn = libc.splice
        except AttributeError:
            # This libc does not export `splice`; mark the binding unset.
            self._c_splice = None
            return

        off_ptr_type = ctypes.POINTER(c_loff_t)
        splice_fn.argtypes = [
            ctypes.c_int, off_ptr_type,
            ctypes.c_int, off_ptr_type,
            ctypes.c_size_t,
            ctypes.c_uint
        ]
        splice_fn.restype = c_ssize_t

        def errcheck(result, func, arguments):
            if result == -1:
                # Retrieve errno (and reset it for the next call), then raise.
                errno = ctypes.set_errno(0)
                raise IOError(errno, 'splice: %s' % os.strerror(errno))
            # Also surface the output values of the offset pointers.
            off_in = arguments[1]
            off_out = arguments[3]
            return (
                result,
                off_in.contents.value if off_in is not None else None,
                off_out.contents.value if off_out is not None else None)

        splice_fn.errcheck = errcheck
        self._c_splice = splice_fn

    def __call__(self, fd_in, off_in, fd_out, off_out, len_, flags):
        '''See `man 2 splice`

        File-descriptors can be file-like objects with a `fileno` method, or
        integers.

        Flags can be an integer value, or a list of flags (exposed on this
        object).

        Returns a tuple of the result of the `splice` call, the output value
        of `off_in` and the output value of `off_out` (or `None` for any of
        these output values, if applicable).

        Upon other errors, an `IOError` is raised with the proper `errno`
        set.

        Note: if you want to pass `NULL` as value for `off_in` or `off_out`
        to the system call, you must pass `None`, *not* 0!
        '''
        if not self.available:
            raise EnvironmentError('splice not available')

        if isinstance(flags, six.integer_types):
            c_flags = flags
        else:
            # A sequence of flags: OR them together.
            c_flags = six.moves.reduce(operator.or_, flags, 0)

        c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
        c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()

        # `None` maps to a NULL offset pointer; anything else is boxed.
        c_off_in = (
            ctypes.pointer(c_loff_t(off_in)) if off_in is not None else None)
        c_off_out = (
            ctypes.pointer(c_loff_t(off_out)) if off_out is not None else None)

        return self._c_splice(
            c_fd_in, c_off_in, c_fd_out, c_off_out, len_, c_flags)

    @property
    def available(self):
        '''Whether `splice` could be resolved from libc'''
        return self._c_splice is not None


splice = Splice()
del Splice
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/i3c/i3c.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: I3C bus
maintainers:
- Alexandre Belloni <alexandre.belloni@bootlin.com>
- Miquel Raynal <miquel.raynal@bootlin.com>
description: |
I3C busses can be described with a node for the primary I3C controller device
and a set of child nodes for each I2C or I3C slave on the bus. Each of them
may, during the life of the bus, request mastership.
properties:
$nodename:
pattern: "^i3c@[0-9a-f]+$"
"#address-cells":
const: 3
description: |
Each I2C device connected to the bus should be described in a subnode.
All I3C devices are supposed to support DAA (Dynamic Address Assignment),
and are thus discoverable. So, by default, I3C devices do not have to be
described in the device tree. This being said, one might want to attach
extra resources to these devices, and those resources may have to be
described in the device tree, which in turn means we have to describe
I3C devices.
Another use case for describing an I3C device in the device tree is when
this I3C device has a static I2C address and we want to assign it a
specific I3C dynamic address before the DAA takes place (so that other
devices on the bus can't take this dynamic address).
"#size-cells":
const: 0
i3c-scl-hz:
description: |
Frequency of the SCL signal used for I3C transfers. When undefined, the
default value should be 12.5MHz.
May not be supported by all controllers.
i2c-scl-hz:
description: |
Frequency of the SCL signal used for I2C transfers. When undefined, the
default should be to look at LVR (Legacy Virtual Register) values of
I2C devices described in the device tree to determine the maximum I2C
frequency.
May not be supported by all controllers.
mctp-controller:
type: boolean
description: |
Indicates that the system is accessible via this bus as an endpoint for
MCTP over I3C transport.
required:
- "#address-cells"
- "#size-cells"
patternProperties:
"@[0-9a-f]+$":
type: object
description: |
I2C child, should be named: <device-type>@<i2c-address>
All properties described in dtschema schemas/i2c/i2c-controller.yaml
are valid here, except the reg property whose content is changed.
properties:
compatible:
description:
Compatible of the I2C device.
reg:
items:
- items:
- description: |
I2C address. 10 bit addressing is not supported. Devices with
10-bit address can't be properly passed through DEFSLVS
command.
minimum: 0
maximum: 0x7f
- const: 0
- description: |
Shall encode the I3C LVR (Legacy Virtual Register):
See include/dt-bindings/i3c/i3c.h
bit[31:8]: unused/ignored
bit[7:5]: I2C device index. Possible values:
* 0: I2C device has a 50 ns spike filter
* 1: I2C device does not have a 50 ns spike filter but
supports high frequency on SCL
* 2: I2C device does not have a 50 ns spike filter and is
not tolerant to high frequencies
* 3-7: reserved
bit[4]: tell whether the device operates in FM (Fast Mode)
or FM+ mode:
* 0: FM+ mode
* 1: FM mode
bit[3:0]: device type
* 0-15: reserved
required:
- compatible
- reg
"@[0-9a-f]+,[0-9a-f]+$":
type: object
description: |
I3C child, should be named: <device-type>@<static-i2c-address>,<i3c-pid>
properties:
reg:
items:
- items:
- description: |
Encodes the static I2C address. Should be 0 if the device does
not have one (0 is not a valid I2C address).
minimum: 0
maximum: 0x7f
- description: |
First half of the Provisioned ID (following the PID
definition provided by the I3C specification).
Contains the manufacturer ID left-shifted by 1.
- description: |
Second half of the Provisioned ID (following the PID
definition provided by the I3C specification).
Contains the ORing of the part ID left-shifted by 16,
the instance ID left-shifted by 12 and extra information.
assigned-address:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 0x1
maximum: 0xff
description: |
Dynamic address to be assigned to this device. In case static address is
present (first cell of the reg property != 0), this address is assigned
through SETDASA. If static address is not present, this address is assigned
through SETNEWDA after assigning a temporary address via ENTDAA.
required:
- reg
additionalProperties: true
examples:
- |
#include <dt-bindings/i3c/i3c.h>
i3c@d040000 {
compatible = "cdns,i3c-master";
clocks = <&coreclock>, <&i3csysclock>;
clock-names = "pclk", "sysclk";
interrupts = <3 0>;
reg = <0x0d040000 0x1000>;
#address-cells = <3>;
#size-cells = <0>;
i2c-scl-hz = <100000>;
/* I2C device. */
eeprom@57 {
compatible = "atmel,24c01";
reg = <0x57 0x0 (I2C_FM | I2C_FILTER)>;
pagesize = <0x8>;
};
/* I3C device with a static I2C address and assigned address. */
thermal_sensor: sensor@68,39200144004 {
reg = <0x68 0x392 0x144004>;
assigned-address = <0xa>;
};
/* I3C device with only assigned address. */
pressure_sensor: sensor@0,39200124004 {
            reg = <0x0 0x392 0x124004>;
assigned-address = <0xc>;
};
/*
* I3C device without a static I2C address but requiring
* resources described in the DT.
*/
sensor@0,39200154004 {
reg = <0x0 0x392 0x154004>;
clocks = <&clock_provider 0>;
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/i3c/i3c.yaml |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluate Object Detection result on a single image.
Annotate each detected result as true positives or false positive according to
a predefined IOU ratio. Non Maximum Suppression is used by default. Multi class
detection is supported by default.
Based on the settings, per image evaluation is either performed on boxes or
on object masks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
from object_detection.utils import np_box_mask_list
from object_detection.utils import np_box_mask_list_ops
class PerImageEvaluation(object):
"""Evaluate detection result of a single image."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=0.3,
nms_max_output_boxes=50,
group_of_weight=0.0):
"""Initialized PerImageEvaluation by evaluation parameters.
Args:
num_groundtruth_classes: Number of ground truth object classes
matching_iou_threshold: A ratio of area intersection to union, which is
the threshold to consider whether a detection is true positive or not
nms_iou_threshold: IOU threshold used in Non Maximum Suppression.
nms_max_output_boxes: Number of maximum output boxes in NMS.
group_of_weight: Weight of the group-of boxes.
"""
self.matching_iou_threshold = matching_iou_threshold
self.nms_iou_threshold = nms_iou_threshold
self.nms_max_output_boxes = nms_max_output_boxes
self.num_groundtruth_classes = num_groundtruth_classes
self.group_of_weight = group_of_weight
def compute_object_detection_metrics(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list,
detected_masks=None,
groundtruth_masks=None):
"""Evaluates detections as being tp, fp or weighted from a single image.
The evaluation is done in two stages:
1. All detections are matched to non group-of boxes; true positives are
determined and detections matched to difficult boxes are ignored.
2. Detections that are determined as false positives are matched against
group-of boxes and weighted if matched.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions. Each row is of the format [y_min,
x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing the
confidence scores of the detected N object instances.
detected_class_labels: A integer numpy array of shape [N, 1], repreneting
the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag
detected_masks: (optional) A uint8 numpy array of shape [N, height,
width]. If not None, the metrics will be computed based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape [M, height,
width]. Can have empty masks, i.e. where all values are 0.
Returns:
scores: A list of C float numpy arrays. Each numpy array is of
shape [K, 1], representing K scores detected with object class
label c
tp_fp_labels: A list of C boolean numpy arrays. Each numpy array
is of shape [K, 1], representing K True/False positive label of
object instances detected with class label c
is_class_correctly_detected_in_image: a numpy integer array of
shape [C, 1], indicating whether the correponding class has a least
one instance being correctly detected in the image
"""
detected_boxes, detected_scores, detected_class_labels, detected_masks = (
self._remove_invalid_boxes(detected_boxes, detected_scores,
detected_class_labels, detected_masks))
scores, tp_fp_labels = self._compute_tp_fp(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
is_class_correctly_detected_in_image = self._compute_cor_loc(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks)
return scores, tp_fp_labels, is_class_correctly_detected_in_image
def _compute_cor_loc(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
detected_masks=None,
groundtruth_masks=None):
"""Compute CorLoc score for object detection result.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions. Each row is of the format [y_min,
x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing the
confidence scores of the detected N object instances.
detected_class_labels: A integer numpy array of shape [N, 1], repreneting
the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
detected_masks: (optional) A uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape [M, height,
width].
Returns:
is_class_correctly_detected_in_image: a numpy integer array of
shape [C, 1], indicating whether the correponding class has a least
one instance being correctly detected in the image
Raises:
ValueError: If detected masks is not None but groundtruth masks are None,
or the other way around.
"""
if (detected_masks is not None and
groundtruth_masks is None) or (detected_masks is None and
groundtruth_masks is not None):
raise ValueError(
'If `detected_masks` is provided, then `groundtruth_masks` should '
'also be provided.')
is_class_correctly_detected_in_image = np.zeros(
self.num_groundtruth_classes, dtype=int)
for i in range(self.num_groundtruth_classes):
(gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class) = self._get_ith_class_arrays(
detected_boxes, detected_scores, detected_masks,
detected_class_labels, groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, i)
is_class_correctly_detected_in_image[i] = (
self._compute_is_class_correctly_detected_in_image(
detected_boxes=detected_boxes_at_ith_class,
detected_scores=detected_scores_at_ith_class,
groundtruth_boxes=gt_boxes_at_ith_class,
detected_masks=detected_masks_at_ith_class,
groundtruth_masks=gt_masks_at_ith_class))
return is_class_correctly_detected_in_image
def _compute_is_class_correctly_detected_in_image(self,
detected_boxes,
detected_scores,
groundtruth_boxes,
detected_masks=None,
groundtruth_masks=None):
"""Compute CorLoc score for a single class.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
detected_masks: (optional) A np.uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A np.uint8 numpy array of shape [M, height,
width].
Returns:
is_class_correctly_detected_in_image: An integer 1 or 0 denoting whether a
class is correctly detected in the image or not
"""
if detected_boxes.size > 0:
if groundtruth_boxes.size > 0:
max_score_id = np.argmax(detected_scores)
mask_mode = False
if detected_masks is not None and groundtruth_masks is not None:
mask_mode = True
if mask_mode:
detected_boxlist = np_box_mask_list.BoxMaskList(
box_data=np.expand_dims(detected_boxes[max_score_id], axis=0),
mask_data=np.expand_dims(detected_masks[max_score_id], axis=0))
gt_boxlist = np_box_mask_list.BoxMaskList(
box_data=groundtruth_boxes, mask_data=groundtruth_masks)
iou = np_box_mask_list_ops.iou(detected_boxlist, gt_boxlist)
else:
detected_boxlist = np_box_list.BoxList(
np.expand_dims(detected_boxes[max_score_id, :], axis=0))
gt_boxlist = np_box_list.BoxList(groundtruth_boxes)
iou = np_box_list_ops.iou(detected_boxlist, gt_boxlist)
if np.max(iou) >= self.matching_iou_threshold:
return 1
return 0
def _compute_tp_fp(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list,
detected_masks=None,
groundtruth_masks=None):
"""Labels true/false positives of detections of an image across all classes.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions. Each row is of the format [y_min,
x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing the
confidence scores of the detected N object instances.
detected_class_labels: A integer numpy array of shape [N, 1], repreneting
the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag
detected_masks: (optional) A np.uint8 numpy array of shape [N, height,
width]. If not None, the scores will be computed based on masks.
groundtruth_masks: (optional) A np.uint8 numpy array of shape [M, height,
width].
Returns:
result_scores: A list of float numpy arrays. Each numpy array is of
shape [K, 1], representing K scores detected with object class
label c
result_tp_fp_labels: A list of boolean numpy array. Each numpy array is of
shape [K, 1], representing K True/False positive label of object
instances detected with class label c
Raises:
ValueError: If detected masks is not None but groundtruth masks are None,
or the other way around.
"""
if detected_masks is not None and groundtruth_masks is None:
raise ValueError(
'Detected masks is available but groundtruth masks is not.')
if detected_masks is None and groundtruth_masks is not None:
raise ValueError(
'Groundtruth masks is available but detected masks is not.')
result_scores = []
result_tp_fp_labels = []
for i in range(self.num_groundtruth_classes):
groundtruth_is_difficult_list_at_ith_class = (
groundtruth_is_difficult_list[groundtruth_class_labels == i])
groundtruth_is_group_of_list_at_ith_class = (
groundtruth_is_group_of_list[groundtruth_class_labels == i])
(gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class) = self._get_ith_class_arrays(
detected_boxes, detected_scores, detected_masks,
detected_class_labels, groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, i)
scores, tp_fp_labels = self._compute_tp_fp_for_single_class(
detected_boxes=detected_boxes_at_ith_class,
detected_scores=detected_scores_at_ith_class,
groundtruth_boxes=gt_boxes_at_ith_class,
groundtruth_is_difficult_list=groundtruth_is_difficult_list_at_ith_class,
groundtruth_is_group_of_list=groundtruth_is_group_of_list_at_ith_class,
detected_masks=detected_masks_at_ith_class,
groundtruth_masks=gt_masks_at_ith_class)
result_scores.append(scores)
result_tp_fp_labels.append(tp_fp_labels)
return result_scores, result_tp_fp_labels
def _get_overlaps_and_scores_mask_mode(self, detected_boxes, detected_scores,
detected_masks, groundtruth_boxes,
groundtruth_masks,
groundtruth_is_group_of_list):
"""Computes overlaps and scores between detected and groudntruth masks.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
detected_masks: A uint8 numpy array of shape [N, height, width]. If not
None, the scores will be computed based on masks.
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
groundtruth_masks: A uint8 numpy array of shape [M, height, width].
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag. If a groundtruth box is
group-of box, every detection matching this box is ignored.
Returns:
iou: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If
gt_non_group_of_boxlist.num_boxes() == 0 it will be None.
ioa: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If
gt_group_of_boxlist.num_boxes() == 0 it will be None.
scores: The score of the detected boxlist.
num_boxes: Number of non-maximum suppressed detected boxes.
"""
detected_boxlist = np_box_mask_list.BoxMaskList(
box_data=detected_boxes, mask_data=detected_masks)
detected_boxlist.add_field('scores', detected_scores)
detected_boxlist = np_box_mask_list_ops.non_max_suppression(
detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold)
gt_non_group_of_boxlist = np_box_mask_list.BoxMaskList(
box_data=groundtruth_boxes[~groundtruth_is_group_of_list],
mask_data=groundtruth_masks[~groundtruth_is_group_of_list])
gt_group_of_boxlist = np_box_mask_list.BoxMaskList(
box_data=groundtruth_boxes[groundtruth_is_group_of_list],
mask_data=groundtruth_masks[groundtruth_is_group_of_list])
iou = np_box_mask_list_ops.iou(detected_boxlist, gt_non_group_of_boxlist)
ioa = np.transpose(
np_box_mask_list_ops.ioa(gt_group_of_boxlist, detected_boxlist))
scores = detected_boxlist.get_field('scores')
num_boxes = detected_boxlist.num_boxes()
return iou, ioa, scores, num_boxes
def _get_overlaps_and_scores_box_mode(self, detected_boxes, detected_scores,
groundtruth_boxes,
groundtruth_is_group_of_list):
"""Computes overlaps and scores between detected and groudntruth boxes.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box has group-of tag. If a groundtruth box is
group-of box, every detection matching this box is ignored.
Returns:
iou: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If
gt_non_group_of_boxlist.num_boxes() == 0 it will be None.
ioa: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If
gt_group_of_boxlist.num_boxes() == 0 it will be None.
scores: The score of the detected boxlist.
num_boxes: Number of non-maximum suppressed detected boxes.
"""
detected_boxlist = np_box_list.BoxList(detected_boxes)
detected_boxlist.add_field('scores', detected_scores)
detected_boxlist = np_box_list_ops.non_max_suppression(
detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold)
gt_non_group_of_boxlist = np_box_list.BoxList(
groundtruth_boxes[~groundtruth_is_group_of_list])
gt_group_of_boxlist = np_box_list.BoxList(
groundtruth_boxes[groundtruth_is_group_of_list])
iou = np_box_list_ops.iou(detected_boxlist, gt_non_group_of_boxlist)
ioa = np.transpose(
np_box_list_ops.ioa(gt_group_of_boxlist, detected_boxlist))
scores = detected_boxlist.get_field('scores')
num_boxes = detected_boxlist.num_boxes()
return iou, ioa, scores, num_boxes
  def _compute_tp_fp_for_single_class(self,
                                      detected_boxes,
                                      detected_scores,
                                      groundtruth_boxes,
                                      groundtruth_is_difficult_list,
                                      groundtruth_is_group_of_list,
                                      detected_masks=None,
                                      groundtruth_masks=None):
    """Labels boxes detected with the same class from the same image as tp/fp.
    Args:
      detected_boxes: A numpy array of shape [N, 4] representing detected box
        coordinates
      detected_scores: A 1-d numpy array of length N representing classification
        score
      groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
        box coordinates
      groundtruth_is_difficult_list: A boolean numpy array of length M denoting
        whether a ground truth box is a difficult instance or not. If a
        groundtruth box is difficult, every detection matching this box is
        ignored.
      groundtruth_is_group_of_list: A boolean numpy array of length M denoting
        whether a ground truth box has group-of tag. If a groundtruth box is
        group-of box, every detection matching this box is ignored.
      detected_masks: (optional) A uint8 numpy array of shape [N, height,
        width]. If not None, the scores will be computed based on masks.
      groundtruth_masks: (optional) A uint8 numpy array of shape [M, height,
        width].
    Returns:
      Two arrays of the same size, containing all boxes that were evaluated as
      being true positives or false positives; if a box matched to a difficult
      box or to a group-of box, it is ignored.
      scores: A numpy array representing the detection scores.
      tp_fp_labels: a boolean numpy array indicating whether a detection is a
        true positive.
    """
    # No detections of this class: nothing to label.
    if detected_boxes.size == 0:
      return np.array([], dtype=float), np.array([], dtype=bool)
    # Mask-based (instance segmentation) evaluation requires masks on BOTH
    # the detection and the groundtruth side.
    mask_mode = False
    if detected_masks is not None and groundtruth_masks is not None:
      mask_mode = True
    # Empty defaults so the shape[1] > 0 checks below are safe in both modes.
    iou = np.ndarray([0, 0])
    ioa = np.ndarray([0, 0])
    iou_mask = np.ndarray([0, 0])
    ioa_mask = np.ndarray([0, 0])
    if mask_mode:
      # For Instance Segmentation Evaluation on Open Images V5, not all boxed
      # instances have corresponding segmentation annotations. Those boxes that
      # dont have segmentation annotations are represented as empty masks in
      # groundtruth_masks nd array.
      mask_presence_indicator = (np.sum(groundtruth_masks, axis=(1, 2)) > 0)
      (iou_mask, ioa_mask, scores,
       num_detected_boxes) = self._get_overlaps_and_scores_mask_mode(
           detected_boxes=detected_boxes,
           detected_scores=detected_scores,
           detected_masks=detected_masks,
           groundtruth_boxes=groundtruth_boxes[mask_presence_indicator, :],
           groundtruth_masks=groundtruth_masks[mask_presence_indicator, :],
           groundtruth_is_group_of_list=groundtruth_is_group_of_list[
               mask_presence_indicator])
      if sum(mask_presence_indicator) < len(mask_presence_indicator):
        # Not all masks are present - some masks are empty
        (iou, ioa, _,
         num_detected_boxes) = self._get_overlaps_and_scores_box_mode(
             detected_boxes=detected_boxes,
             detected_scores=detected_scores,
             groundtruth_boxes=groundtruth_boxes[~mask_presence_indicator, :],
             groundtruth_is_group_of_list=groundtruth_is_group_of_list[
                 ~mask_presence_indicator])
      # NOTE(review): this overwrites the NMS-reduced count returned above with
      # the raw detection count — presumably scores/iou rows already line up
      # with all N detections here; TODO confirm against
      # _get_overlaps_and_scores_mask_mode.
      num_detected_boxes = detected_boxes.shape[0]
    else:
      # Box-only evaluation: no groundtruth instance has a usable mask.
      mask_presence_indicator = np.zeros(
          groundtruth_is_group_of_list.shape, dtype=bool)
      (iou, ioa, scores,
       num_detected_boxes) = self._get_overlaps_and_scores_box_mode(
           detected_boxes=detected_boxes,
           detected_scores=detected_scores,
           groundtruth_boxes=groundtruth_boxes,
           groundtruth_is_group_of_list=groundtruth_is_group_of_list)
    # No groundtruth of this class: every detection is a false positive.
    if groundtruth_boxes.size == 0:
      return scores, np.zeros(num_detected_boxes, dtype=bool)
    # Per-detection state, mutated in place by the two closures below.
    tp_fp_labels = np.zeros(num_detected_boxes, dtype=bool)
    is_matched_to_box = np.zeros(num_detected_boxes, dtype=bool)
    is_matched_to_difficult = np.zeros(num_detected_boxes, dtype=bool)
    is_matched_to_group_of = np.zeros(num_detected_boxes, dtype=bool)
    def compute_match_iou(iou, groundtruth_nongroup_of_is_difficult_list,
                          is_box):
      """Computes TP/FP for non group-of box matching.
      The function updates the following local variables:
        tp_fp_labels - if a box is matched to group-of
        is_matched_to_difficult - the detections that were processed at this are
          matched to difficult box.
        is_matched_to_box - the detections that were processed at this stage are
          marked as is_box.
      Args:
        iou: intersection-over-union matrix [num_gt_boxes]x[num_det_boxes].
        groundtruth_nongroup_of_is_difficult_list: boolean that specifies if gt
          box is difficult.
        is_box: boolean that specifies if currently boxes or masks are
          processed.
      """
      # Each detection is matched only against its best-overlapping gt box.
      max_overlap_gt_ids = np.argmax(iou, axis=1)
      is_gt_detected = np.zeros(iou.shape[1], dtype=bool)
      for i in range(num_detected_boxes):
        gt_id = max_overlap_gt_ids[i]
        # Skip detections already resolved in an earlier stage or below the
        # IoU threshold.
        is_evaluatable = (not tp_fp_labels[i] and
                          not is_matched_to_difficult[i] and
                          iou[i, gt_id] >= self.matching_iou_threshold and
                          not is_matched_to_group_of[i])
        if is_evaluatable:
          if not groundtruth_nongroup_of_is_difficult_list[gt_id]:
            # Only the first detection claiming a gt box is a TP; duplicates
            # remain FP.
            if not is_gt_detected[gt_id]:
              tp_fp_labels[i] = True
              is_gt_detected[gt_id] = True
              is_matched_to_box[i] = is_box
          else:
            is_matched_to_difficult[i] = True
    def compute_match_ioa(ioa, is_box):
      """Computes TP/FP for group-of box matching.
      The function updates the following local variables:
        is_matched_to_group_of - if a box is matched to group-of
        is_matched_to_box - the detections that were processed at this stage are
          marked as is_box.
      Args:
        ioa: intersection-over-area matrix [num_gt_boxes]x[num_det_boxes].
        is_box: boolean that specifies if currently boxes or masks are
          processed.
      Returns:
        scores_group_of: of detections matched to group-of boxes
          [num_groupof_matched].
        tp_fp_labels_group_of: boolean array of size [num_groupof_matched], all
          values are True.
      """
      scores_group_of = np.zeros(ioa.shape[1], dtype=float)
      # Each matched group-of box contributes a soft TP weighted by
      # self.group_of_weight.
      tp_fp_labels_group_of = self.group_of_weight * np.ones(
          ioa.shape[1], dtype=float)
      max_overlap_group_of_gt_ids = np.argmax(ioa, axis=1)
      for i in range(num_detected_boxes):
        gt_id = max_overlap_group_of_gt_ids[i]
        is_evaluatable = (not tp_fp_labels[i] and
                          not is_matched_to_difficult[i] and
                          ioa[i, gt_id] >= self.matching_iou_threshold and
                          not is_matched_to_group_of[i])
        if is_evaluatable:
          is_matched_to_group_of[i] = True
          is_matched_to_box[i] = is_box
          # Keep only the highest detection score per group-of box.
          scores_group_of[gt_id] = max(scores_group_of[gt_id], scores[i])
      # Drop group-of boxes that were never matched (score stayed 0) or whose
      # weight is 0.
      selector = np.where((scores_group_of > 0) & (tp_fp_labels_group_of > 0))
      scores_group_of = scores_group_of[selector]
      tp_fp_labels_group_of = tp_fp_labels_group_of[selector]
      return scores_group_of, tp_fp_labels_group_of
    # The evaluation is done in two stages:
    # 1. Evaluate all objects that actually have instance level masks.
    # 2. Evaluate all objects that are not already evaluated as boxes.
    if iou_mask.shape[1] > 0:
      groundtruth_is_difficult_mask_list = groundtruth_is_difficult_list[
          mask_presence_indicator]
      groundtruth_is_group_of_mask_list = groundtruth_is_group_of_list[
          mask_presence_indicator]
      compute_match_iou(
          iou_mask,
          groundtruth_is_difficult_mask_list[
              ~groundtruth_is_group_of_mask_list],
          is_box=False)
    scores_mask_group_of = np.ndarray([0], dtype=float)
    tp_fp_labels_mask_group_of = np.ndarray([0], dtype=float)
    if ioa_mask.shape[1] > 0:
      scores_mask_group_of, tp_fp_labels_mask_group_of = compute_match_ioa(
          ioa_mask, is_box=False)
    # Tp-fp evaluation for non-group of boxes (if any).
    if iou.shape[1] > 0:
      groundtruth_is_difficult_box_list = groundtruth_is_difficult_list[
          ~mask_presence_indicator]
      groundtruth_is_group_of_box_list = groundtruth_is_group_of_list[
          ~mask_presence_indicator]
      compute_match_iou(
          iou,
          groundtruth_is_difficult_box_list[~groundtruth_is_group_of_box_list],
          is_box=True)
    scores_box_group_of = np.ndarray([0], dtype=float)
    tp_fp_labels_box_group_of = np.ndarray([0], dtype=float)
    if ioa.shape[1] > 0:
      scores_box_group_of, tp_fp_labels_box_group_of = compute_match_ioa(
          ioa, is_box=True)
    if mask_mode:
      # Note: here crowds are treated as ignore regions.
      valid_entries = (~is_matched_to_difficult & ~is_matched_to_group_of
                       & ~is_matched_to_box)
      return np.concatenate(
          (scores[valid_entries], scores_mask_group_of)), np.concatenate(
              (tp_fp_labels[valid_entries].astype(float),
               tp_fp_labels_mask_group_of))
    else:
      valid_entries = (~is_matched_to_difficult & ~is_matched_to_group_of)
      return np.concatenate(
          (scores[valid_entries], scores_box_group_of)), np.concatenate(
              (tp_fp_labels[valid_entries].astype(float),
               tp_fp_labels_box_group_of))
def _get_ith_class_arrays(self, detected_boxes, detected_scores,
detected_masks, detected_class_labels,
groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, class_index):
"""Returns numpy arrays belonging to class with index `class_index`.
Args:
detected_boxes: A numpy array containing detected boxes.
detected_scores: A numpy array containing detected scores.
detected_masks: A numpy array containing detected masks.
detected_class_labels: A numpy array containing detected class labels.
groundtruth_boxes: A numpy array containing groundtruth boxes.
groundtruth_masks: A numpy array containing groundtruth masks.
groundtruth_class_labels: A numpy array containing groundtruth class
labels.
class_index: An integer index.
Returns:
gt_boxes_at_ith_class: A numpy array containing groundtruth boxes labeled
as ith class.
gt_masks_at_ith_class: A numpy array containing groundtruth masks labeled
as ith class.
detected_boxes_at_ith_class: A numpy array containing detected boxes
corresponding to the ith class.
detected_scores_at_ith_class: A numpy array containing detected scores
corresponding to the ith class.
detected_masks_at_ith_class: A numpy array containing detected masks
corresponding to the ith class.
"""
selected_groundtruth = (groundtruth_class_labels == class_index)
gt_boxes_at_ith_class = groundtruth_boxes[selected_groundtruth]
if groundtruth_masks is not None:
gt_masks_at_ith_class = groundtruth_masks[selected_groundtruth]
else:
gt_masks_at_ith_class = None
selected_detections = (detected_class_labels == class_index)
detected_boxes_at_ith_class = detected_boxes[selected_detections]
detected_scores_at_ith_class = detected_scores[selected_detections]
if detected_masks is not None:
detected_masks_at_ith_class = detected_masks[selected_detections]
else:
detected_masks_at_ith_class = None
return (gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class)
def _remove_invalid_boxes(self,
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks=None):
"""Removes entries with invalid boxes.
A box is invalid if either its xmax is smaller than its xmin, or its ymax
is smaller than its ymin.
Args:
detected_boxes: A float numpy array of size [num_boxes, 4] containing box
coordinates in [ymin, xmin, ymax, xmax] format.
detected_scores: A float numpy array of size [num_boxes].
detected_class_labels: A int32 numpy array of size [num_boxes].
detected_masks: A uint8 numpy array of size [num_boxes, height, width].
Returns:
valid_detected_boxes: A float numpy array of size [num_valid_boxes, 4]
containing box coordinates in [ymin, xmin, ymax, xmax] format.
valid_detected_scores: A float numpy array of size [num_valid_boxes].
valid_detected_class_labels: A int32 numpy array of size
[num_valid_boxes].
valid_detected_masks: A uint8 numpy array of size
[num_valid_boxes, height, width].
"""
valid_indices = np.logical_and(detected_boxes[:, 0] < detected_boxes[:, 2],
detected_boxes[:, 1] < detected_boxes[:, 3])
detected_boxes = detected_boxes[valid_indices]
detected_scores = detected_scores[valid_indices]
detected_class_labels = detected_class_labels[valid_indices]
if detected_masks is not None:
detected_masks = detected_masks[valid_indices]
return [
detected_boxes, detected_scores, detected_class_labels, detected_masks
] | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import admin_actions as admin_actions_v21
from nova.api.openstack.compute.legacy_v2.contrib import admin_actions \
as admin_actions_v2
from nova import exception
from nova import test
from nova.tests.unit.api.openstack.compute import admin_only_action_common
from nova.tests.unit.api.openstack import fakes
class AdminActionsTestV21(admin_only_action_common.CommonTests):
    """Exercises the v2.1 os-admin-actions extension (network actions)."""
    admin_actions = admin_actions_v21
    _api_version = '2.1'

    def setUp(self):
        super(AdminActionsTestV21, self).setUp()
        self.controller = self.admin_actions.AdminActionsController()
        self.compute_api = self.controller.compute_api

        def _fake_controller(*args, **kwargs):
            # Every controller lookup gets the instance under test.
            return self.controller

        self.stubs.Set(self.admin_actions, 'AdminActionsController',
                       _fake_controller)
        self.mox.StubOutWithMock(self.compute_api, 'get')

    def test_actions(self):
        checked_actions = ['_reset_network', '_inject_network_info']
        translations = {'_reset_network': 'reset_network',
                        '_inject_network_info': 'inject_network_info'}
        self._test_actions(checked_actions, translations)

    def test_actions_with_non_existed_instance(self):
        checked_actions = ['_reset_network', '_inject_network_info']
        self._test_actions_with_non_existed_instance(checked_actions)

    def test_actions_with_locked_instance(self):
        checked_actions = ['_reset_network', '_inject_network_info']
        translations = {'_reset_network': 'reset_network',
                        '_inject_network_info': 'inject_network_info'}
        self._test_actions_with_locked_instance(
            checked_actions, method_translations=translations)
class AdminActionsTestV2(AdminActionsTestV21):
    """Re-runs the v2.1 admin-actions tests against the legacy v2 extension."""
    _api_version = '2'
    admin_actions = admin_actions_v2
class AdminActionsPolicyEnforcementV21(test.NoDBTestCase):
    """Verifies admin actions are rejected when policy disallows them."""

    def setUp(self):
        super(AdminActionsPolicyEnforcementV21, self).setUp()
        self.controller = admin_actions_v21.AdminActionsController()
        self.req = fakes.HTTPRequest.blank('')
        self.fake_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'

    def common_policy_check(self, rule, fun_name, *arg, **kwarg):
        """Installs `rule`, calls the action, and checks the failure message.

        `rule` is expected to hold exactly one entry; popitem() recovers its
        name for the message comparison.
        """
        self.policy.set_rules(rule)
        action = getattr(self.controller, fun_name)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, action, *arg, **kwarg)
        violated_rule = rule.popitem()[0]
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % violated_rule,
            exc.format_message())

    def test_reset_network_policy_failed(self):
        rule_name = "os_compute_api:os-admin-actions:reset_network"
        self.common_policy_check(
            {rule_name: "project:non_fake"},
            "_reset_network", self.req, self.fake_id, body={})

    def test_inject_network_info_policy_failed(self):
        rule_name = "os_compute_api:os-admin-actions:inject_network_info"
        self.common_policy_check(
            {rule_name: "project:non_fake"},
            "_inject_network_info", self.req, self.fake_id, body={})

    def test_reset_state_policy_failed(self):
        rule_name = "os_compute_api:os-admin-actions:reset_state"
        self.common_policy_check(
            {rule_name: "project:non_fake"}, "_reset_state", self.req,
            self.fake_id, body={"os-resetState": {"state": "active"}})
#! /usr/bin/env python
"""Regenerate the ttLib table registry and the doc table list.

Scans Lib/fontTools/ttLib/tables for table-converter modules, rewrites the
package __init__.py with a modulefinder hint, and refreshes the table list
embedded between markers in Doc/documentation.html.
"""
import sys
import os
import glob
from fontTools.ttLib import identifierToTag

# Locate the fontTools checkout relative to this script's own path.
fontToolsDir = os.path.dirname(os.path.dirname(os.path.join(os.getcwd(), sys.argv[0])))
fontToolsDir = os.path.normpath(fontToolsDir)
tablesDir = os.path.join(fontToolsDir,
    "Lib", "fontTools", "ttLib", "tables")
docFile = os.path.join(fontToolsDir, "Doc", "documentation.html")

names = glob.glob1(tablesDir, "*.py")

modules = []
tables = []
for name in names:
    moduleName = name[:-3]
    try:
        tag = identifierToTag(moduleName)
    except Exception:
        # Not a table-converter module (e.g. __init__.py, helper modules);
        # skip it. Was a bare `except:`, which also swallowed KeyboardInterrupt.
        continue
    modules.append(moduleName)
    tables.append(tag.strip())
modules.sort()
tables.sort()

# Rewrite the generated package __init__.py; use a context manager so the
# handle is closed even on error (and don't shadow the `file` builtin).
with open(os.path.join(tablesDir, "__init__.py"), "w") as initFile:
    initFile.write("# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.\n")
    initFile.write("def _moduleFinderHint():\n")
    initFile.write('\t"""Dummy function to let modulefinder know what tables may be\n')
    initFile.write('\tdynamically imported. Generated by MetaTools/buildTableList.py.\n')
    initFile.write('\t"""\n')
    for module in modules:
        initFile.write("\tfrom . import %s\n" % module)

# Splice the freshly built table list between the doc markers.
begin = "<!-- begin table list -->"
end = "<!-- end table list -->"
with open(docFile) as f:
    doc = f.read()
beginPos = doc.find(begin)
# find() returns -1 when missing; the old `> 0` check also (wrongly) rejected
# a marker at position 0.
assert beginPos >= 0, "begin marker not found in %s" % docFile
beginPos = beginPos + len(begin) + 1
endPos = doc.find(end)
assert endPos >= beginPos, "end marker missing or before begin in %s" % docFile
# Oxford-less "a, b and c" join; guard the singleton/empty cases that would
# have crashed on tables[-1].
if len(tables) > 1:
    tableList = ", ".join(tables[:-1]) + " and " + tables[-1]
else:
    tableList = "".join(tables)
doc = doc[:beginPos] + tableList + "\n" + doc[endPos:]
with open(docFile, "w") as f:
    f.write(doc)
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import sys
from .ansitowin32 import AnsiToWin32
# Snapshot of the interpreter's streams at import time; deinit() restores
# these.
orig_stdout = sys.stdout
orig_stderr = sys.stderr
# Set by init() to the (possibly wrapped) streams; reinit() re-installs them.
wrapped_stdout = sys.stdout
wrapped_stderr = sys.stderr
# Ensures the atexit reset handler is registered at most once per process.
atexit_done = False
def reset_all():
    """Emit a terminal attribute reset on the original stdout.

    Registered as an atexit handler by init(). During interpreter shutdown
    module globals can already have been set to None, so guard AnsiToWin32
    before using it (colorama issue #74) rather than raising TypeError at
    exit.
    """
    if AnsiToWin32 is not None:
        AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
    """Install ANSI-aware wrappers around sys.stdout and sys.stderr.

    Args:
        autoreset: reset terminal attributes after every write.
        convert: force (True) or forbid (False) conversion of ANSI sequences;
            None lets the wrapper decide.
        strip: force or forbid stripping of ANSI sequences; None auto-detects.
        wrap: when False no wrapping is performed, and the other arguments
            must all be falsy.

    Raises:
        ValueError: if wrap is False while another argument is set.
    """
    if not wrap and any([autoreset, convert, strip]):
        raise ValueError('wrap=False conflicts with any other arg=True')

    global wrapped_stdout, wrapped_stderr, atexit_done

    # A stream can be None (e.g. pythonw on Windows); leave it untouched.
    if sys.stdout is None:
        wrapped_stdout = None
    else:
        wrapped_stdout = wrap_stream(orig_stdout, convert, strip, autoreset,
                                     wrap)
        sys.stdout = wrapped_stdout
    if sys.stderr is None:
        wrapped_stderr = None
    else:
        wrapped_stderr = wrap_stream(orig_stderr, convert, strip, autoreset,
                                     wrap)
        sys.stderr = wrapped_stderr

    # Register the terminal reset exactly once per process.
    if not atexit_done:
        atexit.register(reset_all)
        atexit_done = True
def deinit():
    """Restore sys.stdout/sys.stderr to the streams captured at import."""
    for attr, original in (('stdout', orig_stdout), ('stderr', orig_stderr)):
        if original is not None:
            setattr(sys, attr, original)
def reinit():
    """Re-install the wrapped streams produced by the last init() call."""
    for attr, wrapped in (('stdout', wrapped_stdout),
                          ('stderr', wrapped_stderr)):
        if wrapped is not None:
            setattr(sys, attr, wrapped)
def wrap_stream(stream, convert, strip, autoreset, wrap):
    """Return `stream`, or an AnsiToWin32 proxy for it when wrapping applies.

    The proxy is used only when `wrap` is True and the wrapper itself decides
    wrapping is needed (via should_wrap()); otherwise the stream is returned
    unchanged.
    """
    if not wrap:
        return stream
    wrapper = AnsiToWin32(stream, convert=convert, strip=strip,
                          autoreset=autoreset)
    return wrapper.stream if wrapper.should_wrap() else stream
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.