code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
"""Tests for special utilities related to nuclear problems."""
import random
import pytest
from sympy import Symbol, symbols, KroneckerDelta, sqrt, IndexedBase
from drudge import NuclearBogoliubovDrudge, Range, Term
from drudge.nuclear import (
JOf, MOf, CG, Wigner6j, Wigner3j, _simpl_pono_term
)
from conftest import skip_in_spark
@pytest.fixture(scope='module')
def nuclear(spark_ctx):
    """Module-scoped fixture: a NuclearBogoliubovDrudge over the Spark context."""
    return NuclearBogoliubovDrudge(spark_ctx)
#
# Test of the core power of negative one simplification.
#
def test_jm_acc_as_half_integer():
    """j/m accessors of a single-particle symbol act as half-integers.

    For a half-integer quantity x, (-1)**(2*x) must simplify to -1.
    """
    sp_symbol = Symbol('k')
    for accessor in (JOf, MOf):
        quantum_number = accessor(sp_symbol)
        input_term = Term((), (-1) ** (2 * quantum_number), ())
        simplified = _simpl_pono_term(input_term, [])
        assert len(simplified.sums) == 0
        assert simplified.amp == -1
        assert len(simplified.vecs) == 0
def test_coll_jm_integer(nuclear: NuclearBogoliubovDrudge):
    """Collective J/M symbols simplify as integers, single-particle ones do not.

    (-1)**(2*x) reduces to +1 for an integer x and to -1 for a
    half-integer x; the expected phase encodes which case applies.
    """
    names = nuclear.names
    k = Symbol('k')
    wigner = Wigner3j(names.J1, names.M1, names.J2, names.M2, JOf(k), names.m1)
    cases = [
        (names.M1, 1), (names.M2, 1), (names.J1, 1), (names.J2, 1),
        (JOf(k), -1), (names.m1, -1),
    ]
    for factor, expected_phase in cases:
        simplified = _simpl_pono_term(
            Term((), wigner * (-1) ** (2 * factor), ()),
            nuclear.resolvers.value,
        )
        assert len(simplified.sums) == 0
        assert simplified.amp == expected_phase * wigner
        assert len(simplified.vecs) == 0
def test_wigner_3j_m_rels_simpl():
    """Phases built from m-sum rules of Wigner 3j symbols are removed.

    Each exponent below vanishes by the zero-sum constraint on the
    m-columns of the two 3j symbols, so the phase must drop out.
    """
    j = Symbol('j')
    a, b, c, d, e = symbols('a b c d e')
    product_3js = Wigner3j(j, a, j, b, j, c) * Wigner3j(j, c, j, d, j, e)
    exponents = (
        a + b + c,
        c + d + e,
        a + b + 2 * c + d + e,
        a + b - d - e,
    )
    for exponent in exponents:
        simplified = _simpl_pono_term(
            Term((), product_3js * (-1) ** exponent, ()), []
        )
        assert len(simplified.sums) == 0
        assert simplified.amp == product_3js
        assert len(simplified.vecs) == 0
def test_varsh_872_4(nuclear: NuclearBogoliubovDrudge):
    """Varshalovich 8.7.2 Eq (4): CG orthogonality over alpha and beta."""
    dr = nuclear
    c, gamma, c_prm, gamma_prm = symbols('c gamma cprm gammaprm', integer=True)
    a, alpha, b, beta = symbols('a alpha b beta', integer=True)
    m_range = Range('m')
    summations = [
        (alpha, m_range[-a, a + 1]), (beta, m_range[-b, b + 1])
    ]
    amplitude = (CG(a, alpha, b, beta, c, gamma)
                 * CG(a, alpha, b, beta, c_prm, gamma_prm))
    expected = KroneckerDelta(c, c_prm) * KroneckerDelta(gamma, gamma_prm)
    # The pattern matching must fire no matter how the summations are
    # ordered.
    for ordering in (summations, reversed(summations)):
        res = dr.sum(*ordering, amplitude).simplify_am()
        assert res.n_terms == 1
        only_term = res.local_terms[0]
        assert len(only_term.sums) == 0
        assert only_term.amp == expected
@skip_in_spark(reason="Maybe assumptions are not serialized with symbols")
def test_varsh_872_5(nuclear: NuclearBogoliubovDrudge):
    """Varshalovich 8.7.2 Eq (5): CG contraction over alpha and gamma.

    TODO: Investigate its failure in Apache Spark environment.
    """
    dr = nuclear
    a, alpha, b, beta, b_prm, beta_prm = symbols(
        'a alpha b beta bprm betaprm', integer=True
    )
    c, gamma = symbols('c gamma', integer=True)
    summations = [
        (alpha, Range('m', -a, a + 1)),
        (gamma, Range('M', -c, c + 1)),
    ]
    amplitude = (CG(a, alpha, b, beta, c, gamma)
                 * CG(a, alpha, b_prm, beta_prm, c, gamma))
    expected = (
        KroneckerDelta(b, b_prm) * KroneckerDelta(beta, beta_prm)
        * (2 * c + 1) / (2 * b + 1)
    )
    # The rewrite must fire regardless of summation ordering.
    for ordering in (summations, reversed(summations)):
        res = dr.sum(*ordering, amplitude).deep_simplify().merge()
        assert res.n_terms == 1
        only_term = res.local_terms[0]
        assert len(only_term.sums) == 0
        assert len(only_term.vecs) == 0
        assert (only_term.amp - expected).simplify() == 0
@pytest.mark.skip(reason='Pending improvement in PONO simplification')
def test_varsh_911_8(nuclear: NuclearBogoliubovDrudge):
    """Test simplification based on the rule in Varshalovich 9.1.1 Eq (8).

    A product of four CG coefficients summed over five projection
    quantum numbers should contract into two deltas times a Wigner 6j
    symbol with the corresponding phase and normalization.
    """
    dr = nuclear
    j, m, j12, m12, j2, m2, j1, m1, j_prm, m_prm, j23, m23, j3, m3 = symbols(
        'j m j12 m12 j2 m2 j1 m1 jprm mprm j23 m23 j3 m3', integer=True
    )
    m_range = Range('m')
    # Each projection m_i runs over [-j_i, j_i] (upper bound exclusive).
    sums = [(m_i, m_range[-j_i, j_i + 1]) for m_i, j_i in [
        (m1, j1), (m2, j2), (m3, j3), (m12, j12), (m23, j23)
    ]]
    amp = CG(j12, m12, j3, m3, j, m) * CG(j1, m1, j2, m2, j12, m12) * CG(
        j1, m1, j23, m23, j_prm, m_prm
    ) * CG(j2, m2, j3, m3, j23, m23)
    expected = (
        KroneckerDelta(j, j_prm) * KroneckerDelta(m, m_prm)
        * (-1) ** (j1 + j2 + j3 + j)
        * sqrt(2 * j12 + 1) * sqrt(2 * j23 + 1)
        * Wigner6j(j1, j2, j12, j3, j, j23)
    )
    # For performance reason, just test a random arrangement of the summations.
    random.shuffle(sums)
    tensor = dr.sum(*sums, amp)
    res = tensor.deep_simplify().merge()
    assert res.n_terms == 1
    term = res.local_terms[0]
    assert len(term.sums) == 0
    assert len(term.vecs) == 0
    assert (term.amp - expected).simplify() == 0
def test_wigner3j_sum_to_wigner6j(nuclear: NuclearBogoliubovDrudge):
    """Test simplification of sum of product of four 3j's to a 6j.

    This test tries to simplify the original LHS of the equation from the
    Wolfram website.
    """
    dr = nuclear
    j1, j2, j3, jprm3, j4, j5, j6 = symbols(
        'j1 j2 j3 jprm3 j4 j5 j6', integer=True
    )
    m1, m2, m3, mprm3, m4, m5, m6 = symbols(
        'm1 m2 m3 mprm3 m4 m5 m6', integer=True
    )
    m_range = Range('m')
    # Each projection m_i runs over [-j_i, j_i] (upper bound exclusive).
    sums = [(m_i, m_range[-j_i, j_i + 1]) for m_i, j_i in [
        (m1, j1), (m2, j2), (m4, j4), (m5, j5), (m6, j6)
    ]]
    phase = (-1) ** (
        j1 + j2 + j4 + j5 + j6 - m1 - m2 - m4 - m5 - m6
    )
    amp = (
        Wigner3j(j2, m2, j3, -m3, j1, m1)
        * Wigner3j(j1, -m1, j5, m5, j6, m6)
        * Wigner3j(j5, -m5, jprm3, mprm3, j4, m4)
        * Wigner3j(j4, -m4, j2, -m2, j6, -m6)
    )
    expected = (
        ((-1) ** (j3 - m3) / (2 * j3 + 1))
        * KroneckerDelta(j3, jprm3) * KroneckerDelta(m3, mprm3)
        * Wigner6j(j1, j2, j3, j4, j5, j6)
    ).expand().simplify()
    # For performance reason, just test a random arrangement of the summations.
    random.shuffle(sums)
    tensor = dr.sum(*sums, phase * amp)
    res = tensor.deep_simplify().merge()
    assert res.n_terms == 1
    term = res.local_terms[0]
    assert len(term.sums) == 0
    assert len(term.vecs) == 0
    # The simplified tensor must match the expected RHS exactly.
    difference = (res - dr.sum(expected)).deep_simplify()
    assert len(difference.local_terms) == 0
@pytest.mark.skip(reason='Pending improvement in PONO simplification')
def test_sum_4_3j_to_6j_in_bccd(nuclear: NuclearBogoliubovDrudge):
    """Test summation of 4 Wigner 3j symbols in a really BCCD term.

    This example comes from the angular momentum coupled BCCD doubles equation
    where the H04 term in the Hamiltonian contracts with the T tensor.
    """
    dr = nuclear
    p = dr.names
    j_range = dr.coll_j_range
    m_range = dr.m_range
    tilde_range = dr.tilde_range
    J1, J2, J3 = p.J1, p.J2, p.J3
    M1, M2 = p.M1, p.M2
    ktilde1, ktilde2, ktilde3 = p.ktilde1, p.ktilde2, p.ktilde3
    ktilde4, ktilde5, ktilde6 = p.ktilde4, p.ktilde5, p.ktilde6
    ktilde7, ktilde8 = p.ktilde7, p.ktilde8
    m1, m2, m3, m4 = p.m1, p.m2, p.m3, p.m4
    t = IndexedBase('t')
    h04 = IndexedBase('H04')
    tensor = dr.sum(
        (J2, j_range), (J3, j_range), (M2, m_range[-J2, J2 + 1]),
        (ktilde5, tilde_range),
        (ktilde6, tilde_range),
        (ktilde7, tilde_range),
        (ktilde8, tilde_range),
        (m1, m_range[-JOf(ktilde1), JOf(ktilde1) + 1]),
        (m2, m_range[-JOf(ktilde2), JOf(ktilde2) + 1]),
        (m3, m_range[-JOf(ktilde4), JOf(ktilde4) + 1]),
        (m4, m_range[-JOf(ktilde5), JOf(ktilde5) + 1]),
        -(-1) ** J1 * (-1) ** J2 * (-1) ** (6 * J3) * (-1) ** (-M1)
        * (-1) ** (-M2) * (-1) ** JOf(ktilde2) * (-1) ** (3 * JOf(ktilde3))
        * (-1) ** (4 * JOf(ktilde4)) * (-1) ** (2 * JOf(ktilde5))
        * (-1) ** (4 * JOf(ktilde7)) * (-1) ** (2 * JOf(ktilde8))
        * (
            4 * J1 * J2 * J3 + 2 * J1 * J2 + 2 * J1 * J3 + J1 + 2 * J2 * J3
            + J2 + J3
        ) * KroneckerDelta(JOf(ktilde3), JOf(ktilde5))
        * h04[J3, ktilde6, ktilde7, ktilde8, ktilde5]
        * t[J2, ktilde5, ktilde1, ktilde2, ktilde4]
        * t[J3, ktilde6, ktilde7, ktilde8, ktilde3]
        * Wigner3j(JOf(ktilde1), m1, JOf(ktilde2), m2, J1, -M1)
        * Wigner3j(JOf(ktilde2), -m2, JOf(ktilde4), -m3, J2, -M2)
        * Wigner3j(JOf(ktilde3), -m4, JOf(ktilde4), -m3, J1, -M1)
        * Wigner3j(JOf(ktilde5), m4, JOf(ktilde1), m1, J2, -M2)
        / (3 * (2 * JOf(ktilde5) + 1))
    )
    res = tensor.deep_simplify()
    assert res.n_terms == 1
    term = res.local_terms[0]
    # The four 3j symbols should have been fused into a Wigner 6j symbol.
    assert term.amp.has(Wigner6j)
"""
Test Form class.
Objective: Test parameters, and behavior.
"""
import datetime
from django.test import TestCase, override_settings
from django.core.urlresolvers import reverse
from mock import patch
from colab.accounts import forms as accounts_forms
from colab.accounts.models import User
from colab.accounts.forms import (UserCreationForm, UserChangeForm,
UserUpdateForm, UserForm,
ColabSetPasswordForm,
ColabPasswordChangeForm,
ColabSetUsernameForm)
class SetPasswordFormTestCase(TestCase):
    """Validate ColabSetPasswordForm with and without plugin validators."""

    TEST_COLAB_APPS = {
        'test_plugin': {
            'password_validators': (
                'colab.accounts.tests.utils.password_validator',
            ),
        },
    }

    @property
    def user(self):
        """A freshly created user for the password form to operate on."""
        return User.objects.create_user(
            username='test_user', email='test@example.com')

    @property
    def valid_form_data(self):
        """A matching pair of new passwords accepted by the form."""
        return dict(new_password1='12345', new_password2='12345')

    def test_no_custom_validators(self):
        # No COLAB_APPS override: only default validation applies.
        form = ColabSetPasswordForm(self.user, data=self.valid_form_data)
        self.assertTrue(form.is_valid(), True)

    @override_settings(COLAB_APPS=TEST_COLAB_APPS)
    @patch('colab.accounts.tests.utils.password_validator')
    def test_custom_validator(self, validator):
        # The plugin-declared validator must be invoked with the new password.
        form = ColabSetPasswordForm(self.user, data=self.valid_form_data)
        self.assertTrue(form.is_valid())
        validator.assert_called_with('12345')
class SetUsernameFormTestCase(TestCase):
    """Validate ColabSetUsernameForm with and without plugin validators."""

    TEST_COLAB_APPS = {
        'test_plugin': {
            'username_validators': (
                'colab.accounts.tests.utils.username_validator',
            ),
        },
    }

    @property
    def valid_form_data(self):
        """A complete, valid signup payload."""
        return dict(
            username='test_user',
            email='test@email.com',
            first_name='test',
            last_name='test',
            password1='12345',
            password2='12345',
        )

    def test_no_custom_validators(self):
        # No COLAB_APPS override: only default validation applies.
        form = ColabSetUsernameForm(data=self.valid_form_data)
        self.assertTrue(form.is_valid(), True)

    @override_settings(COLAB_APPS=TEST_COLAB_APPS)
    @patch('colab.accounts.tests.utils.username_validator')
    def test_custom_validator(self, validator):
        # The plugin-declared validator must receive the submitted username.
        form = ColabSetUsernameForm(data=self.valid_form_data)
        self.assertTrue(form.is_valid())
        validator.assert_called_with('test_user')
class FormTest(TestCase):
    """Exercise creation/update/change forms against one seeded user."""

    def setUp(self):
        # Persist one user so duplicate email/username validation can fire.
        user = User()
        user.username = "USERtestCoLaB"
        user.set_password("123colab4")
        user.email = "usertest@colab.com.br"
        user.id = 1
        user.twitter = "usertestcolab"
        user.facebook = "usertestcolab"
        user.first_name = "USERtestCoLaB"
        user.last_name = "COLAB"
        user.save()

    def tearDown(self):
        pass

    def create_form_data(self, email, username):
        """Return a bound UserCreationForm for the given email/username."""
        form_data = {'email': email,
                     'first_name': 'colabName',
                     'last_name': 'secondName',
                     'username': username,
                     'password1': '123colab4',
                     'password2': '123colab4'}
        form = UserCreationForm(data=form_data)
        return form

    def create_update_form_data(self):
        """Return a bound UserUpdateForm carrying a twitter handle."""
        updated_data = {'username': "colab",
                        'email': 'email@email.com',
                        'last_login': datetime.date.today(),
                        'date_joined': datetime.date.today(),
                        'twitter': 'nick_twitter',
                        'first_name': 'colabName',
                        'last_name': 'secondName',
                        }
        initial = {'email': 'email@email.com',
                   'first_name': 'colabName',
                   'last_name': 'secondName',
                   'username': 'colab',
                   'password': '123colab4'}
        form = UserUpdateForm(initial=initial, data=updated_data)
        return form

    def create_change_form_data(self, username):
        """Return a bound UserChangeForm renaming the user to ``username``."""
        updated_data = {'username': username,
                        'email': 'email@email.com',
                        'last_login': datetime.date.today(),
                        'date_joined': datetime.date.today()}
        initial = {'email': 'email@email.com',
                   'first_name': 'colabName',
                   'last_name': 'secondName',
                   'username': 'colab',
                   'password': '123colab4'}
        form = UserChangeForm(initial=initial, data=updated_data)
        return form

    def create_user_form_data(self):
        """Return a bound UserForm with baseline valid data."""
        initial = {'email': 'email@email.com',
                   'first_name': 'colabName',
                   'last_name': 'secondName',
                   'username': 'colab',
                   'password': '123colab4'}
        form = UserForm(data=initial)
        return form

    def test_already_registered_email(self):
        # Email seeded in setUp must be rejected as a duplicate.
        form = self.create_form_data('usertest@colab.com.br',
                                     'colab')
        self.assertFalse(form.is_valid())
        self.assertIn('duplicate_email', form.error_messages)

    def test_registered_email_message(self):
        # The duplicate-email message (with login URL) must be rendered.
        form = self.create_form_data('usertest@colab.com.br',
                                     'colab')
        msg = form.error_messages.get('duplicate_email') % {
            'url': reverse('login')
        }
        self.assertIn(msg, str(form))

    def test_valid_username(self):
        form = self.create_form_data('user@email.com',
                                     'colab123')
        self.assertTrue(form.is_valid())

    def test_already_created_username(self):
        # Username seeded in setUp must be rejected as a duplicate.
        form = self.create_form_data('usertest@colab.com.br',
                                     'USERtestCoLaB')
        self.assertFalse(form.is_valid())
        self.assertIn('duplicate_username', form.error_messages)

    def test_not_valid_username(self):
        # '!' is not acceptable in a username.
        form = self.create_form_data('user@email.com',
                                     'colab!')
        self.assertFalse(form.is_valid())

    def test_update_valid_username(self):
        form = self.create_change_form_data('colab123')
        self.assertTrue(form.is_valid())

    def test_update_not_valid_username(self):
        form = self.create_change_form_data('colab!')
        self.assertFalse(form.is_valid())

    @patch.object(accounts_forms, "validate_social_account")
    def test_validate_social_account(self, validate_social_account):
        # A failing social-account lookup must surface on the twitter field.
        validate_social_account.return_value = False
        form = self.create_update_form_data()
        self.assertFalse(form.is_valid())
        self.assertIn("Social account does not exist", form.errors['twitter'])

    def test_required_valid_fields_user_form(self):
        form_data = {
            'first_name': 'colabName',
            'last_name': 'secondName',
            'username': 'colab',
        }
        form = UserForm(data=form_data)
        self.assertTrue(form.is_valid())

    def test_required_empty_fields_user_form(self):
        # Empty required fields must each raise a field error.
        form_data = {
            'first_name': '',
            'last_name': '',
            'username': '',
        }
        form = UserForm(data=form_data)
        self.assertFalse(form.is_valid())
        self.assertIn('first_name', form.errors)
        self.assertIn('last_name', form.errors)
        self.assertIn('username', form.errors)

    def test_blank_required_fields_user_form(self):
        # Whitespace-only required fields must also be rejected.
        form_data = {
            'first_name': ' ',
            'last_name': ' ',
            'username': ' ',
        }
        form = UserForm(data=form_data)
        self.assertFalse(form.is_valid())
        self.assertIn('first_name', form.errors)
        self.assertIn('last_name', form.errors)
        self.assertIn('username', form.errors)
class ChangePasswordFormTestCase(TestCase):
    """Validate ColabPasswordChangeForm with and without plugin validators."""

    TEST_COLAB_APPS = {
        'test_plugin': {
            'password_validators': (
                'colab.accounts.tests.utils.password_validator',
            ),
        },
    }

    @property
    def user(self):
        """A user holding a known current password (set in memory only)."""
        account = User.objects.create_user(
            username='test_user', email='test@example.com')
        account.set_password("123colab4")
        return account

    @property
    def valid_form_data(self):
        """Correct old password plus a matching new-password pair."""
        return dict(
            old_password='123colab4',
            new_password1='12345',
            new_password2='12345',
        )

    def test_no_custom_validators(self):
        # No COLAB_APPS override: only default validation applies.
        form = ColabPasswordChangeForm(self.user, data=self.valid_form_data)
        self.assertTrue(form.is_valid(), True)

    @override_settings(COLAB_APPS=TEST_COLAB_APPS)
    @patch('colab.accounts.tests.utils.password_validator')
    def test_custom_validator(self, validator):
        # The plugin-declared validator must be invoked with the new password.
        form = ColabPasswordChangeForm(self.user, data=self.valid_form_data)
        self.assertTrue(form.is_valid())
        validator.assert_called_with('12345')
class UserCreationFormTestCase(TestCase):
    """Field-level clean() behaviour of UserCreationForm."""

    @classmethod
    def setUpClass(cls):
        # NOTE(review): super().setUpClass() is not called, so Django's
        # class-level test setup is skipped -- confirm this is intentional.
        cls.user = User.objects.create_user(username='user1234',
                                            email='teste1234@example.com',
                                            first_name='test_first_name',
                                            last_name='test_last_name')
        cls.user.set_password("123colab4")
        cls.user.save()

    @classmethod
    def tearDownClass(cls):
        # NOTE(review): super().tearDownClass() is also skipped here.
        User.objects.all().delete()

    def get_form_data(self, email, username='test_user',
                      password1='12345', password2='12345'):
        """Return a complete creation payload with overridable fields."""
        return {
            'first_name': 'test_first_name',
            'last_name': 'test_last_name',
            'username': username,
            'email': email,
            'password1': password1,
            'password2': password2
        }

    def test_clean_mail_error(self):
        # Duplicate email (seeded in setUpClass) must be the only error.
        creation_form = UserCreationForm(
            data=self.get_form_data('teste1234@example.com'))
        self.assertFalse(creation_form.is_valid())
        self.assertTrue('email' in creation_form.errors)
        self.assertEqual(1, len(creation_form.errors))

    def test_clean_mail(self):
        creation_form = UserCreationForm(
            data=self.get_form_data('teste12345@example.com'))
        self.assertTrue(creation_form.is_valid())

    def test_clean_username_error(self):
        # Duplicate username must be the only error.
        creation_form = UserCreationForm(
            data=self.get_form_data('teste12345@example.com',
                                    username='user1234'))
        self.assertFalse(creation_form.is_valid())
        self.assertTrue('username' in creation_form.errors)
        self.assertEqual(1, len(creation_form.errors))

    def test_clean_username(self):
        creation_form = UserCreationForm(
            data=self.get_form_data('teste12345@example.com',
                                    username='user12345'))
        self.assertTrue(creation_form.is_valid())

    def test_clean_password2_empty_password1(self):
        creation_form = UserCreationForm(
            data=self.get_form_data('teste12345@example.com',
                                    username='user12345',
                                    password1=''))
        self.assertFalse(creation_form.is_valid())
        self.assertTrue('password1' in creation_form.errors)
        self.assertEqual(1, len(creation_form.errors))

    def test_clean_password2_empty_password2(self):
        creation_form = UserCreationForm(
            data=self.get_form_data('teste12345@example.com',
                                    username='user12345',
                                    password2=''))
        self.assertFalse(creation_form.is_valid())
        self.assertTrue('password2' in creation_form.errors)

    def test_clean_password2_different_passwords(self):
        # Mismatched passwords must surface solely on password2.
        creation_form = UserCreationForm(
            data=self.get_form_data('teste12345@example.com',
                                    username='user12345',
                                    password1='1234'))
        self.assertFalse(creation_form.is_valid())
        self.assertTrue('password2' in creation_form.errors)
        self.assertEqual(1, len(creation_form.errors))
        # NOTE(review): duplicated assertion below, kept as-is.
        self.assertEqual(1, len(creation_form.errors))

    def test_clean_password(self):
        creation_form = UserCreationForm(
            data=self.get_form_data('teste12345@example.com',
                                    username='user12345'))
        self.assertTrue(creation_form.is_valid())
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"strings"
"github.com/hashicorp/cli"
)
// Compile-time check that PolicyCommand satisfies the cli.Command interface.
var _ cli.Command = (*PolicyCommand)(nil)

// PolicyCommand is a Command that groups the policy subcommands.
type PolicyCommand struct {
	*BaseCommand
}
// Synopsis returns the one-line description shown in command listings.
func (c *PolicyCommand) Synopsis() string {
	return "Interact with policies"
}
// Help returns the long-form usage text for the "vault policy" group.
func (c *PolicyCommand) Help() string {
	helpText := `
Usage: vault policy <subcommand> [options] [args]

  This command groups subcommands for interacting with policies.
  Users can write, read, and list policies in Vault.

  List all enabled policies:

      $ vault policy list

  Create a policy named "my-policy" from contents on local disk:

      $ vault policy write my-policy ./my-policy.hcl

  Delete the policy named my-policy:

      $ vault policy delete my-policy

  Please see the individual subcommand help for detailed usage information.
`

	return strings.TrimSpace(helpText)
}
// Run always shows help: "vault policy" is only a grouper for subcommands.
func (c *PolicyCommand) Run(args []string) int {
	return cli.RunResultHelp
}
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
There are "opportunistic" tests which allows testing against all 3 databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up db's named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands::
| sudo -u postgres psql
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
"""
import glob
# NOTE(dhellmann): Use stdlib logging instead of oslo.log because we
# need to call methods on the logger that are not exposed through the
# adapter provided by oslo.log.
import logging
import os
import alembic
from migrate import UniqueConstraint
from migrate.versioning import repository
import mock
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as oslodbutils
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy.sql import null
from nova.db import migration
from nova.db.sqlalchemy import migrate_repo
from nova.db.sqlalchemy import migration as sa_migration
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
LOG = logging.getLogger(__name__)
class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
test_migrations.WalkVersionsMixin):
"""Test sqlalchemy-migrate migrations."""
TIMEOUT_SCALING_FACTOR = 2
@property
def INIT_VERSION(self):
    """Initial schema version the migration walk starts from."""
    return migration.db_initial_version()
@property
def REPOSITORY(self):
    """sqlalchemy-migrate repository holding nova's migration scripts."""
    return repository.Repository(
        os.path.abspath(os.path.dirname(migrate_repo.__file__)))
@property
def migration_api(self):
    """Versioning API used by the walk to apply migrations."""
    return sa_migration.versioning_api
@property
def migrate_engine(self):
    """Database engine the migrations run against (set by the base class)."""
    return self.engine
def setUp(self):
    """Quiet the migrate logger and install the scaled test timeout."""
    super(NovaMigrationsCheckers, self).setUp()
    # NOTE(viktors): We should reduce log output because it causes issues,
    # when we run tests with testr
    migrate_log = logging.getLogger('migrate')
    old_level = migrate_log.level
    migrate_log.setLevel(logging.WARN)
    self.addCleanup(migrate_log.setLevel, old_level)
    # NOTE(rpodolyaka): we need to repeat the functionality of the base
    # test case a bit here as this gets overriden by oslotest base test
    # case and nova base test case cleanup must be the last one (as it
    # deletes attributes of test case instances)
    self.useFixture(nova_fixtures.Timeout(
        os.environ.get('OS_TEST_TIMEOUT', 0),
        self.TIMEOUT_SCALING_FACTOR))
def assertColumnExists(self, engine, table_name, column):
    """Fail unless ``table_name`` has a column named ``column``."""
    found = oslodbutils.column_exists(engine, table_name, column)
    self.assertTrue(
        found, 'Column %s.%s does not exist' % (table_name, column))
def assertColumnNotExists(self, engine, table_name, column):
    """Fail if ``table_name`` has a column named ``column``."""
    found = oslodbutils.column_exists(engine, table_name, column)
    self.assertFalse(
        found, 'Column %s.%s should not exist' % (table_name, column))
def assertTableNotExists(self, engine, table):
    """Fail unless reflecting ``table`` raises NoSuchTableError."""
    self.assertRaises(
        sqlalchemy.exc.NoSuchTableError,
        oslodbutils.get_table, engine, table)
def assertIndexExists(self, engine, table_name, index):
    """Fail unless ``index`` exists on ``table_name``."""
    found = oslodbutils.index_exists(engine, table_name, index)
    self.assertTrue(
        found,
        'Index %s on table %s does not exist' % (index, table_name))
def assertIndexNotExists(self, engine, table_name, index):
    """Fail if ``index`` exists on ``table_name``."""
    found = oslodbutils.index_exists(engine, table_name, index)
    self.assertFalse(
        found,
        'Index %s on table %s should not exist' % (index, table_name))
def assertIndexMembers(self, engine, table, index, members):
    """Fail unless ``index`` on ``table`` covers exactly ``members`` in order.

    Column order matters: most SQL databases can use the leading index
    columns to optimize queries that do not include all covered columns.
    """
    self.assertIndexExists(engine, table, index)
    reflected = oslodbutils.get_table(engine, table)
    matching = next(
        (idx for idx in reflected.indexes if idx.name == index), None)
    index_columns = None
    if matching is not None:
        index_columns = [col.name for col in matching.columns]
    self.assertEqual(members, index_columns)
# Implementations for ModelsMigrationsSync
def db_sync(self, engine):
    """Run a full schema sync on ``engine`` (ModelsMigrationsSync hook)."""
    patcher = mock.patch.object(
        sa_migration, 'get_engine', return_value=engine)
    with patcher:
        sa_migration.db_sync()
def get_engine(self):
    """ModelsMigrationsSync hook: engine used for comparisons."""
    return self.migrate_engine
def get_metadata(self):
    """ModelsMigrationsSync hook: metadata of the declarative models."""
    return models.BASE.metadata
def include_object(self, object_, name, type_, reflected, compare_to):
    """Filter for the model/schema comparison.

    Excludes migrate_version (sqlalchemy-migrate's control table, not in
    the model) and shadow_* tables (generated from the model and covered
    by their own tests). Everything else is compared.
    """
    if type_ != 'table':
        return True
    is_control_table = name == 'migrate_version'
    is_shadow_table = name.startswith('shadow_')
    return not (is_control_table or is_shadow_table)
def _skippable_migrations(self):
    """Return migration numbers that legitimately have no _check_NNN test.

    Covers the Havana baseline, one reverted NOOP, and the placeholder
    slots reserved for backports in each release series.
    """
    special = [
        216,  # Havana
        272,  # NOOP migration due to revert
    ]
    # NOTE: materialize the ranges as lists; concatenating a list with a
    # lazy ``range`` object raises TypeError on Python 3.
    havana_placeholders = list(range(217, 227))
    icehouse_placeholders = list(range(235, 244))
    juno_placeholders = list(range(255, 265))
    kilo_placeholders = list(range(281, 291))
    return (special +
            havana_placeholders +
            icehouse_placeholders +
            juno_placeholders +
            kilo_placeholders)
def migrate_up(self, version, with_data=False):
    """Apply one migration, enforcing test coverage and additive-only rules.

    When ``with_data`` is set, a ``_check_<version>`` method must exist
    unless the version is in the skippable list. From Kilo onward,
    migrations may not create or alter tables/columns destructively
    except for the whitelisted versions below.
    """
    if with_data:
        check = getattr(self, "_check_%03d" % version, None)
        if version not in self._skippable_migrations():
            self.assertIsNotNone(check,
                                 ('DB Migration %i does not have a '
                                  'test. Please add one!') % version)
    # NOTE(danms): This is a list of migrations where we allow dropping
    # things. The rules for adding things here are very very specific.
    # Chances are you don't meet the critera.
    # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
    exceptions = [
        # 267 enforces non-nullable instance.uuid. This was mostly
        # a special case because instance.uuid shouldn't be able
        # to be nullable
        267,
        # 278 removes a FK restriction, so it's an alter operation
        # that doesn't break existing users
        278,
        # 280 enforces non-null keypair name. This is really not
        # something we should allow, but it's in the past
        280,
        # 292 drops completely orphaned tables with no users, so
        # it can be done without affecting anything.
        292,
    ]
    # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
    # NOTE(danms): We only started requiring things be additive in
    # kilo, so ignore all migrations before that point.
    KILO_START = 265
    if version >= KILO_START and version not in exceptions:
        banned = ['Table', 'Column']
    else:
        banned = None
    with nova_fixtures.BannedDBSchemaOperations(banned):
        super(NovaMigrationsCheckers, self).migrate_up(version, with_data)
def test_walk_versions(self):
    """Walk every migration upward once (no snake-walk, no downgrades)."""
    self.walk_versions(snake_walk=False, downgrade=False)
def _check_227(self, engine, data):
    """Migration 227: project_user_quotas stores the longest resource name."""
    table = oslodbutils.get_table(engine, 'project_user_quotas')
    # Insert fake_quotas with the longest resource name.
    fake_quotas = {'id': 5,
                   'project_id': 'fake_project',
                   'user_id': 'fake_user',
                   'resource': 'injected_file_content_bytes',
                   'hard_limit': 10}
    table.insert().execute(fake_quotas)
    # Check we can get the longest resource name.
    quota = table.select(table.c.id == 5).execute().first()
    self.assertEqual(quota['resource'], 'injected_file_content_bytes')
def _check_228(self, engine, data):
    """Migration 228: compute_nodes gains a Text 'metrics' column."""
    self.assertColumnExists(engine, 'compute_nodes', 'metrics')
    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    self.assertIsInstance(compute_nodes.c.metrics.type,
                          sqlalchemy.types.Text)
def _check_229(self, engine, data):
    """Migration 229: compute_nodes gains a Text 'extra_resources' column."""
    self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    self.assertIsInstance(compute_nodes.c.extra_resources.type,
                          sqlalchemy.types.Text)
def _check_230(self, engine, data):
    """Migration 230: instance_actions_events (and shadow) gain
    'host' (String) and 'details' (Text) columns."""
    for table_name in ['instance_actions_events',
                       'shadow_instance_actions_events']:
        self.assertColumnExists(engine, table_name, 'host')
        self.assertColumnExists(engine, table_name, 'details')
    action_events = oslodbutils.get_table(engine,
                                          'instance_actions_events')
    self.assertIsInstance(action_events.c.host.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(action_events.c.details.type,
                          sqlalchemy.types.Text)
def _check_231(self, engine, data):
    """Migration 231: instances gains String 'ephemeral_key_uuid'."""
    self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
    instances = oslodbutils.get_table(engine, 'instances')
    self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
                          sqlalchemy.types.String)
    # The shadow table must stay in sync with the main table.
    self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
def _check_232(self, engine, data):
    """Migration 232: leftover dump_* tables are removed."""
    table_names = ['compute_node_stats', 'compute_nodes',
                   'instance_actions', 'instance_actions_events',
                   'instance_faults', 'migrations']
    for table_name in table_names:
        self.assertTableNotExists(engine, 'dump_' + table_name)
def _check_233(self, engine, data):
    """Migration 233: Text 'stats' on compute_nodes; compute_node_stats
    table is gone."""
    self.assertColumnExists(engine, 'compute_nodes', 'stats')
    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    self.assertIsInstance(compute_nodes.c.stats.type,
                          sqlalchemy.types.Text)
    self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                      oslodbutils.get_table, engine, 'compute_node_stats')
def _check_234(self, engine, data):
    """Migration 234: reservations indexed on (deleted, expire)."""
    self.assertIndexMembers(engine, 'reservations',
                            'reservations_deleted_expire_idx',
                            ['deleted', 'expire'])
def _check_244(self, engine, data):
    """Migration 244: volume_usage_cache.user_id has length 64."""
    volume_usage_cache = oslodbutils.get_table(
        engine, 'volume_usage_cache')
    self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
def _pre_upgrade_245(self, engine):
    """Seed one network row so 245's new column defaults can be checked."""
    # create a fake network
    networks = oslodbutils.get_table(engine, 'networks')
    fake_network = {'id': 1}
    networks.insert().execute(fake_network)
def _check_245(self, engine, data):
    """Migration 245: defaults of the new networks columns on the
    pre-seeded row."""
    networks = oslodbutils.get_table(engine, 'networks')
    network = networks.select(networks.c.id == 1).execute().first()
    # mtu should default to None
    self.assertIsNone(network.mtu)
    # dhcp_server should default to None
    self.assertIsNone(network.dhcp_server)
    # enable dhcp should default to true
    self.assertTrue(network.enable_dhcp)
    # share address should default to false
    self.assertFalse(network.share_address)
def _check_246(self, engine, data):
    """Migration 246: exactly one FK on pci_devices.compute_node_id."""
    pci_devices = oslodbutils.get_table(engine, 'pci_devices')
    self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
                             if fk.parent.name == 'compute_node_id']))
def _check_247(self, engine, data):
    """Migration 247: nullability tightened on quota_usages/pci_devices."""
    quota_usages = oslodbutils.get_table(engine, 'quota_usages')
    self.assertFalse(quota_usages.c.resource.nullable)
    pci_devices = oslodbutils.get_table(engine, 'pci_devices')
    # ibm_db_sa (DB2) is special-cased: 'deleted' is NOT NULL there only.
    if engine.name == 'ibm_db_sa':
        self.assertFalse(pci_devices.c.deleted.nullable)
    else:
        self.assertTrue(pci_devices.c.deleted.nullable)
    self.assertFalse(pci_devices.c.product_id.nullable)
    self.assertFalse(pci_devices.c.vendor_id.nullable)
    self.assertFalse(pci_devices.c.dev_type.nullable)
def _check_248(self, engine, data):
    """Migration 248: reservations_deleted_expire_idx still covers
    (deleted, expire)."""
    self.assertIndexMembers(engine, 'reservations',
                            'reservations_deleted_expire_idx',
                            ['deleted', 'expire'])
def _check_249(self, engine, data):
    """Migration 249: deduplicated block_device_mapping index."""
    # Assert that only one index exists that covers columns
    # instance_uuid and device_name
    bdm = oslodbutils.get_table(engine, 'block_device_mapping')
    self.assertEqual(1, len([i for i in bdm.indexes
                             if [c.name for c in i.columns] ==
                             ['instance_uuid', 'device_name']]))
def _check_250(self, engine, data):
    """Migration 250: instance_group_metadata tables are dropped."""
    self.assertTableNotExists(engine, 'instance_group_metadata')
    self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
def _check_251(self, engine, data):
    """Migration 251: Text 'numa_topology' on compute_nodes and shadow."""
    self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
    self.assertColumnExists(engine, 'shadow_compute_nodes',
                            'numa_topology')
    compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
    shadow_compute_nodes = oslodbutils.get_table(engine,
                                                 'shadow_compute_nodes')
    self.assertIsInstance(compute_nodes.c.numa_topology.type,
                          sqlalchemy.types.Text)
    self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
                          sqlalchemy.types.Text)
def _check_252(self, engine, data):
    """Migration 252: instance_extra tables exist, indexed on
    instance_uuid."""
    oslodbutils.get_table(engine, 'instance_extra')
    oslodbutils.get_table(engine, 'shadow_instance_extra')
    self.assertIndexMembers(engine, 'instance_extra',
                            'instance_extra_idx',
                            ['instance_uuid'])
def _check_253(self, engine, data):
    """Migration 253: Text 'pci_requests' on instance_extra and shadow."""
    self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
    self.assertColumnExists(
        engine, 'shadow_instance_extra', 'pci_requests')
    instance_extra = oslodbutils.get_table(engine, 'instance_extra')
    shadow_instance_extra = oslodbutils.get_table(engine,
                                                  'shadow_instance_extra')
    self.assertIsInstance(instance_extra.c.pci_requests.type,
                          sqlalchemy.types.Text)
    self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
                          sqlalchemy.types.Text)
def _check_254(self, engine, data):
    """Migration 254: String 'request_id' on pci_devices and shadow."""
    self.assertColumnExists(engine, 'pci_devices', 'request_id')
    self.assertColumnExists(
        engine, 'shadow_pci_devices', 'request_id')
    pci_devices = oslodbutils.get_table(engine, 'pci_devices')
    shadow_pci_devices = oslodbutils.get_table(
        engine, 'shadow_pci_devices')
    self.assertIsInstance(pci_devices.c.request_id.type,
                          sqlalchemy.types.String)
    self.assertIsInstance(shadow_pci_devices.c.request_id.type,
                          sqlalchemy.types.String)
def _check_265(self, engine, data):
# Assert that only one index exists that covers columns
# host and deleted
instances = oslodbutils.get_table(engine, 'instances')
self.assertEqual(1, len([i for i in instances.indexes
if [c.name for c in i.columns][:2] ==
['host', 'deleted']]))
# and only one index covers host column
iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
self.assertEqual(1, len([i for i in iscsi_targets.indexes
if [c.name for c in i.columns][:1] ==
['host']]))
def _check_266(self, engine, data):
self.assertColumnExists(engine, 'tags', 'resource_id')
self.assertColumnExists(engine, 'tags', 'tag')
table = oslodbutils.get_table(engine, 'tags')
self.assertIsInstance(table.c.resource_id.type,
sqlalchemy.types.String)
self.assertIsInstance(table.c.tag.type,
sqlalchemy.types.String)
def _pre_upgrade_267(self, engine):
# Create a fixed_ips row with a null instance_uuid (if not already
# there) to make sure that's not deleted.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
fake_fixed_ip = {'id': 1}
fixed_ips.insert().execute(fake_fixed_ip)
# Create an instance record with a valid (non-null) UUID so we make
# sure we don't do something stupid and delete valid records.
instances = oslodbutils.get_table(engine, 'instances')
fake_instance = {'id': 1, 'uuid': 'fake-non-null-uuid'}
instances.insert().execute(fake_instance)
# Add a null instance_uuid entry for the volumes table
# since it doesn't have a foreign key back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
fake_volume = {'id': '9c3c317e-24db-4d57-9a6f-96e6d477c1da'}
volumes.insert().execute(fake_volume)
def _check_267(self, engine, data):
# Make sure the column is non-nullable and the UC exists.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
self.assertTrue(fixed_ips.c.instance_uuid.nullable)
fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
self.assertIsNone(fixed_ip.instance_uuid)
instances = oslodbutils.get_table(engine, 'instances')
self.assertFalse(instances.c.uuid.nullable)
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('instances')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertIn('uniq_instances0uuid', constraint_names)
# Make sure the instances record with the valid uuid is still there.
instance = instances.select(instances.c.id == 1).execute().first()
self.assertIsNotNone(instance)
# Check that the null entry in the volumes table is still there since
# we skipped tables that don't have FK's back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
self.assertTrue(volumes.c.instance_uuid.nullable)
volume = fixed_ips.select(
volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
).execute().first()
self.assertIsNone(volume.instance_uuid)
    def test_migration_267(self):
        """Run migration 267 standalone to exercise its failure path.

        First upgrades to 266, seeds a consoles row whose instance_uuid is
        NULL, and asserts that upgrading to 267 raises ValidationError.
        After deleting the offending row, the upgrade must succeed.
        """
        # This is separate from test_walk_versions so we can test the case
        # where there are non-null instance_uuid entries in the database which
        # cause the 267 migration to fail.
        engine = self.migrate_engine
        self.migration_api.version_control(
            engine, self.REPOSITORY, self.INIT_VERSION)
        self.migration_api.upgrade(engine, self.REPOSITORY, 266)
        # Create a consoles record with a null instance_uuid so
        # we can test that the upgrade fails if that entry is found.
        # NOTE(mriedem): We use the consoles table since that's the only table
        # created in the 216 migration with a ForeignKey created on the
        # instance_uuid table for sqlite.
        consoles = oslodbutils.get_table(engine, 'consoles')
        fake_console = {'id': 1}
        consoles.insert().execute(fake_console)
        # NOTE(mriedem): We handle the 267 migration where we expect to
        # hit a ValidationError on the consoles table to have
        # a null instance_uuid entry
        ex = self.assertRaises(exception.ValidationError,
                               self.migration_api.upgrade,
                               engine, self.REPOSITORY, 267)
        self.assertIn("There are 1 records in the "
                      "'consoles' table where the uuid or "
                      "instance_uuid column is NULL.",
                      ex.kwargs['detail'])
        # Remove the consoles entry with the null instance_uuid column.
        rows = consoles.delete().where(
            consoles.c['instance_uuid'] == null()).execute().rowcount
        self.assertEqual(1, rows)
        # Now run the 267 upgrade again.
        self.migration_api.upgrade(engine, self.REPOSITORY, 267)
        # Make sure the consoles entry with the null instance_uuid
        # was deleted.
        console = consoles.select(consoles.c.id == 1).execute().first()
        self.assertIsNone(console)
def _check_268(self, engine, data):
# We can only assert that the col exists, not the unique constraint
# as the engine is running sqlite
self.assertColumnExists(engine, 'compute_nodes', 'host')
self.assertColumnExists(engine, 'shadow_compute_nodes', 'host')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
shadow_compute_nodes = oslodbutils.get_table(
engine, 'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_compute_nodes.c.host.type,
sqlalchemy.types.String)
def _check_269(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'numa_node')
self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(pci_devices.c.numa_node.nullable)
self.assertIsInstance(shadow_pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
def _check_270(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'flavor')
self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(
engine, 'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.flavor.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.flavor.type,
sqlalchemy.types.Text)
def _check_271(self, engine, data):
self.assertIndexMembers(engine, 'block_device_mapping',
'snapshot_id', ['snapshot_id'])
self.assertIndexMembers(engine, 'block_device_mapping',
'volume_id', ['volume_id'])
self.assertIndexMembers(engine, 'dns_domains',
'dns_domains_project_id_idx',
['project_id'])
self.assertIndexMembers(engine, 'fixed_ips',
'network_id', ['network_id'])
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_instance_uuid_fkey',
['instance_uuid'])
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_virtual_interface_id_fkey',
['virtual_interface_id'])
self.assertIndexMembers(engine, 'floating_ips',
'fixed_ip_id', ['fixed_ip_id'])
self.assertIndexMembers(engine, 'iscsi_targets',
'iscsi_targets_volume_id_fkey', ['volume_id'])
self.assertIndexMembers(engine, 'virtual_interfaces',
'virtual_interfaces_network_id_idx',
['network_id'])
self.assertIndexMembers(engine, 'virtual_interfaces',
'virtual_interfaces_instance_uuid_fkey',
['instance_uuid'])
# Removed on MySQL, never existed on other databases
self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
    def _pre_upgrade_273(self, engine):
        """Drop several unique constraints (sqlite only) so _check_273 can
        verify that migration 273 re-adds them.
        """
        if engine.name != 'sqlite':
            return
        # Drop a variety of unique constraints to ensure that the script
        # properly readds them back
        for table_name, constraint_name in [
                ('compute_nodes', 'uniq_compute_nodes0'
                                  'host0hypervisor_hostname'),
                ('fixed_ips', 'uniq_fixed_ips0address0deleted'),
                ('instance_info_caches', 'uniq_instance_info_caches0'
                                         'instance_uuid'),
                ('instance_type_projects', 'uniq_instance_type_projects0'
                                           'instance_type_id0project_id0'
                                           'deleted'),
                ('pci_devices', 'uniq_pci_devices0compute_node_id0'
                                'address0deleted'),
                ('virtual_interfaces', 'uniq_virtual_interfaces0'
                                       'address0deleted')]:
            table = oslodbutils.get_table(engine, table_name)
            constraints = [c for c in table.constraints
                           if c.name == constraint_name]
            for cons in constraints:
                # Need to use sqlalchemy-migrate UniqueConstraint
                cons = UniqueConstraint(*[c.name for c in cons.columns],
                                        name=cons.name,
                                        table=table)
                cons.drop()
    def _check_273(self, engine, data):
        """Every expected foreign key exists and points at the right column.

        NOTE: ``src_table`` is deliberately rebound inside the loop -- first
        a string from the tuple, then the reflected Table object.
        """
        for src_table, src_column, dst_table, dst_column in [
                ('fixed_ips', 'instance_uuid', 'instances', 'uuid'),
                ('block_device_mapping', 'instance_uuid', 'instances', 'uuid'),
                ('instance_info_caches', 'instance_uuid', 'instances', 'uuid'),
                ('instance_metadata', 'instance_uuid', 'instances', 'uuid'),
                ('instance_system_metadata', 'instance_uuid',
                 'instances', 'uuid'),
                ('instance_type_projects', 'instance_type_id',
                 'instance_types', 'id'),
                ('iscsi_targets', 'volume_id', 'volumes', 'id'),
                ('reservations', 'usage_id', 'quota_usages', 'id'),
                ('security_group_instance_association', 'instance_uuid',
                 'instances', 'uuid'),
                ('security_group_instance_association', 'security_group_id',
                 'security_groups', 'id'),
                ('virtual_interfaces', 'instance_uuid', 'instances', 'uuid'),
                ('compute_nodes', 'service_id', 'services', 'id'),
                ('instance_actions', 'instance_uuid', 'instances', 'uuid'),
                ('instance_faults', 'instance_uuid', 'instances', 'uuid'),
                ('migrations', 'instance_uuid', 'instances', 'uuid')]:
            # Map each FK's local column name to the referenced column.
            src_table = oslodbutils.get_table(engine, src_table)
            fkeys = {fk.parent.name: fk.column
                     for fk in src_table.foreign_keys}
            self.assertIn(src_column, fkeys)
            self.assertEqual(fkeys[src_column].table.name, dst_table)
            self.assertEqual(fkeys[src_column].name, dst_column)
    def _check_274(self, engine, data):
        """The (project_id, deleted) index replaced the project_id index."""
        self.assertIndexMembers(engine, 'instances',
                                'instances_project_id_deleted_idx',
                                ['project_id', 'deleted'])
        self.assertIndexNotExists(engine, 'instances', 'project_id')
def _pre_upgrade_275(self, engine):
# Create a keypair record so we can test that the upgrade will set
# 'ssh' as default value in the new column for the previous keypair
# entries.
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
fake_keypair = {'name': 'test-migr'}
key_pairs.insert().execute(fake_keypair)
def _check_275(self, engine, data):
self.assertColumnExists(engine, 'key_pairs', 'type')
self.assertColumnExists(engine, 'shadow_key_pairs', 'type')
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
shadow_key_pairs = oslodbutils.get_table(engine, 'shadow_key_pairs')
self.assertIsInstance(key_pairs.c.type.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_key_pairs.c.type.type,
sqlalchemy.types.String)
# Make sure the keypair entry will have the type 'ssh'
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
keypair = key_pairs.select(
key_pairs.c.name == 'test-migr').execute().first()
self.assertEqual('ssh', keypair.type)
def _check_276(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'vcpu_model')
self.assertColumnExists(engine, 'shadow_instance_extra', 'vcpu_model')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(
engine, 'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.vcpu_model.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.vcpu_model.type,
sqlalchemy.types.Text)
def _check_277(self, engine, data):
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_deleted_allocated_updated_at_idx',
['deleted', 'allocated', 'updated_at'])
def _check_278(self, engine, data):
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertEqual(0, len([fk for fk in compute_nodes.foreign_keys
if fk.parent.name == 'service_id']))
self.assertTrue(compute_nodes.c.service_id.nullable)
def _check_279(self, engine, data):
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('compute_nodes')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertNotIn('uniq_compute_nodes0host0hypervisor_hostname',
constraint_names)
self.assertIn('uniq_compute_nodes0host0hypervisor_hostname0deleted',
constraint_names)
def _check_280(self, engine, data):
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
self.assertFalse(key_pairs.c.name.nullable)
def _check_291(self, engine, data):
# NOTE(danms): This is a dummy migration that just does a consistency
# check
pass
def _check_292(self, engine, data):
self.assertTableNotExists(engine, 'iscsi_targets')
self.assertTableNotExists(engine, 'volumes')
self.assertTableNotExists(engine, 'shadow_iscsi_targets')
self.assertTableNotExists(engine, 'shadow_volumes')
def _pre_upgrade_293(self, engine):
migrations = oslodbutils.get_table(engine, 'migrations')
fake_migration = {}
migrations.insert().execute(fake_migration)
def _check_293(self, engine, data):
self.assertColumnExists(engine, 'migrations', 'migration_type')
self.assertColumnExists(engine, 'shadow_migrations', 'migration_type')
migrations = oslodbutils.get_table(engine, 'migrations')
fake_migration = migrations.select().execute().first()
self.assertIsNone(fake_migration.migration_type)
self.assertFalse(fake_migration.hidden)
def _check_294(self, engine, data):
self.assertColumnExists(engine, 'services', 'last_seen_up')
self.assertColumnExists(engine, 'shadow_services', 'last_seen_up')
services = oslodbutils.get_table(engine, 'services')
shadow_services = oslodbutils.get_table(
engine, 'shadow_services')
self.assertIsInstance(services.c.last_seen_up.type,
sqlalchemy.types.DateTime)
self.assertIsInstance(shadow_services.c.last_seen_up.type,
sqlalchemy.types.DateTime)
    def _pre_upgrade_295(self, engine):
        """The uuid index must not exist before migration 295 runs."""
        self.assertIndexNotExists(engine, 'virtual_interfaces',
                                  'virtual_interfaces_uuid_idx')
    def _check_295(self, engine, data):
        """Migration 295 added the virtual_interfaces uuid index."""
        self.assertIndexMembers(engine, 'virtual_interfaces',
                                'virtual_interfaces_uuid_idx', ['uuid'])
def _check_296(self, engine, data):
if engine.name == 'ibm_db_sa':
# Make sure the last FK in the list was created.
inspector = reflection.Inspector.from_engine(engine)
fkeys = inspector.get_foreign_keys('instance_extra')
fkey_names = [fkey['name'] for fkey in fkeys]
self.assertIn('fk_instance_extra_instance_uuid', fkey_names)
    def _check_297(self, engine, data):
        """services gained a forced_down column."""
        self.assertColumnExists(engine, 'services', 'forced_down')
    def _check_298(self, engine, data):
        """Migration 298 is MySQL-specific; nothing to verify on SQLite."""
        # NOTE(nic): This is a MySQL-specific migration, and is a no-op from
        # the point-of-view of unit tests, since they use SQLite
        pass
def filter_metadata_diff(self, diff):
# Overriding the parent method to decide on certain attributes
# that maybe present in the DB but not in the models.py
def removed_column(element):
# Define a whitelist of columns that would be removed from the
# DB at a later release.
column_whitelist = {'instances': ['scheduled_at']}
if element[0] != 'remove_column':
return False
table_name, column = element[2], element[3]
return (table_name in column_whitelist and
column.name in column_whitelist[table_name])
return [
element
for element in diff
if not removed_column(element)
]
    def _check_299(self, engine, data):
        """services gained a version column."""
        self.assertColumnExists(engine, 'services', 'version')
    def _check_300(self, engine, data):
        """instance_extra gained a migration_context column."""
        self.assertColumnExists(engine, 'instance_extra', 'migration_context')
def _check_301(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes',
'cpu_allocation_ratio')
self.assertColumnExists(engine, 'compute_nodes',
'ram_allocation_ratio')
    def _check_302(self, engine, data):
        """instance_system_metadata has an index on instance_uuid."""
        self.assertIndexMembers(engine, 'instance_system_metadata',
                                'instance_uuid', ['instance_uuid'])
class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
                               test_base.DbTestCase,
                               test.NoDBTestCase):
    """Runs the migration checks against SQLite."""
    pass
class TestNovaMigrationsMySQL(NovaMigrationsCheckers,
                              test_base.MySQLOpportunisticTestCase,
                              test.NoDBTestCase):
    """Runs the migration checks against an opportunistic MySQL database."""

    def test_innodb_tables(self):
        """Every table except migrate_version must use InnoDB."""
        with mock.patch.object(sa_migration, 'get_engine',
                               return_value=self.migrate_engine):
            sa_migration.db_sync()
        database = self.migrate_engine.url.database
        # Sanity check: the schema must contain at least one table.
        total = self.migrate_engine.execute(
            "SELECT count(*) "
            "FROM information_schema.TABLES "
            "WHERE TABLE_SCHEMA = '%(database)s'" %
            {'database': database})
        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
        noninnodb = self.migrate_engine.execute(
            "SELECT count(*) "
            "FROM information_schema.TABLES "
            "WHERE TABLE_SCHEMA='%(database)s' "
            "AND ENGINE != 'InnoDB' "
            "AND TABLE_NAME != 'migrate_version'" %
            {'database': database})
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers,
                                   test_base.PostgreSQLOpportunisticTestCase,
                                   test.NoDBTestCase):
    """Runs the migration checks against an opportunistic PostgreSQL DB."""
    pass
class ProjectTestCase(test.NoDBTestCase):
    """Repository-hygiene checks for the migration scripts themselves."""

    def test_no_migrations_have_downgrade(self):
        """Flag migration scripts defining both upgrade() and downgrade().

        Downgrade routines are not supported, so any script containing one
        fails this test.
        """
        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
        py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
                               "migrate_repo", "versions", "*.py")
        includes_downgrade = []
        for path in glob.iglob(py_glob):
            with open(path, "r") as f:
                contents = f.read()
            if 'def upgrade(' in contents and 'def downgrade(' in contents:
                includes_downgrade.append(os.path.basename(path))
        helpful_msg = ("The following migrations have a downgrade "
                       "which is not supported:"
                       "\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
        self.assertFalse(includes_downgrade, helpful_msg)
class ExpandTest(test.NoDBTestCase):
    """Tests for the online schema-expansion entry point."""

    @mock.patch('nova.db.sqlalchemy.migration._schedule_schema_changes')
    @mock.patch('nova.db.sqlalchemy.migration._find_migrate_repo')
    def test_dryrun(self, find_repo, schedule):
        """A dry run must not touch the migrate repo at all."""
        # we shouldn't lock the sqlalchemy migrate table on a dry run
        schedule.return_value = [], [], []
        sa_migration.db_expand(dryrun=True)
        self.assertEqual([], find_repo.mock_calls)
class SchemaChangeSchedulerTest(test.NoDBTestCase):
    """Ordering tests for the schema-change Scheduler."""

    def test_add_fk_after_add_column(self):
        """An AddForeignKey on a new column must run after its AddColumn."""
        exist_meta = sqlalchemy.MetaData()
        sqlalchemy.Table('a', exist_meta,
                         sqlalchemy.Column('id', sqlalchemy.Integer))
        sqlalchemy.Table('b', exist_meta,
                         sqlalchemy.Column('id', sqlalchemy.Integer))
        model_meta = sqlalchemy.MetaData()
        sqlalchemy.Table('a', model_meta,
                         sqlalchemy.Column('id', sqlalchemy.Integer))
        column = sqlalchemy.Column('a_id', sqlalchemy.Integer,
                                   sqlalchemy.ForeignKey('a.id'))
        table = sqlalchemy.Table('b', model_meta,
                                 sqlalchemy.Column('id', sqlalchemy.Integer),
                                 column)
        fkc = sqlalchemy.ForeignKeyConstraint(['a_id'], ['a.id'],
                                              table=table)
        addcolumn = sa_migration.AddColumn('b', column,
                                           desired_phase='migrate')
        addfk = sa_migration.AddForeignKey(fkc)
        scheduler = sa_migration.Scheduler()
        # Added in the "wrong" order on purpose; the scheduler must reorder.
        scheduler.add(addfk)
        scheduler.add(addcolumn)
        expand, migrate, contract = scheduler.schedule()
        self.assertEqual([], expand)
        self.assertEqual([addcolumn, addfk], migrate)
        self.assertEqual([], contract)

    def test_remove_index_after_add(self):
        """Dropping the old same-named index must precede re-adding it."""
        exist_meta = sqlalchemy.MetaData()
        oldtbl = sqlalchemy.Table('a', exist_meta,
                                  sqlalchemy.Column('id', sqlalchemy.Integer),
                                  sqlalchemy.Column('foo', sqlalchemy.Integer))
        model_meta = sqlalchemy.MetaData()
        newtbl = sqlalchemy.Table('a', model_meta,
                                  sqlalchemy.Column('id', sqlalchemy.Integer))
        old_index = sqlalchemy.Index('a_id_idx', oldtbl.c.id, oldtbl.c.foo)
        new_index = sqlalchemy.Index('a_id_idx', newtbl.c.id)
        dropidx = sa_migration.DropIndex(old_index)
        addidx = sa_migration.AddIndex(new_index, {})
        scheduler = sa_migration.Scheduler()
        # Added in the "wrong" order on purpose; the scheduler must reorder.
        scheduler.add(addidx)
        scheduler.add(dropidx)
        expand, migrate, contract = scheduler.schedule()
        self.assertEqual([], expand)
        self.assertEqual([dropidx, addidx], migrate)
        self.assertEqual([], contract)
def _table(*args, **kwargs):
    """Build a sqlalchemy Table, forcing mysql_engine to InnoDB.

    Any caller-supplied mysql_engine value is overridden, matching the
    production schema convention.
    """
    return sqlalchemy.Table(*args, **dict(kwargs, mysql_engine='InnoDB'))
class SchemaChangeDDLCheckers(object):
    """Mixin exercising each schema-change DDL operation against a live
    engine (provided by the concrete opportunistic test-case subclasses).
    """

    def setUp(self):
        super(SchemaChangeDDLCheckers, self).setUp()
        # Alembic Operations bound to this test's engine; each DDL op
        # object is executed through it.
        context = alembic.migration.MigrationContext.configure(self.engine)
        self.ddlop = alembic.operations.Operations(context)

    def test_add_table(self):
        """A table created via metadata is reflectable afterwards."""
        meta = sqlalchemy.MetaData()
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer))
        meta.create_all(self.engine)
        table = oslodbutils.get_table(self.engine, 'a')
        self.assertIn('id', table.c)

    def test_drop_table(self):
        """DropTable removes the table from the database."""
        meta = sqlalchemy.MetaData()
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer))
        meta.create_all(self.engine)
        # Will raise exception if table does not exist
        oslodbutils.get_table(self.engine, 'a')
        op = sa_migration.DropTable(table)
        op.execute(self.ddlop)
        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                          oslodbutils.get_table, self.engine, 'a')

    def test_add_column(self):
        """AddColumn appends a new column without disturbing existing ones."""
        meta = sqlalchemy.MetaData()
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer))
        meta.create_all(self.engine)
        column = sqlalchemy.Column('uuid', sqlalchemy.String(36))
        op = sa_migration.AddColumn('a', column)
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'a')
        self.assertIn('id', table.c)
        self.assertIn('uuid', table.c)

    def test_alter_column_nullable(self):
        """AlterColumn can flip a column from nullable to NOT NULL."""
        meta = sqlalchemy.MetaData()
        column = sqlalchemy.Column('uuid', sqlalchemy.String(36))
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer),
                       column)
        meta.create_all(self.engine)
        self.assertTrue(table.c.uuid.nullable)
        op = sa_migration.AlterColumn('a', 'uuid',
                                      {'nullable': False,
                                       'existing_type': column.type})
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'a')
        self.assertFalse(table.c.uuid.nullable)

    def test_alter_column_type(self):
        """AlterColumn can change a column's type (Text -> String)."""
        meta = sqlalchemy.MetaData()
        column = sqlalchemy.Column('uuid', sqlalchemy.Text)
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer),
                       column)
        meta.create_all(self.engine)
        self.assertIsInstance(table.c.uuid.type, sqlalchemy.Text)
        new_type = sqlalchemy.String(36)
        op = sa_migration.AlterColumn('a', 'uuid',
                                      {'nullable': True,
                                       'type_': new_type})
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'a')
        self.assertIsInstance(table.c.uuid.type, sqlalchemy.String)
        # Text is a subclass of String, so the previous assert could pass
        # if the column type didn't change
        self.assertNotIsInstance(table.c.uuid.type, sqlalchemy.Text)

    def test_drop_column(self):
        """DropColumn removes only the targeted column."""
        meta = sqlalchemy.MetaData()
        column = sqlalchemy.Column('uuid', sqlalchemy.String(36))
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer),
                       column)
        meta.create_all(self.engine)
        op = sa_migration.DropColumn('a', column)
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'a')
        self.assertIn('id', table.c)
        self.assertNotIn('uuid', table.c)

    def test_add_index(self):
        """AddIndex creates a reflectable index."""
        meta = sqlalchemy.MetaData()
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer))
        meta.create_all(self.engine)
        index = sqlalchemy.Index('a_id_idx', table.c.id)
        op = sa_migration.AddIndex(index, {})
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'a')
        self.assertIn('a_id_idx', [i.name for i in table.indexes])

    def test_drop_index(self):
        """DropIndex removes an existing index."""
        meta = sqlalchemy.MetaData()
        index = sqlalchemy.Index('a_id_idx', 'id')
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer),
                       index)
        meta.create_all(self.engine)
        op = sa_migration.DropIndex(index)
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'a')
        self.assertNotIn('a_id_idx', [i.name for i in table.indexes])

    def test_add_unique_constraint(self):
        """AddUniqueConstraint makes the column unique (index or UC)."""
        meta = sqlalchemy.MetaData()
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer))
        meta.create_all(self.engine)
        uc = sqlalchemy.UniqueConstraint(table.c.id, name='uniq_a_id')
        op = sa_migration.AddUniqueConstraint(uc)
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'a')
        # Collect all unique indexes and constraints. MySQL will
        # transparently create unique constraints as unique indexes
        # (which is different than PostgreSQL). Also, older versions
        # of SQLAlchemy will sometimes reflect these inconsistently.
        uniques = {i.name for i in table.indexes if i.unique}
        uniques.update(c.name for c in table.constraints
                       if isinstance(c, sqlalchemy.UniqueConstraint))
        self.assertIn('uniq_a_id', uniques)

    def test_drop_unique_constraint(self):
        """DropUniqueConstraint removes the uniqueness guarantee."""
        meta = sqlalchemy.MetaData()
        uc = sqlalchemy.UniqueConstraint('id', name='uniq_a_id')
        table = _table('a', meta,
                       sqlalchemy.Column('id', sqlalchemy.Integer),
                       uc)
        meta.create_all(self.engine)
        op = sa_migration.DropUniqueConstraint(uc)
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'a')
        # See comment for test_add_unique_constraint
        uniques = {i.name for i in table.indexes if i.unique}
        uniques.update(c.name for c in table.constraints
                       if isinstance(c, sqlalchemy.UniqueConstraint))
        self.assertNotIn('uniq_a_id', uniques)

    def test_add_foreign_key(self):
        """AddForeignKey creates an FK with the expected column mapping."""
        meta = sqlalchemy.MetaData()
        a = _table('a', meta,
                   sqlalchemy.Column('id', sqlalchemy.Integer),
                   sqlalchemy.UniqueConstraint('id'))
        b = _table('b', meta,
                   sqlalchemy.Column('a_id', sqlalchemy.Integer))
        meta.create_all(self.engine)
        fkc = sqlalchemy.ForeignKeyConstraint([b.c.a_id], [a.c.id],
                                              name='b_a_id_fk')
        op = sa_migration.AddForeignKey(fkc)
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'b')
        fkcs = {c.name: c for c in table.constraints
                if isinstance(c, sqlalchemy.ForeignKeyConstraint)}
        self.assertIn('b_a_id_fk', fkcs)
        columns = [(fk.parent.name, fk.column.name)
                   for fk in fkcs['b_a_id_fk'].elements]
        self.assertEqual([('a_id', 'id')], columns)

    def test_drop_foreign_key(self):
        """DropForeignKey removes an FK declared in the table metadata."""
        meta = sqlalchemy.MetaData()
        a = _table('a', meta,
                   sqlalchemy.Column('id', sqlalchemy.Integer),
                   sqlalchemy.UniqueConstraint('id'))
        b = _table('b', meta,
                   sqlalchemy.Column('a_id', sqlalchemy.Integer))
        fkc = sqlalchemy.ForeignKeyConstraint([b.c.a_id], [a.c.id],
                                              name='b_a_id_fk')
        meta.create_all(self.engine)
        op = sa_migration.DropForeignKey(fkc)
        op.execute(self.ddlop)
        table = oslodbutils.get_table(self.engine, 'b')
        fkcs = {c.name: c for c in table.constraints}
        self.assertNotIn('b_a_id_fk', fkcs)
class TestSchemaChangeDDLMySQL(SchemaChangeDDLCheckers,
                               test_base.MySQLOpportunisticTestCase,
                               test.NoDBTestCase):
    """Runs the DDL operation checks against MySQL."""
    pass
class TestSchemaChangeDDLPostgreSQL(SchemaChangeDDLCheckers,
                                    test_base.PostgreSQLOpportunisticTestCase,
                                    test.NoDBTestCase):
    """Runs the DDL operation checks against PostgreSQL."""
    pass
from geopy import Point
from geopy.util import NULL_HANDLER
from geopy.parsers.iso8601 import parse_iso8601
import sys, re, logging
from xml.etree import ElementTree
log = logging.getLogger(__name__)
log.addHandler(NULL_HANDLER)
class VersionError(Exception):
    """Raised when a GPX document declares an unsupported schema version."""
    pass
class Waypoint(Point):
    '''
    A `Waypoint` is a geopy `Point` with additional waypoint metadata as
    defined by the GPX format specification.
    '''

    # GPX child-element name -> Waypoint attribute name for the optional
    # metadata carried alongside the coordinates.
    _OPTIONAL_FIELDS = (
        ('time', 'timestamp'),
        ('name', 'name'),
        ('desc', 'description'),
        ('cmt', 'comment'),
        ('src', 'source'),
        ('sym', 'symbol'),
        ('type', 'classification'),
        ('fix', 'fix'),
        ('sat', 'num_satellites'),
        ('ageofdgpsdata', 'age'),
        ('dgpsid', 'dgpsid'),
    )

    @classmethod
    def from_xml_names(cls, attrs, children):
        '''
        Construct a new Waypoint from dictionaries of attribute and child
        element names corresponding to GPX waypoint information, as parsed
        by the `GPX` class.
        '''
        # Elevation is optional; missing 'ele' yields None.
        waypoint = cls(attrs['lat'], attrs['lon'], children.get('ele'))
        for tag, attr_name in cls._OPTIONAL_FIELDS:
            if tag in children:
                setattr(waypoint, attr_name, children[tag])
        return waypoint
class _Attr(object):
    '''
    Value wrapper for allowing interfaces to access attribute values with
    `obj.text`
    '''
    def __init__(self, value):
        # Mirrors the ElementTree Element API so the same type handlers can
        # process attribute values and element text alike.
        self.text = value
class GPX(object):
GPX_NS = "http://www.topografix.com/GPX/1/1"
FILE_EXT = '.gpx'
MIME_TYPE = 'application/gpx+xml'
VERSION = '1.1'
FIX_TYPES = set(('none', '2d', '3d', 'dgps', 'pps'))
DECIMAL_RE = re.compile(r'([+-]?\d*\.?\d+)$')
# Each "type tuple" is a tuple of two items:
# 1. Dictionary of attributes in the type
# 2. Dictionary of child elements that can appear in the type
GPX_TYPE = ({'version': 'string', 'creator': 'string'}, {
'metadata': 'metadata', 'wpt': ['waypoint'], 'rte': ['route'],
'trk': ['track'], 'extensions': 'extensions'
})
METADATA_TYPE = ({}, {
'name': 'string', 'desc': 'string', 'author': 'person',
'copyright': 'copyright', 'link': ['link'], 'time': 'datetime',
'keywords': 'string', 'bounds': 'bounds', 'extensions': 'extensions'
})
WAYPOINT_TYPE = ({'lat': 'decimal', 'lon': 'decimal'}, {
'ele': 'decimal', 'time': 'datetime', 'magvar': 'degrees',
'geoidheight': 'decimal', 'name': 'string', 'cmt': 'string',
'desc': 'string', 'src': 'string', 'link': ['link'], 'sym': 'string',
'type': 'string', 'fix': 'fix', 'sat': 'unsigned', 'hdop': 'decimal',
'vdop': 'decimal', 'pdop': 'decimal', 'ageofdgpsdata': 'decimal',
'dgpsid': 'dgpsid', 'extensions': 'extensions'
})
ROUTE_TYPE = ({}, {
'name': 'string', 'cmt': 'string', 'desc': 'string', 'src': 'string',
'link': ['link'], 'number': 'unsigned', 'type': 'string',
'extensions': 'extensions', 'rtept': ['waypoint']
})
TRACK_TYPE = ({}, {
'name': 'string', 'cmt': 'string', 'desc': 'string', 'src': 'string',
'link': ['link'], 'number': 'unsigned', 'type': 'string',
'extensions': 'extensions', 'trkseg': ['segment']
})
TRACK_SEGMENT_TYPE = ({},
{'trkpt': ['waypoint'], 'extensions': 'extensions'}
)
COPYRIGHT_TYPE = (
{'author': 'string'}, {'year': 'year', 'license': 'uri'}
)
LINK_TYPE = ({'href': 'uri'}, {'text': 'string', 'type': 'string'})
EMAIL_TYPE = ({'id': 'string', 'domain': 'string'}, {})
PERSON_TYPE = ({}, {'name': 'string', 'email': 'email', 'link': 'link'})
POINT_TYPE = ({'lat': 'longitude', 'lon': 'longitude'},
{'ele': 'decimal', 'time': 'datetime'}
)
POINT_SEGMENT_TYPE = ({}, {'pt': ['point']})
BOUNDS_TYPE = ({
'minlat': 'latitude', 'minlon': 'longitude',
'maxlat': 'latitude', 'maxlon': 'longitude'
}, {})
    def __init__(self, document=None, cache=True):
        """Create a GPX parser, optionally opening ``document`` right away.

        :param document: GPX content as a string, a file-like object, or an
            ElementTree element/tree (forwarded to :meth:`open`).
        :param cache: when True, parsed items are memoized by _cache_parsed.
        """
        self.cache = cache
        # Per-kind caches of parsed items, keyed by sequential index.
        self._waypoints = {}
        self._routes = {}
        self._tracks = {}
        # Maps the type names used in the *_TYPE declarations to parser
        # callables.  NOTE(review): several referenced handlers
        # (_parse_datetime_element, _parse_dgps_station, _parse_email,
        # _parse_link, _parse_int, _parse_unsigned, _parse_degrees,
        # _parse_fix, _parse_noop) are not visible in this chunk -- they are
        # presumably defined elsewhere in the class; confirm.
        self.type_handlers = {
            'string': lambda e: e.text,
            'uri': lambda e: e.text,
            'datetime': self._parse_datetime_element,
            'decimal': self._parse_decimal,
            'dgpsid': self._parse_dgps_station,
            'email': self._parse_email,
            'link': self._parse_link,
            'year': self._parse_int,
            'waypoint': self._parse_waypoint,
            'segment': self._parse_segment,
            'unsigned': self._parse_unsigned,
            'degrees': self._parse_degrees,
            'fix': self._parse_fix,
            'extensions': self._parse_noop,
        }
        if document is not None:
            self.open(document)
def open(self, string_or_file):
if isinstance(string_or_file, basestring):
string_or_file = ElementTree.fromstring(string_or_file)
elif not ElementTree.iselement(string_or_file):
string_or_file = ElementTree.parse(string_or_file)
if string_or_file.getroot().tag == self._get_qname('gpx'):
self._root = string_or_file.getroot()
@property
def version(self):
if not hasattr(self, '_version'):
version = self._root.get('version')
if version == self.VERSION:
self._version = version
else:
raise VersionError("%r" % (version,))
return self._version
@property
def creator(self):
if not hasattr(self, '_creator'):
self._creator = self._root.get('creator')
return self._creator
    @property
    def metadata(self):
        """Dict of parsed <metadata> children of the root element (cached).

        NOTE(review): this property looks broken/legacy as written --
        ``self.METADATA`` is not defined anywhere visible (the class
        declares ``METADATA_TYPE``), ``self._child_dict`` is not defined in
        this module chunk, and ``self._parse_datetime`` is absent (only
        ``_parse_datetime_element`` is registered).  Confirm before relying
        on it.
        """
        if not hasattr(self, '_metadata'):
            metadata_qname = self._get_qname('metadata')
            metadata = {}
            element = self._root.find(metadata_qname)
            if element is not None:
                single, multi = self.METADATA
                metadata.update(self._child_dict(element, single, multi))
                # NOTE(review): this loop re-assigns each value to itself --
                # a no-op; presumably leftover from removed conversion code.
                for tag in ('name', 'desc', 'time', 'keywords'):
                    if tag in metadata:
                        metadata[tag] = metadata[tag]
                if 'time' in metadata:
                    metadata['time'] = self._parse_datetime(metadata['time'])
            self._metadata = metadata
        return self._metadata
    @property
    def waypoints(self):
        """Yield parsed <wpt> children via the _parse_waypoint cache.

        NOTE(review): this definition is shadowed by a second ``waypoints``
        property declared later in this class, so it is dead code as the
        class stands.
        """
        tag = self._get_qname('wpt')
        return self._cache_parsed(tag, self._parse_waypoint, self._waypoints)
    def _parse_waypoint(self, element):
        """Parse a waypoint element.

        NOTE(review): unfinished stub -- builds an unused dict and Point and
        implicitly returns None.
        """
        waypoint = {}
        point = Point(element.get('lat'), element.get('lon'))
    def _parse_segment(self, element):
        """Parse a track segment element.  Unimplemented stub: returns None."""
        pass
    @property
    def routes(self):
        """Yield parsed <rte> children via the _parse_route cache."""
        tag = self._get_qname('rte')
        return self._cache_parsed(tag, self._parse_route, self._routes)
    def _parse_route(self, element):
        """Parse a route element.  Unimplemented stub: returns None."""
        pass
@property
def route_names(self):
for route in self._root.findall(self._get_qname('rte')):
yield route.findtext(self._get_qname('name'))
    @property
    def waypoints(self):
        """All top-level waypoints (delegates to get_waypoints).

        NOTE(review): this replaces the earlier ``waypoints`` property
        defined above in the same class body.
        """
        return self.get_waypoints()
def get_waypoints(self, route=None):
    """Yield Waypoint objects for the document or for a single route.

    route may be None (top-level <wpt> points), a route name, or a
    (name, index) pair understood by get_route_by_name.
    """
    if route is None:
        parent, tag = self._root, self._get_qname('wpt')
    else:
        parent, tag = self.get_route_by_name(route), self._get_qname('rtept')
    for child in parent.findall(tag):
        attrs, children = self._parse_type(child, self.WAYPOINT_TYPE)
        yield Waypoint.from_xml_names(attrs, children)
def get_route_by_name(self, route):
    """Return the index-th <rte> element with a matching name, or None.

    route is either a bare name (first match wins) or a (name, index) pair.
    """
    if isinstance(route, basestring):
        name, index = route, 0
    else:
        name, index = route
    matches = 0
    name_qname = self._get_qname('name')
    for rte in self._root.findall(self._get_qname('rte')):
        if rte.findtext(name_qname) == name:
            if matches == index:
                return rte
            matches += 1
    return None
@property
def tracks(self):
    """Iterate the document's parsed <trk> tracks via the parse cache.

    Bug fix: the tag was 'rte' (copy-paste from the routes property), so
    this property iterated route elements instead of track elements.
    """
    tag = self._get_qname('trk')
    return self._cache_parsed(tag, self._parse_track, self._tracks)
def _parse_track(self, element):
    """Parse a <trk> element (stub: not implemented, returns None)."""
    pass
def _parse_type(self, element, type_def):
    """Parse one element against a (attr_types, child_types) definition.

    Returns (attrs, children) dicts.  A child handler wrapped in a list
    means "collect every occurrence"; a bare handler keeps only the last
    occurrence found.

    Fixes: the local ``all`` shadowed the builtin (renamed collect_all);
    py2-only ``iteritems`` replaced with the equivalent ``items``.
    """
    attr_types, child_types = type_def
    attrs = {}
    children = {}
    for attr, handler in attr_types.items():
        type_func = self.type_handlers[handler]
        attrs[attr] = type_func(_Attr(element.get(attr)))
    for tag, handler in child_types.items():
        # A list-wrapped handler marks a multi-valued child element.
        collect_all = isinstance(handler, list)
        type_func = self.type_handlers[handler[0] if collect_all else handler]
        values = [type_func(e) for e in element.findall(self._get_qname(tag))]
        if values:
            children[tag] = values if collect_all else values[-1]
    return attrs, children
@property
def extensions(self):
    """GPX <extensions> accessor (stub: computes the qname, returns None)."""
    extensions_qname = self._get_qname('extensions')
def _cache_parsed(self, tag, parse_func, cache):
    """Yield parsed items for matching root children, serving already-parsed
    entries from ``cache`` and (when self.cache is true) filling it.

    Items that parse to None are cached but never yielded.
    """
    i = -1
    # First drain whatever has been cached previously.
    for i in xrange(len(cache)):
        item = cache[i]
        if item is not None:
            yield item
    # Then walk the document, continuing the running index.
    # NOTE(review): this second pass starts from the FIRST matching element,
    # so items already served from the cache are parsed again, and
    # ``cache[i] = item`` assigns at/past len(cache), which raises IndexError
    # on a plain list -- confirm the cache container's semantics.
    for element in self._root:
        if element.tag == tag:
            i += 1
            item = parse_func(element)
            if self.cache:
                cache[i] = item
            if item is not None:
                yield item
def _parse_decimal(self, element):
value = element.text
match = re.match(self.DECIMAL_RE, value)
if match:
return float(match.group(1))
else:
raise ValueError("Invalid decimal value: %r" % (value,))
def _parse_degrees(self, element):
value = self._parse_decimal(element)
if 0 <= value <= 360:
return value
else:
raise ValueError("Value out of range [0, 360]: %r" % (value,))
def _parse_dgps_station(self, element):
value = int(element.text)
if 0 <= value <= 1023:
return value
else:
raise ValueError("Value out of range [0, 1023]: %r" % (value,))
def _parse_datetime(self, value):
    """Parse an ISO-8601 timestamp string (delegates to parse_iso8601)."""
    return parse_iso8601(value)
def _parse_datetime_element(self, element):
    """Parse an element whose text is an ISO-8601 timestamp."""
    return self._parse_datetime(element.text)
def _parse_email(self, element):
value = element.text
if not value:
name = element.get('id')
domain = element.get('domain')
if name and domain:
return '@'.join((name, domain))
return value or None
def _parse_link(self, element):
    """Parse a <link> element (stub: not implemented, returns None)."""
    pass
def _parse_int(self, element):
return int(element.text)
def _parse_unsigned(self, element):
return int(element.text)
def _parse_fix(self, element):
value = element.text
if value in self.FIX_TYPES:
return value
else:
raise ValueError("Value is not a valid fix type: %r" % (value,))
def _parse_string(self, element):
return element.text
def _parse_noop(self, element):
    """Identity handler: return the element itself, unparsed."""
    return element
def _child_dict(self, element, single, multi):
    """Map children of ``element`` to a dict keyed by local tag name.

    ``single`` tags keep only their first occurrence (the qname is popped
    once matched); ``multi`` tags accumulate every occurrence in a list.
    """
    single = dict([(self._get_qname(tag), tag) for tag in single])
    multi = dict([(self._get_qname(tag), tag) for tag in multi])
    # limit counts the single tags still unseen; when it hits zero and no
    # multi tags are wanted, the scan can stop early.
    limit = len(single)
    d = {}
    if limit or multi:
        for child in element:
            if child.tag in single:
                name = single.pop(child.tag)
                d[name] = child
                limit -= 1
            elif child.tag in multi:
                name = multi[child.tag]
                d.setdefault(name, []).append(child)
            # NOTE(review): multi is never emptied, so this early exit only
            # ever fires when multi was empty to begin with.
            if not limit and not multi:
                break
    return d
def _get_qname(self, name):
return "{%s}%s" % (self.GPX_NS, name) | unknown | codeparrot/codeparrot-clean | ||
////////////////////////////////////////////////////////////////////////////
//
// Copyright 2020 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
import XCTest
import RealmSwift
/// Abstract base suite exercising `MutableSet<SwiftStringObject>` behaviour.
/// Subclasses provide the container under test via `createSet()` /
/// `createSetWithLinks()` (unmanaged, newly added, newly created, retrieved).
class MutableSetTests: TestCase {
    // Fixture string objects shared by most tests.
    var str1: SwiftStringObject?
    var str2: SwiftStringObject?
    var str3: SwiftStringObject?
    var setObject: SwiftMutableSetPropertyObject!
    var setObject2: SwiftMutableSetPropertyObject!
    var set: MutableSet<SwiftStringObject>?
    var set2: MutableSet<SwiftStringObject>?
    /// Subclass hook: produce the set-owning object under test.
    func createSet() -> SwiftMutableSetPropertyObject {
        fatalError("abstract")
    }
    /// Subclass hook: produce an object whose set links to SwiftObject.
    func createSetWithLinks() -> SwiftMutableSetOfSwiftObject {
        fatalError("abstract")
    }
    override func setUp() {
        super.setUp()
        let str1 = SwiftStringObject()
        str1.stringCol = "1"
        self.str1 = str1
        let str2 = SwiftStringObject()
        str2.stringCol = "2"
        self.str2 = str2
        let str3 = SwiftStringObject()
        str3.stringCol = "3"
        self.str3 = str3
        setObject = createSet()
        setObject2 = createSet()
        set = setObject.set
        set2 = setObject2.set
        let realm = realmWithTestPath()
        try! realm.write {
            realm.add(str1)
            realm.add(str2)
            realm.add(str3)
        }
        // Each test runs inside a write transaction, committed in tearDown.
        realm.beginWrite()
    }
    override func tearDown() {
        try! realmWithTestPath().commitWrite()
        str1 = nil
        str2 = nil
        str3 = nil
        setObject = nil
        setObject2 = nil
        set = nil
        set2 = nil
        super.tearDown()
    }
    override class var defaultTestSuite: XCTestSuite {
        // Don't run tests for the base class
        if isEqual(MutableSetTests.self) {
            return XCTestSuite(name: "empty")
        }
        return super.defaultTestSuite
    }
    func testPrimitive() {
        let obj = SwiftMutableSetObject()
        obj.int.insert(5)
        XCTAssertEqual(obj.int.first!, 5)
        XCTAssertEqual(obj.int.last!, 5)
        XCTAssertEqual(obj.int[0], 5)
        obj.int.insert(objectsIn: [6, 7, 8] as [Int])
        XCTAssertEqual(obj.int.max(), 8)
        XCTAssertEqual(obj.int.sum(), 26)
        obj.string.insert("str")
        XCTAssertEqual(obj.string.first!, "str")
        XCTAssertEqual(obj.string[0], "str")
    }
    func testPrimitiveIterationAcrossNil() {
        let obj = SwiftMutableSetObject()
        XCTAssertFalse(obj.int.contains(5))
        XCTAssertFalse(obj.int8.contains(5))
        XCTAssertFalse(obj.int16.contains(5))
        XCTAssertFalse(obj.int32.contains(5))
        XCTAssertFalse(obj.int64.contains(5))
        XCTAssertFalse(obj.float.contains(3.141592))
        XCTAssertFalse(obj.double.contains(3.141592))
        XCTAssertFalse(obj.string.contains("foobar"))
        XCTAssertFalse(obj.data.contains(Data()))
        XCTAssertFalse(obj.date.contains(Date()))
        XCTAssertFalse(obj.decimal.contains(Decimal128()))
        XCTAssertFalse(obj.objectId.contains(ObjectId()))
        // NOTE(review): every other line in this group checks the non-optional
        // property; `uuidOpt` here looks like it should be `uuid` (uuidOpt is
        // re-checked in the optional group below) -- confirm.
        XCTAssertFalse(obj.uuidOpt.contains(UUID()))
        XCTAssertFalse(obj.intOpt.contains { $0 == nil })
        XCTAssertFalse(obj.int8Opt.contains { $0 == nil })
        XCTAssertFalse(obj.int16Opt.contains { $0 == nil })
        XCTAssertFalse(obj.int32Opt.contains { $0 == nil })
        XCTAssertFalse(obj.int64Opt.contains { $0 == nil })
        XCTAssertFalse(obj.floatOpt.contains { $0 == nil })
        XCTAssertFalse(obj.doubleOpt.contains { $0 == nil })
        XCTAssertFalse(obj.stringOpt.contains { $0 == nil })
        XCTAssertFalse(obj.dataOpt.contains { $0 == nil })
        XCTAssertFalse(obj.dateOpt.contains { $0 == nil })
        XCTAssertFalse(obj.decimalOpt.contains { $0 == nil })
        XCTAssertFalse(obj.objectIdOpt.contains { $0 == nil })
        XCTAssertFalse(obj.uuidOpt.contains { $0 == nil })
    }
    func testInvalidated() {
        guard let set = set else {
            fatalError("Test precondition failure")
        }
        XCTAssertFalse(set.isInvalidated)
        if let realm = setObject.realm {
            realm.delete(setObject)
            XCTAssertTrue(set.isInvalidated)
        }
    }
    func testFastEnumerationWithMutation() {
        guard let set = set, let str1 = str1, let str2 = str2 else {
            fatalError("Test precondition failure")
        }
        set.insert(objectsIn: [str1, str2, str1, str2, str1, str2, str1, str2, str1,
            str2, str1, str2, str1, str2, str1, str2, str1, str2, str1, str2])
        var str = ""
        // Mutating while enumerating must not crash or skip elements.
        for obj in set {
            str += obj.stringCol
            set.insert(objectsIn: [str1])
        }
        XCTAssertTrue(set.contains(str1))
        XCTAssertTrue(set.contains(str2))
    }
    func testAppendObject() {
        guard let set = set, let str1 = str1, let str2 = str2 else {
            fatalError("Test precondition failure")
        }
        for str in [str1, str2, str1] {
            set.insert(str)
        }
        // Duplicate insert of str1 must be ignored: set semantics.
        XCTAssertEqual(Int(2), set.count)
        XCTAssertTrue(set.contains(str1))
        XCTAssertTrue(set.contains(str2))
    }
    func testInsert() {
        guard let set = set, let str1 = str1, let str2 = str2 else {
            fatalError("Test precondition failure")
        }
        set.insert(objectsIn: [str1, str2, str1])
        XCTAssertEqual(Int(2), set.count)
        XCTAssertTrue(set.contains(str1))
        XCTAssertTrue(set.contains(str2))
    }
    func testAppendResults() {
        guard let set = set, let str1 = str1, let str2 = str2 else {
            fatalError("Test precondition failure")
        }
        set.insert(objectsIn: realmWithTestPath().objects(SwiftStringObject.self))
        XCTAssertEqual(Int(3), set.count)
        // The unmanaged NSSet backing object won't work with MutableSet.contains(:)
        set.forEach {
            // Ordering is not guaranteed so we can't subscript
            // NOTE(review): `str3` is the optional property (the guard above
            // only binds str1/str2) -- confirm this compiles as intended.
            XCTAssertTrue($0.isSameObject(as: str1) || $0.isSameObject(as: str2) || $0.isSameObject(as: str3))
        }
    }
    func testRemoveAll() {
        guard let set = set, let str1 = str1, let str2 = str2 else {
            fatalError("Test precondition failure")
        }
        set.insert(objectsIn: [str1, str2])
        set.removeAll()
        XCTAssertEqual(Int(0), set.count)
        set.removeAll() // should be a no-op
        XCTAssertEqual(Int(0), set.count)
    }
    func testRemoveObject() {
        guard let set = set, let str1 = str1, let str2 = str2 else {
            fatalError("Test precondition failure")
        }
        set.insert(objectsIn: [str1, str2])
        XCTAssertTrue(set.contains(str1))
        XCTAssertEqual(Int(2), set.count)
        set.remove(str1)
        XCTAssertFalse(set.contains(str1))
        XCTAssertEqual(Int(1), set.count)
        set.removeAll()
        XCTAssertEqual(Int(0), set.count)
        XCTAssertFalse(set.contains(str1))
        XCTAssertFalse(set.contains(str2))
    }
    func testChangesArePersisted() {
        guard let set = set, let str1 = str1, let str2 = str2 else {
            fatalError("Test precondition failure")
        }
        // Only meaningful for managed sets; unmanaged variants skip the body.
        if let realm = set.realm {
            set.insert(objectsIn: [str1, str2])
            let otherSet = realm.objects(SwiftMutableSetPropertyObject.self).first!.set
            XCTAssertEqual(Int(2), otherSet.count)
        }
    }
    func testPopulateEmptySet() {
        guard let set = set else {
            fatalError("Test precondition failure")
        }
        XCTAssertEqual(set.count, 0, "Should start with no set elements.")
        let obj = SwiftStringObject()
        obj.stringCol = "a"
        set.insert(obj)
        set.insert(realmWithTestPath().create(SwiftStringObject.self, value: ["b"]))
        set.insert(obj)
        XCTAssertEqual(set.count, 2)
        set.forEach {
            XCTAssertTrue($0.stringCol == "a" || $0.stringCol == "b")
        }
        // Make sure we can enumerate
        for obj in set {
            XCTAssertTrue(obj.description.utf16.count > 0, "Object should have description")
        }
    }
    func testEnumeratingSetWithSetProperties() {
        let setObject = createSetWithLinks()
        setObject.realm?.beginWrite()
        for _ in 0..<10 {
            setObject.set.insert(SwiftObject())
        }
        try! setObject.realm?.commitWrite()
        XCTAssertEqual(10, setObject.set.count)
        for object in setObject.set {
            XCTAssertEqual(123, object.intCol)
            XCTAssertEqual(false, object.objectCol!.boolCol)
            XCTAssertEqual(0, object.setCol.count)
        }
    }
    /// KVC access through the `set` key path must match direct iteration.
    func testValueForKey() {
        let realm = try! Realm()
        try! realm.write {
            for value in [1, 2] {
                let setObject = SwiftMutableSetOfSwiftObject()
                let object = SwiftObject()
                object.intCol = value
                object.doubleCol = Double(value)
                object.stringCol = String(value)
                object.decimalCol = Decimal128(number: value as NSNumber)
                object.objectIdCol = try! ObjectId(string: String(repeating: String(value), count: 24))
                setObject.set.insert(object)
                realm.add(setObject)
            }
        }
        let setObjects = realm.objects(SwiftMutableSetOfSwiftObject.self)
        let setsOfObjects = setObjects.value(forKeyPath: "set") as! [MutableSet<SwiftObject>]
        let objects = realm.objects(SwiftObject.self)
        func testProperty<T: Equatable>(line: UInt = #line, fn: @escaping (SwiftObject) -> T) {
            let properties: [T] = Array(setObjects.flatMap { $0.set.map(fn) })
            let kvcProperties: [T] = Array(setsOfObjects.flatMap { $0.map(fn) })
            XCTAssertEqual(properties, kvcProperties, line: line)
        }
        func testProperty<T: Equatable>(_ name: String, line: UInt = #line, fn: @escaping (SwiftObject) -> T) {
            let properties = Array(objects.compactMap(fn))
            let setsOfObjects = objects.value(forKeyPath: name) as! [T]
            let kvcProperties = Array(setsOfObjects.compactMap { $0 })
            XCTAssertEqual(properties, kvcProperties, line: line)
        }
        testProperty { $0.intCol }
        testProperty { $0.doubleCol }
        testProperty { $0.stringCol }
        testProperty { $0.decimalCol }
        testProperty { $0.objectIdCol }
        testProperty("intCol") { $0.intCol }
        testProperty("doubleCol") { $0.doubleCol }
        testProperty("stringCol") { $0.stringCol }
        testProperty("decimalCol") { $0.decimalCol }
        testProperty("objectIdCol") { $0.objectIdCol }
    }
    @available(*, deprecated) // Silence deprecation warnings for RealmOptional
    func testValueForKeyOptional() {
        let realm = try! Realm()
        try! realm.write {
            for value in [1, 2] {
                let setObject = SwiftMutableSetOfSwiftOptionalObject()
                let object = SwiftOptionalObject()
                object.optIntCol.value = value
                object.optInt8Col.value = Int8(value)
                object.optDoubleCol.value = Double(value)
                object.optStringCol = String(value)
                object.optNSStringCol = NSString(format: "%d", value)
                object.optDecimalCol = Decimal128(number: value as NSNumber)
                object.optObjectIdCol = try! ObjectId(string: String(repeating: String(value), count: 24))
                setObject.set.insert(object)
                realm.add(setObject)
            }
        }
        let setObjects = realm.objects(SwiftMutableSetOfSwiftOptionalObject.self)
        let setOfObjects = setObjects.value(forKeyPath: "set") as! [MutableSet<SwiftOptionalObject>]
        let objects = realm.objects(SwiftOptionalObject.self)
        func testProperty<T: Equatable>(line: UInt = #line, fn: @escaping (SwiftOptionalObject) -> T) {
            let properties: [T] = Array(setObjects.flatMap { $0.set.map(fn) })
            let kvcProperties: [T] = Array(setOfObjects.flatMap { $0.map(fn) })
            XCTAssertEqual(properties, kvcProperties, line: line)
        }
        func testProperty<T: Equatable>(_ name: String, line: UInt = #line, fn: @escaping (SwiftOptionalObject) -> T) {
            let properties = Array(objects.compactMap(fn))
            let setsOfObjects = objects.value(forKeyPath: name) as! [T]
            let kvcProperties = Array(setsOfObjects.compactMap { $0 })
            XCTAssertEqual(properties, kvcProperties, line: line)
        }
        testProperty { $0.optIntCol.value }
        testProperty { $0.optInt8Col.value }
        testProperty { $0.optDoubleCol.value }
        testProperty { $0.optStringCol }
        testProperty { $0.optNSStringCol }
        testProperty { $0.optDecimalCol }
        testProperty { $0.optObjectCol }
        testProperty("optIntCol") { $0.optIntCol.value }
        testProperty("optInt8Col") { $0.optInt8Col.value }
        testProperty("optDoubleCol") { $0.optDoubleCol.value }
        testProperty("optStringCol") { $0.optStringCol }
        testProperty("optNSStringCol") { $0.optNSStringCol }
        testProperty("optDecimalCol") { $0.optDecimalCol }
        testProperty("optObjectCol") { $0.optObjectCol }
    }
    func testUnmanagedSetComparison() {
        let obj = SwiftIntObject()
        obj.intCol = 5
        let obj2 = SwiftIntObject()
        obj2.intCol = 6
        let obj3 = SwiftIntObject()
        obj3.intCol = 8
        let objects = [obj, obj2, obj3]
        let objects2 = [obj, obj2]
        let set1 = MutableSet<SwiftIntObject>()
        let set2 = MutableSet<SwiftIntObject>()
        XCTAssertEqual(set1, set2, "Empty instances should be equal by `==` operator")
        set1.insert(objectsIn: objects)
        set2.insert(objectsIn: objects)
        let set3 = MutableSet<SwiftIntObject>()
        set3.insert(objectsIn: objects2)
        XCTAssertTrue(set1 !== set2, "instances should not be identical")
        XCTAssertEqual(set1, set2, "instances should be equal by `==` operator")
        XCTAssertNotEqual(set1, set3, "instances should be equal by `==` operator")
        XCTAssertTrue(set1.isEqual(set2), "instances should be equal by `isEqual` method")
        XCTAssertTrue(!set1.isEqual(set3), "instances should be equal by `isEqual` method")
        XCTAssertEqual(Array(set1), Array(set2), "instances converted to Swift.Array should be equal")
        XCTAssertNotEqual(Array(set1), Array(set3), "instances converted to Swift.Array should be equal")
        XCTAssertEqual(Set(set1), Set(set2), "instances converted to Swift.Array should be equal")
        XCTAssertNotEqual(Set(set1), Set(set3), "instances converted to Swift.Array should be equal")
        set3.insert(obj3)
        XCTAssertEqual(set1, set3, "instances should be equal by `==` operator")
    }
    func testSubset() {
        guard let set = set, let set2 = set2, let str1 = str1, let str2 = str2 else {
            fatalError("Test precondition failure")
        }
        set.removeAll()
        set2.removeAll()
        XCTAssertEqual(Int(0), set.count)
        XCTAssertEqual(Int(0), set2.count)
        set.insert(objectsIn: [str1, str2, str1])
        set2.insert(objectsIn: [str1])
        XCTAssertEqual(Int(2), set.count)
        XCTAssertEqual(Int(1), set2.count)
        XCTAssertTrue(set2.isSubset(of: set))
        XCTAssertFalse(set.isSubset(of: set2))
    }
    // NOTE(review): despite its name, this test never calls formUnion/union --
    // its body duplicates testSubset.  The union assertions appear to be
    // missing; confirm and fill in.
    func testUnion() {
        guard let set = set, let set2 = set2, let str1 = str1, let str2 = str2 else {
            fatalError("Test precondition failure")
        }
        set.removeAll()
        set2.removeAll()
        set.insert(objectsIn: [str1, str2, str1])
        set2.insert(objectsIn: [str1])
        XCTAssertEqual(Int(2), set.count)
        XCTAssertEqual(Int(1), set2.count)
        XCTAssertTrue(set2.isSubset(of: set))
        XCTAssertFalse(set.isSubset(of: set2))
    }
    func testIntersection() {
        guard let set = set, let set2 = set2, let str1 = str1, let str2 = str2, let str3 = str3 else {
            fatalError("Test precondition failure")
        }
        set.removeAll()
        set2.removeAll()
        set.insert(objectsIn: [str1, str2])
        set2.insert(objectsIn: [str2, str3])
        XCTAssertEqual(Int(2), set.count)
        XCTAssertEqual(Int(2), set2.count)
        XCTAssertTrue(set.intersects(set2))
        XCTAssertTrue(set2.intersects(set))
        set.formIntersection(set2)
        XCTAssertTrue(set.intersects(set2))
        XCTAssertTrue(set2.intersects(set))
        XCTAssertEqual(Int(1), set.count)
    }
    func testSubtract() {
        guard let set = set, let set2 = set2, let str1 = str1, let str2 = str2, let str3 = str3 else {
            fatalError("Test precondition failure")
        }
        set.removeAll()
        set2.removeAll()
        set.insert(objectsIn: [str1, str2])
        set2.insert(objectsIn: [str2, str3])
        XCTAssertEqual(Int(2), set.count)
        XCTAssertEqual(Int(2), set2.count)
        set.subtract(set2)
        XCTAssertEqual(Int(1), set.count)
        XCTAssertTrue(set.contains(str1))
    }
}
/// Variant exercising MutableSet on unmanaged (standalone) objects.
class MutableSetStandaloneTests: MutableSetTests {
    override func createSet() -> SwiftMutableSetPropertyObject {
        let owner = SwiftMutableSetPropertyObject()
        XCTAssertNil(owner.realm)
        return owner
    }
    override func createSetWithLinks() -> SwiftMutableSetOfSwiftObject {
        let owner = SwiftMutableSetOfSwiftObject()
        XCTAssertNil(owner.realm)
        return owner
    }
}
/// Variant where owning objects are added to a Realm right after creation.
class MutableSetNewlyAddedTests: MutableSetTests {
    override func createSet() -> SwiftMutableSetPropertyObject {
        let owner = SwiftMutableSetPropertyObject()
        owner.name = "name"
        let testRealm = realmWithTestPath()
        try! testRealm.write { testRealm.add(owner) }
        XCTAssertNotNil(owner.realm)
        return owner
    }
    override func createSetWithLinks() -> SwiftMutableSetOfSwiftObject {
        let owner = SwiftMutableSetOfSwiftObject()
        let defaultRealm = try! Realm()
        try! defaultRealm.write { defaultRealm.add(owner) }
        XCTAssertNotNil(owner.realm)
        return owner
    }
}
/// Variant where owning objects are created directly inside a Realm.
class MutableSetNewlyCreatedTests: MutableSetTests {
    override func createSet() -> SwiftMutableSetPropertyObject {
        let testRealm = realmWithTestPath()
        testRealm.beginWrite()
        let owner = testRealm.create(SwiftMutableSetPropertyObject.self, value: ["name"])
        try! testRealm.commitWrite()
        XCTAssertNotNil(owner.realm)
        return owner
    }
    override func createSetWithLinks() -> SwiftMutableSetOfSwiftObject {
        let defaultRealm = try! Realm()
        defaultRealm.beginWrite()
        let owner = defaultRealm.create(SwiftMutableSetOfSwiftObject.self)
        try! defaultRealm.commitWrite()
        XCTAssertNotNil(owner.realm)
        return owner
    }
}
/// Variant where owning objects are created, then re-fetched via a query.
class MutableSetRetrievedTests: MutableSetTests {
    override func createSet() -> SwiftMutableSetPropertyObject {
        let testRealm = realmWithTestPath()
        testRealm.beginWrite()
        testRealm.create(SwiftMutableSetPropertyObject.self, value: ["name"])
        try! testRealm.commitWrite()
        let owner = testRealm.objects(SwiftMutableSetPropertyObject.self).last!
        XCTAssertNotNil(owner.realm)
        return owner
    }
    override func createSetWithLinks() -> SwiftMutableSetOfSwiftObject {
        let defaultRealm = try! Realm()
        defaultRealm.beginWrite()
        defaultRealm.create(SwiftMutableSetOfSwiftObject.self)
        try! defaultRealm.commitWrite()
        let owner = defaultRealm.objects(SwiftMutableSetOfSwiftObject.self).first!
        XCTAssertNotNil(owner.realm)
        return owner
    }
}
#!/bin/python
import os
import subprocess
import sys

from bson.objectid import ObjectId
from pymongo import MongoClient
def modifyConfig(expression, value):
    """Replace ``expression`` with ``value`` in server.properties via sed.

    Fix: the original interpolated both arguments into a shell string passed
    to os.system, so quotes or shell metacharacters in either argument broke
    (or injected into) the command.  Invoking sed with an argument list
    avoids the shell entirely.
    NOTE: a '/' inside either argument still terminates the sed expression.
    """
    print('Modifying server.properties ' + expression + ' with value ' + str(value))
    subprocess.call(
        ['sed', '-i', 's/%s/%s/' % (expression, value), 'server.properties'])
def modifyLog(expression, value):
    """Replace ``expression`` with ``value`` in log4j2.xml via sed.

    Fix: same shell-injection hazard as modifyConfig -- arguments were
    interpolated into an os.system shell string; sed is now invoked with an
    argument list so no shell is involved.
    NOTE: a '/' inside either argument still terminates the sed expression.
    """
    print('Modifying log4j2.xml ' + expression + ' with value ' + str(value))
    subprocess.call(
        ['sed', '-i', 's/%s/%s/' % (expression, value), 'log4j2.xml'])
def main():
    """Provision a Minestack Bukkit server from MongoDB configuration.

    Reads connection/identity settings from the environment
    (mongo_addresses, mongo_database, optional mongo_username /
    mongo_password, server_id), copies server files, worlds, and plugins
    into the working directory, rewrites server.properties / log4j2.xml,
    and finally launches start.sh.

    NOTE(review): the os.system calls below interpolate database-supplied
    directory names straight into shell commands -- names containing spaces
    or shell metacharacters will break or inject.
    """
    mongoHosts = os.environ['mongo_addresses'].split(',')
    mongoDB = os.environ['mongo_database']
    mongoUsername = os.getenv('mongo_username', None)
    mongoPassword = os.getenv('mongo_password', None)
    client = MongoClient(mongoHosts)
    db = client[mongoDB]
    if mongoUsername is not None:
        # NOTE(review): Database.authenticate was removed in pymongo 3.6+;
        # confirm the pinned pymongo version supports it.
        db.authenticate(mongoUsername, mongoPassword)
    serverCollection = db['servers']
    servertypesCollection = db['servertypes']
    nodesCollection = db['nodes']
    worldsCollection = db['worlds']
    pluginsCollection = db['plugins']
    # Resolve this server's document, then its type and host node.
    query = {"_id": ObjectId(os.environ['server_id'])}
    server = serverCollection.find_one(query)
    query = {"_id": ObjectId(server['server_type_id'])}
    servertype = servertypesCollection.find_one(query)
    query = {"_id": ObjectId(server['node_id'])}
    node = nodesCollection.find_one(query)
    # NOTE(review): only servertype is nil-checked; a missing server document
    # would already have raised above.
    if servertype is None:
        print('No server type found')
        sys.exit(1)
    # Join each world/plugin reference with its pinned version (and, for
    # plugins, its chosen config) from the catalogue collections.
    worlds = []
    plugins = []
    if 'worlds' in servertype:
        for worldInfo in servertype['worlds']:
            world = worldsCollection.find_one({"_id": ObjectId(worldInfo['world_id'])})
            worldVersion = None
            if 'versions' in world and 'worldversion_id' in worldInfo:
                for version in world['versions']:
                    if version['_id'] == ObjectId(worldInfo['worldversion_id']):
                        worldVersion = version
                        break
            default = worldInfo['defaultWorld']
            worldDict = {'world': world, 'version': worldVersion, 'default': default}
            worlds.append(worldDict)
    if 'plugins' in servertype:
        for pluginInfo in servertype['plugins']:
            plugin = pluginsCollection.find_one({"_id": ObjectId(pluginInfo['plugin_id'])})
            pluginConfig = None
            pluginVersion = None
            if 'configs' in plugin and 'pluginconfig_id' in pluginInfo:
                for config in plugin['configs']:
                    if config['_id'] == ObjectId(pluginInfo['pluginconfig_id']):
                        pluginConfig = config
                        break
            if 'versions' in plugin and 'pluginversion_id' in pluginInfo:
                for version in plugin['versions']:
                    if version['_id'] == ObjectId(pluginInfo['pluginversion_id']):
                        pluginVersion = version
                        break
            pluginDict = {'plugin': plugin, 'version': pluginVersion, 'config': pluginConfig}
            plugins.append(pluginDict)
    print('Copying Main Server files')
    os.system('cp -R /mnt/minestack/server/bukkit/* .')
    defaultWorld = None
    os.system('mkdir worlds')
    for worldInfo in worlds:
        world = worldInfo['world']
        version = worldInfo['version']
        default = worldInfo['default']
        print('Copying world '+world['name'])
        if version is None:
            print('World '+world['name']+' has no version. Skipping')
            continue
        if default is True:
            defaultWorld = world
        os.system('mkdir worlds/'+world['directory'])
        os.system('cp -R /mnt/minestack/worlds/'+world['directory']+'/versions/'+version['version']+'/* worlds/'+world['directory'])
    os.system('ls -l worlds')
    if defaultWorld is None:
        print('No default world set')
        sys.exit(1)
    # modify server config for default world
    modifyConfig('levelname', defaultWorld['name'])
    os.system('mkdir plugins')
    for pluginInfo in plugins:
        plugin = pluginInfo['plugin']
        version = pluginInfo['version']
        config = pluginInfo['config']
        print('Copying plugin '+plugin['name'])
        if version is None:
            print('Plugin '+plugin['name']+' has no version. Skipping')
            continue
        if config is not None:
            os.system('mkdir plugins/'+plugin['directory'])
            os.system('cp -R /mnt/minestack/plugins/'+plugin['directory']+'/configs/'+config['directory']+'/* plugins/'+plugin['directory'])
        os.system('cp -R /mnt/minestack/plugins/'+plugin['directory']+'/versions/'+version['version']+'/* plugins')
    os.system('ls -l plugins')
    # modify server config for num of players
    modifyConfig('maxplayers', servertype['players'])
    # modify server config for server name
    modifyConfig('servername', servertype['name']+'.'+str(server['number']))
    # Point the log appender at this node/server identity.
    modifyLog('SYS_HOST', node['privateAddress'])
    modifyLog('SERVERTYPE', servertype['name'])
    modifyLog('NUMBER', server['number'])
    os.system('touch .update-lock')
    os.system('ls -l')
    os.system("chmod +x start.sh")
    # Hand off to the server launcher with the configured RAM allocation.
    os.system("./start.sh "+str(servertype['ram']))
# Guard the entry point so importing this module does not provision a server.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 08 15:09:49 2016
#test Github
@author: tih
"""
import numpy as np
import os
import scipy.interpolate
import gdal
from openpyxl import load_workbook
import osr
from datetime import datetime, timedelta
import pandas as pd
import shutil
import glob
from netCDF4 import Dataset
import warnings
import SEBAL.pySEBAL.pySEBAL_code as SEBAL
def main():
####################################################################################################################
############################################# CREATE INPUT FOR SEBAL RUN ###########################################
####################################################################################################################
####################################################################################################################
##################################################### PreHANTS ####################################################
####################################################################################################################
# PreHANTS
# Part 1: Define input by user
# Part 2: Set parameters and output folder
# Part 3: RUN SEBAL
# Part 4: HANTS
# Part 5: post HANTS
# Part 6: Write output
####################################################################################################################
################################################# PreHANTS part 1 ##################################################
####################################################################################################################
VegetationExcel =r"E:\Project_2\UAE\Excel\Excel_PreSEBAL_v1_0.xlsx" # This excel defines the p and c factor and vegetation height.
####################################################################################################################
################################################# PreHANTS part 2 ##################################################
####################################################################################################################
# Open Excel workbook used for Vegetation c and p factor conversions
wb_veg = load_workbook(VegetationExcel, data_only=True)
ws_veg = wb_veg['General_Input']
# Input for preSEBAL.py
start_date = "%s" %str(ws_veg['B2'].value)
end_date = "%s" %str(ws_veg['B3'].value)
inputExcel= r"%s" %str(ws_veg['B4'].value) # The excel with all the SEBAL input data
LU_data_FileName = r"%s" %str(ws_veg['B5'].value) # Path to Land Use map
output_folder = r"%s" %str(ws_veg['B7'].value)
# optional paramater
DSSF_Folder= r"%s" %str(ws_veg['B6'].value)
######################## Load Excels ##########################################
# Open Excel workbook for SEBAL inputs
wb = load_workbook(inputExcel, data_only=True)
# Get length of EXCEL sheet
ws = wb['General_Input']
ws2 = wb['VIIRS_PROBAV_Input']
endExcel=int(ws.max_row)
# Create Dict
SEBAL_RUNS = dict()
for number in range(2,endExcel+1):
input_folder_SEBAL = str(ws['B%d' % number].value)
output_folder_SEBAL = str(ws['C%d' % number].value)
Image_Type = int(ws['D%d' % number].value)
PROBA_V_name = str(ws2['D%d' % number].value)
VIIRS_name = str(ws2['B%d' % number].value)
SEBAL_RUNS[number] = {'input_folder': input_folder_SEBAL, 'output_folder': output_folder_SEBAL, 'image_type': Image_Type,'PROBA_V_name': PROBA_V_name,'VIIRS_name': VIIRS_name}
Kind_Of_Runs_Dict = {}
for k, v in SEBAL_RUNS.iteritems():
Kind_Of_Runs_Dict.setdefault(v['image_type'], []).append(k)
######################## Create output folders ##########################################
output_folder_PreSEBAL_SEBAL = os.path.join(output_folder,'PreSEBAL_SEBAL_out')
input_folder_HANTS = os.path.join(output_folder,'HANTS_in')
output_folder_PreSEBAL = os.path.join(output_folder,'PreSEBAL_out')
temp_folder_PreSEBAL = os.path.join(output_folder,'PreSEBAL_temp')
temp_folder_PreSEBAL_LST = os.path.join(temp_folder_PreSEBAL,'LST')
NDVI_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'NDVI')
Albedo_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Albedo')
WaterMask_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Water_Mask')
LAI_outfolder = os.path.join(output_folder_PreSEBAL,'LAI')
ALBEDO_outfolder_end = os.path.join(output_folder_PreSEBAL,'ALBEDO')
NDVI_outfolder_end = os.path.join(output_folder_PreSEBAL,'NDVI')
WaterMask_outfolder_end = os.path.join(output_folder_PreSEBAL,'Water_Mask')
TRANS_outfolder = os.path.join(output_folder_PreSEBAL,'Transmissivity')
Surface_Temperature_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Surface_Temperature')
output_folder_HANTS_end_sharp = os.path.join(output_folder_PreSEBAL, 'LST_Sharpened')
output_folder_HANTS_end_Veg = os.path.join(output_folder_PreSEBAL, 'Vegetation_Height')
output_folder_p_factor = os.path.join(output_folder_PreSEBAL, 'p_factor')
output_folder_LUE = os.path.join(output_folder_PreSEBAL, 'LUE')
if not os.path.exists(output_folder_PreSEBAL_SEBAL):
os.makedirs(output_folder_PreSEBAL_SEBAL)
if not os.path.exists(output_folder_PreSEBAL):
os.mkdir(output_folder_PreSEBAL)
if not os.path.exists(temp_folder_PreSEBAL):
os.mkdir(temp_folder_PreSEBAL)
if not os.path.exists(NDVI_outfolder):
os.makedirs(NDVI_outfolder)
if not os.path.exists(Albedo_outfolder):
os.makedirs(Albedo_outfolder)
if not os.path.exists(WaterMask_outfolder):
os.makedirs(WaterMask_outfolder)
if not os.path.exists(LAI_outfolder):
os.makedirs(LAI_outfolder)
if not os.path.exists(ALBEDO_outfolder_end):
os.makedirs(ALBEDO_outfolder_end)
if not os.path.exists(NDVI_outfolder_end):
os.makedirs(NDVI_outfolder_end)
if not os.path.exists(WaterMask_outfolder_end):
os.makedirs(WaterMask_outfolder_end)
if not os.path.exists(temp_folder_PreSEBAL_LST):
os.makedirs(temp_folder_PreSEBAL_LST)
if not os.path.exists(Surface_Temperature_outfolder):
os.makedirs(Surface_Temperature_outfolder)
if not os.path.exists(TRANS_outfolder):
os.makedirs(TRANS_outfolder)
if not os.path.exists(output_folder_HANTS_end_sharp):
os.mkdir(output_folder_HANTS_end_sharp)
if not os.path.exists(output_folder_HANTS_end_Veg):
os.mkdir(output_folder_HANTS_end_Veg)
if not os.path.exists(output_folder_p_factor):
os.mkdir(output_folder_p_factor)
if not os.path.exists(output_folder_LUE):
os.mkdir(output_folder_LUE)
# Do not show warnings
warnings.filterwarnings('ignore')
####################################################################################################################
################################################### RUN SEBAL part 3 ###############################################
####################################################################################################################
############################## Define General info ############################
for number in Kind_Of_Runs_Dict[2]: # Number defines the column of the inputExcel
print(number)
if not (SEBAL_RUNS[number]['PROBA_V_name'] == 'None' and SEBAL_RUNS[number]['VIIRS_name'] == 'None'):
Rp = 0.91 # Path radiance in the 10.4-12.5 µm band (W/m2/sr/µm)
tau_sky = 0.866 # Narrow band transmissivity of air, range: [10.4-12.5 µm]
surf_temp_offset = 3 # Surface temperature offset for water
######################## Open General info from SEBAL Excel ###################
# Open the General_Input sheet
ws = wb['General_Input']
# Extract the input and output folder, and Image type from the excel file
input_folder = str(ws['B%d' % number].value)
Image_Type = int(2) # Type of Image (1=Landsat & 2 = VIIRS & GLOBA-V)
# Extract the Path to the DEM map from the excel file
DEM_fileName = '%s' %str(ws['E%d' % number].value) #'DEM_HydroShed_m'
# Open DEM and create Latitude and longitude files
lat,lon,lat_fileName,lon_fileName=SEBAL.DEM_lat_lon(DEM_fileName, temp_folder_PreSEBAL)
######################## Extract general data for Landsat ##########################################
if Image_Type == 1:
# Open the Landsat_Input sheet
ws = wb['Landsat_Input']
# Extract Landsat name, number and amount of thermal bands from excel file
Name_Landsat_Image = str(ws['B%d' % number].value) # From glovis.usgs.gov
Landsat_nr = int(ws['C%d' % number].value) # Type of Landsat (LS) image used (LS5, LS7, or LS8)
Bands_thermal = int(ws['D%d' %number].value) # Number of LS bands to use to retrieve land surface
# Pixel size of the model
pixel_spacing=int(30)
# the path to the MTL file of landsat
Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image)
# read out the general info out of the MTL file in Greenwich Time
year, DOY, hour, minutes, UTM_Zone, Sun_elevation = SEBAL.info_general_metadata(Landsat_meta_fileName) # call definition info_general_metadata
date=datetime.strptime('%s %s'%(year,DOY), '%Y %j')
month = date.month
day = date.day
# define the kind of sensor and resolution of the sensor
sensor1 = 'L%d' % Landsat_nr
sensor2 = 'L%d' % Landsat_nr
sensor3 = 'L%d' % Landsat_nr
res1 = '30m'
res2 = '%sm' %int(pixel_spacing)
res3 = '30m'
# Set the start parameter for determining transmissivity at 0
Determine_transmissivity = 0
######################## Extract general data for VIIRS-PROBAV ##########################################
if Image_Type == 2:
    # Open the VIIRS_PROBAV_Input sheet
    ws = wb['VIIRS_PROBAV_Input']
    # Extract the name of the thermal and quality VIIRS image from the excel file
    Name_VIIRS_Image_TB = '%s' %str(ws['B%d' % number].value)
    # Extract the name to the PROBA-V image from the excel file
    Name_PROBAV_Image = '%s' %str(ws['D%d' % number].value)    # Must be a tiff file
    # Pixel size of the model
    pixel_spacing=int(100)
    # UTM Zone of the end results
    UTM_Zone = float(ws['G%d' % number].value)
    if not Name_VIIRS_Image_TB == 'None':
        # Get time from the VIIRS dataset name (IMPORTANT TO KEEP THE TEMPLATE OF THE VIIRS NAME CORRECT example: VIIRS_SVI05_npp_d20161021_t0956294_e1002080_b25822_c20161021160209495952_noaa_ops.tif)
        Total_Day_VIIRS = Name_VIIRS_Image_TB.split('_')[3]
        Total_Time_VIIRS = Name_VIIRS_Image_TB.split('_')[4]
        # Get the information out of the VIIRS name in GMT (Greenwich time)
        year = int(Total_Day_VIIRS[1:5])
        month = int(Total_Day_VIIRS[5:7])
        day = int(Total_Day_VIIRS[7:9])
        Startdate = '%d-%02d-%02d' % (year,month,day)
        DOY=datetime.strptime(Startdate,'%Y-%m-%d').timetuple().tm_yday
        hour = int(Total_Time_VIIRS[1:3])
        minutes = int(Total_Time_VIIRS[3:5])
        # A VIIRS acquisition time is available, so transmissivity can be determined
        ws = wb['Meteo_Input']
        Field_Radiation_24 = '%s' %str(ws['J%d' % number].value)
        Field_Trans_24 = '%s' %str(ws['K%d' % number].value)
        Determine_transmissivity = 1
    # else use PROBA-V day but then no transmissivity can be determined for now
    else:
        # Get the day and time from the PROBA-V
        Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
        g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
        Meta_data = g.GetMetadata()
        Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
        year = int(Date_PROBAV.split("-")[0])
        month = int(Date_PROBAV.split("-")[1])
        day = int(Date_PROBAV.split("-")[2])
        Var_name = '%d%02d%02d' %(year, month, day)
        DOY=datetime.strptime(Var_name,'%Y%m%d').timetuple().tm_yday
        # We cannot determine transmissivity
        Determine_transmissivity = 0

    # Determine the transmissivity if possible (Determine_transmissivity = 1)
    if Determine_transmissivity == 1:
        # Rounded difference of the local time from Greenwich (GMT) (hours),
        # estimated from the longitude of the scene centre (15 degrees per hour).
        delta_GTM = round(np.sign(lon[int(np.shape(lon)[0]/2), int(np.shape(lon)[1]/2)]) * lon[int(np.shape(lon)[0]/2), int(np.shape(lon)[1]/2)] * 24 / 360)
        if np.isnan(delta_GTM):
            # BUGFIX: the fallback previously multiplied the mean longitude by
            # itself (nanmean(lon) * nanmean(lon), i.e. lon**2) instead of
            # mirroring the sign(lon) * lon pattern of the line above.
            delta_GTM = round(np.sign(np.nanmean(lon)) * np.nanmean(lon) * 24 / 360)
        # Calculate local time
        hour += delta_GTM
        if hour < 0.0:
            day -= 1
            hour += 24
        if hour >= 24:
            day += 1
            hour -= 24
    # define the kind of sensor and resolution of the sensor
    sensor1 = 'PROBAV'
    sensor2 = 'VIIRS'
    res1 = '375m'
    res2 = '%sm' %int(pixel_spacing)
    res3 = '30m'
######################## Extract general data from DEM file and create Slope map ##########################################
# Variable date name
Var_name = '%d%02d%02d' %(year, month, day)
# Reproject from Geog Coord Syst to UTM -
# 1) DEM - Original DEM coordinates is Geographic: lat, lon
dest, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(
    DEM_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
band = dest.GetRasterBand(1)   # Get the reprojected dem band
ncol = dest.RasterXSize        # Get the reprojected dem column size
nrow = dest.RasterYSize        # Get the reprojected dem row size
shape=[ncol, nrow]
# Read out the DEM band and print the DEM properties
data_DEM = band.ReadAsArray(0, 0, ncol, nrow)
# 2) Latitude file - reprojection
# reproject latitude to the landsat projection and save as tiff file
lat_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(
    lat_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
# Get the reprojected latitude data
lat_proy = lat_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
# 3) Longitude file - reprojection
# reproject longitude to the landsat projection and save as tiff file
lon_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(lon_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
# Get the reprojected longitude data
lon_proy = lon_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
# NOTE: lon_fileName is rebound here to the reprojected raster; it is used
# throughout the rest of the script as the reference grid for clipping/resampling.
lon_fileName = os.path.join(temp_folder_PreSEBAL,'lon_resh.tif')
SEBAL.save_GeoTiff_proy(dest, lon_proy, lon_fileName, shape, nband=1)
# Calculate slope and aspect from the reprojected DEM
deg2rad,rad2deg,slope,aspect=SEBAL.Calc_Gradient(data_DEM, pixel_spacing)
# Compute extraterrestrial radiation and, when LANDSAF DSSF data is configured,
# the daily atmospheric transmissivity (Rs/Ra) resampled onto the model grid.
if Determine_transmissivity == 1:
    # calculate the coz zenith angle
    Ra_mountain_24, Ra_inst, cos_zn_resh, dr, phi, delta = SEBAL.Calc_Ra_Mountain(lon,DOY,hour,minutes,lon_proy,lat_proy,slope,aspect)
    cos_zn_fileName = os.path.join(temp_folder_PreSEBAL,'cos_zn.tif')
    SEBAL.save_GeoTiff_proy(dest, cos_zn_resh, cos_zn_fileName, shape, nband=1)
    # Save the Ra
    Ra_inst_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_inst.tif')
    SEBAL.save_GeoTiff_proy(dest, Ra_inst, Ra_inst_fileName, shape, nband=1)
    Ra_mountain_24_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_mountain_24.tif')
    SEBAL.save_GeoTiff_proy(dest, Ra_mountain_24, Ra_mountain_24_fileName, shape, nband=1)
    #################### Calculate Transmissivity ##########################################
    # Open the General_Input sheet
    ws = wb['Meteo_Input']
    # Extract the method radiation value
    Value_Method_Radiation_inst = '%s' %str(ws['L%d' % number].value)
    # Values to check if data is created
    Check_Trans_inst = 0
    Check_Trans_24 = 0
    # NOTE(review): the following block is intentionally disabled by wrapping it
    # in a string literal; the instantaneous transmissivity/radiation must be
    # supplied via the input sheet instead.
    ''' This is now turned of, so you need to fill in the instantanious transmissivity or Radiation
# Extract the data to the method of radiation
if int(Value_Method_Radiation_inst) == 2:
Field_Radiation_inst = '%s' %str(ws['N%d' % number].value)
if Field_Radiation_inst == 'None':
# Instantanious Transmissivity files must be created
Check_Trans_inst = 1
# Calculate Transmissivity
quarters_hours = np.ceil(minutes/30.) * 30
hours_GMT = hour - delta_GTM
if quarters_hours >= 60:
hours_GMT += 1
quarters_hours = 0
# Define the instantanious LANDSAF file
name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year, month,day, hours_GMT, quarters_hours)
file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst)
# Reproject the Ra_inst data to match the LANDSAF data
Ra_inst_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_inst_fileName, file_Landsaf_inst, method = 1)
Ra_inst_3Km = Ra_inst_3Km_dest.GetRasterBand(1).ReadAsArray()
Ra_inst_3Km[Ra_inst_3Km==0] = np.nan
# Open the Rs LANDSAF data
dest_Rs_inst_3Km = gdal.Open(file_Landsaf_inst)
Rs_inst_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray()
Rs_inst_3Km = np.float_(Rs_inst_3Km)/10
Rs_inst_3Km[Rs_inst_3Km<0]=np.nan
# Get shape LANDSAF data
shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ]
# Calculate Transmissivity 3Km
Transmissivity_3Km = Rs_inst_3Km/Ra_inst_3Km
Transmissivity_3Km_fileName = os.path.join(output_folder_temp,'Transmissivity_3Km.tif')
SEBAL.save_GeoTiff_proy(Ra_inst_3Km_dest, Transmissivity_3Km, Transmissivity_3Km_fileName, shape_trans, nband=1)
# Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method)
Transmissivity_inst_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_3Km_fileName, cos_zn_fileName, method = 3)
Transmissivity_inst = Transmissivity_inst_dest.GetRasterBand(1).ReadAsArray()
Transmissivity_inst[Transmissivity_inst>0.98] = 0.98
Transmissivity_inst_fileName = os.path.join(TRANS_outfolder,'Transmissivity_inst_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(Transmissivity_inst_dest, Transmissivity_inst, Transmissivity_inst_fileName, shape, nband=1)
    '''
    # Extract the method radiation value
    Value_Method_Radiation_24 = '%s' %str(ws['I%d' % number].value)
    # Extract the data to the method of radiation
    if int(Value_Method_Radiation_24) == 2:
        Field_Radiation_24 = '%s' %str(ws['K%d' % number].value)
        if Field_Radiation_24 == 'None':
            # Daily Transmissivity files must be created
            Check_Trans_24 = 1
            # Create times that are needed to calculate daily Rs (LANDSAF)
            Starttime_GMT = datetime.strptime(Startdate,'%Y-%m-%d') + timedelta(hours=-delta_GTM)
            Endtime_GMT = Starttime_GMT + timedelta(days=1)
            Times = pd.date_range(Starttime_GMT, Endtime_GMT,freq = '30min')
            # Accumulate the half-hourly LANDSAF shortwave rasters over one day
            for Time in Times[:-1]:
                year_LANDSAF = Time.year
                month_LANDSAF = Time.month
                day_LANDSAF = Time.day
                hour_LANDSAF = Time.hour
                min_LANDSAF = Time.minute
                # Define the instantanious LANDSAF file
                #re = glob.glob('')
                name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year_LANDSAF, month_LANDSAF,day_LANDSAF, hour_LANDSAF, min_LANDSAF)
                file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst)
                # Open the Rs LANDSAF data
                dest_Rs_inst_3Km = gdal.Open(file_Landsaf_inst)
                Rs_one_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray()
                Rs_one_3Km = np.float_(Rs_one_3Km)/10
                Rs_one_3Km[Rs_one_3Km < 0]=np.nan
                # Running total over all half-hour slots
                if Time == Times[0]:
                    Rs_24_3Km_tot = Rs_one_3Km
                else:
                    Rs_24_3Km_tot += Rs_one_3Km
            # Mean daily shortwave radiation (average over the half-hour slots)
            Rs_24_3Km = Rs_24_3Km_tot / len(Times[:-1])
            # Reproject the Ra_inst data to match the LANDSAF data
            Ra_24_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_mountain_24_fileName, file_Landsaf_inst, method = 3)
            Ra_24_3Km = Ra_24_3Km_dest.GetRasterBand(1).ReadAsArray()
            Ra_24_3Km[Ra_24_3Km==0] = np.nan
            # Do gapfilling
            Ra_24_3Km = gap_filling(Ra_24_3Km,np.nan)
            # Get shape LANDSAF data
            shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ]
            # Calculate Transmissivity 3Km
            Transmissivity_24_3Km = Rs_24_3Km/Ra_24_3Km
            Transmissivity_24_3Km_fileName = os.path.join(temp_folder_PreSEBAL,'Transmissivity_24_3Km.tif')
            SEBAL.save_GeoTiff_proy(Ra_24_3Km_dest, Transmissivity_24_3Km, Transmissivity_24_3Km_fileName, shape_trans, nband=1)
            # Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method)
            Transmissivity_24_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_24_3Km_fileName, lon_fileName, method = 3)
            Transmissivity_24 = Transmissivity_24_dest.GetRasterBand(1).ReadAsArray()
            # Cap at the physical upper bound for atmospheric transmissivity
            Transmissivity_24[Transmissivity_24>0.98] = 0.98
            Transmissivity_24_fileName = os.path.join(TRANS_outfolder,'Transmissivity_24_%s.tif' %Var_name)
            SEBAL.save_GeoTiff_proy(Transmissivity_24_dest, Transmissivity_24, Transmissivity_24_fileName, shape, nband=1)
#################### Calculate NDVI for LANDSAT ##########################################
if Image_Type == 1:
    # Define bands used for each Landsat number
    if Landsat_nr == 5 or Landsat_nr == 7:
        Bands = np.array([1, 2, 3, 4, 5, 7, 6])
    elif Landsat_nr == 8:
        Bands = np.array([2, 3, 4, 5, 6, 7, 10, 11])
    else:
        print('Landsat image not supported, use Landsat 7 or 8')
    # Open MTL landsat and get the correction parameters
    Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image)
    Lmin, Lmax, k1_c, k2_c = SEBAL.info_band_metadata(Landsat_meta_fileName, Bands)
    # Mean solar exo-atmospheric irradiance for each band (W/m2/microm)
    # for the different Landsat images (L5, L7, or L8)
    ESUN_L5 = np.array([1983, 1796, 1536, 1031, 220, 83.44])
    ESUN_L7 = np.array([1997, 1812, 1533, 1039, 230.8, 84.9])
    ESUN_L8 = np.array([1973.28, 1842.68, 1565.17, 963.69, 245, 82.106])
    # Open one band - To get the metadata of the landsat images only once (to get the extend)
    src_FileName = os.path.join(input_folder, '%s_B2.TIF' % Name_Landsat_Image)  # before 10!
    ls,band_data,ulx,uly,lrx,lry,x_size_ls,y_size_ls = SEBAL.Get_Extend_Landsat(src_FileName)
    # Crop the Landsat images to the DEM extent -
    dst_FileName = os.path.join(temp_folder_PreSEBAL,'cropped_LS_b2.tif')  # Before 10 !!
    # Clip the landsat image to match the DEM map
    lsc, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(src_FileName, lon_fileName)
    data_LS = lsc.GetRasterBand(1).ReadAsArray()
    SEBAL.save_GeoTiff_proy(dest, data_LS, dst_FileName, shape, nband=1)
    # Get the extend of the remaining landsat file after clipping based on the DEM file
    lsc,band_data,ulx,uly,lrx,lry,x_size_lsc,y_size_lsc = SEBAL.Get_Extend_Landsat(dst_FileName)
    # Create the corrected signals of Landsat in 1 array
    # NOTE(review): cos_zn_resh / dr / cos_zn_fileName come from the
    # Determine_transmissivity branch above — confirm they exist for this path.
    Reflect = SEBAL.Landsat_Reflect(Bands,input_folder,Name_Landsat_Image,output_folder,shape,Lmax,Lmin,ESUN_L5,ESUN_L7,ESUN_L8,cos_zn_resh,dr,Landsat_nr, cos_zn_fileName)
    # Calculate temporal water mask
    water_mask_temp=SEBAL.Water_Mask(shape,Reflect)
    # Calculate NDVI
    NDVI = SEBAL.Calc_NDVI(Reflect)
    # Calculate albedo
    albedo = SEBAL.Calc_albedo(Reflect)
    # Save NDVI
    NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_LS_%s.tif'%Var_name)
    SEBAL.save_GeoTiff_proy(dest, NDVI, NDVI_FileName, shape, nband=1)
    # Save albedo
    albedo_FileName = os.path.join(Albedo_outfolder,'Albedo_LS_%s.tif'%Var_name)
    SEBAL.save_GeoTiff_proy(dest, albedo, albedo_FileName, shape, nband=1)
################### Extract Meteo data for Landsat days from SEBAL Excel ##################
# Open the Meteo_Input sheet
ws = wb['Meteo_Input']
# ---------------------------- Instantaneous Air Temperature ------------
# Each meteo cell may hold either a scalar or a path to a raster. Try to
# interpret it as a number first; on failure treat it as a file path and
# resample it onto the reference (lon) grid.
# BUGFIX: the bare `except:` clauses swallowed every error (including
# KeyboardInterrupt); narrowed to the exceptions float() actually raises.
try:
    Temp_inst = float(ws['B%d' %number].value)   # Instantaneous Air Temperature (°C)
except (TypeError, ValueError):
    # the data is not a value, so open it as a string (raster path)
    Temp_inst_name = '%s' %str(ws['B%d' %number].value)
    Temp_inst_fileName = os.path.join(temp_folder_PreSEBAL, 'Temp_inst_input.tif')
    Temp_inst = SEBAL.Reshape_Reproject_Input_data(Temp_inst_name, Temp_inst_fileName, lon_fileName)
try:
    RH_inst = float(ws['D%d' %number].value)     # Instantaneous Relative humidity (%)
except (TypeError, ValueError):
    # the data is not a value, so open it as a string (raster path)
    RH_inst_name = '%s' %str(ws['D%d' %number].value)
    RH_inst_fileName = os.path.join(temp_folder_PreSEBAL, 'RH_inst_input.tif')
    RH_inst = SEBAL.Reshape_Reproject_Input_data(RH_inst_name, RH_inst_fileName, lon_fileName)
# Saturated (esat) and actual (eact) vapour pressure [kPa] (FAO-56 style formula)
esat_inst = 0.6108 * np.exp(17.27 * Temp_inst / (Temp_inst + 237.3))
eact_inst = RH_inst * esat_inst / 100
#################### Calculate NDVI for VIIRS-PROBAV ##########################################
if Image_Type == 2:
    if Name_PROBAV_Image == 'None':
        # No PROBA-V scene for this run: borrow the nearest neighbouring run
        # (searched by increasing offset) that does have one.
        offset_all = [-1, 1, -2, 2, -3, 3,-4, 4,-5 ,5 ,-6 , 6, -7, 7, -8, 8]
        found_Name_PROBAV_Image = 0
        for offset in offset_all:
            if found_Name_PROBAV_Image == 1:
                continue
            else:
                try:
                    Name_PROBAV_Image = SEBAL_RUNS[number + offset]['PROBA_V_name']
                    if not Name_PROBAV_Image == 'None':
                        found_Name_PROBAV_Image = 1
                except (KeyError, IndexError):
                    # neighbouring run does not exist; keep searching
                    pass
        # Get the day and time from the PROBA-V
        Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
        g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
        Meta_data = g.GetMetadata()
        Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
        year = int(Date_PROBAV.split("-")[0])
        month = int(Date_PROBAV.split("-")[1])
        day = int(Date_PROBAV.split("-")[2])
        Var_name_2 = '%d%02d%02d' %(year, month, day)
        # Define the output name
        NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' %Var_name_2)
        Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name_2)
        water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name_2)
    else:
        NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' %Var_name)
        Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name)
        water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name)
    # vegetation maps that will be generated
    if not os.path.exists(NDVI_FileName):
        # Define the bands that will be used
        bands=['SM', 'B1', 'B2', 'B3', 'B4']   #'SM', 'BLUE', 'RED', 'NIR', 'SWIR'
        # Set the index number at 0
        index=0
        # create a zero array with the shape of the reprojected DEM file
        data_PROBAV=np.zeros((shape[1], shape[0]))
        spectral_reflectance_PROBAV=np.zeros([shape[1], shape[0], 5])
        # constants
        n188_float=248   # Now it is 248, but we do not exactly know what this really means and if this is for constant for all images.
        # write the data one by one to the spectral_reflectance_PROBAV
        for bandnmr in bands:
            # Translate the PROBA-V names to the HDF5 subdataset indices
            Band_number = {'SM':7,'B1':8,'B2':10,'B3':9,'B4':11}
            # Open the dataset
            Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
            g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
            # define data if it is not there yet
            if not 'Var_name' in locals():
                Meta_data = g.GetMetadata()
                Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
                year = int(Date_PROBAV.split("-")[0])
                # BUGFIX: month and day previously re-used split index [0]
                # (the year), producing nonsense dates in Var_name.
                month = int(Date_PROBAV.split("-")[1])
                day = int(Date_PROBAV.split("-")[2])
                Var_name = '%d%02d%02d' %(year, month, day)
            # Open the .hdf file
            name_out = os.path.join(input_folder, '%s_test.tif' % (Name_PROBAV_Image))
            name_in = g.GetSubDatasets()[Band_number[bandnmr]][0]
            # Get environmental variable
            SEBAL_env_paths = os.environ["SEBAL"].split(';')
            GDAL_env_path = SEBAL_env_paths[0]
            GDAL_TRANSLATE = os.path.join(GDAL_env_path, 'gdal_translate.exe')
            # run gdal translate command
            FullCmd = '%s -of GTiff %s %s' %(GDAL_TRANSLATE, name_in, name_out)
            SEBAL.Run_command_window(FullCmd)
            # Open data
            dest_PV = gdal.Open(name_out)
            Data = dest_PV.GetRasterBand(1).ReadAsArray()
            dest_PV = None
            # Remove temporary file
            os.remove(name_out)
            # Define the x and y spacing
            Meta_data = g.GetMetadata()
            Lat_Bottom = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LATITUDE'])
            Lat_Top = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LATITUDE'])
            Lon_Left = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LONGITUDE'])
            Lon_Right = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LONGITUDE'])
            Pixel_size = float((Meta_data['LEVEL3_GEOMETRY_VNIR_VAA_MAPPING']).split(' ')[-3])
            # Define the georeference of the PROBA-V data
            geo_PROBAV=[Lon_Left-0.5*Pixel_size, Pixel_size, 0, Lat_Top+0.5*Pixel_size, 0, -Pixel_size]   #0.000992063492063
            # Define the name of the output file
            PROBAV_data_name=os.path.join(input_folder, '%s_%s.tif' % (Name_PROBAV_Image,bandnmr))
            dst_fileName=os.path.join(input_folder, PROBAV_data_name)
            # create gtiff output with the PROBA-V band
            fmt = 'GTiff'
            driver = gdal.GetDriverByName(fmt)
            dst_dataset = driver.Create(dst_fileName, int(Data.shape[1]), int(Data.shape[0]), 1,gdal.GDT_Float32)
            dst_dataset.SetGeoTransform(geo_PROBAV)
            # set the reference info
            srs = osr.SpatialReference()
            srs.SetWellKnownGeogCS("WGS84")
            dst_dataset.SetProjection(srs.ExportToWkt())
            # write the array in the geotiff band
            dst_dataset.GetRasterBand(1).WriteArray(Data)
            dst_dataset = None
            # Open the PROBA-V band in SEBAL
            g=gdal.Open(PROBAV_data_name.replace("\\","/"))
            # If the data cannot be opened, change the extension
            if g is None:
                PROBAV_data_name=os.path.join(input_folder, '%s_%s.tiff' % (Name_PROBAV_Image,bandnmr))
            # Reproject the PROBA-V band to match DEM's resolution
            PROBAV, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(
                PROBAV_data_name, lon_fileName)
            # Open the reprojected PROBA-V band data
            data_PROBAV_DN = PROBAV.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
            # Define the filename to store the cropped Landsat image
            dst_FileName = os.path.join(output_folder, 'Output_PROBAV','proy_PROBAV_%s.tif' % bandnmr)
            # close the PROBA-V
            g=None
            # If the band data is not SM change the DN values into PROBA-V values and write into the spectral_reflectance_PROBAV
            # BUGFIX: the original used `bandnmr is not 'SM'` — an identity test
            # against a string literal; use a value comparison instead.
            # NOTE(review): under Python 2 without true division, DN/2000 floors
            # for integer arrays — confirm the DN dtype.
            if bandnmr != 'SM':
                data_PROBAV[:, :]=data_PROBAV_DN/2000
                spectral_reflectance_PROBAV[:, :, index]=data_PROBAV[:, :]
            # If the band data is the SM band than write the data into the spectral_reflectance_PROBAV and create cloud mask
            else:
                data_PROBAV[:, :]=data_PROBAV_DN
                Cloud_Mask_PROBAV=np.zeros((shape[1], shape[0]))
                Cloud_Mask_PROBAV[data_PROBAV[:,:]!=n188_float]=1
                spectral_reflectance_PROBAV[:, :, index]=Cloud_Mask_PROBAV
            # Change the spectral reflectance to meet certain limits
            spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]<=0,np.nan,spectral_reflectance_PROBAV[:, :, index])
            spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]>=150,np.nan,spectral_reflectance_PROBAV[:, :, index])
            # Go to the next index
            index=index+1
        # Bands in PROBAV spectral reflectance
        # 0 = MS
        # 1 = BLUE
        # 2 = NIR
        # 3 = RED
        # 4 = SWIR
        # Calculate surface albedo based on PROBA-V
        Surface_Albedo_PROBAV = 0.219 * spectral_reflectance_PROBAV[:, :, 1] + 0.361 * spectral_reflectance_PROBAV[:, :, 2] + 0.379 * spectral_reflectance_PROBAV[:, :, 3] + 0.041 * spectral_reflectance_PROBAV[:, :, 4]
        # Calculate the NDVI based on PROBA-V (guard against zero denominator)
        n218_memory = spectral_reflectance_PROBAV[:, :, 2] + spectral_reflectance_PROBAV[:, :, 3]
        NDVI = np.zeros((shape[1], shape[0]))
        NDVI[n218_memory != 0] = ( spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] - spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] )/ ( spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] + spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] )
        # Create Water mask based on PROBA-V
        water_mask_temp = np.zeros((shape[1], shape[0]))
        water_mask_temp[np.logical_and(np.logical_and(NDVI<0.1,data_DEM>0),Surface_Albedo_PROBAV<0.2)]=1
        # Save Albedo for PROBA-V
        SEBAL.save_GeoTiff_proy(dest, Surface_Albedo_PROBAV, Albedo_FileName, shape, nband=1)
        # Save NDVI for PROBA-V
        SEBAL.save_GeoTiff_proy(dest, NDVI, NDVI_FileName, shape, nband=1)
        # Save Water Mask for PROBA-V
        SEBAL.save_GeoTiff_proy(dest, water_mask_temp, water_mask_temp_FileName, shape, nband=1)
    else:
        # Maps already exist: load the stored NDVI and water mask instead
        dest_NDVI = gdal.Open(NDVI_FileName)
        dest_water_mask_temp = gdal.Open(water_mask_temp_FileName)
        NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray()
        water_mask_temp = dest_water_mask_temp.GetRasterBand(1).ReadAsArray()
############################ Calculate LAI ##########################################
# Calculate the LAI (and related vegetation parameters) from NDVI + water mask
FPAR,tir_emis,Nitrogen,vegt_cover,LAI,b10_emissivity = SEBAL.Calc_vegt_para(NDVI,water_mask_temp,shape)
# Create LAI name and save the raster (only stored for Landsat runs)
if Image_Type == 1:
    LAI_FileName = os.path.join(LAI_outfolder,'LAI_LS_%s.tif' %Var_name)
    SEBAL.save_GeoTiff_proy(dest, LAI, LAI_FileName, shape, nband=1)
#################### Calculate thermal for Landsat ##########################################
if Image_Type == 1:
    # Calculate thermal
    therm_data = SEBAL.Landsat_therm_data(Bands,input_folder,Name_Landsat_Image,output_folder,ulx_dem,lry_dem,lrx_dem,uly_dem,shape)
    # Calculate surface temperature
    Surface_temp=SEBAL.Calc_surface_water_temp(Temp_inst,Landsat_nr,Lmax,Lmin,therm_data,b10_emissivity,k1_c,k2_c,eact_inst,shape,water_mask_temp,Bands_thermal,Rp,tau_sky,surf_temp_offset,Image_Type)
    # Save surface temperature
    therm_data_FileName = os.path.join(Surface_Temperature_outfolder,'Surface_Temperature_LS_%s.tif' %Var_name)
    SEBAL.save_GeoTiff_proy(dest, Surface_temp, therm_data_FileName, shape, nband=1)
################################## Calculate VIIRS surface temperature ########################
if Image_Type == 2:
    # If there is VIIRS data
    if not Name_VIIRS_Image_TB == 'None':
        # Define the VIIRS thermal data name
        VIIRS_data_name=os.path.join(input_folder, '%s' % (Name_VIIRS_Image_TB))
        # Reproject VIIRS thermal data
        VIIRS, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(VIIRS_data_name, lon_fileName)
        # Open VIIRS thermal data
        data_VIIRS = VIIRS.GetRasterBand(1).ReadAsArray()
        # Set the conditions for the brightness temperature (100m)
        brightness_temp=np.where(data_VIIRS>=250, data_VIIRS, np.nan)
        # Constants
        k1=606.399172
        k2=1258.78
        # Spectral radiance at 11.45 micron derived from the brightness
        # temperature via Planck's law (constants h, c, k_B inlined)
        L_lambda_b10_100=((2*6.63e-34*(3.0e8)**2)/((11.45e-6)**5*(np.exp((6.63e-34*3e8)/(1.38e-23*(11.45e-6)*brightness_temp))-1)))*1e-6
        # Get Temperature for 100 and 375m resolution
        Temp_TOA_100 = SEBAL.Get_Thermal(L_lambda_b10_100,Rp,Temp_inst,tau_sky,tir_emis,k1,k2)
        # Conditions for surface temperature (100m): clip to plausible Kelvin range
        n120_surface_temp=Temp_TOA_100.clip(250, 450)
        # Save the surface temperature of the VIIRS in 100m resolution
        temp_surface_100_fileName_beforeTS = os.path.join(Surface_Temperature_outfolder,'Surface_Temperature_VIIRS_%s.tif' %Var_name)
        SEBAL.save_GeoTiff_proy(dest, n120_surface_temp, temp_surface_100_fileName_beforeTS, shape, nband=1)
###################################################################################################################
################################################### HANTS part 4 ##################################################
###################################################################################################################
# Select files for PROBA-V that needs to be used (sometimes a composite product is used)
PROBA_V_Dict = {}
# .items() replaces the Python-2-only .iteritems() (works on both 2 and 3)
for k, v in SEBAL_RUNS.items():
    if str(v['PROBA_V_name']) != 'None':
        PROBA_V_Dict.setdefault(v['PROBA_V_name'], []).append(k)
Amount_Unique_PROBA_V_images = len(PROBA_V_Dict.keys())
Back_names = []
# Define HANTS PROBA-V variables
VARS = ["NDVI", "Albedo"]
for VAR in VARS:
    output_folder_preprocessing_VAR = os.path.join(output_folder_PreSEBAL_SEBAL, VAR)
    os.chdir(output_folder_preprocessing_VAR)
    for PROBA_V_image in PROBA_V_Dict.keys():
        # Acquisition date taken from the PROBA-V HDF5 metadata
        Band_PROBAVhdf_fileName = os.path.join(input_folder_SEBAL, '%s.HDF5' % (PROBA_V_image))
        g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
        Meta_data = g.GetMetadata()
        Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
        year = int(Date_PROBAV.split("-")[0])
        month = int(Date_PROBAV.split("-")[1])
        day = int(Date_PROBAV.split("-")[2])
        Back_name = '%s_PROBAV_%d%02d%02d.tif' %(VAR, year, month, day)
        # Create HANTS input folder for this variable and stage the raster there
        input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR)
        if not os.path.exists(input_folder_HANTS_VAR):
            os.mkdir(input_folder_HANTS_VAR)
        shutil.copy(os.path.join(output_folder_preprocessing_VAR,Back_name),os.path.join(input_folder_HANTS_VAR,Back_name))
# VIIRS parameter copy
VIIRS_Dict = {}
for k, v in SEBAL_RUNS.items():
    if str(v['VIIRS_name']) != 'None':
        VIIRS_Dict.setdefault(v['VIIRS_name'], []).append(k)
THERM = 'Surface_Temperature'
output_folder_preprocessing_THERM = os.path.join(output_folder_PreSEBAL_SEBAL, THERM)
for VIIRS_image in VIIRS_Dict.keys():
    # Parse the acquisition date out of the VIIRS file name; two naming
    # conventions are supported (split on 'd', or the 4th '_'-separated field).
    try:
        Date_VIIRS = (VIIRS_image.split("d")[1])
        year = int(Date_VIIRS.split("-")[0][0:4])
        month = int(Date_VIIRS.split("-")[0][4:6])
        day = int(Date_VIIRS.split("-")[0][6:8])
    except (IndexError, ValueError):
        Date_VIIRS = (VIIRS_image.split("_")[3])
        year = int(Date_VIIRS.split("-")[0][0:4])
        month = int(Date_VIIRS.split("-")[0][4:6])
        day = int(Date_VIIRS.split("-")[0][6:8])
    Back_name_TB = '%s_VIIRS_%d%02d%02d.tif' %(THERM, year, month, day)
    # Create HANTS input folder for the thermal data and stage the raster there
    input_folder_HANTS_THERM = os.path.join(temp_folder_PreSEBAL, THERM)
    if not os.path.exists(input_folder_HANTS_THERM):
        os.mkdir(input_folder_HANTS_THERM)
    shutil.copy(os.path.join(output_folder_preprocessing_THERM,Back_name_TB),os.path.join(input_folder_HANTS_THERM,Back_name_TB))
############################################ Solve shift in PROBA=V ##############################################
# Detect a one-pixel vertical shift between successive Albedo rasters and, when
# found, shift ALL variables of that date down by one row.
VAR = 'Albedo'
os.chdir(os.path.join(temp_folder_PreSEBAL, VAR))
# BUGFIX (naming): the file list was called `re`, shadowing the stdlib `re`
# module for the remainder of the script.
albedo_files = glob.glob('%s*.tif' %(VAR))
i = 0
while i < int(len(albedo_files)-1):
    # Always compare against the first file (translated note from the original
    # Dutch comment: maybe later only use index 0 when it holds fewer than 40%
    # of the total pixels).
    filename1 = albedo_files[0]
    filename2 = albedo_files[i + 1]
    dest1 = gdal.Open(filename1)
    dest2 = gdal.Open(filename2)
    Array1 = dest1.GetRasterBand(1).ReadAsArray().flatten()
    Array2 = dest2.GetRasterBand(1).ReadAsArray().flatten()
    # The same pair, with the second image shifted one row relative to the first
    Array3 = dest1.GetRasterBand(1).ReadAsArray()[1:,:].flatten()
    Array4 = dest2.GetRasterBand(1).ReadAsArray()[:-1,:].flatten()
    # Keep only pixels valid in both images before correlating
    Array1_flat = Array1[np.logical_and(~np.isnan(Array1),~np.isnan(Array2))]
    Array2_flat = Array2[np.logical_and(~np.isnan(Array1),~np.isnan(Array2))]
    Array3_flat = Array3[np.logical_and(~np.isnan(Array3),~np.isnan(Array4))]
    Array4_flat = Array4[np.logical_and(~np.isnan(Array3),~np.isnan(Array4))]
    Corr = np.corrcoef(Array1_flat,Array2_flat)[0,1]
    Corr2 = np.corrcoef(Array3_flat,Array4_flat)[0,1]
    # If the shifted correlation is better, the image is shifted: fix all
    # variables sharing this date suffix
    if Corr2 > Corr:
        x,y = dest1.GetRasterBand(1).ReadAsArray().shape
        for VAR_check in VARS:
            os.chdir(os.path.join(temp_folder_PreSEBAL, VAR_check))
            endname = filename2.split('_')[-1]
            matching_files = glob.glob('%s*_%s' %(VAR_check,endname))
            filename3 = matching_files[0]
            dest3 = gdal.Open(filename3)
            New_Array = np.ones(dest1.GetRasterBand(1).ReadAsArray().shape) * np.nan
            New_Array[1:,:] = dest3.GetRasterBand(1).ReadAsArray()[:-1,:]
            filename_out = os.path.join(temp_folder_PreSEBAL, VAR_check, filename3)
            SEBAL.save_GeoTiff_proy(dest3, New_Array, filename_out, [int(y),int(x)], nband=1)
    i += 1
################################################### General HANTS ###############################################
# Open one image
# NOTE(review): relies on Back_name / Back_name_TB and the HANTS input folders
# assigned in the staging loops above, i.e. on the LAST processed image.
PROBA_V_IMAGE = os.path.join(input_folder_HANTS_VAR,Back_name)
destPROBAV = gdal.Open(PROBA_V_IMAGE)
VIIRS_IMAGE = os.path.join(input_folder_HANTS_THERM,Back_name_TB)
destVIIRS = gdal.Open(VIIRS_IMAGE)
# Get Geotransform
Geo_PROBAV = destPROBAV.GetGeoTransform()
x_size_PROBAV = destPROBAV.RasterXSize
y_size_PROBAV = destPROBAV.RasterYSize
Geo_VIIRS = destVIIRS.GetGeoTransform()
x_size_VIIRS = destVIIRS.RasterXSize
y_size_VIIRS = destVIIRS.RasterYSize
# Get projection (EPSG code)
proj = Get_epsg(destPROBAV)
projVIIRS = Get_epsg(destVIIRS)
# Data parameters: [min, max] bounding boxes and cell sizes from the geotransforms
latlim = [Geo_PROBAV[3] + y_size_PROBAV * Geo_PROBAV[5],Geo_PROBAV[3]]
lonlim = [Geo_PROBAV[0], Geo_PROBAV[0] + x_size_PROBAV * Geo_PROBAV[1]]
cellsize = Geo_PROBAV[1]
latlimVIIRS = [Geo_VIIRS [3] + y_size_VIIRS * Geo_VIIRS [5],Geo_VIIRS [3]]
lonlimVIIRS = [Geo_VIIRS [0], Geo_VIIRS [0] + x_size_VIIRS * Geo_VIIRS [1]]
cellsizeVIIRS = Geo_VIIRS [1]
# Get the HANTS parameters
ws_para = wb_veg['HANTS_Input']
# amount of images (one per day in the run period)
Dates = pd.date_range(start_date, end_date, freq = 'D')
###################################################### HANTS Thermal ###############################################
# Define parameters for the surface temperature time series
THERM = 'Surface_Temperature'
# Define paths for the thermal HANTS input/output
input_folder_HANTS_THERM = os.path.join(temp_folder_PreSEBAL, THERM)
name_format = '%s_VIIRS_{0}.tif' %THERM
nc_path_TB = os.path.join(input_folder_HANTS_THERM,'%s_NC.nc' %THERM)
# Create Output folder
rasters_path_out = os.path.join(temp_folder_PreSEBAL, THERM + "_HANTS")
if not os.path.exists(rasters_path_out):
    os.mkdir(rasters_path_out)
# HANTS parameters for the thermal data (column D of the HANTS_Input sheet)
nb = int(len(Dates))   # number of images in the time series
Dates = pd.date_range(start_date, end_date, freq = 'D')
nf = int(ws_para['D2'].value)   # number of frequencies to be considered above the zero frequency
low = float(ws_para['D3'].value)   # valid range minimum
high = float(ws_para['D4'].value)   # valid range maximum
HiLo = str(ws_para['D5'].value)   # 2-character string indicating rejection of high or low outliers
fet = float(ws_para['D6'].value)   # fit error tolerance (points deviating more than fet from curve fit are rejected)
delta = float(ws_para['D7'].value)   # small positive number e.g. 0.1 to suppress high amplitudes
dod = float(ws_para['D8'].value)   # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure
from SEBAL.hants import wa_gdal
# Run the HANTS harmonic smoothing over the thermal time series
wa_gdal.run_HANTS(input_folder_HANTS_THERM, name_format,
                  start_date, end_date, latlimVIIRS, lonlimVIIRS, cellsizeVIIRS, nc_path_TB,
                  nb, nf, HiLo, low, high, fet, dod, delta,
                  projVIIRS, -9999.0, rasters_path_out, export_hants_only=True)
###################################################### HANTS NDVI ###############################################
# Define parameters for the NDVI
VAR = 'NDVI'
# Define paths for NDVI
input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR)
name_format = '%s_PROBAV_{0}.tif' %VAR
nc_path_ndvi = os.path.join(input_folder_HANTS_VAR,'%s_NC.nc' %VAR)
# Create Output folder
rasters_path_out = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
if not os.path.exists(rasters_path_out):
    os.mkdir(rasters_path_out)
# HANTS parameters for NDVI (column C of the HANTS_Input sheet)
# Dates = pd.date_range(start_date, end_date, freq = '5D')
nb = int(len(Dates))   # nr of images
nf = int(ws_para['C2'].value)   # number of frequencies to be considered above the zero frequency
low = float(ws_para['C3'].value)   # valid range minimum
high = float(ws_para['C4'].value)   # valid range maximum
HiLo = str(ws_para['C5'].value)   # 2-character string indicating rejection of high or low outliers
fet = float(ws_para['C6'].value)   # fit error tolerance (points deviating more than fet from curve fit are rejected)
delta = float(ws_para['C7'].value)   # small positive number e.g. 0.1 to suppress high amplitudes
dod = float(ws_para['C8'].value)   # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure
from SEBAL.hants import wa_gdal
# Run the HANTS harmonic smoothing over the NDVI time series
wa_gdal.run_HANTS(input_folder_HANTS_VAR, name_format,
                  start_date, end_date, latlim, lonlim, cellsize, nc_path_ndvi,
                  nb, nf, HiLo, low, high, fet, dod, delta,
                  proj, -9999.0, rasters_path_out, export_hants_only=True)
###################################################### HANTS Albedo ##############################################
# Smooth the PROBA-V albedo time series with HANTS.
# Define parameters for the albedo
VAR = 'Albedo'
# Define paths for albedo
input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR)
name_format = '%s_PROBAV_{0}.tif' %VAR
nc_path_albedo = os.path.join(input_folder_HANTS_VAR,'%s_NC.nc' %VAR)
# Create Output folder
rasters_path_out = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
if not os.path.exists(rasters_path_out):
    os.mkdir(rasters_path_out)
# HANTS parameters for albedo, read from column B of the parameter sheet.
# Daily date range; note this rebinds `Dates`, which the sections below reuse.
Dates = pd.date_range(start_date, end_date, freq = 'D')
nb = int(len(Dates))                            # nr of images
nf = int(ws_para['B2'].value)                   # number of frequencies to be considered above the zero frequency
low = float(ws_para['B3'].value)                # valid range minimum
high = float(ws_para['B4'].value)               # valid range maximum
HiLo = str(ws_para['B5'].value)                 # 2-character string indicating rejection of high or low outliers
fet = float(ws_para['B6'].value)                # fit error tolerance (points deviating more than fet from the curve fit are rejected)
delta = float(ws_para['B7'].value)              # small positive number (e.g. 0.1) to suppress high amplitudes
dod = float(ws_para['B8'].value)                # degree of overdeterminedness; a safety measure
from SEBAL.hants import wa_gdal
# Run HANTS for albedo (PROBA-V grid)
wa_gdal.run_HANTS(input_folder_HANTS_VAR, name_format,
                  start_date, end_date, latlim, lonlim, cellsize, nc_path_albedo,
                  nb, nf, HiLo, low, high, fet, dod, delta,
                  proj, -9999.0, rasters_path_out, export_hants_only=True)
###################################################################################################################
################################################### post HANTS part 5 #############################################
###################################################################################################################
############################################# Create Outlier maps for PROBA-V #######################################
# Export, per timestep, a GeoTIFF flagging the pixels HANTS rejected as outliers.
# Create output folder if not exists
output_folder_HANTS_outliers_PROBAV = os.path.join(temp_folder_PreSEBAL, 'Outliers_PROBAV')
if not os.path.exists(output_folder_HANTS_outliers_PROBAV):
    os.mkdir(output_folder_HANTS_outliers_PROBAV)
# Open the HANTS NetCDF written for the albedo run; wrap keys() in list() so
# the positional indexing below also works on Python 3 (dict views are not
# subscriptable there).
fh = Dataset(nc_path_albedo, mode='r')
variable_names = list(fh.variables.keys())
Var = variable_names[-1]                        # last variable name (kept for parity with the original; unused below)
lat = fh.variables[variable_names[1]][:]
lon = fh.variables[variable_names[2]][:]
time = fh.variables[variable_names[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
# Fall back to an example raster for the reference dataset/shape whenever
# either is still undefined.  BUGFIX: the original tested
# `'shape' in locals() or 'dest' in locals()`, which skipped this fallback
# (and crashed later in save_GeoTiff_proy) when only one of the two existed.
if not ('shape' in locals() and 'dest' in locals()):
    # NOTE(review): relies on Back_name / output_folder_preprocessing_VAR set
    # earlier in the script — confirm they are defined on all code paths.
    Example_file = os.path.join(output_folder_preprocessing_VAR, Back_name)
    dest = gdal.Open(Example_file)
    ncol = dest.RasterXSize                     # reprojected DEM column size
    nrow = dest.RasterYSize                     # reprojected DEM row size
    shape = [ncol, nrow]
# Write one outlier raster per timestep
for i in range(0, int(np.shape(time)[0])):
    time_now = time[i]
    data = fh.variables['outliers'][:, :, i]
    geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
    name_out = os.path.join(output_folder_HANTS_outliers_PROBAV, 'Outliers_PROBAV_%s.tif' %time_now)
    SEBAL.save_GeoTiff_proy(dest, data, name_out, shape, nband=1)
############################################# Create ALBEDO and NDVI #########################################
# For each date, build the final albedo/NDVI rasters: keep good original
# pixels and fill outlier pixels with bias-corrected HANTS values.
for date in Dates:
    # Define date
    year = date.year
    month = date.month
    day = date.day
    # input filenames needed for creating end thermal file
    filename_outliers = os.path.join(output_folder_HANTS_outliers_PROBAV,"Outliers_PROBAV_%d%02d%02d.tif" %(year,month,day))
    VAR = 'Albedo'
    input_folder_PreSEBAL_ALBEDO = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
    filename_Albedo_original = os.path.join(Albedo_outfolder, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
    filename_Albedo_HANTS = os.path.join(input_folder_PreSEBAL_ALBEDO, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
    VAR = 'NDVI'
    input_folder_PreSEBAL_NDVI = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
    filename_NDVI_original = os.path.join(NDVI_outfolder, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
    filename_NDVI_HANTS = os.path.join(input_folder_PreSEBAL_NDVI, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
    # Open the input filenames (gdal.Open returns None for missing files)
    dest_outliers = gdal.Open(filename_outliers)
    dest_PROBAV_ALBEDO = gdal.Open(filename_Albedo_original)
    dest_PROBAV_NDVI = gdal.Open(filename_NDVI_original)
    dest_HANTS_ALBEDO = gdal.Open(filename_Albedo_HANTS)
    dest_HANTS_NDVI = gdal.Open(filename_NDVI_HANTS)
    # If the original exists, it is the basis for the end map
    if not dest_PROBAV_ALBEDO == None:
        # Open arrays of the input files
        Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:]
        Array_ALBEDO_original = dest_PROBAV_ALBEDO.GetRasterBand(1).ReadAsArray()
        Array_ALBEDO_HANTS = dest_HANTS_ALBEDO.GetRasterBand(1).ReadAsArray()[:,:]
        Array_NDVI_original = dest_PROBAV_NDVI.GetRasterBand(1).ReadAsArray()
        Array_NDVI_HANTS = dest_HANTS_NDVI.GetRasterBand(1).ReadAsArray()[:,:]
        # Create outlier mask: after these swaps, 1 = good pixel, 0 = outlier
        Array_outliers[Array_outliers==-9999.] = 0
        Array_outliers_mask = np.zeros(np.shape(Array_outliers))
        Array_outliers_mask[Array_outliers==1.]=0
        Array_outliers_mask[Array_outliers==0.]=1
        Array_outliers_mask[Array_outliers_mask==0]=2
        Array_outliers_mask[Array_outliers_mask==1]=0
        Array_outliers_mask[Array_outliers_mask==2]=1
        # Create a buffer zone around the bad pixels (invert, buffer, invert back)
        Array_outliers_mask = Create_Buffer(Array_outliers_mask)
        Array_outliers_mask[Array_outliers_mask==1] = 2
        Array_outliers_mask[Array_outliers_mask==0] = 1
        Array_outliers_mask[Array_outliers_mask==2] = 0
        # If there are more than 300 good pixels, derive a bias-correction factor
        if np.nansum(Array_outliers_mask) > 300:
            # Use the mask to find the good original pixels and HANTS pixels
            Array_ALBEDO_original_mask_nan = Array_ALBEDO_original * Array_outliers_mask
            Array_ALBEDO_HANTS_mask_nan = Array_ALBEDO_HANTS * Array_outliers_mask
            Array_NDVI_original_mask_nan = Array_NDVI_original * Array_outliers_mask
            Array_NDVI_HANTS_mask_nan = Array_NDVI_HANTS * Array_outliers_mask
            # Create a 1D array of those pixels
            Array_ALBEDO_original_mask_nan_flatten = Array_ALBEDO_original_mask_nan.flatten()
            Array_ALBEDO_HANTS_mask_nan_flatten = Array_ALBEDO_HANTS_mask_nan.flatten()
            Array_NDVI_original_mask_nan_flatten = Array_NDVI_original_mask_nan.flatten()
            Array_NDVI_HANTS_mask_nan_flatten = Array_NDVI_HANTS_mask_nan.flatten()
            # Remove pixels outside the plausible [-0.2, 0.6] range
            Array_ALBEDO_HANTS_mask_nan_flatten[Array_ALBEDO_HANTS_mask_nan_flatten<-0.2] = np.nan
            Array_ALBEDO_HANTS_mask_nan_flatten[Array_ALBEDO_HANTS_mask_nan_flatten>0.6] = np.nan
            Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten<-0.2] = np.nan
            Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten>0.6] = np.nan
            Array_NDVI_HANTS_mask_nan_flatten[Array_NDVI_HANTS_mask_nan_flatten<-0.2] = np.nan
            Array_NDVI_HANTS_mask_nan_flatten[Array_NDVI_HANTS_mask_nan_flatten>0.6] = np.nan
            Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten<-0.2] = np.nan
            Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten>0.6] = np.nan
            # Remove the nan values.
            # NOTE(review): logical_or(~isnan, ~isnan) keeps pairs where at
            # least one value is valid; the stated intent ("if there is a nan
            # in one of the arrays remove also the same value in the other")
            # would need logical_and — confirm which behavior is wanted.
            Array_ALBEDO_original_mask_nan_flatten2 = Array_ALBEDO_original_mask_nan_flatten[np.logical_or(~np.isnan(Array_ALBEDO_original_mask_nan_flatten),~np.isnan(Array_ALBEDO_HANTS_mask_nan_flatten))]
            Array_ALBEDO_HANTS_mask_nan_flatten2 = Array_ALBEDO_HANTS_mask_nan_flatten[np.logical_or(~np.isnan(Array_ALBEDO_original_mask_nan_flatten),~np.isnan(Array_ALBEDO_HANTS_mask_nan_flatten))]
            Array_NDVI_original_mask_nan_flatten2 = Array_NDVI_original_mask_nan_flatten[np.logical_or(~np.isnan(Array_NDVI_original_mask_nan_flatten),~np.isnan(Array_NDVI_HANTS_mask_nan_flatten))]
            Array_NDVI_HANTS_mask_nan_flatten2 = Array_NDVI_HANTS_mask_nan_flatten[np.logical_or(~np.isnan(Array_NDVI_HANTS_mask_nan_flatten),~np.isnan(Array_NDVI_original_mask_nan_flatten))]
            Array_ALBEDO_original_mask_nan_flatten = Array_ALBEDO_original_mask_nan_flatten2
            Array_ALBEDO_HANTS_mask_nan_flatten = Array_ALBEDO_HANTS_mask_nan_flatten2
            Array_NDVI_original_mask_nan_flatten = Array_NDVI_original_mask_nan_flatten2
            Array_NDVI_HANTS_mask_nan_flatten = Array_NDVI_HANTS_mask_nan_flatten2
            # Remove all zero values (masked-out pixels)
            Array_ALBEDO_original_mask_nan_flatten_without_zero =Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten != 0.0]
            Array_NDVI_original_mask_nan_flatten_without_zero =Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten != 0.0]
            # Calculate the 40th and 90th percentiles of the original arrays' good pixels
            Array_ALBEDO_original_mask_value_cold = np.nanpercentile(Array_ALBEDO_original_mask_nan_flatten_without_zero,40)
            Array_ALBEDO_original_mask_value_hot = np.nanpercentile(Array_ALBEDO_original_mask_nan_flatten_without_zero,90)
            Array_NDVI_original_mask_value_cold = np.nanpercentile(Array_NDVI_original_mask_nan_flatten_without_zero,40)
            Array_NDVI_original_mask_value_hot = np.nanpercentile(Array_NDVI_original_mask_nan_flatten_without_zero,90)
            # Delete the colder and hotter pixel values in both 1D arrays (to exclude large areas of seas)
            Array_ALBEDO_HANTS_mask_nan_flatten_exc_coldest = Array_ALBEDO_HANTS_mask_nan_flatten[np.logical_and(Array_ALBEDO_original_mask_nan_flatten > Array_ALBEDO_original_mask_value_cold,Array_ALBEDO_original_mask_nan_flatten < Array_ALBEDO_original_mask_value_hot)]
            Array_ALBEDO_original_mask_nan_flatten_exc_coldest = Array_ALBEDO_original_mask_nan_flatten[np.logical_and(Array_ALBEDO_original_mask_nan_flatten > Array_ALBEDO_original_mask_value_cold,Array_ALBEDO_original_mask_nan_flatten < Array_ALBEDO_original_mask_value_hot)]
            Array_NDVI_HANTS_mask_nan_flatten_exc_coldest = Array_NDVI_HANTS_mask_nan_flatten[np.logical_and(Array_NDVI_original_mask_nan_flatten > Array_NDVI_original_mask_value_cold,Array_NDVI_original_mask_nan_flatten < Array_NDVI_original_mask_value_hot)]
            Array_NDVI_original_mask_nan_flatten_exc_coldest = Array_NDVI_original_mask_nan_flatten[np.logical_and(Array_NDVI_original_mask_nan_flatten > Array_NDVI_original_mask_value_cold,Array_NDVI_original_mask_nan_flatten < Array_NDVI_original_mask_value_hot)]
            # Calculate the mean of those arrays
            Ave_ALBEDO_HANTS = np.nanmean(Array_ALBEDO_HANTS_mask_nan_flatten_exc_coldest)
            Ave_ALBEDO_original = np.nanmean(Array_ALBEDO_original_mask_nan_flatten_exc_coldest)
            Ave_NDVI_HANTS = np.nanmean(Array_NDVI_HANTS_mask_nan_flatten_exc_coldest)
            Ave_NDVI_original = np.nanmean(Array_NDVI_original_mask_nan_flatten_exc_coldest)
            # Calculate the correction factor for the simulated image
            Factor_Albedo = Ave_ALBEDO_original/Ave_ALBEDO_HANTS
            Factor_NDVI = Ave_NDVI_original/Ave_NDVI_HANTS
            # Apply this factor over the simulated HANTS image
            Array_ALBEDO_HANTS_Corrected = Array_ALBEDO_HANTS * Factor_Albedo
            Array_NDVI_HANTS_Corrected = Array_NDVI_HANTS * Factor_NDVI
            # Create the end array: corrected HANTS values at bad pixels, original values at good pixels
            End_array_Albedo = np.ones(np.shape(Array_outliers_mask)) * np.nan
            End_array_Albedo[Array_outliers_mask==0] =Array_ALBEDO_HANTS_Corrected[Array_outliers_mask==0]
            End_array_Albedo[Array_outliers_mask==1] =Array_ALBEDO_original[Array_outliers_mask==1]
            End_array_NDVI = np.ones(np.shape(Array_outliers_mask)) * np.nan
            End_array_NDVI[Array_outliers_mask==0] =Array_NDVI_HANTS_Corrected[Array_outliers_mask==0]
            End_array_NDVI[Array_outliers_mask==1] =Array_NDVI_original[Array_outliers_mask==1]
        # If the original image is too bad, replace the whole image by the simulated HANTS image
        else:
            End_array_Albedo = Array_ALBEDO_HANTS
            End_array_NDVI = Array_NDVI_HANTS
        # Get the geolocation information of the image
        geo = dest_PROBAV_ALBEDO.GetGeoTransform()
        proj = dest_outliers.GetProjection()
    # If there is no original image, use the simulated HANTS image
    else:
        Array_ALBEDO_HANTS = dest_HANTS_ALBEDO.GetRasterBand(1).ReadAsArray()
        End_array_Albedo = Array_ALBEDO_HANTS
        Array_NDVI_HANTS = dest_HANTS_NDVI.GetRasterBand(1).ReadAsArray()
        End_array_NDVI = Array_NDVI_HANTS
        # Find the first date with an existing image, to get geolocation info.
        # NOTE(review): if no file in the HANTS folder opens, Dates[i] will
        # eventually raise IndexError which the bare except swallows and this
        # loop never terminates — consider bounding i by len(Dates).
        dest_test = None
        i = 0
        while dest_test == None:
            date2 = Dates[i]
            year2 = date2.year
            month2= date2.month
            day2 = date2.day
            try:
                filename_ALBEDO_original2 = os.path.join(input_folder_PreSEBAL_ALBEDO, "Albedo_PROBAV_%d%02d%02d.tif" %(year2,month2,day2))
                dest_test = gdal.Open(filename_ALBEDO_original2)
                geo = dest_test.GetGeoTransform()
                proj = dest_test.GetProjection()
            except:
                i+=1
    # Save the end arrays (uses the reference dataset `dest` for georeferencing)
    output_name_end_ALBEDO = os.path.join(ALBEDO_outfolder_end, "Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day))
    SEBAL.save_GeoTiff_proy(dest, End_array_Albedo, output_name_end_ALBEDO, shape, nband=1)
    output_name_end_NDVI = os.path.join(NDVI_outfolder_end, "NDVI_PROBAV_%d%02d%02d.tif"%(year,month,day))
    SEBAL.save_GeoTiff_proy(dest, End_array_NDVI, output_name_end_NDVI, shape, nband=1)
############################################# Create Outlier maps for VIIRS #########################################
# Export, per timestep, a GeoTIFF flagging the VIIRS pixels HANTS rejected as outliers.
# Create output folder if not exists
output_folder_HANTS_outliers_VIIRS = os.path.join(temp_folder_PreSEBAL, 'Outliers_VIIRS')
if not os.path.exists(output_folder_HANTS_outliers_VIIRS):
    os.mkdir(output_folder_HANTS_outliers_VIIRS)
# Open the HANTS NetCDF written for the thermal run; wrap keys() in list() so
# the positional indexing below also works on Python 3 (dict views are not
# subscriptable there).
fh = Dataset(nc_path_TB, mode='r')
variable_names = list(fh.variables.keys())
Var = variable_names[-1]                        # last variable name (kept for parity with the original; unused below)
lat = fh.variables[variable_names[1]][:]
lon = fh.variables[variable_names[2]][:]
time = fh.variables[variable_names[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
# Fall back to an example raster for the reference dataset/shape whenever
# either is still undefined.  BUGFIX: the original tested
# `'shape' in locals() or 'dest' in locals()`, which skipped this fallback
# (and crashed later in save_GeoTiff_proy) when only one of the two existed.
if not ('shape' in locals() and 'dest' in locals()):
    Example_file = os.path.join(output_folder_preprocessing_THERM,Back_name_TB)
    dest = gdal.Open(Example_file)
    ncol = dest.RasterXSize                     # reprojected DEM column size
    nrow = dest.RasterYSize                     # reprojected DEM row size
    shape = [ncol, nrow]
# Write one outlier raster per timestep
for i in range(0, int(np.shape(time)[0])):
    time_now = time[i]
    data = fh.variables['outliers'][:, :, i]
    geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
    name_out = os.path.join(output_folder_HANTS_outliers_VIIRS, 'Outliers_VIIRS_%s.tif' %time_now)
    SEBAL.save_GeoTiff_proy(dest, data, name_out, shape, nband=1)
############################################# Create end thermal #########################################
# For each date, build the final VIIRS LST raster: keep good original pixels
# and fill outlier pixels with bias-corrected HANTS values.
for date in Dates:
    # Define date
    year = date.year
    month = date.month
    day = date.day
    # input filenames needed for creating end thermal file
    filename_outliers = os.path.join(output_folder_HANTS_outliers_VIIRS,"Outliers_VIIRS_%d%02d%02d.tif" %(year,month,day))
    filename_VIIRS_original = os.path.join(input_folder_HANTS_THERM, "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year,month,day))
    filename_VIIRS_HANTS = os.path.join(temp_folder_PreSEBAL, THERM + "_HANTS", "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year,month,day))
    # Open the input filenames (gdal.Open returns None for missing files)
    dest_outliers = gdal.Open(filename_outliers)
    dest_VIIRS_original = gdal.Open(filename_VIIRS_original)
    dest_VIIRS_HANTS = gdal.Open(filename_VIIRS_HANTS)
    # If the original exists, it is the basis for the end thermal map
    if not dest_VIIRS_original == None:
        # Open arrays of the input files
        Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:]
        Array_VIIRS_original = dest_VIIRS_original.GetRasterBand(1).ReadAsArray()
        Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()[:,:]
        # Create outlier mask: after these swaps, 1 = good pixel, 0 = outlier
        Array_outliers[Array_outliers==-9999.] = 0
        Array_outliers_mask = np.zeros(np.shape(Array_outliers))
        Array_outliers_mask[Array_outliers==1.]=0
        Array_outliers_mask[Array_outliers==0.]=1
        Array_outliers_mask[Array_outliers_mask==0]=2
        Array_outliers_mask[Array_outliers_mask==1]=0
        Array_outliers_mask[Array_outliers_mask==2]=1
        # Create a buffer zone around the bad pixels (invert, buffer, invert back)
        Array_outliers_mask = Create_Buffer(Array_outliers_mask)
        Array_outliers_mask[Array_outliers_mask==1] = 2
        Array_outliers_mask[Array_outliers_mask==0] = 1
        Array_outliers_mask[Array_outliers_mask==2] = 0
        # If there are more than 300 good pixels, derive a bias-correction factor
        if np.nansum(Array_outliers_mask) > 300:
            # Use the mask to find the good original pixels and HANTS pixels
            Array_VIIRS_original_mask_nan = Array_VIIRS_original * Array_outliers_mask
            Array_VIIRS_HANTS_mask_nan = Array_VIIRS_HANTS * Array_outliers_mask
            # Create a 1D array of those pixels
            Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan.flatten()
            Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan.flatten()
            # Remove temperatures outside the plausible [250, 350] K range
            Array_VIIRS_HANTS_mask_nan_flatten[Array_VIIRS_HANTS_mask_nan_flatten<250] = np.nan
            Array_VIIRS_HANTS_mask_nan_flatten[Array_VIIRS_HANTS_mask_nan_flatten>350] = np.nan
            Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten<250] = np.nan
            Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten>350] = np.nan
            # Remove the nan values.
            # NOTE(review): logical_or(~isnan, ~isnan) keeps pairs where at
            # least one value is valid; the stated intent would need
            # logical_and — confirm which behavior is wanted.
            Array_VIIRS_original_mask_no_nan_flatten = Array_VIIRS_original_mask_nan_flatten[np.logical_or(~np.isnan(Array_VIIRS_original_mask_nan_flatten),~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten))]
            Array_VIIRS_HANTS_mask_no_nan_flatten = Array_VIIRS_HANTS_mask_nan_flatten[np.logical_or(~np.isnan(Array_VIIRS_original_mask_nan_flatten),~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten))]
            # Remove all zero values (masked-out pixels)
            Array_VIIRS_original_mask_nan_flatten_without_zero =Array_VIIRS_original_mask_no_nan_flatten[Array_VIIRS_original_mask_no_nan_flatten>0]
            # Calculate the 40th and 90th percentiles of the original array's good pixels
            Array_VIIRS_original_mask_value_cold = np.nanpercentile(Array_VIIRS_original_mask_nan_flatten_without_zero,40)
            Array_VIIRS_original_mask_value_hot = np.nanpercentile(Array_VIIRS_original_mask_nan_flatten_without_zero,90)
            # Delete the colder and hotter pixel values in both 1D arrays (to exclude large areas of seas)
            Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest = Array_VIIRS_HANTS_mask_no_nan_flatten[np.logical_and(Array_VIIRS_original_mask_no_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_no_nan_flatten < Array_VIIRS_original_mask_value_hot)]
            Array_VIIRS_original_mask_nan_flatten_exc_coldest = Array_VIIRS_original_mask_no_nan_flatten[np.logical_and(Array_VIIRS_original_mask_no_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_no_nan_flatten < Array_VIIRS_original_mask_value_hot)]
            # Calculate the mean of those arrays
            Ave_VIIRS_HANTS = np.nanmean(Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest)
            Ave_VIIRS_original = np.nanmean(Array_VIIRS_original_mask_nan_flatten_exc_coldest)
            # Calculate the correction factor for the simulated image
            Factor = Ave_VIIRS_original/Ave_VIIRS_HANTS
            # Apply this factor over the simulated HANTS image
            Array_VIIRS_HANTS_Corrected = Array_VIIRS_HANTS * Factor
            # Create the end array: corrected HANTS values at bad pixels, original values at good pixels
            End_array = np.ones(np.shape(Array_outliers_mask)) * np.nan
            End_array[Array_outliers_mask==0] =Array_VIIRS_HANTS_Corrected[Array_outliers_mask==0]
            End_array[Array_outliers_mask==1] =Array_VIIRS_original[Array_outliers_mask==1]
        # If the original image is too bad, replace the whole image by the simulated HANTS image
        else:
            End_array = Array_VIIRS_HANTS
        # Get the geolocation information of the image
        geo = dest_VIIRS_original.GetGeoTransform()
        proj = dest_outliers.GetProjection()
    # If there is no original image, use the simulated HANTS image
    else:
        Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()
        End_array = Array_VIIRS_HANTS
        # Find the first date with an existing image, to get geolocation info.
        # NOTE(review): if no file opens, Dates[i] eventually raises IndexError
        # which the bare except swallows, so this loop never terminates —
        # consider bounding i by len(Dates).
        dest_test = None
        i = 0
        while dest_test == None:
            date2 = Dates[i]
            year2 = date2.year
            month2= date2.month
            day2 = date2.day
            try:
                filename_VIIRS_original2 = os.path.join(input_folder_HANTS_THERM, "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year2,month2,day2))
                dest_test = gdal.Open(filename_VIIRS_original2)
                geo = dest_test.GetGeoTransform()
                proj = dest_test.GetProjection()
            except:
                i+=1
    # Save the end array (uses the reference dataset `dest` for georeferencing)
    output_name_end_LST = os.path.join(temp_folder_PreSEBAL_LST, "VIIRS_LST_%d%02d%02d.tif"%(year,month,day))
    SEBAL.save_GeoTiff_proy(dest, End_array, output_name_end_LST, shape, nband=1)
###################################################################################################################
###################################################### preSEBAL continue ##########################################
###################################################################################################################
############################################### Apply thermal sharpening ##########################################
# Prepare 100 m and 400 m DEM rasters used as projection templates for the
# thermal-sharpening step below.
print('---------------------------------------------------------')
print('-------------------- Downscale VIIRS --------------------')
print('---------------------------------------------------------')
# Upscale VIIRS and PROBA-V to 400m
pixel_spacing_upscale = 400
# Open the General_Input sheet
ws = wb['General_Input']
# Extract the DEM filename from the excel file
DEM_fileName = str(ws['E2'].value)
ws = wb['VIIRS_PROBAV_Input']
UTM_Zone = int(str(ws['G2'].value))
# Reproject from geographic coordinate system to UTM -
# 1) DEM - original DEM coordinates are geographic: lat, lon
proyDEM_fileName_100 = os.path.join(temp_folder_PreSEBAL,'DEM_100.tif')
dest, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(
    DEM_fileName, pixel_spacing = 100, UTM_Zone=UTM_Zone)
band = dest.GetRasterBand(1)                    # Get the reprojected dem band
ncol = dest.RasterXSize                         # Get the reprojected dem column size
nrow = dest.RasterYSize                         # Get the reprojected dem row size
shape=[ncol, nrow]
DEM = band.ReadAsArray()
# Save DEM file with the 100 meter resolution
SEBAL.save_GeoTiff_proy(dest, DEM, proyDEM_fileName_100, shape, nband=1)
# Create upscaled DEM (400 m)
proyDEM_fileName_400 = os.path.join(temp_folder_PreSEBAL,'DEM_400.tif')
dest_400, ulx_dem_400, lry_dem_400, lrx_dem_400, uly_dem_400, epsg_to = SEBAL.reproject_dataset(
    DEM_fileName, pixel_spacing_upscale, UTM_Zone = UTM_Zone)
# find spatial parameters array
DEM_400 = dest_400.GetRasterBand(1).ReadAsArray()
Y_raster_size_400 = dest_400.RasterYSize
X_raster_size_400 = dest_400.RasterXSize
shape_400=([X_raster_size_400, Y_raster_size_400])
# Save DEM file with the 400 meter resolution
SEBAL.save_GeoTiff_proy(dest_400, DEM_400, proyDEM_fileName_400, shape_400, nband=1)
# Per date: sharpen the 100 m LST with 400 m NDVI/LST regression, then derive
# LAI and vegetation height products.
for date in Dates:
    surf_temp_fileName = os.path.join(temp_folder_PreSEBAL, 'Surf_temp_After_TS_%d%02d%02d.tif' %(date.year, date.month, date.day))
    temp_surface_100_fileName_beforeTS = os.path.join(temp_folder_PreSEBAL_LST,'VIIRS_LST_%d%02d%02d.tif' %(date.year, date.month, date.day))
    ################################ Thermal Sharpening #####################################################
    # Define filename
    file_NDVI_after_HANTS = os.path.join(NDVI_outfolder_end, 'NDVI_PROBAV_%d%02d%02d.tif' %(date.year, date.month, date.day))
    # Open NDVI/LST destination datasets
    dest_NDVI = gdal.Open(file_NDVI_after_HANTS)
    dest_LST = gdal.Open(temp_surface_100_fileName_beforeTS)
    # Open NDVI array
    NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray()
    # Open LST array
    LST = dest_LST.GetRasterBand(1).ReadAsArray()
    # Upscale thermal band VIIRS from 100m to 400m
    VIIRS_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset_example(
        temp_surface_100_fileName_beforeTS, proyDEM_fileName_400)
    data_Temp_Surf_400 = VIIRS_Upscale.GetRasterBand(1).ReadAsArray()
    # Upscale PROBA-V NDVI from 100m to 400m
    NDVI_PROBAV_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset_example(
        file_NDVI_after_HANTS, proyDEM_fileName_400)
    data_NDVI_400 = NDVI_PROBAV_Upscale.GetRasterBand(1).ReadAsArray()
    # Define the width of the moving window box (in 400 m pixels)
    Box=9
    # Apply the surface temperature sharpening
    temp_surface_sharpened = SEBAL.Thermal_Sharpening(data_Temp_Surf_400, data_NDVI_400, NDVI, Box, NDVI_PROBAV_Upscale, output_folder, proyDEM_fileName_100, shape, dest, surf_temp_fileName)
    # Create water mask based on HANTS NDVI output (negative NDVI = water)
    water_mask = np.zeros((shape[1], shape[0]))
    water_mask[NDVI<0.0]=1
    # Divide temporary water mask into snow and water mask by using surface temperature
    Snow_Mask_PROBAV, water_mask, ts_moist_veg_min, NDVI_max, NDVI_std = SEBAL.CalculateSnowWaterMask(NDVI,shape,water_mask,temp_surface_sharpened)
    # Replace water values with the unsharpened LST; fill remaining nans likewise
    temp_surface_sharpened[water_mask==1] = LST[water_mask == 1]
    temp_surface_sharpened = np.where(np.isnan(temp_surface_sharpened), LST, temp_surface_sharpened)
    surf_temp_fileName = os.path.join(output_folder_HANTS_end_sharp, 'LST_surface_temp_sharpened_%d%02d%02d.tif' %(date.year, date.month, date.day))
    SEBAL.save_GeoTiff_proy(dest, temp_surface_sharpened, surf_temp_fileName, shape, nband=1)
    ################################################## Calculate LAI ##################################################
    # Open NDVI destination dataset
    dest_NDVI = gdal.Open(file_NDVI_after_HANTS)
    # Open NDVI array
    NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray()
    LAI_FileName = os.path.join(LAI_outfolder,'LAI_%d%02d%02d.tif' %(date.year, date.month, date.day))
    # Calculate LAI and related vegetation parameters
    FPAR, tir_emis, Nitrogen, vegt_cover, LAI, b10_emissivity = SEBAL.Calc_vegt_para(NDVI,water_mask, shape)
    SEBAL.save_GeoTiff_proy(dest, LAI, LAI_FileName, shape, nband=1)
    ################################ Calculate the Vegetation height ########################
    # Open the Vegetation_Height sheet of the pre-processing excel
    ws_veg = wb_veg['Vegetation_Height']
    # Define output name for the LandUse map
    dst_FileName = os.path.join(output_folder,'LU.tif')
    # Open LU data
    LU_dest = gdal.Open(LU_data_FileName)
    LU_data = LU_dest.GetRasterBand(1).ReadAsArray()
    # Reproject the LAI to the same projection as LU
    dest1, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(LAI_FileName, LU_data_FileName) ## input after HANTS
    LAI_proj = dest1.GetRasterBand(1).ReadAsArray()
    # Read the excel coefficients: column A = LU class, C/D/E = polynomial coefficients
    Array = np.zeros([ws_veg.max_row-1,4])
    for j in ['A','C','D','E']:
        j_number={'A' : 0, 'C' : 1, 'D' : 2, 'E' : 3}
        for i in range(2,ws_veg.max_row+1):
            Value = (ws_veg['%s%s' %(j,i)].value)
            Array[i-2, j_number[j]] = Value
    # Create maps with the coefficient numbers for the right land cover
    coeff = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1]),3])
    for coeff_nmbr in range(0,3):
        for Class in range(0,len(Array)):
            coeff[LU_data==Array[Class,0],coeff_nmbr] = Array[Class,coeff_nmbr+1]
    # Get some dimensions of the projected dataset
    band_data = dest1.GetRasterBand(1)
    ncol_data = dest1.RasterXSize
    nrow_data = dest1.RasterYSize
    shape_data=[ncol_data, nrow_data]
    # Vegetation height as a quadratic function of LAI, per land-use class, clipped to [0, 600]
    Veg_Height_proj = coeff[:,:,0] * np.power(LAI_proj,2) + coeff[:,:,1] * LAI_proj + coeff[:,:,2]
    Veg_Height_proj = np.clip(Veg_Height_proj, 0, 600)
    # Save the vegetation height in the LU projection in the temporary directory
    Veg_Height_proj_FileName = os.path.join(temp_folder_PreSEBAL,'Veg_Height_proj.tif')
    SEBAL.save_GeoTiff_proy(dest1, Veg_Height_proj, Veg_Height_proj_FileName, shape_data, nband=1)
    # Reproject the vegetation height back to the LAI projection.
    # NOTE(review): this rebinds `dest` (the 100 m DEM template) — later
    # sections using `dest` get this dataset instead; confirm intended.
    dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Veg_Height_proj_FileName, LAI_FileName)
    # Get some dimensions of the original dataset
    band_data = dest.GetRasterBand(1)
    ncol_data = dest.RasterXSize
    nrow_data = dest.RasterYSize
    # Open the vegetation height with the same projection as LAI
    Veg_Height = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
    Veg_Height[Veg_Height == 0] = 0.4           # minimum vegetation height of 0.4 where undefined
    # Save vegetation height in the end folder
    dst_FileName = os.path.join(output_folder_HANTS_end_Veg,'Vegetation_Height_%d%02d%02d.tif' %(date.year, date.month, date.day))
    SEBAL.save_GeoTiff_proy(dest, Veg_Height, dst_FileName, shape, nband=1)
######################## calculate Water Mask #########################
# Aggregate all per-date water masks; a pixel is water in the final mask when
# it is flagged as water in more than 50% of the dates.
# Open all the water masks
os.chdir(WaterMask_outfolder)
re_water_mask = glob.glob('Water_Mask*.tif')
# NOTE(review): if the glob matches nothing, `water_mask_array` is never
# created and the division below fails — consider guarding for that case.
# Loop over all the files
for water_mask_filename in re_water_mask:
    # Create the filepath to the water mask
    water_mask_filepath = os.path.join(WaterMask_outfolder,water_mask_filename)
    # Open Array
    water_mask_dest = gdal.Open(water_mask_filepath)
    # If the total water mask raster does not exist, create it (zeros)
    if not 'water_mask_array' in locals():
        water_mask_array = np.zeros([water_mask_dest.RasterYSize, water_mask_dest.RasterXSize])
    # Add all the water masks
    water_mask_array += water_mask_dest.GetRasterBand(1).ReadAsArray()
# Calculate the end water mask: water where flagged in more than 50 percent of the dates
water_mask_array_per = water_mask_array/len(re_water_mask)
water_mask_array_end = np.zeros([water_mask_dest.RasterYSize, water_mask_dest.RasterXSize])
water_mask_array_end[water_mask_array_per > 0.5] = 1
# Save water mask
WaterMask_outfolder_end_FileName = os.path.join(WaterMask_outfolder_end,'Water_Mask.tif')
SEBAL.save_GeoTiff_proy(dest, water_mask_array_end, WaterMask_outfolder_end_FileName, shape, nband=1)
######################## calculate p-factor by using the Landuse map #########################
# Map the per-land-use p-factor (column C keyed by LU class in column A) onto
# the land-use raster and reproject it to the LAI grid.
# NOTE(review): relies on LU_data, dest1, shape_data and LAI_FileName left
# over from the last iteration of the date loop above — confirm intended.
ws_p = wb_veg['p-factor']
Array_P = np.zeros([ws_p.max_row-1,2])
for j in ['A','C']:
    j_number={'A' : 0, 'C' : 1}
    for i in range(2,ws_p.max_row+1):
        Value = (ws_p['%s%s' %(j,i)].value)
        Array_P[i-2, j_number[j]] = Value
p_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_P)):
    p_factor[LU_data==Array_P[Class,0]] = Array_P[Class,1]
p_factor[p_factor == 0] = 0.5                   # default p-factor for unmapped classes
dst_FileName = os.path.join(temp_folder_PreSEBAL, 'p-factor_proj.tif')
SEBAL.save_GeoTiff_proy(dest1, p_factor, dst_FileName, shape_data, nband=1)
# Reproject p-factor to the LAI grid and re-apply the default after resampling
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1)               # Get the reprojected band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
p_factor = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
p_factor[p_factor == 0] = 0.5
dst_pfactor_FileName = os.path.join(output_folder_p_factor,'p_factor.tif')
SEBAL.save_GeoTiff_proy(dest, p_factor, dst_pfactor_FileName, shape, nband=1)
######################## calculate c-factor by using the Landuse map #########################
# Map the per-land-use c-factor and derive the maximum light-use efficiency
# (LUE_max): 2.5 for c-factor 3, 4.5 for c-factor 4, 2.5 as default.
ws_c = wb_veg['C-factor']
Array_C = np.zeros([ws_c.max_row-1,2])
for j in ['A','C']:
    j_number={'A' : 0, 'C' : 1}
    for i in range(2,ws_c.max_row+1):
        Value = (ws_c['%s%s' %(j,i)].value)
        Array_C[i-2, j_number[j]] = Value
c_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_C)):
    c_factor[LU_data==Array_C[Class,0]] = Array_C[Class,1]
# Only c-factor classes 3 and 4 are meaningful; everything else becomes nan
c_factor[np.logical_and(c_factor != 3.0, c_factor != 4.0)] = np.nan
LUE_max = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
LUE_max[c_factor == 3] = 2.5
LUE_max[c_factor == 4] = 4.5
LUE_max[LUE_max == 0] = 2.5                     # default LUE_max for nan c-factor classes
dst_FileName = os.path.join(temp_folder_PreSEBAL, 'LUE_max_proj.tif')
SEBAL.save_GeoTiff_proy(dest1, LUE_max, dst_FileName, shape_data, nband=1)
# Reproject LUE_max to the LAI grid and re-apply the default after resampling
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1)               # Get the reprojected band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
LUE_max = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
LUE_max[LUE_max == 0] = 2.5
dst_LUEmax_FileName = os.path.join(output_folder_LUE,'LUE_max.tif')
SEBAL.save_GeoTiff_proy(dest, LUE_max, dst_LUEmax_FileName, shape, nband=1)
####################################################################################################################
################################################ Write output part 6 ###############################################
####################################################################################################################
############################################# Fill in the additional input sheet #########################################
# things to be filled in:
# Transmissivity (optional)
# NDVI (additional input)
# Albedo (additional input)
# LST (additional input)
# Water Mask (additional input)
# p-factor (soil input)
# c-factor (soil input)
# Vegetation height (meteo input)
# VIIRS parameter copy
# Group the SEBAL runs by output folder: VIIRS_Dict maps each run's
# output folder to the list of run keys that write into it.
VIIRS_Dict = {}
for k, v in SEBAL_RUNS.items():  # .items() works on Python 2 and 3; .iteritems() is Python-2-only
    VIIRS_Dict.setdefault(v['output_folder'], []).append(k)
'''
LST folder = output_folder_HANTS_end
NDVI folder = os.path.join(output_folder_HANTS, 'NDVI')
ALBEDO folder = os.path.join(output_folder_HANTS, 'Albedo')
SAVI folder = os.path.join(output_folder_HANTS, 'SAVI')
'''
# Variables whose file paths are written into the 'Additional_Input'
# sheet, and the spreadsheet column each one goes to.
VARS = ["NDVI", "Albedo"]
Letter_dict = {"NDVI":'B', "Albedo":'D'}
# Open the SEBAL input workbook and the three sheets that get filled in.
# NOTE(review): get_sheet_by_name() is deprecated/removed in newer
# openpyxl (use xfile['Additional_Input']) -- confirm the pinned version.
xfile = load_workbook(inputExcel)
sheet_additional = xfile.get_sheet_by_name('Additional_Input')
sheet_meteo = xfile.get_sheet_by_name('Meteo_Input')
sheet_soil = xfile.get_sheet_by_name('Soil_Input')
# The filled workbook is saved next to the input as <name>_SEBAL.xlsx.
sheet_out_name = ''.join([os.path.splitext(os.path.basename(inputExcel))[0],'_SEBAL.xlsx'])
sheet_out_dir = os.path.dirname(inputExcel)
sheet_out_file_name = os.path.join(sheet_out_dir, sheet_out_name)
# Fill the SEBAL input sheets for every VIIRS run.
# NOTE(review): indentation was lost in this dump; the statements below
# are assumed to sit inside the loop, with the final save after it -- confirm.
for output_name_run in VIIRS_Dict.keys():
    # Get General parameters
    Row_number = VIIRS_Dict[output_name_run][0]
    Type_of_Run = SEBAL_RUNS.items()  # NOTE(review): assigned but never used below
    VIIRS_date = output_name_run.split('_')[-1]
    VIIRS_datetime= datetime.strptime(VIIRS_date, '%d%m%Y')
    date_run = '%d%02d%02d' %(VIIRS_datetime.year,VIIRS_datetime.month,VIIRS_datetime.day)
    # import LST
    file_name_LST = os.path.join(output_folder_HANTS_end_sharp, 'LST_surface_temp_sharpened_%s.tif' %date_run )
    sheet_additional['E%d'%(Row_number)] = str(file_name_LST)
    # import NDVI and Albedo and water mask
    for VAR_SINGLE in VARS:
        Letter = Letter_dict[VAR_SINGLE]
        file_name_VAR_single = os.path.join(output_folder_PreSEBAL, VAR_SINGLE, '%s_PROBAV_%s.tif' %(VAR_SINGLE, date_run))
        sheet_additional['%s%d'%(Letter, Row_number)] = str(file_name_VAR_single)
    # import Water Mask
    sheet_additional['C%d'%(Row_number)] = str(WaterMask_outfolder_end_FileName)
    # import p-factor
    file_name_p_factor = os.path.join(output_folder_p_factor,'p_factor.tif')
    sheet_soil['H%d'%(Row_number)] = str(file_name_p_factor)
    # import c-factor (the LUE_max map written above)
    file_name_c_factor = os.path.join(output_folder_LUE, 'LUE_max.tif')
    sheet_soil['I%d'%(Row_number)] = str(file_name_c_factor)
    # import vegetation height
    file_name_vegt_height = os.path.join(output_folder_HANTS_end_Veg,'Vegetation_Height_%s.tif' %date_run)
    sheet_meteo['O%d'%(Row_number)] = str(file_name_vegt_height)
# Persist the filled workbook as <input name>_SEBAL.xlsx.
xfile.save(sheet_out_file_name)
'''
# If instantanious Transmissivity is calculated in PreSEBAL
if Check_Trans_inst == 1:
sheet['N%d'%(number)] = str(Transmissivity_inst_fileName)
xfile.save(inputExcel)
# If daily Transmissivity is calculated in PreSEBAL
if Check_Trans_24 == 1:
sheet_meteo['K%d'%(number)] = str(Transmissivity_24_fileName)
xfile.save(sheet_out_file_name)
'''
'''
############################################# Create Outlier maps for PROBA-V #########################################
# Create output folder if not exists
output_folder_HANTS_outliers = os.path.join(output_folder_HANTS, 'Outliers')
if not os.path.exists(output_folder_HANTS_outliers):
os.mkdir(output_folder_HANTS_outliers)
fh = Dataset(nc_path_albedo, mode='r')
Var = fh.variables.keys()[-1]
data = fh.variables['outliers'][:]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_VAR,Back_name)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
for i in range(0,int(np.shape(data)[2])):
time_now = time[i]
data_now = data[:,:,i]
geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
name_out = os.path.join(output_folder_HANTS_outliers, 'Outliers_PROBAV_%s.tif' %time_now)
SEBAL.save_GeoTiff_proy(dest, data_now, name_out, shape, nband=1)
############################################ NDVI ##################################################
# Create output folder if not exists
output_folder_HANTS_outliers = os.path.join(output_folder_HANTS, 'Outliers_NDVI')
if not os.path.exists(output_folder_HANTS_outliers):
os.mkdir(output_folder_HANTS_outliers)
fh = Dataset(nc_path_ndvi, mode='r')
Var = fh.variables.keys()[-1]
data = fh.variables['outliers'][:]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_VAR,Back_name)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
for i in range(0,int(np.shape(data)[2])):
time_now = time[i]
data_now = data[:,:,i]
geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
name_out = os.path.join(output_folder_HANTS_outliers, 'Outliers_PROBAV_%s.tif' %time_now)
SEBAL.save_GeoTiff_proy(dest, data_now, name_out, shape, nband=1)
###################################################### postHANTS Albedo ###############################################
for date in Dates:
year = date.year
month = date.month
day = date.day
filename_outliers = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_output\Outliers\Outliers_PROBAV_%d%02d%02d.tif" %(year,month,day)
filename_VIIRS_original = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_input\Albedo\Albedo_PROBAV_%d%02d%02d.tif" %(year,month,day)
filename_VIIRS_HANTS = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_output\Albedo\Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day)
dest_outliers = gdal.Open(filename_outliers)
dest_VIIRS_original = gdal.Open(filename_VIIRS_original)
dest_VIIRS_HANTS = gdal.Open(filename_VIIRS_HANTS)
if not dest_VIIRS_original == None:
Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:]
Array_VIIRS_original = dest_VIIRS_original.GetRasterBand(1).ReadAsArray()
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()[:,:]
Array_outliers[Array_outliers==-9999.] = 0
Array_outliers_mask = np.zeros(np.shape(Array_outliers))
Array_outliers_mask[Array_outliers==1.]=0
Array_outliers_mask[Array_outliers==0.]=1
Array_outliers_mask[Array_outliers_mask==0]=2
Array_outliers_mask[Array_outliers_mask==1]=0
Array_outliers_mask[Array_outliers_mask==2]=1
Array_outliers_mask = Create_Buffer(Array_outliers_mask)
Array_outliers_mask[Array_outliers_mask==1] = 2
Array_outliers_mask[Array_outliers_mask==0] = 1
Array_outliers_mask[Array_outliers_mask==2] = 0
if np.nansum(Array_outliers_mask) > 30:
Array_outliers_mask[Array_VIIRS_HANTS == 0] = np.nan
Array_VIIRS_original_mask_nan = Array_VIIRS_original * Array_outliers_mask
Array_VIIRS_HANTS_mask_nan = Array_VIIRS_HANTS * Array_outliers_mask
Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan.flatten()
Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan.flatten()
Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan_flatten[~np.isnan(Array_VIIRS_original_mask_nan_flatten)]
Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan_flatten[~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten)]
Array_VIIRS_original_mask_nan_flatten_without_zero =Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten>0]
Array_VIIRS_original_mask_value_cold = np.percentile(Array_VIIRS_original_mask_nan_flatten_without_zero,40)
Array_VIIRS_original_mask_value_hot = np.percentile(Array_VIIRS_original_mask_nan_flatten_without_zero,90)
Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest = Array_VIIRS_HANTS_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < Array_VIIRS_original_mask_value_hot)]
Array_VIIRS_original_mask_nan_flatten_exc_coldest = Array_VIIRS_original_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < Array_VIIRS_original_mask_value_hot)]
Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest[Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest==-9999] = np.nan
Array_VIIRS_original_mask_nan_flatten_exc_coldest[Array_VIIRS_original_mask_nan_flatten_exc_coldest==-9999] = np.nan
Ave_VIIRS_HANTS = np.nanmean(Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest)
Ave_VIIRS_original = np.nanmean(Array_VIIRS_original_mask_nan_flatten_exc_coldest)
Factor = Ave_VIIRS_original/Ave_VIIRS_HANTS
Array_VIIRS_HANTS_Corrected = Array_VIIRS_HANTS * Factor
End_array = np.ones(np.shape(Array_outliers_mask)) * np.nan
End_array[Array_outliers_mask==0] =Array_VIIRS_HANTS_Corrected[Array_outliers_mask==0]
End_array[Array_outliers_mask==1] =Array_VIIRS_original[Array_outliers_mask==1]
else:
End_array = Array_VIIRS_HANTS
geo = dest_VIIRS_original.GetGeoTransform()
proj = dest_outliers.GetProjection()
else:
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()
End_array = Array_VIIRS_HANTS
dest_test = None
i = 0
while dest_test == None:
date2 = Dates[i]
year2 = date2.year
month2= date2.month
day2 = date2.day
try:
filename_VIIRS_original2 = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_input\Albedo\Albedo_PROBAV_%d%02d%02d.tif" %(year2,month2,day2)
dest_test = gdal.Open(filename_VIIRS_original2)
geo = dest_test.GetGeoTransform()
proj = dest_test.GetProjection()
except:
i+=1
import wa.General.data_conversions as DC
name = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_end\Albedo\Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day)
DC.Save_as_tiff(name, End_array, geo, proj)
################################## All input is now calculated, so preprosessing can start ########################
# Open preprosessing excel the Vegetation_Height sheet
ws_veg = wb_veg['Vegetation_Height']
# Define output name for the LandUse map
dst_FileName = os.path.join(output_folder,'LU_%s.tif' %Var_name)
# Open LU data
LU_dest = gdal.Open(LU_data_FileName)
LU_data = LU_dest.GetRasterBand(1).ReadAsArray()
# Reproject the LAI to the same projection as LU
dest1, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(LAI_FileName, LU_data_FileName) ## input after HANTS
LAI_proj = dest1.GetRasterBand(1).ReadAsArray()
# Read out the excel file coefficient numbers
Array = np.zeros([ws_veg.max_row-1,4])
for j in ['A','C','D','E']:
j_number={'A' : 0, 'C' : 1, 'D' : 2, 'E' : 3}
for i in range(2,ws_veg.max_row+1):
Value = (ws_veg['%s%s' %(j,i)].value)
Array[i-2, j_number[j]] = Value
# Create maps with the coefficient numbers for the right land cover
coeff = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1]),3])
for coeff_nmbr in range(0,3):
for Class in range(0,len(Array)):
coeff[LU_data==Array[Class,0],coeff_nmbr] = Array[Class,coeff_nmbr+1]
# Get some dimensions of the projected dataset
band_data = dest1.GetRasterBand(1)
ncol_data = dest1.RasterXSize
nrow_data = dest1.RasterYSize
shape_data=[ncol_data, nrow_data]
# Calculate the vegetation height in the LU projection
Veg_Height_proj = coeff[:,:,0] * np.power(LAI_proj,2) + coeff[:,:,1] * LAI_proj + coeff[:,:,2]
Veg_Height_proj = np.clip(Veg_Height_proj, 0, 600)
# Save the vegetation height in the lU projection in the temporary directory
Veg_Height_proj_FileName = os.path.join(output_folder_temp,'Veg_Height_proj.tif')
save_GeoTiff_proy(dest1, Veg_Height_proj, Veg_Height_proj_FileName, shape_data, nband=1)
# Reproject the Veg_height to the LAI projection
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Veg_Height_proj_FileName, LAI_FileName)
# Get some dimensions of the original dataset
band_data = dest.GetRasterBand(1)
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
# Open the Veg_height with the same projection as LAI
Veg_Height = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
Veg_Height[Veg_Height == 0] = np.nan
# Save Vegetation Height in the end folder
dst_FileName = os.path.join(output_folder,'Vegetation_Height_%s.tif' %Var_name)
save_GeoTiff_proy(dest, Veg_Height, dst_FileName, shape, nband=1)
######################## calculate p-factor by using the Landuse map #########################
ws_p = wb_veg['p-factor']
Array_P = np.zeros([ws_p.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_p.max_row+1):
Value = (ws_p['%s%s' %(j,i)].value)
Array_P[i-2, j_number[j]] = Value
p_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_P)):
p_factor[LU_data==Array_P[Class,0]] = Array_P[Class,1]
p_factor[p_factor == 0] = np.nan
dst_FileName = os.path.join(output_folder_temp,'p-factor_proj.tif')
save_GeoTiff_proy(dest1, p_factor, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
p_factor = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
p_factor[p_factor == 0] = np.nan
dst_pfactor_FileName = os.path.join(output_folder,'p-factor_%s.tif' %Var_name)
save_GeoTiff_proy(dest, p_factor, dst_pfactor_FileName, shape, nband=1)
######################## calculate c-factor by using the Landuse map #########################
ws_c = wb_veg['C-factor']
Array_C = np.zeros([ws_c.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_c.max_row+1):
Value = (ws_c['%s%s' %(j,i)].value)
Array_C[i-2, j_number[j]] = Value
c_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_C)):
c_factor[LU_data==Array_C[Class,0]] = Array_C[Class,1]
c_factor[np.logical_and(c_factor != 3.0, c_factor != 4.0)] = np.nan
LUE_max = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
LUE_max[c_factor == 3] = 2.5
LUE_max[c_factor == 4] = 4.5
LUE_max[LUE_max == 0] = np.nan
dst_FileName = os.path.join(output_folder_temp,'LUE_max_proj.tif')
save_GeoTiff_proy(dest1, LUE_max, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
LUE_max = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
LUE_max[LUE_max == 0] = np.nan
dst_LUEmax_FileName = os.path.join(output_folder,'LUE_max_%s.tif' %Var_name)
save_GeoTiff_proy(dest, LUE_max, dst_LUEmax_FileName, shape, nband=1)
############################# delete temporary directory ########################
shutil.rmtree(output_folder_temp)
#################################################################################
'''
# Functions
#################################################################################
def Create_Buffer(Data_In):
    '''
    Dilate a binary (0/1) mask with a square moving window.

    Every cell lying within a Chebyshev distance of ``Buffer_area`` of a
    non-zero cell in ``Data_In`` becomes 1 in the output; all other
    cells are 0.  Cells outside the array are treated as 0.

    NOTE(review): assumes ``Data_In`` is a 2D 0/1 mask, as produced by
    the outlier-mask code that calls this -- confirm before reusing on
    non-binary data.  The original implementation built the same result
    with redundant nested loops (the inner ``xpixel`` loop shadowed the
    outer one, re-adding the same shifts several times); the repeated
    additions were harmless because the result is thresholded to 0/1.
    '''
    Buffer_area = 7  # half-width: the window is (2*Buffer_area+1)^2 cells
    data = np.asarray(Data_In, dtype=float)
    nrow, ncol = data.shape
    # Zero-pad so every window offset can be taken as a plain slice
    # (equivalent to the original's "shifts stop at the edge" behaviour).
    padded = np.zeros((nrow + 2 * Buffer_area, ncol + 2 * Buffer_area))
    padded[Buffer_area:Buffer_area + nrow, Buffer_area:Buffer_area + ncol] = data
    # Accumulate all shifted copies; any overlap with a 1 makes the sum > 0.
    acc = np.zeros((nrow, ncol))
    for dy in range(2 * Buffer_area + 1):
        for dx in range(2 * Buffer_area + 1):
            acc += padded[dy:dy + nrow, dx:dx + ncol]
    Data_Out = np.zeros((nrow, ncol))
    Data_Out[acc > 0.1] = 1
    return(Data_Out)
#------------------------------------------------------------------------------
def Get_epsg(g):
    '''
    Extract the EPSG code from a GDAL dataset's projection string.

    Parameters:
        g -- GDAL dataset (anything exposing ``GetProjection()``)

    Returns the EPSG code as an int; falls back to 4326 (WGS84) when the
    projection cannot be read or parsed.
    '''
    try:
        # The WKT ends with ...AUTHORITY["EPSG","<code>"]]; take the text
        # after the last 'EPSG","' and strip the trailing quote.
        gland_proj = g.GetProjection()
        Projection = gland_proj.split('EPSG","')
        epsg_to = int((str(Projection[-1]).split(']')[0])[0:-1])
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # are not swallowed; any attribute/parse error means "unknown".
        epsg_to = 4326
        print('Was not able to get the projection, so WGS84 is assumed')
    return(epsg_to)
#------------------------------------------------------------------------------
def gap_filling(data,NoDataValue):
    """
    Fill the no-data gaps in a 2D numpy array by nearest-neighbour
    interpolation.

    Keyword arguments:
    data -- 2D numpy array
    NoDataValue -- value that marks a gap (may be NaN)

    Returns a new array in which every no-data cell holds the value of
    the nearest valid cell.
    """
    # Build a mask of valid cells.  NaN never compares equal to itself,
    # so it needs an explicit isnan test; the original ``is np.nan``
    # identity check missed NaNs that were not the np.nan singleton
    # (e.g. float('nan') or NaNs read from a raster).
    if isinstance(NoDataValue, (float, np.floating)) and np.isnan(NoDataValue):
        mask = ~(np.isnan(data))
    else:
        mask = ~(data == NoDataValue)
    # Coordinates of every cell, and of the valid cells only.
    xx, yy = np.meshgrid(np.arange(data.shape[1]), np.arange(data.shape[0]))
    xym = np.vstack((np.ravel(xx[mask]), np.ravel(yy[mask]))).T
    data0 = np.ravel(data[:, :][mask])
    # Nearest-neighbour interpolation evaluated over the full grid.
    interp0 = scipy.interpolate.NearestNDInterpolator(xym, data0)
    data_end = interp0(np.ravel(xx), np.ravel(yy)).reshape(xx.shape)
    return (data_end)
#------------------------------------------------------------------------------
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.configurationprocessor;
import javax.lang.model.element.ExecutableElement;
import javax.lang.model.element.TypeElement;
import javax.lang.model.element.VariableElement;
import javax.lang.model.type.TypeMirror;
import org.springframework.boot.configurationprocessor.metadata.ItemDeprecation;
/**
* A {@link PropertyDescriptor} for a constructor parameter.
*
* @author Stephane Nicoll
* @author Phillip Webb
*/
class ConstructorParameterPropertyDescriptor extends ParameterPropertyDescriptor {

	// Setter matching this constructor parameter; consulted when resolving
	// deprecation metadata.
	private final ExecutableElement setter;

	// Backing field of the property; used for nested-property detection,
	// Javadoc-based descriptions and deprecation resolution.
	private final VariableElement field;

	ConstructorParameterPropertyDescriptor(String name, TypeMirror type, VariableElement parameter,
			TypeElement declaringElement, ExecutableElement getter, ExecutableElement setter, VariableElement field) {
		super(name, type, parameter, declaringElement, getter);
		this.setter = setter;
		this.field = field;
	}

	@Override
	protected ItemDeprecation resolveItemDeprecation(MetadataGenerationEnvironment environment) {
		// Deprecation may be declared on the getter, the setter or the field.
		return resolveItemDeprecation(environment, getGetter(), this.setter, this.field);
	}

	@Override
	protected boolean isMarkedAsNested(MetadataGenerationEnvironment environment) {
		// A property is nested when its field carries the
		// @NestedConfigurationProperty annotation.
		return environment.getNestedConfigurationPropertyAnnotation(this.field) != null;
	}

	@Override
	protected String resolveDescription(MetadataGenerationEnvironment environment) {
		// The metadata description comes from the field's Javadoc.
		return environment.getTypeUtils().getJavaDoc(this.field);
	}
} | java | github | https://github.com/spring-projects/spring-boot | configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/ConstructorParameterPropertyDescriptor.java |
import unittest
from dnd_character import Character, modifier
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0
class DnDCharacterTest(unittest.TestCase):
    """Checks ``modifier`` for every score 3-18 and basic ``Character`` invariants."""

    def test_modifier_for_score_3_is_n4(self):
        self.assertEqual(modifier(3), -4)

    def test_modifier_for_score_4_is_n3(self):
        self.assertEqual(modifier(4), -3)

    def test_modifier_for_score_5_is_n3(self):
        self.assertEqual(modifier(5), -3)

    def test_modifier_for_score_6_is_n2(self):
        self.assertEqual(modifier(6), -2)

    def test_modifier_for_score_7_is_n2(self):
        self.assertEqual(modifier(7), -2)

    def test_modifier_for_score_8_is_n1(self):
        self.assertEqual(modifier(8), -1)

    def test_modifier_for_score_9_is_n1(self):
        self.assertEqual(modifier(9), -1)

    def test_modifier_for_score_10_is_0(self):
        self.assertEqual(modifier(10), 0)

    def test_modifier_for_score_11_is_0(self):
        self.assertEqual(modifier(11), 0)

    def test_modifier_for_score_12_is_1(self):
        self.assertEqual(modifier(12), 1)

    def test_modifier_for_score_13_is_1(self):
        self.assertEqual(modifier(13), 1)

    def test_modifier_for_score_14_is_2(self):
        self.assertEqual(modifier(14), 2)

    def test_modifier_for_score_15_is_2(self):
        self.assertEqual(modifier(15), 2)

    def test_modifier_for_score_16_is_3(self):
        self.assertEqual(modifier(16), 3)

    def test_modifier_for_score_17_is_3(self):
        self.assertEqual(modifier(17), 3)

    def test_modifier_for_score_18_is_4(self):
        self.assertEqual(modifier(18), 4)

    def test_random_ability_is_within_range(self):
        self.assertIn(Character().ability(), range(3, 19))

    def test_random_character_is_valid(self):
        # All six abilities must be rolled within 3..18 and hitpoints
        # must be 10 plus the constitution modifier.
        Char = Character()
        self.assertIn(Char.strength, range(3, 19))
        self.assertIn(Char.dexterity, range(3, 19))
        self.assertIn(Char.constitution, range(3, 19))
        self.assertIn(Char.intelligence, range(3, 19))
        self.assertIn(Char.wisdom, range(3, 19))
        self.assertIn(Char.charisma, range(3, 19))
        self.assertEqual(
            Char.hitpoints,
            10 + modifier(Char.constitution))

    def test_each_ability_is_only_calculated_once(self):
        # Reading the same ability twice must give the same (cached) value,
        # i.e. the score is not re-rolled per access.
        Char = Character()
        self.assertEqual(Char.strength, Char.strength)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
import doctest
from insights.parsers import redhat_release
from insights.parsers.redhat_release import RedhatRelease
from insights.tests import context_wrap
REDHAT_RELEASE1 = """
Red Hat Enterprise Linux Server release 6.7 (Santiago)
""".strip()
REDHAT_RELEASE2 = """
Red Hat Enterprise Linux Server release 7.2 (Maipo)
""".strip()
REDHAT_RELEASE3 = """
Red Hat Enterprise Linux release 7.5-0.14
""".strip()
RHVH_RHV40 = """
Red Hat Enterprise Linux release 7.3
""".strip()
RHEVH_RHEV35 = """
Red Hat Enterprise Virtualization Hypervisor release 6.7 (20160219.0.el6ev)
""".strip()
FEDORA = """
Fedora release 23 (Twenty Three)
""".strip()
REDHAT_RELEASE8 = """
Red Hat Enterprise Linux release 8.2 (Ootpa)
""".strip()
REDHAT_RELEASE10 = """
Red Hat Enterprise Linux Server release 6.10(Santiago)
""".strip()
REDHAT_RELEASE_BETA = """
Red Hat Enterprise Linux Server release 8.5 Beta (Ootpa)
""".strip()
CENTOS_STREAM = """
CentOS Stream release 8
""".strip()
CENTOS_7 = """
CentOS Linux release 7.6.1810 (Core)
""".strip()
def test_rhe6():
    """RHEL 6 Server: major/minor/version/product parsed from the release string."""
    release = RedhatRelease(context_wrap(REDHAT_RELEASE1))
    assert release.raw == REDHAT_RELEASE1
    assert release.major == 6
    assert release.minor == 7
    assert release.version == "6.7"
    assert release.is_rhel
    assert release.product == "Red Hat Enterprise Linux Server"
def test_rhe7():
    """RHEL 7 Server release string parses like the RHEL 6 one."""
    release = RedhatRelease(context_wrap(REDHAT_RELEASE2))
    assert release.raw == REDHAT_RELEASE2
    assert release.major == 7
    assert release.minor == 2
    assert release.version == "7.2"
    assert release.is_rhel
    assert release.product == "Red Hat Enterprise Linux Server"
def test_rhe75_0_14():
    """A dotted-dash version like 7.5-0.14 keeps the full string but minor is 5."""
    release = RedhatRelease(context_wrap(REDHAT_RELEASE3))
    assert release.raw == REDHAT_RELEASE3
    assert release.major == 7
    assert release.minor == 5
    assert release.version == "7.5-0.14"
    assert release.is_rhel
    assert release.product == "Red Hat Enterprise Linux"
def test_rhevh35():
    """RHEV Hypervisor is parsed but is not reported as RHEL."""
    release = RedhatRelease(context_wrap(RHEVH_RHEV35))
    assert release.raw == RHEVH_RHEV35
    assert release.major == 6
    assert release.minor == 7
    assert release.version == "6.7"
    assert not release.is_rhel
    assert release.product == "Red Hat Enterprise Virtualization Hypervisor"
def test_rhvh40():
    """RHV-H 4.0 hosts report a plain RHEL 7.3 release string."""
    release = RedhatRelease(context_wrap(RHVH_RHV40))
    assert release.raw == RHVH_RHV40
    assert release.major == 7
    assert release.minor == 3
    assert release.version == "7.3"
    assert release.is_rhel
    assert release.product == "Red Hat Enterprise Linux"
def test_fedora23():
    """Fedora has no minor version and is flagged is_fedora, not is_rhel."""
    release = RedhatRelease(context_wrap(FEDORA))
    assert release.raw == FEDORA
    assert release.major == 23
    assert release.minor is None
    assert release.version == "23"
    assert not release.is_rhel
    assert release.is_fedora
    assert release.product == "Fedora"
def test_rhel6_10():
    """Parsing tolerates a missing space before the code name: '6.10(Santiago)'."""
    release = RedhatRelease(context_wrap(REDHAT_RELEASE10))
    assert release.raw == REDHAT_RELEASE10
    assert release.major == 6
    assert release.minor == 10
    assert release.version == "6.10"
    assert release.is_rhel
    assert release.product == "Red Hat Enterprise Linux Server"
def test_rhel8():
    """RHEL 8 drops the 'Server' suffix from the product name."""
    release = RedhatRelease(context_wrap(REDHAT_RELEASE8))
    assert release.raw == REDHAT_RELEASE8
    assert release.major == 8
    assert release.minor == 2
    assert release.version == "8.2"
    assert release.is_rhel
    assert release.product == "Red Hat Enterprise Linux"
def test_rhel_beta():
    """A 'Beta' marker sets is_beta and keeps the code name and product intact."""
    release = RedhatRelease(context_wrap(REDHAT_RELEASE_BETA))
    assert release.raw == REDHAT_RELEASE_BETA
    assert release.major == 8
    assert release.minor == 5
    assert release.version == "8.5"
    assert release.is_rhel
    assert release.is_beta
    assert release.parsed['code_name'] == 'Ootpa'
    assert release.product == "Red Hat Enterprise Linux Server"
def test_centos_stream():
    """CentOS Stream has a major version only and is flagged is_centos."""
    release = RedhatRelease(context_wrap(CENTOS_STREAM))
    assert release.major == 8
    assert release.minor is None
    assert release.product == 'CentOS Stream'
    assert release.is_centos
    assert not release.is_rhel
def test_centos_7():
    """CentOS Linux 7 parses major/minor, code name and the is_centos flag."""
    release = RedhatRelease(context_wrap(CENTOS_7))
    assert release.major == 7
    assert release.minor == 6
    assert release.product == 'CentOS Linux'
    assert release.code_name == 'Core'
    assert release.is_centos
    assert not release.is_rhel
def test_examples():
    """Module doctests run with a parsed RHEL 7.2 release bound as rh_release."""
    release = RedhatRelease(context_wrap(REDHAT_RELEASE2))
    globs = {
        'rh_release': release
    }
    failed, tested = doctest.testmod(redhat_release, globs=globs)
assert failed == 0 | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# Metadata consumed by Ansible's documentation/tooling (format 1.1);
# win_stat is declared a stable, core-supported module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_stat
version_added: "1.7"
short_description: Get information about Windows files
description:
- Returns information about a Windows file.
- For non-Windows targets, use the M(stat) module instead.
options:
path:
description:
- The full path of the file/object to get the facts of; both forward and
back slashes are accepted.
type: path
required: yes
aliases: [ dest, name ]
get_checksum:
description:
- Whether to return a checksum of the file (default sha1)
type: bool
default: yes
version_added: "2.1"
checksum_algorithm:
description:
- Algorithm to determine checksum of file.
- Will throw an error if the host is unable to use specified algorithm.
type: str
default: sha1
choices: [ md5, sha1, sha256, sha384, sha512 ]
version_added: "2.3"
follow:
description:
- Whether to follow symlinks or junction points.
- In the case of C(path) pointing to another link, then that will
be followed until no more links are found.
type: bool
default: no
version_added: "2.8"
seealso:
- module: stat
- module: win_acl
- module: win_file
- module: win_owner
author:
- Chris Church (@cchurch)
'''
EXAMPLES = r'''
- name: Obtain information about a file
win_stat:
path: C:\foo.ini
register: file_info
- name: Obtain information about a folder
win_stat:
path: C:\bar
register: folder_info
- name: Get MD5 checksum of a file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: md5
register: md5_checksum
- debug:
var: md5_checksum.stat.checksum
- name: Get SHA1 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
register: sha1_checksum
- debug:
var: sha1_checksum.stat.checksum
- name: Get SHA256 checksum of file
win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: sha256
register: sha256_checksum
- debug:
var: sha256_checksum.stat.checksum
'''
RETURN = r'''
changed:
description: Whether anything was changed
returned: always
type: bool
sample: true
stat:
description: dictionary containing all the stat data
returned: success
type: complex
contains:
attributes:
description: Attributes of the file at path in raw form.
returned: success, path exists
type: str
sample: "Archive, Hidden"
checksum:
description: The checksum of a file based on checksum_algorithm specified.
returned: success, path exist, path is a file, get_checksum == True
checksum_algorithm specified is supported
type: str
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
creationtime:
description: The create time of the file represented in seconds since epoch.
returned: success, path exists
type: float
sample: 1477984205.15
exists:
description: If the path exists or not.
returned: success
type: bool
sample: true
extension:
description: The extension of the file at path.
returned: success, path exists, path is a file
type: str
sample: ".ps1"
filename:
description: The name of the file (without path).
returned: success, path exists, path is a file
type: str
sample: foo.ini
hlnk_targets:
description: List of other files pointing to the same file (hard links), excludes the current file.
returned: success, path exists
type: list
sample:
- C:\temp\file.txt
- C:\Windows\update.log
isarchive:
description: If the path is ready for archiving or not.
returned: success, path exists
type: bool
sample: true
isdir:
description: If the path is a directory or not.
returned: success, path exists
type: bool
sample: true
ishidden:
description: If the path is hidden or not.
returned: success, path exists
type: bool
sample: true
isjunction:
description: If the path is a junction point or not.
returned: success, path exists
type: bool
sample: true
islnk:
description: If the path is a symbolic link or not.
returned: success, path exists
type: bool
sample: true
isreadonly:
description: If the path is read only or not.
returned: success, path exists
type: bool
sample: true
isreg:
description: If the path is a regular file.
returned: success, path exists
type: bool
sample: true
isshared:
description: If the path is shared or not.
returned: success, path exists
type: bool
sample: true
lastaccesstime:
description: The last access time of the file represented in seconds since epoch.
returned: success, path exists
type: float
sample: 1477984205.15
lastwritetime:
description: The last modification time of the file represented in seconds since epoch.
returned: success, path exists
type: float
sample: 1477984205.15
lnk_source:
description: Target of the symlink normalized for the remote filesystem.
returned: success, path exists and the path is a symbolic link or junction point
type: str
sample: C:\temp\link
lnk_target:
description: Target of the symlink. Note that relative paths remain relative.
returned: success, path exists and the path is a symbolic link or junction point
type: str
sample: ..\link
nlink:
description: Number of links to the file (hard links).
returned: success, path exists
type: int
sample: 1
owner:
description: The owner of the file.
returned: success, path exists
type: str
sample: BUILTIN\Administrators
path:
description: The full absolute path to the file.
returned: success, path exists, file exists
type: str
sample: C:\foo.ini
sharename:
description: The name of share if folder is shared.
returned: success, path exists, file is a directory and isshared == True
type: str
sample: file-share
size:
description: The size in bytes of a file or folder.
returned: success, path exists, file is not a link
type: int
sample: 1024
''' | unknown | codeparrot/codeparrot-clean | ||
<?php
namespace Illuminate\Tests\Redis;
use Illuminate\Contracts\Redis\Connector;
use Illuminate\Foundation\Application;
use Illuminate\Redis\RedisManager;
use Mockery as m;
use PHPUnit\Framework\TestCase;
class RedisManagerExtensionTest extends TestCase
{
/**
* @var \Illuminate\Redis\RedisManager
*/
protected $redis;
protected function setUp(): void
{
parent::setUp();
$this->redis = new RedisManager(new Application, 'my_custom_driver', [
'default' => [
'host' => 'some-host',
'port' => 'some-port',
'database' => 5,
'timeout' => 0.5,
],
'clusters' => [
'my-cluster' => [
[
'host' => 'some-host',
'port' => 'some-port',
'database' => 5,
'timeout' => 0.5,
],
],
],
]);
$this->redis->extend('my_custom_driver', function () {
return new FakeRedisConnector;
});
}
public function testUsingCustomRedisConnectorWithSingleRedisInstance()
{
$this->assertSame(
'my-redis-connection', $this->redis->resolve()
);
}
public function testUsingCustomRedisConnectorWithRedisClusterInstance()
{
$this->assertSame(
'my-redis-cluster-connection', $this->redis->resolve('my-cluster')
);
}
public function testParseConnectionConfigurationForCluster()
{
$name = 'my-cluster';
$config = [
[
'url1',
'url2',
'url3',
],
];
$redis = new RedisManager(new Application, 'my_custom_driver', [
'clusters' => [
$name => $config,
],
]);
$redis->extend('my_custom_driver', function () use ($config) {
return m::mock(Connector::class)
->shouldReceive('connectToCluster')
->once()
->withArgs(function ($configArg) use ($config) {
return $config === $configArg;
})
->getMock();
});
$redis->resolve($name);
}
}
class FakeRedisConnector implements Connector
{
/**
* Create a new clustered Predis connection.
*
* @param array $config
* @param array $options
* @return string
*/
public function connect(array $config, array $options)
{
return 'my-redis-connection';
}
/**
* Create a new clustered Predis connection.
*
* @param array $config
* @param array $clusterOptions
* @param array $options
* @return string
*/
public function connectToCluster(array $config, array $clusterOptions, array $options)
{
return 'my-redis-cluster-connection';
}
} | php | github | https://github.com/laravel/framework | tests/Redis/RedisManagerExtensionTest.php |
#!/usr/bin/python
# -*- coding: ascii -*-
###########################################################################
# PBKDF2.py - PKCS#5 v2.0 Password-Based Key Derivation
#
# Copyright (C) 2007, 2008 Dwayne C. Litzenberger <dlitz@dlitz.net>
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR PROVIDES THIS SOFTWARE ``AS IS'' AND ANY EXPRESSED OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Country of origin: Canada
#
###########################################################################
# Sample PBKDF2 usage:
# from Crypto.Cipher import AES
# from PBKDF2 import PBKDF2
# import os
#
# salt = os.urandom(8) # 64-bit salt
# key = PBKDF2("This passphrase is a secret.", salt).read(32) # 256-bit key
# iv = os.urandom(16) # 128-bit IV
# cipher = AES.new(key, AES.MODE_CBC, iv)
# ...
#
# Sample crypt() usage:
# from PBKDF2 import crypt
# pwhash = crypt("secret")
# alleged_pw = raw_input("Enter password: ")
# if pwhash == crypt(alleged_pw, pwhash):
# print "Password good"
# else:
# print "Invalid password"
#
###########################################################################
# History:
#
# 2007-07-27 Dwayne C. Litzenberger <dlitz@dlitz.net>
# - Initial Release (v1.0)
#
# 2007-07-31 Dwayne C. Litzenberger <dlitz@dlitz.net>
# - Bugfix release (v1.1)
# - SECURITY: The PyCrypto XOR cipher (used, if available, in the _strxor
# function in the previous release) silently truncates all keys to 64
# bytes. The way it was used in the previous release, this would only be
# problem if the pseudorandom function that returned values larger than
# 64 bytes (so SHA1, SHA256 and SHA512 are fine), but I don't like
# anything that silently reduces the security margin from what is
# expected.
#
# 2008-06-17 Dwayne C. Litzenberger <dlitz@dlitz.net>
# - Compatibility release (v1.2)
# - Add support for older versions of Python (2.2 and 2.3).
#
###########################################################################
__version__ = "1.2"
from builtins import chr
from builtins import zip
from builtins import range
from builtins import object
import string
from struct import pack
from binascii import b2a_hex
from random import randint
try:
# Use PyCrypto (if available)
from Crypto.Hash import HMAC, SHA as SHA1
except ImportError:
# PyCrypto not available. Use the Python standard library.
import hmac as HMAC
import sha as SHA1
def strxor(a, b):
return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])
def b64encode(data, chars="+/"):
tt = string.maketrans("+/", chars)
return data.encode('base64').replace("\n", "").translate(tt)
class PBKDF2(object):
"""PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation
This implementation takes a passphrase and a salt (and optionally an
iteration count, a digest module, and a MAC module) and provides a
file-like object from which an arbitrarily-sized key can be read.
If the passphrase and/or salt are unicode objects, they are encoded as
UTF-8 before they are processed.
The idea behind PBKDF2 is to derive a cryptographic key from a
passphrase and a salt.
PBKDF2 may also be used as a strong salted password hash. The
'crypt' function is provided for that purpose.
Remember: Keys generated using PBKDF2 are only as strong as the
passphrases they are derived from.
"""
def __init__(self, passphrase, salt, iterations=1000,
digestmodule=SHA1, macmodule=HMAC):
self.__macmodule = macmodule
self.__digestmodule = digestmodule
self._setup(passphrase, salt, iterations, self._pseudorandom)
def _pseudorandom(self, key, msg):
"""Pseudorandom function. e.g. HMAC-SHA1"""
return self.__macmodule.new(key=key, msg=msg,
digestmod=self.__digestmodule).digest()
def read(self, bytes):
"""Read the specified number of key bytes."""
if self.closed:
raise ValueError("file-like object is closed")
size = len(self.__buf)
blocks = [self.__buf]
i = self.__blockNum
while size < bytes:
i += 1
if i > 0xffffffff or i < 1:
# We could return "" here, but
raise OverflowError("derived key too long")
block = self.__f(i)
blocks.append(block)
size += len(block)
buf = "".join(blocks)
retval = buf[:bytes]
self.__buf = buf[bytes:]
self.__blockNum = i
return retval
def __f(self, i):
# i must fit within 32 bits
assert 1 <= i <= 0xffffffff
U = self.__prf(self.__passphrase, self.__salt + pack("!L", i))
result = U
for j in range(2, 1+self.__iterations):
U = self.__prf(self.__passphrase, U)
result = strxor(result, U)
return result
def hexread(self, octets):
"""Read the specified number of octets. Return them as hexadecimal.
Note that len(obj.hexread(n)) == 2*n.
"""
return b2a_hex(self.read(octets))
def _setup(self, passphrase, salt, iterations, prf):
# Sanity checks:
# passphrase and salt must be str or unicode (in the latter
# case, we convert to UTF-8)
if isinstance(passphrase, str):
passphrase = passphrase.encode("UTF-8")
if not isinstance(passphrase, str):
raise TypeError("passphrase must be str or unicode")
if isinstance(salt, str):
salt = salt.encode("UTF-8")
if not isinstance(salt, str):
raise TypeError("salt must be str or unicode")
# iterations must be an integer >= 1
if not isinstance(iterations, (int, int)):
raise TypeError("iterations must be an integer")
if iterations < 1:
raise ValueError("iterations must be at least 1")
# prf must be callable
if not callable(prf):
raise TypeError("prf must be callable")
self.__passphrase = passphrase
self.__salt = salt
self.__iterations = iterations
self.__prf = prf
self.__blockNum = 0
self.__buf = ""
self.closed = False
def close(self):
"""Close the stream."""
if not self.closed:
del self.__passphrase
del self.__salt
del self.__iterations
del self.__prf
del self.__blockNum
del self.__buf
self.closed = True
def crypt(word, salt=None, iterations=None):
"""PBKDF2-based unix crypt(3) replacement.
The number of iterations specified in the salt overrides the 'iterations'
parameter.
The effective hash length is 192 bits.
"""
# Generate a (pseudo-)random salt if the user hasn't provided one.
if salt is None:
salt = _makesalt()
# salt must be a string or the us-ascii subset of unicode
if isinstance(salt, str):
salt = salt.encode("us-ascii")
if not isinstance(salt, str):
raise TypeError("salt must be a string")
# word must be a string or unicode (in the latter case, we convert to UTF-8)
if isinstance(word, str):
word = word.encode("UTF-8")
if not isinstance(word, str):
raise TypeError("word must be a string or unicode")
# Try to extract the real salt and iteration count from the salt
if salt.startswith("$p5k2$"):
(iterations, salt, dummy) = salt.split("$")[2:5]
if iterations == "":
iterations = 400
else:
converted = int(iterations, 16)
if iterations != "%x" % converted: # lowercase hex, minimum digits
raise ValueError("Invalid salt")
iterations = converted
if not (iterations >= 1):
raise ValueError("Invalid salt")
# Make sure the salt matches the allowed character set
allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./"
for ch in salt:
if ch not in allowed:
raise ValueError("Illegal character %r in salt" % (ch,))
if iterations is None or iterations == 400:
iterations = 400
salt = "$p5k2$$" + salt
else:
salt = "$p5k2$%x$%s" % (iterations, salt)
rawhash = PBKDF2(word, salt, iterations).read(24)
return salt + "$" + b64encode(rawhash, "./")
# Add crypt as a static method of the PBKDF2 class
# This makes it easier to do "from PBKDF2 import PBKDF2" and still use
# crypt.
PBKDF2.crypt = staticmethod(crypt)
def _makesalt():
"""Return a 48-bit pseudorandom salt for crypt().
This function is not suitable for generating cryptographic secrets.
"""
binarysalt = "".join([pack("@H", randint(0, 0xffff)) for i in range(3)])
return b64encode(binarysalt, "./")
def test_pbkdf2():
"""Module self-test"""
from binascii import a2b_hex
#
# Test vectors from RFC 3962
#
# Test 1
result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1).read(16)
expected = a2b_hex("cdedb5281bb2f801565a1122b2563515")
if result != expected:
raise RuntimeError("self-test failed")
# Test 2
result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1200).hexread(32)
expected = ("5c08eb61fdf71e4e4ec3cf6ba1f5512b"
"a7e52ddbc5e5142f708a31e2e62b1e13")
if result != expected:
raise RuntimeError("self-test failed")
# Test 3
result = PBKDF2("X"*64, "pass phrase equals block size", 1200).hexread(32)
expected = ("139c30c0966bc32ba55fdbf212530ac9"
"c5ec59f1a452f5cc9ad940fea0598ed1")
if result != expected:
raise RuntimeError("self-test failed")
# Test 4
result = PBKDF2("X"*65, "pass phrase exceeds block size", 1200).hexread(32)
expected = ("9ccad6d468770cd51b10e6a68721be61"
"1a8b4d282601db3b36be9246915ec82a")
if result != expected:
raise RuntimeError("self-test failed")
#
# Other test vectors
#
# Chunked read
f = PBKDF2("kickstart", "workbench", 256)
result = f.read(17)
result += f.read(17)
result += f.read(1)
result += f.read(2)
result += f.read(3)
expected = PBKDF2("kickstart", "workbench", 256).read(40)
if result != expected:
raise RuntimeError("self-test failed")
#
# crypt() test vectors
#
# crypt 1
result = crypt("cloadm", "exec")
expected = '$p5k2$$exec$r1EWMCMk7Rlv3L/RNcFXviDefYa0hlql'
if result != expected:
raise RuntimeError("self-test failed")
# crypt 2
result = crypt("gnu", '$p5k2$c$u9HvcT4d$.....')
expected = '$p5k2$c$u9HvcT4d$Sd1gwSVCLZYAuqZ25piRnbBEoAesaa/g'
if result != expected:
raise RuntimeError("self-test failed")
# crypt 3
result = crypt("dcl", "tUsch7fU", iterations=13)
expected = "$p5k2$d$tUsch7fU$nqDkaxMDOFBeJsTSfABsyn.PYUXilHwL"
if result != expected:
raise RuntimeError("self-test failed")
# crypt 4 (unicode)
result = crypt(u'\u0399\u03c9\u03b1\u03bd\u03bd\u03b7\u03c2',
'$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ')
expected = '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ'
if result != expected:
raise RuntimeError("self-test failed")
if __name__ == '__main__':
test_pbkdf2()
# vim:set ts=4 sw=4 sts=4 expandtab: | unknown | codeparrot/codeparrot-clean | ||
#ifndef HEADER_CURL_TFTP_H
#define HEADER_CURL_TFTP_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
extern const struct Curl_scheme Curl_scheme_tftp;
#define TFTP_BLKSIZE_MIN 8
#define TFTP_BLKSIZE_MAX 65464
#endif /* HEADER_CURL_TFTP_H */ | c | github | https://github.com/curl/curl | lib/tftp.h |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Replaces gyp files in tree with files from here that
make the build use system libraries.
"""
import optparse
import os.path
import shutil
import sys
REPLACEMENTS = {
'use_system_expat': 'third_party/expat/expat.gyp',
'use_system_ffmpeg': 'third_party/ffmpeg/ffmpeg.gyp',
'use_system_flac': 'third_party/flac/flac.gyp',
'use_system_harfbuzz': 'third_party/harfbuzz-ng/harfbuzz.gyp',
'use_system_icu': 'third_party/icu/icu.gyp',
'use_system_jsoncpp': 'third_party/jsoncpp/jsoncpp.gyp',
'use_system_libevent': 'third_party/libevent/libevent.gyp',
'use_system_libjpeg': 'third_party/libjpeg/libjpeg.gyp',
'use_system_libpng': 'third_party/libpng/libpng.gyp',
'use_system_libusb': 'third_party/libusb/libusb.gyp',
'use_system_libvpx': 'third_party/libvpx/libvpx.gyp',
'use_system_libwebp': 'third_party/libwebp/libwebp.gyp',
'use_system_libxml': 'third_party/libxml/libxml.gyp',
'use_system_libxslt': 'third_party/libxslt/libxslt.gyp',
'use_system_opus': 'third_party/opus/opus.gyp',
'use_system_re2': 'third_party/re2/re2.gyp',
'use_system_snappy': 'third_party/snappy/snappy.gyp',
'use_system_speex': 'third_party/speex/speex.gyp',
'use_system_sqlite': 'third_party/sqlite/sqlite.gyp',
'use_system_v8': 'v8/tools/gyp/v8.gyp',
'use_system_zlib': 'third_party/zlib/zlib.gyp',
}
def DoMain(argv):
my_dirname = os.path.dirname(__file__)
source_tree_root = os.path.abspath(
os.path.join(my_dirname, '..', '..', '..'))
parser = optparse.OptionParser()
# Accept arguments in gyp command-line syntax, so that the caller can re-use
# command-line for this script and gyp.
parser.add_option('-D', dest='defines', action='append')
parser.add_option('--undo', action='store_true')
options, args = parser.parse_args(argv)
for flag, path in REPLACEMENTS.items():
if '%s=1' % flag not in options.defines:
continue
if options.undo:
# Restore original file, and also remove the backup.
# This is meant to restore the source tree to its original state.
os.rename(os.path.join(source_tree_root, path + '.orig'),
os.path.join(source_tree_root, path))
else:
# Create a backup copy for --undo.
shutil.copyfile(os.path.join(source_tree_root, path),
os.path.join(source_tree_root, path + '.orig'))
# Copy the gyp file from directory of this script to target path.
shutil.copyfile(os.path.join(my_dirname, os.path.basename(path)),
os.path.join(source_tree_root, path))
return 0
if __name__ == '__main__':
sys.exit(DoMain(sys.argv)) | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the DLRM benchmark
#
# Utility function(s) to download and pre-process public data sets
# - Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
# - Criteo Terabyte Dataset
# https://labs.criteo.com/2013/12/download-terabyte-click-logs
#
# After downloading dataset, run:
# getCriteoAdData(
# datafile="<path-to-train.txt>",
# o_filename=kaggleAdDisplayChallenge_processed.npz,
# max_ind_range=-1,
# sub_sample_rate=0.0,
# days=7,
# data_split='train',
# randomize='total',
# criteo_kaggle=True,
# memory_map=False
# )
# getCriteoAdData(
# datafile="<path-to-day_{0,...,23}>",
# o_filename=terabyte_processed.npz,
# max_ind_range=-1,
# sub_sample_rate=0.0,
# days=24,
# data_split='train',
# randomize='total',
# criteo_kaggle=False,
# memory_map=False
# )
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
# import os
from os import path
# import io
# from io import StringIO
# import collections as coll
import numpy as np
def convertUStringToDistinctIntsDict(mat, convertDicts, counts):
    """Map a 2-D array of unicode strings to per-column integer ids.

    Each column keeps its own dictionary: the first distinct string seen
    in a column gets id 0, the next gets 1, and so on (first-appearance
    order).

    Args:
        mat (np.ndarray): 2-D array of strings to convert.
        convertDicts (list): per-column {string: id} dictionaries; rebuilt
            from scratch if the list length does not match mat's width.
        counts (list): per-column number of distinct categories seen so far.

    Returns:
        (out, convertDicts, counts): out is a float ndarray of ids with the
        same shape as mat, plus the (possibly updated) dicts and counts.
    """
    n_cols = mat.shape[1]
    # Re-initialize the bookkeeping when it does not line up with the input.
    if len(convertDicts) != n_cols or len(counts) != n_cols:
        print("Length of convertDicts or counts does not match input shape")
        print("Generating convertDicts and counts...")
        convertDicts = [{} for _ in range(n_cols)]
        counts = [0 for _ in range(n_cols)]
    out = np.zeros(mat.shape)
    for col in range(n_cols):
        for row in range(mat.shape[0]):
            key = mat[row, col]
            # First sighting of this category: assign the next free id.
            if key not in convertDicts[col]:
                convertDicts[col][key] = counts[col]
                counts[col] += 1
            out[row, col] = convertDicts[col][key]
    return out, convertDicts, counts
def convertUStringToDistinctIntsUnique(mat, mat_uni, counts):
    """Map a 2-D array of strings to per-column integer ids via np.unique.

    Unlike the dict-based variant, ids reflect the *sorted* order of the
    distinct strings accumulated so far in each column, not the order of
    first appearance.

    Args:
        mat (np.ndarray): 2-D array (samples x categorical features).
        mat_uni (list): per-column arrays of previously seen distinct
            values; regenerated if the list length does not match mat's
            width.
        counts (list): per-column distinct-category counts.

    Returns:
        (out, mat_uni, counts): float ndarray of ids shaped like mat, plus
        the updated unique-value arrays and counts.
    """
    n_cols = mat.shape[1]
    if len(mat_uni) != n_cols or len(counts) != n_cols:
        print("Length of mat_unique or counts does not match input shape")
        print("Generating mat_unique and counts...")
        mat_uni = [np.array([]) for _ in range(n_cols)]
        counts = [0 for _ in range(n_cols)]
    out = np.zeros(mat.shape)
    ind_map = [np.array([]) for _ in range(n_cols)]
    for col in range(n_cols):
        n_prev = mat_uni[col].size
        # Fold the new column into the running set of unique values; the
        # inverse indices of the appended tail give each sample's id.
        combined = np.concatenate((mat_uni[col], mat[:, col]))
        mat_uni[col], ind_map[col] = np.unique(combined, return_inverse=True)
        out[:, col] = ind_map[col][n_prev:]
        counts[col] = mat_uni[col].size
    return out, mat_uni, counts
def processCriteoAdData(d_path, d_file, npzfile, split, convertDicts, pre_comp_counts):
    # Process Kaggle Display Advertising Challenge or Terabyte Dataset
    # by converting unicode strings in X_cat to integers and
    # converting negative integer values in X_int.
    #
    # Loads data in the form "{kaggle|terabyte}_day_i.npz" where i is the day,
    # and writes "..._day_i_processed.npz" next to it. Days whose processed
    # file already exists are skipped.
    #
    # Inputs:
    #   d_path (str): path for {kaggle|terabyte}_day_i.npz files
    #       (unused here; paths are derived from npzfile — kept for
    #       call-site compatibility, as are d_file and pre_comp_counts)
    #   npzfile (str): common path prefix of the per-day .npz files
    #   split (int): total number of splits in the dataset (typically 7 or 24)
    #   convertDicts (list): 26 pre-computed {category string: int id} dicts
    # process data if not all files exist
    for i in range(split):
        filename_i = npzfile + "_{0}_processed.npz".format(i)
        if path.exists(filename_i):
            print("Using existing " + filename_i, end="\r")
        else:
            with np.load(npzfile + "_{0}.npz".format(i)) as data:
                # Read each npz member exactly once: every data["key"]
                # access on an NpzFile decompresses that member again, so
                # the original per-element data["X_cat_t"] lookups paid the
                # decompression cost 26 times over.
                X_cat_t_raw = data["X_cat_t"]
                X_int = data["X_int"]
                y = data["y"]
            # categorical features: map strings to pre-computed integer ids
            X_cat_t = np.zeros(X_cat_t_raw.shape)
            for j in range(26):
                mapping = convertDicts[j]
                for k, x in enumerate(X_cat_t_raw[j, :]):
                    X_cat_t[j, k] = mapping[x]
            # continuous features: clamp negatives to zero
            X_int[X_int < 0] = 0
            np.savez_compressed(
                filename_i,
                X_cat=np.transpose(X_cat_t),  # transpose of the data
                X_int=X_int,
                y=y,
            )
            print("Processed " + filename_i, end="\r")
    print("")
    # sanity check (applicable only if counts have been pre-computed & are re-computed)
    # for j in range(26):
    #     if pre_comp_counts[j] != counts[j]:
    #         sys.exit("ERROR: Sanity check on counts has failed")
    # print("\nSanity check on counts passed")
    return
def concatCriteoAdData(
d_path,
d_file,
npzfile,
trafile,
days,
data_split,
randomize,
total_per_file,
total_count,
memory_map,
o_filename
):
# Concatenates different days and saves the result.
#
# Inputs:
# days (int): total number of days in the dataset (typically 7 or 24)
# d_path (str): path for {kaggle|terabyte}_day_i.npz files
# o_filename (str): output file name
#
# Output:
# o_file (str): output file path
if memory_map:
# dataset break up per fea
# tar_fea = 1 # single target
den_fea = 13 # 13 dense features
spa_fea = 26 # 26 sparse features
# tad_fea = tar_fea + den_fea
# tot_fea = tad_fea + spa_fea
# create offset per file
offset_per_file = np.array([0] + [x for x in total_per_file])
for i in range(days):
offset_per_file[i + 1] += offset_per_file[i]
'''
# Approach 1, 2 and 3 use indices, while Approach 4 does not use them
# create indices
indices = np.arange(total_count)
if data_split == "none":
if randomize == "total":
indices = np.random.permutation(indices)
else:
indices = np.array_split(indices, offset_per_file[1:-1])
# randomize train data (per day)
if randomize == "day": # or randomize == "total":
for i in range(len(indices) - 1):
indices[i] = np.random.permutation(indices[i])
print("Randomized indices per day ...")
train_indices = np.concatenate(indices[:-1])
test_indices = indices[-1]
# randomize train data (across days)
if randomize == "total":
train_indices = np.random.permutation(train_indices)
print("Randomized indices across days ...")
indices = np.concatenate((train_indices, test_indices))
# no reordering
# indices = np.arange(total_count)
'''
'''
# Approach 1: simple and slow (no grouping is used)
# check if data already exists
recreate_flag = False
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
if path.exists(filename_j):
print("Using existing " + filename_j)
else:
recreate_flag = True
# load, reorder and concatenate data (memmap all reordered files per feature)
if recreate_flag:
# init reordered files (.npy appended automatically)
z = np.zeros((total_count))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered".format(j)
np.save(filename_j, z)
print("Creating " + filename_j)
for i in range(days):
filename_i = d_path + npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
X_cat_t = np.transpose(data["X_cat"])
X_int_t = np.transpose(data["X_int"])
y = data["y"]
size = len(y)
# sanity check
if total_per_file[i] != size:
sys.exit("ERROR: sanity check on number of samples failed")
# setup start and end ranges
start = offset_per_file[i]
end = offset_per_file[i + 1]
# print(filename_i)
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
fj = np.load(filename_j, mmap_mode='r+')
if j < tar_fea:
fj[indices[start:end]] = y
elif tar_fea <= j and j < tad_fea:
fj[indices[start:end]] = X_int_t[j - tar_fea, :]
else:
fj[indices[start:end]] = X_cat_t[j - tad_fea, :]
del fj
else:
print("Reordered fea files already exist, skipping ...")
# check if data already exists
recreate_flag = False
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
if path.exists(filename_i):
print("Using existing " + filename_i)
else:
recreate_flag = True
# split reordered data by files (memmap all reordered files per feature)
# on the day boundary del the file object and memmap again
if recreate_flag:
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
size = total_per_file[i]
X_int_t = np.zeros((den_fea, size))
X_cat_t = np.zeros((spa_fea, size))
# setup start and end ranges
start = offset_per_file[i]
end = offset_per_file[i + 1]
print("Creating " + filename_i)
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
fj = np.load(filename_j, mmap_mode='r')
if j < tar_fea:
y = fj[start:end]
elif tar_fea <= j and j < tad_fea:
X_int_t[j - tar_fea, :] = fj[start:end]
else:
X_cat_t[j - tad_fea, :] = fj[start:end]
del fj
np.savez_compressed(
filename_i,
X_cat=np.transpose(X_cat_t), # transpose of the data
X_int=np.transpose(X_int_t), # transpose of the data
y=y,
)
else:
print("Reordered day files already exist, skipping ...")
'''
'''
# Approach 2: group days
# check if data already exists
recreate_flag = False
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
if path.exists(filename_j):
print("Using existing " + filename_j)
else:
recreate_flag = True
# load, reorder and concatenate data (memmap all reordered files per feature)
if recreate_flag:
# init reordered files (.npy appended automatically)
z = np.zeros((total_count))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered".format(j)
np.save(filename_j, z)
print("Creating " + filename_j)
group_day = 3 # e.g. 8, 4 or 3
group_num = days // group_day
file_group = [i*group_day for i in range(group_num)] + [days]
for ii in range(group_num):
# for last may be group_size != group_num, therefore reset it below
group_size = file_group[ii + 1] - file_group[ii]
X_cat_t = [0]*group_size
X_int_t = [0]*group_size
y = [0]*group_size
start = [0]*group_size
end = [0]*group_size
for ig in range(group_size):
i = file_group[ii] + ig
filename_i = d_path + npzfile + "_{0}_processed.npz".format(i)
# setup start and end ranges
start[ig] = offset_per_file[i]
end[ig] = offset_per_file[i + 1]
# print(filename_i)
# load a group of files
with np.load(filename_i) as data:
X_cat_t[ig] = np.transpose(data["X_cat"])
X_int_t[ig] = np.transpose(data["X_int"])
y[ig] = data["y"]
# sanity check
if total_per_file[i] != len(y[ig]):
sys.exit("ERROR: sanity check on number of samples failed")
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end[ig]-start[ig]) + "=" + str(total_per_file[i]))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
fj = np.load(filename_j, mmap_mode='r+')
for ig in range(group_size):
if j < tar_fea:
fj[indices[start[ig]:end[ig]]] = y[ig]
elif tar_fea <= j and j < tad_fea:
fj[indices[start[ig]:end[ig]]] = X_int_t[ig][j - tar_fea, :]
else:
fj[indices[start[ig]:end[ig]]] = X_cat_t[ig][j - tad_fea, :]
del fj
else:
print("Reordered fea files already exist, skipping ...")
# check if data already exists
recreate_flag = False
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
if path.exists(filename_i):
print("Using existing " + filename_i)
else:
recreate_flag = True
# split reordered data by files (memmap all reordered files per feature)
# on the day boundary del the file object and memmap again
if recreate_flag:
for ii in range(group_num):
# for last may be group_size != group_num, therefore reset it below
group_size = file_group[ii + 1] - file_group[ii]
X_cat_t= []; X_int_t = []
for ig in range(group_size):
i = file_group[ii] + ig
X_int_t.append(np.zeros((den_fea, total_per_file[i])))
X_cat_t.append(np.zeros((spa_fea, total_per_file[i])))
y = [0]*group_size
start = [0]*group_size
end = [0]*group_size
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
fj = np.load(filename_j, mmap_mode='r')
# load a group of files
for ig in range(group_size):
i = file_group[ii] + ig
# setup start and end ranges
start[ig] = offset_per_file[i]
end[ig] = offset_per_file[i + 1]
# load data for the group of files
if j < tar_fea:
y[ig] = fj[start[ig]:end[ig]]
elif tar_fea <= j and j < tad_fea:
X_int_t[ig][j - tar_fea, :] = fj[start[ig]:end[ig]]
else:
X_cat_t[ig][j - tad_fea, :] = fj[start[ig]:end[ig]]
del fj
for ig in range(group_size):
i = file_group[ii] + ig
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
print("Creating " + filename_i)
np.savez_compressed(
filename_i,
X_cat=np.transpose(X_cat_t[ig]), # transpose of the data
X_int=np.transpose(X_int_t[ig]), # transpose of the data
y=y[ig],
)
else:
print("Reordered day files already exist, skipping ...")
'''
'''
# Approach 3: group features
# check if data already exists
group_fea = 5 # e.g. 8, 5 or 4
group_num = tot_fea // group_fea
if tot_fea % group_fea != 0: # sanity check
sys.exit("ERROR: the group_fea must divided tot_fea evenly.")
recreate_flag = False
for jn in range(group_num):
filename_j = trafile + "_{0}_reordered{1}.npy".format(
jn, group_fea
)
if path.exists(filename_j):
print("Using existing " + filename_j)
else:
recreate_flag = True
# load, reorder and concatenate data (memmap all reordered files per feature)
if recreate_flag:
# init reordered files (.npy appended automatically)
z = np.zeros((group_fea, total_count))
for jn in range(group_num):
filename_j = trafile + "_{0}_reordered{1}".format(
jn, group_fea
)
np.save(filename_j, z)
print("Creating " + filename_j)
for i in range(days):
filename_i = d_path + npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
X_cat_t = np.transpose(data["X_cat"])
X_int_t = np.transpose(data["X_int"])
y = data["y"]
size = len(y)
# sanity check
if total_per_file[i] != size:
sys.exit("ERROR: sanity check on number of samples failed")
# setup start and end ranges
start = offset_per_file[i]
end = offset_per_file[i + 1]
# print(filename_i)
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
for jn in range(group_num):
filename_j = trafile + "_{0}_reordered{1}.npy".format(
jn, group_fea
)
fj = np.load(filename_j, mmap_mode='r+')
for jg in range(group_fea):
j = jn * group_fea + jg
# print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg))
if j < tar_fea:
fj[jg, indices[start:end]] = y
elif tar_fea <= j and j < tad_fea:
fj[jg, indices[start:end]] = X_int_t[j - tar_fea, :]
else:
fj[jg, indices[start:end]] = X_cat_t[j - tad_fea, :]
del fj
else:
print("Reordered fea files already exist, skipping ...")
# check if data already exists
recreate_flag = False
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
if path.exists(filename_i):
print("Using existing" + filename_i)
else:
recreate_flag = True
# split reordered data by files (memmap all reordered files per feature)
# on the day boundary del the file object and memmap again
if recreate_flag:
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
size = total_per_file[i]
X_int_t = np.zeros((den_fea, size))
X_cat_t = np.zeros((spa_fea, size))
# setup start and end ranges
start = offset_per_file[i]
end = offset_per_file[i + 1]
print("Creating " + filename_i)
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
for jn in range(group_num):
filename_j = trafile + "_{0}_reordered{1}.npy".format(
jn, group_fea
)
fj = np.load(filename_j, mmap_mode='r')
for jg in range(group_fea):
j = jn * group_fea + jg
# print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg))
if j < tar_fea:
y = fj[jg, start:end]
elif tar_fea <= j and j < tad_fea:
X_int_t[j - tar_fea, :] = fj[jg, start:end]
else:
X_cat_t[j - tad_fea, :] = fj[jg, start:end]
del fj
np.savez_compressed(
filename_i,
X_cat=np.transpose(X_cat_t), # transpose of the data
X_int=np.transpose(X_int_t), # transpose of the data
y=y,
)
else:
print("Reordered day files already exist, skipping ...")
'''
# Approach 4: Fisher-Yates-Rao (FYR) shuffle algorithm
# 1st pass of FYR shuffle
# check if data already exists
recreate_flag = False
for j in range(days):
filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j)
filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j)
filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j)
if (
path.exists(filename_j_y)
and path.exists(filename_j_d)
and path.exists(filename_j_s)
):
print(
"Using existing\n"
+ filename_j_y + "\n"
+ filename_j_d + "\n"
+ filename_j_s
)
else:
recreate_flag = True
# reorder across buckets using sampling
if recreate_flag:
# init intermediate files (.npy appended automatically)
for j in range(days):
filename_j_y = npzfile + "_{0}_intermediate_y".format(j)
filename_j_d = npzfile + "_{0}_intermediate_d".format(j)
filename_j_s = npzfile + "_{0}_intermediate_s".format(j)
np.save(filename_j_y, np.zeros((total_per_file[j])))
np.save(filename_j_d, np.zeros((total_per_file[j], den_fea)))
np.save(filename_j_s, np.zeros((total_per_file[j], spa_fea)))
# start processing files
total_counter = [0] * days
for i in range(days):
filename_i = npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
X_cat = data["X_cat"]
X_int = data["X_int"]
y = data["y"]
size = len(y)
# sanity check
if total_per_file[i] != size:
sys.exit("ERROR: sanity check on number of samples failed")
# debug prints
print("Reordering (1st pass) " + filename_i)
# create buckets using sampling of random ints
# from (discrete) uniform distribution
buckets = []
for _j in range(days):
buckets.append([])
counter = [0] * days
days_to_sample = days if data_split == "none" else days - 1
if randomize == "total":
rand_u = np.random.randint(low=0, high=days_to_sample, size=size)
for k in range(size):
# sample and make sure elements per buckets do not overflow
if data_split == "none" or i < days - 1:
# choose bucket
p = rand_u[k]
# retry of the bucket is full
while total_counter[p] + counter[p] >= total_per_file[p]:
p = np.random.randint(low=0, high=days_to_sample)
else: # preserve the last day/bucket if needed
p = i
buckets[p].append(k)
counter[p] += 1
else: # randomize is day or none
for k in range(size):
# do not sample, preserve the data in this bucket
p = i
buckets[p].append(k)
counter[p] += 1
# sanity check
if np.sum(counter) != size:
sys.exit("ERROR: sanity check on number of samples failed")
# debug prints
# print(counter)
# print(str(np.sum(counter)) + " = " + str(size))
# print([len(x) for x in buckets])
# print(total_counter)
# partially feel the buckets
for j in range(days):
filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j)
filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j)
filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j)
start = total_counter[j]
end = total_counter[j] + counter[j]
# target buckets
fj_y = np.load(filename_j_y, mmap_mode='r+')
# print("start=" + str(start) + " end=" + str(end)
# + " end - start=" + str(end - start) + " "
# + str(fj_y[start:end].shape) + " "
# + str(len(buckets[j])))
fj_y[start:end] = y[buckets[j]]
del fj_y
# dense buckets
fj_d = np.load(filename_j_d, mmap_mode='r+')
# print("start=" + str(start) + " end=" + str(end)
# + " end - start=" + str(end - start) + " "
# + str(fj_d[start:end, :].shape) + " "
# + str(len(buckets[j])))
fj_d[start:end, :] = X_int[buckets[j], :]
del fj_d
# sparse buckets
fj_s = np.load(filename_j_s, mmap_mode='r+')
# print("start=" + str(start) + " end=" + str(end)
# + " end - start=" + str(end - start) + " "
# + str(fj_s[start:end, :].shape) + " "
# + str(len(buckets[j])))
fj_s[start:end, :] = X_cat[buckets[j], :]
del fj_s
# update counters for next step
total_counter[j] += counter[j]
# 2nd pass of FYR shuffle
# check if data already exists
for j in range(days):
filename_j = npzfile + "_{0}_reordered.npz".format(j)
if path.exists(filename_j):
print("Using existing " + filename_j)
else:
recreate_flag = True
# reorder within buckets
if recreate_flag:
for j in range(days):
filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j)
filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j)
filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j)
fj_y = np.load(filename_j_y)
fj_d = np.load(filename_j_d)
fj_s = np.load(filename_j_s)
indices = range(total_per_file[j])
if randomize == "day" or randomize == "total":
if data_split == "none" or j < days - 1:
indices = np.random.permutation(range(total_per_file[j]))
filename_r = npzfile + "_{0}_reordered.npz".format(j)
print("Reordering (2nd pass) " + filename_r)
np.savez_compressed(
filename_r,
X_cat=fj_s[indices, :],
X_int=fj_d[indices, :],
y=fj_y[indices],
)
'''
# sanity check (under no reordering norms should be zero)
for i in range(days):
filename_i_o = npzfile + "_{0}_processed.npz".format(i)
print(filename_i_o)
with np.load(filename_i_o) as data_original:
X_cat_o = data_original["X_cat"]
X_int_o = data_original["X_int"]
y_o = data_original["y"]
filename_i_r = npzfile + "_{0}_reordered.npz".format(i)
print(filename_i_r)
with np.load(filename_i_r) as data_reordered:
X_cat_r = data_reordered["X_cat"]
X_int_r = data_reordered["X_int"]
y_r = data_reordered["y"]
print(np.linalg.norm(y_o - y_r))
print(np.linalg.norm(X_int_o - X_int_r))
print(np.linalg.norm(X_cat_o - X_cat_r))
'''
else:
print("Concatenating multiple days into %s.npz file" % str(d_path + o_filename))
# load and concatenate data
for i in range(days):
filename_i = npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
if i == 0:
X_cat = data["X_cat"]
X_int = data["X_int"]
y = data["y"]
else:
X_cat = np.concatenate((X_cat, data["X_cat"]))
X_int = np.concatenate((X_int, data["X_int"]))
y = np.concatenate((y, data["y"]))
print("Loaded day:", i, "y = 1:", len(y[y == 1]), "y = 0:", len(y[y == 0]))
with np.load(d_path + d_file + "_fea_count.npz") as data:
counts = data["counts"]
print("Loaded counts!")
np.savez_compressed(
d_path + o_filename + ".npz",
X_cat=X_cat,
X_int=X_int,
y=y,
counts=counts,
)
return d_path + o_filename + ".npz"
def transformCriteoAdData(X_cat, X_int, y, days, data_split, randomize, total_per_file):
    """Transform Criteo (Kaggle/Terabyte) data into final numeric form.

    Applies log(x + 1) to the dense features, casts labels to float32 and
    categorical indices to int64, and optionally splits the data into
    training/validation/test index sets.

    Args:
        X_cat (ndarray): preprocessed categorical features (integers).
        X_int (ndarray): dense (continuous) features (integers).
        y (ndarray): labels.
        days (int): number of day files the data was divided into.
        data_split (str): 'train' to produce train/val/test splits,
            anything else to return the whole dataset.
        randomize (str): 'none' (no shuffling), 'day' (shuffle within each
            day), or 'total' (shuffle across the whole training set).
        total_per_file (list[int]): sample count per day file.

    Returns:
        9-tuple (X_cat_train, X_int_train, y_train, X_cat_val, X_int_val,
        y_val, X_cat_test, X_int_test, y_test); when data_split != 'train'
        the first three entries hold the full dataset and the rest are [].
    """
    # define initial set of indices
    indices = np.arange(len(y))
    # create cumulative offset per file (offset_per_file[i] is the first
    # global index of day i)
    offset_per_file = np.array([0] + [x for x in total_per_file])
    for i in range(days):
        offset_per_file[i + 1] += offset_per_file[i]
    # split dataset
    if data_split == 'train':
        indices = np.array_split(indices, offset_per_file[1:-1])
        # randomize train data (per day)
        if randomize == "day":  # or randomize == "total":
            for i in range(len(indices) - 1):
                indices[i] = np.random.permutation(indices[i])
            print("Randomized indices per day ...")
        # last day is reserved for validation + test
        train_indices = np.concatenate(indices[:-1])
        test_indices = indices[-1]
        test_indices, val_indices = np.array_split(test_indices, 2)
        print("Defined training and testing indices...")
        # randomize train data (across days)
        if randomize == "total":
            train_indices = np.random.permutation(train_indices)
            print("Randomized indices across days ...")
        # create training, validation, and test sets
        X_cat_train = X_cat[train_indices]
        X_int_train = X_int[train_indices]
        y_train = y[train_indices]
        X_cat_val = X_cat[val_indices]
        X_int_val = X_int[val_indices]
        y_val = y[val_indices]
        X_cat_test = X_cat[test_indices]
        X_int_test = X_int[test_indices]
        y_test = y[test_indices]
        print("Split data according to indices...")
        # BUG FIX: np.long was deprecated in NumPy 1.20 and removed in 1.24;
        # np.int64 is the concrete dtype embedding lookups expect.
        X_cat_train = X_cat_train.astype(np.int64)
        X_int_train = np.log(X_int_train.astype(np.float32) + 1)
        y_train = y_train.astype(np.float32)
        X_cat_val = X_cat_val.astype(np.int64)
        X_int_val = np.log(X_int_val.astype(np.float32) + 1)
        y_val = y_val.astype(np.float32)
        X_cat_test = X_cat_test.astype(np.int64)
        X_int_test = np.log(X_int_test.astype(np.float32) + 1)
        y_test = y_test.astype(np.float32)
        print("Converted to tensors...done!")
        return (
            X_cat_train,
            X_int_train,
            y_train,
            X_cat_val,
            X_int_val,
            y_val,
            X_cat_test,
            X_int_test,
            y_test,
        )
    else:
        # randomize data
        if randomize == "total":
            indices = np.random.permutation(indices)
            print("Randomized indices...")
        # BUG FIX: np.int64 instead of the removed np.long alias (see above)
        X_cat = X_cat[indices].astype(np.int64)
        X_int = np.log(X_int[indices].astype(np.float32) + 1)
        y = y[indices].astype(np.float32)
        print("Converted to tensors...done!")
        return (X_cat, X_int, y, [], [], [], [], [], [])
def getCriteoAdData(
    datafile,
    o_filename,
    max_ind_range=-1,
    sub_sample_rate=0.0,
    days=7,
    data_split='train',
    randomize='total',
    criteo_kaggle=True,
    memory_map=False
):
    """Parse the raw Criteo dataset and run the full preprocessing pipeline.

    Passes through the entire dataset, builds dictionaries for the 26
    categorical features, and determines the number of categories per
    feature; then processes and concatenates all day splits.

    Args:
        datafile: path to the downloaded raw data file.
        o_filename (str): results are saved under this name when non-empty.

    Returns:
        str: output file path produced by concatCriteoAdData.
    """
    # split the datafile into path and filename
    lstr = datafile.split("/")
    d_path = "/".join(lstr[0:-1]) + "/"
    d_file = lstr[-1].split(".")[0] if criteo_kaggle else lstr[-1]
    npzfile = d_path + ((d_file + "_day") if criteo_kaggle else d_file)
    trafile = d_path + ((d_file + "_fea") if criteo_kaggle else "fea")

    # count number of datapoints in the training set (cached in a .npz)
    total_file = d_path + d_file + "_day_count.npz"
    if path.exists(total_file):
        with np.load(total_file) as data:
            total_per_file = list(data["total_per_file"])
        total_count = np.sum(total_per_file)
        print("Skipping counts per file (already exist)")
    else:
        total_count = 0
        total_per_file = []
        if criteo_kaggle:
            # WARNING: The raw data consists of a single train.txt file
            # Each line in the file is a sample, consisting of 13 continuous
            # and 26 categorical features (an extra space indicates that a
            # feature is missing and will be interpreted as 0).
            if path.exists(datafile):
                print("Reading data from path=%s" % (datafile))
                with open(str(datafile)) as f:
                    for _ in f:
                        total_count += 1
                total_per_file.append(total_count)
                # reset total per file due to split
                num_data_per_split, extras = divmod(total_count, days)
                total_per_file = [num_data_per_split] * days
                for j in range(extras):
                    total_per_file[j] += 1
                # split into days (simplifies code later on)
                file_id = 0
                boundary = total_per_file[file_id]
                nf = open(npzfile + "_" + str(file_id), "w")
                with open(str(datafile)) as f:
                    for j, line in enumerate(f):
                        if j == boundary:
                            nf.close()
                            file_id += 1
                            nf = open(npzfile + "_" + str(file_id), "w")
                            boundary += total_per_file[file_id]
                        nf.write(line)
                nf.close()
            else:
                sys.exit("ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset")
        else:
            # WARNING: The raw data consist of day_0.gz,... ,day_23.gz text
            # files. Each line in the file is a sample, consisting of 13
            # continuous and 26 categorical features (an extra space
            # indicates that a feature is missing, interpreted as 0).
            for i in range(days):
                datafile_i = datafile + "_" + str(i)  # + ".gz"
                if path.exists(str(datafile_i)):
                    print("Reading data from path=%s" % (str(datafile_i)))
                    # file day_<number>
                    total_per_file_count = 0
                    with open(str(datafile_i)) as f:
                        for _ in f:
                            total_per_file_count += 1
                    total_per_file.append(total_per_file_count)
                    total_count += total_per_file_count
                else:
                    sys.exit("ERROR: Criteo Terabyte Dataset path is invalid; please download from https://labs.criteo.com/2013/12/download-terabyte-click-logs")

    # process a file worth of data and reinitialize data
    # note that a file may contain a single or multiple splits
    def process_one_file(
        datfile,
        npzfile,
        split,
        num_data_in_split,
    ):
        with open(str(datfile)) as f:
            y = np.zeros(num_data_in_split, dtype="i4")  # 4 byte int
            X_int = np.zeros((num_data_in_split, 13), dtype="i4")  # 4 byte int
            X_cat = np.zeros((num_data_in_split, 26), dtype="i4")  # 4 byte int
            if sub_sample_rate == 0.0:
                rand_u = 1.0
            else:
                rand_u = np.random.uniform(low=0.0, high=1.0, size=num_data_in_split)
            i = 0
            for k, line in enumerate(f):
                # process a line (data point)
                line = line.split('\t')
                # set missing values to zero
                for j in range(len(line)):
                    if (line[j] == '') or (line[j] == '\n'):
                        line[j] = '0'
                # sub-sample data by dropping zero targets, if needed
                target = np.int32(line[0])
                if target == 0 and \
                        (rand_u if sub_sample_rate == 0.0 else rand_u[k]) < sub_sample_rate:
                    continue
                y[i] = target
                X_int[i] = np.array(line[1:14], dtype=np.int32)
                # categorical features are hex strings; optionally fold them
                # into a bounded index range
                if max_ind_range > 0:
                    X_cat[i] = np.array(
                        list(map(lambda x: int(x, 16) % max_ind_range, line[14:])),
                        dtype=np.int32
                    )
                else:
                    X_cat[i] = np.array(
                        list(map(lambda x: int(x, 16), line[14:])),
                        dtype=np.int32
                    )
                # count uniques
                for j in range(26):
                    convertDicts[j][X_cat[i][j]] = 1
                # debug prints
                print(
                    "Load %d/%d Split: %d Label True: %d Stored: %d"
                    % (
                        i,
                        num_data_in_split,
                        split,
                        target,
                        y[i],
                    ),
                    end="\r",
                )
                i += 1
        # store num_data_in_split samples (or the extras at the end of file)
        filename_s = npzfile + "_{0}.npz".format(split)
        if path.exists(filename_s):
            print("\nSkip existing " + filename_s)
        else:
            np.savez_compressed(
                filename_s,
                X_int=X_int[0:i, :],
                # X_cat=X_cat[0:i, :],
                X_cat_t=np.transpose(X_cat[0:i, :]),  # transpose of the data
                y=y[0:i],
            )
            print("\nSaved " + npzfile + "_{0}.npz!".format(split))
        return i

    # create all splits (reuse existing files if possible)
    recreate_flag = False
    convertDicts = [{} for _ in range(26)]
    # WARNING: to get reproducable sub-sampling results you must reset the seed below
    # np.random.seed(123)
    # in this case there is a single split in each day
    for i in range(days):
        datfile_i = npzfile + "_{0}".format(i)  # + ".gz"
        npzfile_i = npzfile + "_{0}.npz".format(i)
        npzfile_p = npzfile + "_{0}_processed.npz".format(i)
        if path.exists(npzfile_i):
            print("Skip existing " + npzfile_i)
        elif path.exists(npzfile_p):
            print("Skip existing " + npzfile_p)
        else:
            recreate_flag = True
            total_per_file[i] = process_one_file(
                datfile_i,
                npzfile,
                i,
                total_per_file[i],
            )

    # report and save total into a file
    total_count = np.sum(total_per_file)
    if not path.exists(total_file):
        np.savez_compressed(total_file, total_per_file=total_per_file)
    print("Total number of samples:", total_count)
    print("Divided into days/splits:\n", total_per_file)

    # dictionary files
    counts = np.zeros(26, dtype=np.int32)
    if recreate_flag:
        # create dictionaries
        for j in range(26):
            for i, x in enumerate(convertDicts[j]):
                convertDicts[j][x] = i
            dict_file_j = d_path + d_file + "_fea_dict_{0}.npz".format(j)
            if not path.exists(dict_file_j):
                np.savez_compressed(
                    dict_file_j,
                    unique=np.array(list(convertDicts[j]), dtype=np.int32)
                )
            counts[j] = len(convertDicts[j])
        # store (uniques and) counts
        count_file = d_path + d_file + "_fea_count.npz"
        if not path.exists(count_file):
            np.savez_compressed(count_file, counts=counts)
    else:
        # create dictionaries (from existing files)
        for j in range(26):
            with np.load(d_path + d_file + "_fea_dict_{0}.npz".format(j)) as data:
                unique = data["unique"]
            for i, x in enumerate(unique):
                convertDicts[j][x] = i
        # load (uniques and) counts
        with np.load(d_path + d_file + "_fea_count.npz") as data:
            counts = data["counts"]

    # process all splits
    processCriteoAdData(d_path, d_file, npzfile, days, convertDicts, counts)
    o_file = concatCriteoAdData(
        d_path,
        d_file,
        npzfile,
        trafile,
        days,
        data_split,
        randomize,
        total_per_file,
        total_count,
        memory_map,
        o_filename
    )
    return o_file
def loadDataset(
    dataset,
    max_ind_range,
    sub_sample_rate,
    randomize,
    data_split,
    raw_path="",
    pro_data="",
    memory_map=False
):
    """Return the path to (pre-)processed Criteo data, preprocessing if needed.

    Args:
        dataset (str): "kaggle" or "terabyte"; anything else raises ValueError.
        max_ind_range (int): cap on categorical index range (-1 = unbounded).
        sub_sample_rate (float): probability of dropping zero-target samples.
        randomize (str): "none", "day", or "total".
        data_split (str): split mode forwarded to preprocessing.
        raw_path (str): path to the raw data file.
        pro_data (str): path to already-processed data, if available.
        memory_map (bool): when True, expect per-day reordered .npz files.

    Returns:
        (str, int): path of the (pre-)processed data and the number of days.
    """
    # dataset
    if dataset == "kaggle":
        days = 7
        o_filename = "kaggleAdDisplayChallenge_processed"
    elif dataset == "terabyte":
        days = 24
        o_filename = "terabyte_processed"
    else:
        raise ValueError("Data set option is not supported")
    # split the datafile into path and filename
    lstr = raw_path.split("/")
    d_path = "/".join(lstr[0:-1]) + "/"
    d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1]
    npzfile = d_path + ((d_file + "_day") if dataset == "kaggle" else d_file)
    # trafile = d_path + ((d_file + "_fea") if dataset == "kaggle" else "fea")
    # check if pre-processed data is available
    data_ready = True
    if memory_map:
        for i in range(days):
            # BUG FIX: npzfile already starts with d_path; the original
            # prepended d_path again, producing a doubled path that never
            # exists, so the readiness check always failed.
            reo_data = npzfile + "_{0}_reordered.npz".format(i)
            if not path.exists(str(reo_data)):
                data_ready = False
    else:
        if not path.exists(str(pro_data)):
            data_ready = False
    # pre-process data if needed
    # WARNING: when memory mapping is used we get a collection of files
    if data_ready:
        print("Reading pre-processed data=%s" % (str(pro_data)))
        out_file = str(pro_data)
    else:
        print("Reading raw data=%s" % (str(raw_path)))
        out_file = getCriteoAdData(
            raw_path,
            o_filename,
            max_ind_range,
            sub_sample_rate,
            days,
            data_split,
            randomize,
            dataset == "kaggle",
            memory_map
        )
    return out_file, days
if __name__ == "__main__":
    ### import packages ###
    import argparse

    ### parse arguments ###
    parser = argparse.ArgumentParser(
        description="Preprocess Criteo dataset"
    )
    # model related parameters
    parser.add_argument("--max-ind-range", type=int, default=-1)
    parser.add_argument("--data-sub-sample-rate", type=float, default=0.0)  # in [0, 1]
    parser.add_argument("--data-randomize", type=str, default="total")  # or day or none
    parser.add_argument("--memory-map", action="store_true", default=False)
    parser.add_argument("--data-set", type=str, default="kaggle")  # or terabyte
    parser.add_argument("--raw-data-file", type=str, default="")
    parser.add_argument("--processed-data-file", type=str, default="")
    args = parser.parse_args()

    # run the full preprocessing pipeline on the "train" split
    loadDataset(
        args.data_set,
        args.max_ind_range,
        args.data_sub_sample_rate,
        args.data_randomize,
        "train",
        args.raw_data_file,
        args.processed_data_file,
        args.memory_map,
    )
#!/usr/bin/env python3
# Copyright (c) 2014-present, The osquery authors
#
# This source code is licensed as defined by the LICENSE file found in the
# root directory of this source tree.
#
# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only)
import json
import os
import sys
try:
import argparse
except ImportError:
print("Cannot import argparse.")
exit(1)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=(
        "List files from compile_commands.json."
    ))
    parser.add_argument(
        "--build", metavar="PATH",
        help="Path to osquery build (./build/<sys>/) directory"
    )
    parser.add_argument(
        "--base", metavar="PATH", default="",
        help="Real path of source base."
    )
    args = parser.parse_args()

    commands_path = os.path.join(args.build, "compile_commands.json")
    if not os.path.exists(commands_path):
        print("Cannot find '%s'" % (commands_path))
        # BUG FIX: sys.exit instead of the site-injected exit(), which is
        # not guaranteed to exist (e.g. under python -S)
        sys.exit(1)

    with open(commands_path, 'r') as fh:
        content = fh.read()
    data = json.loads(content)
    for entry in data:  # renamed from 'file', which shadowed the builtin
        # skip test, benchmark and gtest translation units
        if entry['file'].find("_tests.cpp") > 0 or entry['file'].find("_benchmark") > 0:
            continue
        if entry['file'].find("gtest") > 0:
            continue
        print(entry['file'])
pass | unknown | codeparrot/codeparrot-clean | ||
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
str_to_int,
ExtractorError
)
class AppleConnectIE(InfoExtractor):
    """Extractor for video posts hosted on itunes.apple.com (Apple Connect)."""

    _VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/idsa\.(?P<id>[\w-]+)'
    _TEST = {
        'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
        'md5': 'e7c38568a01ea45402570e6029206723',
        'info_dict': {
            'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
            'ext': 'm4v',
            'title': 'Energy',
            'uploader': 'Drake',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20150710',
            'timestamp': 1436545535,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The video metadata is embedded in the page as a JSON blob; a post
        # without that blob carries no video at all.
        try:
            video_json = self._html_search_regex(
                r'class="auc-video-data">(\{.*?\})', webpage, 'json')
        except ExtractorError:
            raise ExtractorError('This post doesn\'t contain a video', expected=True)
        video_data = self._parse_json(video_json, video_id)

        timestamp = str_to_int(self._html_search_regex(
            r'data-timestamp="(\d+)"', webpage, 'timestamp'))
        like_count = str_to_int(self._html_search_regex(
            r'(\d+) Loves', webpage, 'like count'))

        return {
            'id': video_id,
            'url': video_data['sslSrc'],
            'title': video_data['title'],
            'description': video_data['description'],
            'uploader': video_data['artistName'],
            'thumbnail': video_data['artworkUrl'],
            'timestamp': timestamp,
            'like_count': like_count,
        }
# -*- coding: utf_8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# Crypto Key Database
import os
import sqlite3
import shutil
import uuid
import random
import base64
import Crypto.PublicKey.RSA
import Crypto.Random.random
import Crypto.Cipher.PKCS1_OAEP
import Crypto.Hash.SHA256
# increment keydb_version on DB schema changes
keydb_version = 2017090101
# main key db
class KeyDb:
    """SQLite-backed store for RSA key pairs.

    Public keys live in table ``pubkey`` (with an expiry), private keys in
    ``privkey`` (optionally passphrase-protected, with the passphrase itself
    encrypted under a parent key). The schema version is kept in SQLite's
    ``user_version`` pragma and migrated by :meth:`_Upgrade`.
    """

    def __init__(self, dbpath):
        self._dbpath = dbpath
        # create a fresh DB file on first use
        if not os.path.isfile(self._dbpath):
            db_create(self._dbpath)
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        # enable cell size checking
        c.execute('PRAGMA cell_size_check = 1')
        # quick-check and optimize on every open
        c.execute('PRAGMA quick_check')
        check_result = c.fetchone()[0]
        if check_result != 'ok':
            raise ValueError("DB Check failed: " + check_result)
        c.execute('PRAGMA optimize')
        # compare stored schema version with the code's version and
        # run migrations if the DB is older
        c.execute('PRAGMA user_version')
        current_db_version = c.fetchone()[0]
        conn.close()
        if current_db_version < keydb_version:
            self._Upgrade(current_db_version)

    def New(self, parent_key_id=None, bits=2048, password=None, expiry='+2 years'):
        """Generate a new RSA key pair, store it, and return its UUID.

        When parent_key_id is given, the private key's passphrase is
        encrypted under that parent key before being stored.
        """
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        new_uuid = str(uuid.uuid4())
        key_priv = Crypto.PublicKey.RSA.generate(bits)
        key_pub = key_priv.publickey()
        store_password = None
        if parent_key_id:
            store_password = base64.standard_b64encode(
                self.Encrypt(parent_key_id, password))
        # replace any stale rows for this (freshly generated) UUID
        c.execute('DELETE FROM pubkey WHERE key_id=?', (new_uuid,))
        c.execute('DELETE FROM privkey WHERE key_id=?', (new_uuid,))
        c.execute('INSERT INTO pubkey (key_id, key_expiry, key) \
            VALUES (?, datetime(\'now\', ?), ?)',
            (new_uuid, expiry, key_pub.exportKey(),)
        )
        c.execute('INSERT INTO privkey (key_id, key_unlock_key_id, key_unlock_password, key) \
            VALUES (?, ?, ?, ?)',
            (new_uuid, parent_key_id, store_password, key_priv.exportKey(passphrase=password),)
        )
        conn.close()
        return new_uuid

    def Del(self, key_id):
        # NOTE(review): stub — only opens and closes the DB; no deletion yet
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def Check(self, key_id):
        # NOTE(review): stub — only opens and closes the DB
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def HttpImport(self, data):
        # NOTE(review): unimplemented placeholder
        pass

    def HttpExport(self, data):
        # NOTE(review): unimplemented placeholder
        pass

    def ImportPubkey(self, key_id, key):
        # NOTE(review): stub — only opens and closes the DB
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def ExportPubkey(self, key_id):
        # NOTE(review): stub — only opens and closes the DB
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def Encrypt(self, key_id, data):
        """RSA-OAEP(SHA256) encrypt *data* (str) under a non-expired public key."""
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        c.execute('SELECT key FROM pubkey WHERE key_id = ? AND key_expiry > datetime(\'now\')', (key_id,))
        row = c.fetchone()
        key_pub = None
        if not row:
            # NOTE(review): the connection is not closed on this error path
            raise ValueError("Key not found in database")
        key_pub = Crypto.PublicKey.RSA.importKey(row[0])
        cipher = Crypto.Cipher.PKCS1_OAEP.new(key_pub, hashAlgo=Crypto.Hash.SHA256)
        message = cipher.encrypt(data.encode('utf-8'))
        conn.close()
        return message

    def Decrypt(self, key_id, password, data):
        """RSA-OAEP(SHA256) decrypt *data* with the stored private key."""
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        c.execute('SELECT key FROM privkey WHERE key_id = ?', (key_id,))
        row = c.fetchone()
        key_priv = None
        if not row:
            raise ValueError("Key not found in database")
        key = row[0]
        key_priv = Crypto.PublicKey.RSA.importKey(key, passphrase=password)
        if not key_priv:
            raise ValueError("Key could not be loaded, bad password?")
        cipher = Crypto.Cipher.PKCS1_OAEP.new(key_priv, hashAlgo=Crypto.Hash.SHA256)
        message = cipher.decrypt(data)
        conn.close()
        return message.decode('utf-8')

    def Sign(self, key_id, password, data):
        # NOTE(review): stub — only opens and closes the DB
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def Verify(self, key_id, data):
        # NOTE(review): stub — only opens and closes the DB
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()

    def KeyPassword(self, key_id):
        """Return the stored (encrypted) unlock password for *key_id*, or None."""
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        c.execute('SELECT key_unlock_password FROM privkey WHERE key_id = ?', (key_id,))
        row = c.fetchone()
        conn.close()
        if not row:
            return None
        else:
            return base64.standard_b64decode(row[0])

    def _Upgrade(self, current_db_version):
        """Migrate the DB schema from *current_db_version* up to keydb_version."""
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        # current_db_version == 0 means the DB is brand new; otherwise back
        # it up and run full checks before touching the schema
        if current_db_version > 0:
            c.execute('PRAGMA database_list')
            dbpath = c.fetchone()[2]
            # lock the entire DB before copying the file,
            # see https://sqlite.org/pragma.html#pragma_locking_mode
            c.execute('PRAGMA locking_mode = EXCLUSIVE')
            # write some throwaway data to actually acquire the exclusive lock
            c.execute('CREATE TABLE __temp_upgrade (temp INT)')
            c.execute('INSERT INTO __temp_upgrade (temp) values (1)')
            c.execute('SELECT * FROM __temp_upgrade')
            c.execute('DROP TABLE __temp_upgrade')
            c.execute('PRAGMA query_only = 1')
            # copy the DB file while holding the exclusive lock
            backupdbpath = dbpath + '-backup-v' + str(current_db_version)
            shutil.copyfile(dbpath, backupdbpath)
            # unlock and write again to release the exclusive lock
            c.execute('PRAGMA query_only = 0')
            c.execute('PRAGMA locking_mode = NORMAL')
            c.execute('CREATE TABLE __temp_upgrade (temp INT)')
            c.execute('INSERT INTO __temp_upgrade (temp) values (1)')
            c.execute('SELECT * FROM __temp_upgrade')
            c.execute('DROP TABLE __temp_upgrade')
            # full integrity check before migrating
            c.execute('PRAGMA integrity_check')
            check_result = c.fetchone()[0]
            if check_result != 'ok':
                raise ValueError("DB Check failed: " + check_result)
        # Upgrades run IN ORDER; each step must bump user_version and
        # current_db_version, e.g.:
        #   if current_db_version < 2017090102:
        #       c.execute('alter table foo add column blah text')
        #       c.execute('PRAGMA user_version = 2017090102')
        #       current_db_version = 2017090102

        # version 2017090101: initial schema
        if current_db_version < 2017090101:
            c.execute('CREATE TABLE privkey (key_id TEXT PRIMARY KEY NOT NULL, key TEXT, key_unlock_key_id TEXT, key_unlock_password TEXT)')
            c.execute('CREATE TABLE pubkey (key_id TEXT PRIMARY KEY NOT NULL, key TEXT, key_expiry TEXT)')
            c.execute('PRAGMA user_version = 2017090101')
            current_db_version = 2017090101
        # end of upgrades: optimize and compact
        c.execute('PRAGMA optimize')
        c.execute('VACUUM')
        conn.close()
# in-memory password storage scrambling function for key passwords
class KeyPw:
    """In-memory scrambling for key passwords.

    A throwaway RSA pair is generated per session; passwords are kept
    encrypted under it while resident in memory.
    """

    def __init__(self):
        # typable ASCII alphabet for randomly generated passwords
        self.pwchars = list('~!@#$%^&*()_+1234567890-=QWERTYUIOP{}|qwertyuiop[]\\ASDFGHJKL:"asdfghjkl;\'ZXCVBNM<>?zxcvbnm,./ ')
        # session-scoped RSA key pair used to encrypt key passwords
        self._session_key_priv = Crypto.PublicKey.RSA.generate(1024)
        self._session_key_pub = self._session_key_priv.publickey()

    def New(self, length=32):
        """Generate a password of *length* chars drawn from self.pwchars.

        Max length is bounded by the session RSA key size (128 bytes for
        1024 bits). NOTE(review): PyCrypto's key.size() returns bits-1, so
        maxbytes is fractionally below the true byte size — confirm intent.
        """
        maxbytes = self._session_key_priv.size() / 8
        if length > maxbytes:
            raise ValueError("Length must not be larger than RSA key size")
        return ''.join(
            Crypto.Random.random.choice(self.pwchars) for _ in range(length)
        )

    def SessionEncrypt(self, plainpw):
        """Encrypt *plainpw* (str) under the session public key."""
        cipher = Crypto.Cipher.PKCS1_OAEP.new(self._session_key_pub, hashAlgo=Crypto.Hash.SHA256)
        return cipher.encrypt(plainpw.encode('utf-8'))

    def SessionDecrypt(self, encpw):
        """Decrypt *encpw* (bytes) back to the plaintext password (str)."""
        cipher = Crypto.Cipher.PKCS1_OAEP.new(self._session_key_priv, hashAlgo=Crypto.Hash.SHA256)
        return cipher.decrypt(encpw).decode('utf-8')
def db_create(dbpath):
    """Create a brand-new, empty key database file at *dbpath*."""
    conn = sqlite3.connect(dbpath)
    conn.isolation_level = None
    cur = conn.cursor()
    # version 0 marks a fresh DB, so the first upgrade skips the backup step
    cur.execute('PRAGMA user_version = 0')
    # enable cell size checking
    cur.execute('PRAGMA cell_size_check = 1')
    # 4k pages, UTF-8 text
    cur.execute('PRAGMA page_size = 4096')
    cur.execute('PRAGMA encoding = "UTF-8"')
    # VACUUM rebuilds the file so the page size actually takes effect
    cur.execute('VACUUM')
    conn.close()
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Handling of the <message> element.
'''
import re
import types
from grit.node import base
import grit.format.rc_header
import grit.format.rc
from grit import clique
from grit import exception
from grit import lazy_re
from grit import tclib
from grit import util
# Matches a (possibly multiline) string, capturing leading whitespace,
# the body, and trailing whitespace as named groups.
_WHITESPACE = lazy_re.compile(
    r'(?P<start>\s*)(?P<body>.+?)(?P<end>\s*)\Z',
    re.DOTALL | re.MULTILINE)
class MessageNode(base.ContentNode):
  '''A <message> element.'''

  # For splitting a list of things that can be separated by commas or
  # whitespace
  _SPLIT_RE = lazy_re.compile('\s*,\s*|\s+')

  def __init__(self):
    super(MessageNode, self).__init__()
    # Valid after EndParsing, this is the MessageClique that contains the
    # source message and any translations of it that have been loaded.
    self.clique = None
    # We don't send leading and trailing whitespace into the translation
    # console, but rather tack it onto the source message and any
    # translations when formatting them into RC files or what have you.
    self.ws_at_start = ''  # Any whitespace characters at the start of the text
    self.ws_at_end = ''  # --"-- at the end of the text
    # A list of "shortcut groups" this message is in.  We check to make sure
    # that shortcut keys (e.g. &J) within each shortcut group are unique.
    self.shortcut_groups_ = []
    # Formatter-specific data used to control the output of individual strings.
    # formatter_data is a space separated list of C preprocessor-style
    # definitions.  Names without values are given the empty string value.
    # Example: "foo=5 bar baz=100"
    self.formatter_data = {}

  def _IsValidChild(self, child):
    # Only placeholder (<ph>) elements may nest inside a <message>.
    return isinstance(child, (PhNode))

  def _IsValidAttribute(self, name, value):
    # Reject attributes outside the known set, and non-boolean values for
    # the two attributes that must be 'true' or 'false'.
    if name not in ['name', 'offset', 'translateable', 'desc', 'meaning',
                    'internal_comment', 'shortcut_groups', 'custom_type',
                    'validation_expr', 'use_name_for_id', 'sub_variable',
                    'formatter_data']:
      return False
    if (name in ('translateable', 'sub_variable') and
        value not in ['true', 'false']):
      return False
    return True

  def MandatoryAttributes(self):
    # A message must carry either a 'name' or an 'offset' attribute.
    return ['name|offset']

  def DefaultAttributes(self):
    '''Default values for every optional attribute.'''
    return {
      'custom_type' : '',
      'desc' : '',
      'formatter_data' : '',
      'internal_comment' : '',
      'meaning' : '',
      'shortcut_groups' : '',
      'sub_variable' : 'false',
      'translateable' : 'true',
      'use_name_for_id' : 'false',
      'validation_expr' : '',
    }

  def HandleAttribute(self, attrib, value):
    base.ContentNode.HandleAttribute(self, attrib, value)
    if attrib == 'formatter_data':
      # Parse value, a space-separated list of defines, into a dict.
      # Example: "foo=5 bar" -> {'foo':'5', 'bar':''}
      for item in value.split():
        name, sep, val = item.partition('=')
        self.formatter_data[name] = val

  def GetTextualIds(self):
    '''
    Returns the concatenation of the parent's node first_id and
    this node's offset if it has one, otherwise just call the
    superclass' implementation
    '''
    if 'offset' in self.attrs:
      # we search for the first grouping node in the parents' list
      # to take care of the case where the first parent is an <if> node
      grouping_parent = self.parent
      import grit.node.empty
      while grouping_parent and not isinstance(grouping_parent,
                                               grit.node.empty.GroupingNode):
        grouping_parent = grouping_parent.parent
      assert 'first_id' in grouping_parent.attrs
      return [grouping_parent.attrs['first_id'] + '_' + self.attrs['offset']]
    else:
      return super(MessageNode, self).GetTextualIds()

  def IsTranslateable(self):
    return self.attrs['translateable'] == 'true'

  def EndParsing(self):
    super(MessageNode, self).EndParsing()

    # Make the text (including placeholder references) and list of placeholders,
    # then strip and store leading and trailing whitespace and create the
    # tclib.Message() and a clique to contain it.

    text = ''
    placeholders = []
    for item in self.mixed_content:
      if isinstance(item, types.StringTypes):
        text += item
      else:
        # item is a PhNode: substitute its upper-cased name into the text
        # and record a Placeholder with the original content and, when
        # present, its first <ex> child's text as the example.
        presentation = item.attrs['name'].upper()
        text += presentation
        ex = ' '
        if len(item.children):
          ex = item.children[0].GetCdata()
        original = item.GetCdata()
        placeholders.append(tclib.Placeholder(presentation, original, ex))

    m = _WHITESPACE.match(text)
    if m:
      self.ws_at_start = m.group('start')
      self.ws_at_end = m.group('end')
      text = m.group('body')

    self.shortcut_groups_ = self._SPLIT_RE.split(self.attrs['shortcut_groups'])
    self.shortcut_groups_ = [i for i in self.shortcut_groups_ if i != '']

    description_or_id = self.attrs['desc']
    if description_or_id == '' and 'name' in self.attrs:
      description_or_id = 'ID: %s' % self.attrs['name']

    assigned_id = None
    if self.attrs['use_name_for_id'] == 'true':
      assigned_id = self.attrs['name']
    message = tclib.Message(text=text, placeholders=placeholders,
                            description=description_or_id,
                            meaning=self.attrs['meaning'],
                            assigned_id=assigned_id)
    self.InstallMessage(message)

  def InstallMessage(self, message):
    '''Sets this node's clique from a tclib.Message instance.

    Args:
      message: A tclib.Message.
    '''
    self.clique = self.UberClique().MakeClique(message, self.IsTranslateable())
    for group in self.shortcut_groups_:
      self.clique.AddToShortcutGroup(group)
    if self.attrs['custom_type'] != '':
      self.clique.SetCustomType(util.NewClassInstance(self.attrs['custom_type'],
                                                      clique.CustomType))
    elif self.attrs['validation_expr'] != '':
      self.clique.SetCustomType(
          clique.OneOffCustomType(self.attrs['validation_expr']))

  def SubstituteMessages(self, substituter):
    '''Applies substitution to this message.

    Args:
      substituter: a grit.util.Substituter object.
    '''
    message = substituter.SubstituteMessage(self.clique.GetMessage())
    # Only reinstall (and re-create the clique) when substitution actually
    # produced a different message object.
    if message is not self.clique.GetMessage():
      self.InstallMessage(message)

  def GetCliques(self):
    '''Returns this node's clique in a list, or [] before EndParsing.'''
    if self.clique:
      return [self.clique]
    else:
      return []

  def Translate(self, lang):
    '''Returns a translated version of this message.
    '''
    assert self.clique
    msg = self.clique.MessageForLanguage(lang,
                                         self.PseudoIsAllowed(),
                                         self.ShouldFallbackToEnglish()
                                         ).GetRealContent()
    # [GRITLANGCODE] is a magic token replaced by the target language code.
    return msg.replace('[GRITLANGCODE]', lang)

  def NameOrOffset(self):
    '''Returns the 'name' attribute if present, else the 'offset'.'''
    if 'name' in self.attrs:
      return self.attrs['name']
    else:
      return self.attrs['offset']

  def ExpandVariables(self):
    '''We always expand variables on Messages.'''
    return True

  def GetDataPackPair(self, lang, encoding):
    '''Returns a (id, string) pair that represents the string id and the string
    in the specified encoding, where |encoding| is one of the encoding values
    accepted by util.Encode.  This is used to generate the data pack data file.
    '''
    from grit.format import rc_header
    id_map = rc_header.GetIds(self.GetRoot())
    id = id_map[self.GetTextualIds()[0]]
    # Reattach the whitespace that was stripped off before translation.
    message = self.ws_at_start + self.Translate(lang) + self.ws_at_end
    return id, util.Encode(message, encoding)

  def IsResourceMapSource(self):
    return True

  def GeneratesResourceMapEntry(self, output_all_resource_defines,
                                is_active_descendant):
    return is_active_descendant

  @staticmethod
  def Construct(parent, message, name, desc='', meaning='', translateable=True):
    '''Constructs a new message node that is a child of 'parent', with the
    name, desc, meaning and translateable attributes set using the same-named
    parameters and the text of the message and any placeholders taken from
    'message', which must be a tclib.Message() object.'''
    # Convert type to appropriate string
    translateable = 'true' if translateable else 'false'

    node = MessageNode()
    node.StartParsing('message', parent)
    node.HandleAttribute('name', name)
    node.HandleAttribute('desc', desc)
    node.HandleAttribute('meaning', meaning)
    node.HandleAttribute('translateable', translateable)

    items = message.GetContent()
    for ix, item in enumerate(items):
      if isinstance(item, types.StringTypes):
        # Ensure whitespace at front and back of message is correctly handled.
        if ix == 0:
          item = "'''" + item
        if ix == len(items) - 1:
          item = item + "'''"
        node.AppendContent(item)
      else:
        phnode = PhNode()
        phnode.StartParsing('ph', node)
        phnode.HandleAttribute('name', item.GetPresentation())
        phnode.AppendContent(item.GetOriginal())

        if len(item.GetExample()) and item.GetExample() != ' ':
          exnode = ExNode()
          exnode.StartParsing('ex', phnode)
          exnode.AppendContent(item.GetExample())
          exnode.EndParsing()
          phnode.AddChild(exnode)

        phnode.EndParsing()
        node.AddChild(phnode)
    node.EndParsing()
    return node
class PhNode(base.ContentNode):
  '''A <ph> (placeholder) element inside a <message>.'''

  def _IsValidChild(self, child):
    # A placeholder may only contain <ex> example children.
    return isinstance(child, ExNode)

  def MandatoryAttributes(self):
    return ['name']

  def EndParsing(self):
    super(PhNode, self).EndParsing()
    # We only allow a single example for each placeholder
    example_count = len(self.children)
    if example_count > 1:
      raise exception.TooManyExamples()

  def GetTextualIds(self):
    # The 'name' attribute is not an ID.
    return []
class ExNode(base.ContentNode):
  '''An <ex> element.'''
  # No behavior beyond base.ContentNode: an <ex> simply holds the example
  # text for its parent <ph> placeholder.
  pass
from django.core.exceptions import ValidationError
from .factories import PhotoSizeFactory
from .helpers import PhotologueBaseTest
class PhotoSizeNameTest(PhotologueBaseTest):
    """Validation tests for the PhotoSize.name field."""

    def test_valid_name(self):
        """We are restricted in what names we can enter."""
        # Null names are rejected by model validation.
        photosize = PhotoSizeFactory()
        photosize.name = None
        with self.assertRaisesMessage(ValidationError, 'This field cannot be null.'):
            photosize.full_clean()

        # Blank names are rejected too.
        photosize = PhotoSizeFactory(name='')
        with self.assertRaisesMessage(ValidationError, 'This field cannot be blank.'):
            photosize.full_clean()

        # Names must be plain lowercase ASCII, digits and underscores:
        # spaces, uppercase and punctuation all fail.
        for name in ('a space', 'UPPERCASE', 'bad?chars'):
            photosize = PhotoSizeFactory(name=name)
            with self.assertRaisesMessage(ValidationError,
                                          'Use only plain lowercase letters (ASCII), numbers and underscores.'):
                photosize.full_clean()

        # Conforming names pass validation without raising.
        for name in ('label', '2_words'):
            photosize = PhotoSizeFactory(name=name)
            photosize.full_clean()
use std::fmt::{Debug, Formatter};
use std::io;
use rustc_public_bridge::bridge;
use serde::Serialize;
use crate::abi::FnAbi;
use crate::crate_def::CrateDef;
use crate::mir::Body;
use crate::ty::{Allocation, ClosureDef, ClosureKind, FnDef, GenericArgs, Ty, index_impl};
use crate::{CrateItem, DefId, Error, ItemKind, Opaque, Symbol, ThreadLocalIndex, with};
/// An item that participates in monomorphized code generation.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
pub enum MonoItem {
    /// A function instance.
    Fn(Instance),
    /// A static variable definition.
    Static(StaticDef),
    /// A module-level `global_asm!` item; its contents are opaque here.
    GlobalAsm(Opaque),
}

/// A function definition paired with the generic arguments it was
/// instantiated with (see [`Instance::args`]).
#[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize)]
pub struct Instance {
    /// The type of instance.
    pub kind: InstanceKind,
    /// An ID used to get the instance definition from the compiler.
    /// Do not use this field directly.
    pub def: InstanceDef,
}

/// The different kinds of [`Instance`].
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize)]
pub enum InstanceKind {
    /// A user defined item.
    Item,
    /// A compiler intrinsic function.
    Intrinsic,
    /// A virtual function definition stored in a VTable.
    /// The `idx` field indicates the position in the VTable for this instance.
    Virtual { idx: usize },
    /// A compiler generated shim.
    Shim,
}
impl Instance {
    /// Get the arguments this instance was instantiated with.
    pub fn args(&self) -> GenericArgs {
        with(|cx| cx.instance_args(self.def))
    }

    /// Get the body of an Instance.
    ///
    /// The body will be eagerly monomorphized and all constants will already be evaluated.
    ///
    /// This method will return the intrinsic fallback body if one was defined.
    pub fn body(&self) -> Option<Body> {
        with(|context| context.instance_body(self.def))
    }

    /// Check whether this instance has a body available.
    ///
    /// For intrinsics with fallback body, this will return `true`. It is up to the user to decide
    /// whether to specialize the intrinsic or to use its fallback body.
    ///
    /// For more information on fallback body, see <https://github.com/rust-lang/rust/issues/93145>.
    ///
    /// This call is much cheaper than `instance.body().is_some()`, since it doesn't try to build
    /// the rustc_public's IR body.
    pub fn has_body(&self) -> bool {
        with(|cx| cx.has_body(self.def.def_id()))
    }

    /// Check whether this instance's definition is a foreign item.
    pub fn is_foreign_item(&self) -> bool {
        with(|cx| cx.is_foreign_item(self.def.def_id()))
    }

    /// Get the instance type with generic instantiations applied and lifetimes erased.
    pub fn ty(&self) -> Ty {
        with(|context| context.instance_ty(self.def))
    }

    /// Retrieve information about this instance binary interface.
    pub fn fn_abi(&self) -> Result<FnAbi, Error> {
        with(|cx| cx.instance_abi(self.def))
    }

    /// Retrieve the instance's mangled name used for calling the given instance.
    ///
    /// This will also look up the correct name of instances from upstream crates.
    pub fn mangled_name(&self) -> Symbol {
        with(|context| context.instance_mangled_name(self.def))
    }

    /// Retrieve the instance name for diagnostic messages.
    ///
    /// This will return the specialized name, e.g., `std::vec::Vec<u8>::new`.
    pub fn name(&self) -> Symbol {
        with(|context| context.instance_name(self.def, false))
    }

    /// Return a trimmed name of the given instance including its args.
    ///
    /// If a symbol name can only be imported from one place for a type, and as
    /// long as it was not glob-imported anywhere in the current crate, we trim its
    /// path and print only the name.
    pub fn trimmed_name(&self) -> Symbol {
        with(|context| context.instance_name(self.def, true))
    }

    /// Retrieve the plain intrinsic name of an instance if it's an intrinsic.
    ///
    /// The plain name does not include type arguments (as `trimmed_name` does),
    /// which is more convenient to match with intrinsic symbols.
    pub fn intrinsic_name(&self) -> Option<Symbol> {
        match self.kind {
            InstanceKind::Intrinsic => {
                Some(with(|context| context.intrinsic(self.def.def_id()).unwrap().fn_name()))
            }
            InstanceKind::Item | InstanceKind::Virtual { .. } | InstanceKind::Shim => None,
        }
    }

    /// Resolve an instance starting from a function definition and generic arguments.
    pub fn resolve(def: FnDef, args: &GenericArgs) -> Result<Instance, Error> {
        with(|context| {
            context.resolve_instance(def, args).ok_or_else(|| {
                bridge::Error::new(format!("Failed to resolve `{def:?}` with `{args:?}`"))
            })
        })
    }

    /// Resolve the drop in place for a given type.
    pub fn resolve_drop_in_place(ty: Ty) -> Instance {
        with(|cx| cx.resolve_drop_in_place(ty))
    }

    /// Resolve an instance for a given function pointer.
    pub fn resolve_for_fn_ptr(def: FnDef, args: &GenericArgs) -> Result<Instance, Error> {
        with(|context| {
            context.resolve_for_fn_ptr(def, args).ok_or_else(|| {
                bridge::Error::new(format!("Failed to resolve `{def:?}` with `{args:?}`"))
            })
        })
    }

    /// Resolve a closure with the expected kind.
    pub fn resolve_closure(
        def: ClosureDef,
        args: &GenericArgs,
        kind: ClosureKind,
    ) -> Result<Instance, Error> {
        with(|context| {
            context.resolve_closure(def, args, kind).ok_or_else(|| {
                bridge::Error::new(format!("Failed to resolve `{def:?}` with `{args:?}`"))
            })
        })
    }

    /// Check whether this instance is an empty shim.
    ///
    /// Allow users to check if this shim can be ignored when called directly.
    ///
    /// We have decided not to export different types of Shims to rustc_public users, however, this
    /// is a query that can be very helpful for users when processing DropGlue.
    ///
    /// When generating code for a Drop terminator, users can ignore an empty drop glue.
    /// These shims are only needed to generate a valid Drop call done via VTable.
    pub fn is_empty_shim(&self) -> bool {
        self.kind == InstanceKind::Shim && with(|cx| cx.is_empty_drop_shim(self.def))
    }

    /// Try to constant evaluate the instance into a constant with the given type.
    ///
    /// This can be used to retrieve a constant that represents an intrinsic return such as
    /// `type_id`.
    pub fn try_const_eval(&self, const_ty: Ty) -> Result<Allocation, Error> {
        with(|cx| cx.eval_instance(self.def, const_ty))
    }

    /// Emit the body of this instance if it has one.
    pub fn emit_mir<W: io::Write>(&self, w: &mut W) -> io::Result<()> {
        if let Some(body) = self.body() { body.dump(w, &self.name()) } else { Ok(()) }
    }
}
// Manual impl (rather than derive) so `def` renders as the mangled symbol
// name instead of the opaque numeric `InstanceDef` id, and the resolved
// generic args are included.
impl Debug for Instance {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Instance")
            .field("kind", &self.kind)
            .field("def", &self.mangled_name())
            .field("args", &self.args())
            .finish()
    }
}
/// Try to convert a crate item into an instance.
/// The item cannot be generic in order to be converted into an instance.
impl TryFrom<CrateItem> for Instance {
    type Error = crate::Error;

    fn try_from(item: CrateItem) -> Result<Self, Self::Error> {
        with(|context| {
            let def_id = item.def_id();
            if !context.requires_monomorphization(def_id) {
                Ok(context.mono_instance(def_id))
            } else {
                Err(bridge::Error::new("Item requires monomorphization".to_string()))
            }
        })
    }
}

/// Try to convert an instance into a crate item.
/// Only user defined instances can be converted.
impl TryFrom<Instance> for CrateItem {
    type Error = crate::Error;

    fn try_from(value: Instance) -> Result<Self, Self::Error> {
        with(|context| {
            // Conversion requires both a user-defined item kind and an
            // available body.
            if value.kind == InstanceKind::Item && context.has_body(value.def.def_id()) {
                Ok(CrateItem(context.instance_def_id(value.def)))
            } else {
                Err(bridge::Error::new(format!("Item kind `{:?}` cannot be converted", value.kind)))
            }
        })
    }
}

impl From<Instance> for MonoItem {
    fn from(value: Instance) -> Self {
        MonoItem::Fn(value)
    }
}

impl From<StaticDef> for MonoItem {
    fn from(value: StaticDef) -> Self {
        MonoItem::Static(value)
    }
}

impl From<StaticDef> for CrateItem {
    fn from(value: StaticDef) -> Self {
        CrateItem(value.0)
    }
}
/// Compiler-internal identifier for an [`Instance`].  Use the `Instance`
/// methods instead of manipulating this directly.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct InstanceDef(usize, ThreadLocalIndex);

index_impl!(InstanceDef);

impl CrateDef for InstanceDef {
    fn def_id(&self) -> DefId {
        with(|context| context.instance_def_id(*self))
    }
}

crate_def! {
    /// Holds information about a static variable definition.
    #[derive(Serialize)]
    pub StaticDef;
}
impl TryFrom<CrateItem> for StaticDef {
    type Error = crate::Error;

    fn try_from(value: CrateItem) -> Result<Self, Self::Error> {
        // Only items of kind `Static` can become a StaticDef.
        if matches!(value.kind(), ItemKind::Static) {
            Ok(StaticDef(value.0))
        } else {
            Err(bridge::Error::new(format!("Expected a static item, but found: {value:?}")))
        }
    }
}

impl TryFrom<Instance> for StaticDef {
    type Error = crate::Error;

    fn try_from(value: Instance) -> Result<Self, Self::Error> {
        // Route through CrateItem so both conversion checks apply.
        StaticDef::try_from(CrateItem::try_from(value)?)
    }
}

impl From<StaticDef> for Instance {
    fn from(value: StaticDef) -> Self {
        // A static definition should always be convertible to an instance.
        with(|cx| cx.mono_instance(value.def_id()))
    }
}

impl StaticDef {
    /// Return the type of this static definition.
    pub fn ty(&self) -> Ty {
        with(|cx| cx.def_ty(self.0))
    }

    /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
    pub fn eval_initializer(&self) -> Result<Allocation, Error> {
        with(|cx| cx.eval_static_initializer(*self))
    }
}
import string, random
words = open("Alice's Adventures in Wonderland by Lewis Carroll.txt").read().split('\n')
xwords = open('words.txt').read().split('\n')
emma = open('Emma by Jane Austen.txt').read().split('\n')
def break_book_into_words(book):
    """Split an iterable of text lines into a flat list of lowercase words.

    Punctuation and digits are deleted (so "don't" becomes "dont"), and
    hyphens act as word separators ("well-known" -> "well", "known").

    Args:
        book: iterable of strings, e.g. the lines of a book.

    Returns:
        List of lowercase, punctuation-free words in reading order.
    """
    # Translation table deleting every punctuation character and digit.
    no_punct = {ord(c): None for c in string.punctuation + string.digits}
    cleaned = []
    for line in book:
        # Hyphenated words are split apart rather than fused into one token.
        line = line.replace('-', ' ')
        cleaned.append(line.translate(no_punct).lower())
    # join-then-split is linear; the original built the text with repeated
    # string += which is quadratic in the size of the book.
    return ' '.join(cleaned).split()
#print(sorted(break_book_into_words(words), key=len))
"""
dictonary.setdefault(key, value) does the work of:
if key in dictonary:
dictonary[key].append(value)
else:
dictonary[key] = [value]
in 1 line. Use .setdefault() whenever creating a dict
"""
def unique_word_count(word_list):
    """Tally how often each word occurs in word_list.

    Returns a list of (count, word) tuples sorted with the most frequent
    (and, for ties, lexicographically later) words first.
    """
    counts = {}
    for word in word_list:
        counts[word] = counts.get(word, 0) + 1
    return sorted(((count, word) for word, count in counts.items()),
                  reverse=True)
#print(unique_word_count(break_book_into_words(emma)))
def make_list_a_dict(word_list):
    """Return a dict mapping each word in word_list to 0.

    Turning the list into a dict gives O(1) membership tests, which the
    rest of this script relies on when cross-checking word lists.
    """
    # dict.fromkeys collapses duplicates and runs at C speed, replacing
    # the original hand-written setdefault loop.
    return dict.fromkeys(word_list, 0)
#print(make_list_a_dict(xwords))
def cross_check_words(word_list, check_list):
    """Count words of word_list that do NOT appear in check_list.

    check_list is converted to a set once up front, so lookups are O(1)
    regardless of whether the caller passes a list or a dict — the file's
    own comments note the list version took ~27s vs 0.1s for a dict; this
    makes the fast path automatic.

    Returns:
        List of (count, word) tuples, most frequent first.
    """
    known = set(check_list)  # iterating a dict yields its keys, so both work
    counts = {}
    for word in word_list:
        if word not in known:
            counts[word] = counts.get(word, 0) + 1
    return sorted(((count, word) for word, count in counts.items()),
                  reverse=True)
"""
nested loops over lists are slow
"""
#print(cross_check_words(break_book_into_words(words), xwords)) #[Finished in 27.7s]
"""
it's worth making a list into a dict when nesting loops.
"""
#print(cross_check_words(break_book_into_words(words), make_list_a_dict(xwords))) #[Finished in 0.1s]
#print(len(break_book_into_words(emma))) #total word count
#print(len(unique_word_count(break_book_into_words(emma)))) #total different words
def top_10_words(book, num=10):
    """Print the num most common words in book, one per line.

    Args:
        book: iterable of text lines.
        num: how many of the most frequent words to show (default 10).
    """
    ranked = unique_word_count(break_book_into_words(book))[:num]
    print('the top %s most common words are:' % num)
    for freq, word in ranked:
        print(word, freq, sep='\t'*2)
"""
#sep='\t'*2 tells python to use two tab separators rather than a space like so:
the top 10 most common words are:
to 5242
the 5204
and 4897
of 4293
i 3191
a 3130
it 2529
her 2483
was 2400
she 2364
The optional argument 'num=10' defaults to 10 but can be changed when calling the function
optional arguments always follow the required ones.
""" | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific.  The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files.  The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific.  INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'

# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'

# Name of the Xcode build setting under which library search paths are kept.
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'

# Values substituted for GYP generator variables when targeting Xcode.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.dylib',
  # INTERMEDIATE_DIR is a place for targets to build up intermediate products.
  # It is specific to each build environment.  It is only guaranteed to exist
  # and be constant within the context of a project, corresponding to a single
  # input file.  Some build environments may allow their intermediate directory
  # to be shared on a wider scale, but this is not guaranteed.
  'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
  'OS': 'mac',
  'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
  'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
  'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
  'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
  'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
  'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
  'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
  'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
  'CONFIGURATION_NAME': '$(CONFIGURATION)',
}

# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
  'mac_bundle_resources',
  'mac_framework_headers',
  'mac_framework_private_headers',
  # 'mac_framework_dirs', input already handles _dirs endings.
]

# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
  'mac_bundle',
  'mac_bundle_resources',
  'mac_framework_headers',
  'mac_framework_private_headers',
  'xcode_create_dependents_test_runner',
]

# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
  'mac_bundle_resources',
  'mac_framework_headers',
  'mac_framework_private_headers',
]

# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS.  This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
  '$(SDKROOT)/usr/lib',
  '$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
  """Build an XCConfigurationList with one XCBuildConfiguration per name.

  An empty name list falls back to a single 'Default' configuration.  The
  first name becomes the defaultConfigurationName.
  """
  names = configuration_names if configuration_names else ['Default']
  xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
  for name in names:
    config = gyp.xcodeproj_file.XCBuildConfiguration({'name': name})
    xccl.AppendProperty('buildConfigurations', config)
  xccl.SetProperty('defaultConfigurationName', names[0])
  return xccl
class XcodeProject(object):
  def __init__(self, gyp_path, path, build_file_dict):
    """Wraps a PBXProject for the .gyp file at gyp_path.

    Args:
      gyp_path: path to the input .gyp file this project is generated from.
      path: output path of the .xcodeproj bundle; its directory is created
          on disk here (an already-existing directory is fine).
      build_file_dict: the parsed contents of the .gyp file.
    """
    self.gyp_path = gyp_path
    self.path = path
    self.project = gyp.xcodeproj_file.PBXProject(path=path)
    # projectDirPath lets Xcode resolve paths relative to the .gyp file's
    # directory rather than the project bundle's.
    projectDirPath = gyp.common.RelativePath(
                         os.path.dirname(os.path.abspath(self.gyp_path)),
                         os.path.dirname(path) or '.')
    self.project.SetProperty('projectDirPath', projectDirPath)
    self.project_file = \
        gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
    self.build_file_dict = build_file_dict

    # TODO(mark): add destructor that cleans up self.path if created_dir is
    # True and things didn't complete successfully.  Or do something even
    # better with "try"?
    self.created_dir = False
    try:
      os.makedirs(self.path)
      self.created_dir = True
    except OSError, e:
      if e.errno != errno.EEXIST:
        raise
  def Finalize1(self, xcode_targets, serialize_all_tests):
    """First finalization pass over this project.

    Normalizes the project's configuration list, installs user-specified
    project-wide settings, sorts targets into input order, creates 'Run ...'
    targets for anything with a 'run_as' attribute, and synthesizes 'All'
    and 'Run All Tests' aggregate targets when appropriate.

    Args:
      xcode_targets: dict mapping qualified GYP target names to the
          PBXNativeTarget/PBXAggregateTarget objects built for them.
      serialize_all_tests: if true, wrap each test run in a file lock so
          only one test runs at a time.
    """
    # Collect a list of all of the build configuration names used by the
    # various targets in the file.  It is very heavily advised to keep each
    # target in an entire project (even across multiple project files) using
    # the same set of configuration names.
    configurations = []
    for xct in self.project.GetProperty('targets'):
      xccl = xct.GetProperty('buildConfigurationList')
      xcbcs = xccl.GetProperty('buildConfigurations')
      for xcbc in xcbcs:
        name = xcbc.GetProperty('name')
        if name not in configurations:
          configurations.append(name)

    # Replace the XCConfigurationList attached to the PBXProject object with
    # a new one specifying all of the configuration names used by the various
    # targets.
    # NOTE(review): the bare 'except' also catches KeyboardInterrupt and
    # SystemExit; it immediately re-raises, so it exists only to attach the
    # gyp file name to the error output.
    try:
      xccl = CreateXCConfigurationList(configurations)
      self.project.SetProperty('buildConfigurationList', xccl)
    except:
      sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
      raise

    # The need for this setting is explained above where _intermediate_var is
    # defined.  The comments below about wanting to avoid project-wide build
    # settings apply here too, but this needs to be set on a project-wide basis
    # so that files relative to the _intermediate_var setting can be displayed
    # properly in the Xcode UI.
    #
    # Note that for configuration-relative files such as anything relative to
    # _intermediate_var, for the purposes of UI tree view display, Xcode will
    # only resolve the configuration name once, when the project file is
    # opened.  If the active build configuration is changed, the project file
    # must be closed and reopened if it is desired for the tree view to update.
    # This is filed as Apple radar 6588391.
    xccl.SetBuildSetting(_intermediate_var,
                         '$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
    xccl.SetBuildSetting(_shared_intermediate_var,
                         '$(SYMROOT)/DerivedSources/$(CONFIGURATION)')

    # Set user-specified project-wide build settings and config files.  This
    # is intended to be used very sparingly.  Really, almost everything should
    # go into target-specific build settings sections.  The project-wide
    # settings are only intended to be used in cases where Xcode attempts to
    # resolve variable references in a project context as opposed to a target
    # context, such as when resolving sourceTree references while building up
    # the tree tree view for UI display.
    # Any values set globally are applied to all configurations, then any
    # per-configuration values are applied.
    for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
      xccl.SetBuildSetting(xck, xcv)
    if 'xcode_config_file' in self.build_file_dict:
      config_ref = self.project.AddOrGetFileInRootGroup(
          self.build_file_dict['xcode_config_file'])
      xccl.SetBaseConfiguration(config_ref)
    build_file_configurations = self.build_file_dict.get('configurations', {})
    if build_file_configurations:
      for config_name in configurations:
        build_file_configuration_named = \
            build_file_configurations.get(config_name, {})
        if build_file_configuration_named:
          xcc = xccl.ConfigurationNamed(config_name)
          for xck, xcv in build_file_configuration_named.get('xcode_settings',
                                                             {}).iteritems():
            xcc.SetBuildSetting(xck, xcv)
          if 'xcode_config_file' in build_file_configuration_named:
            config_ref = self.project.AddOrGetFileInRootGroup(
                build_file_configurations[config_name]['xcode_config_file'])
            xcc.SetBaseConfiguration(config_ref)

    # Sort the targets based on how they appeared in the input.
    # TODO(mark): Like a lot of other things here, this assumes internal
    # knowledge of PBXProject - in this case, of its "targets" property.

    # ordinary_targets are ordinary targets that are already in the project
    # file.  run_test_targets are the targets that run unittests and should be
    # used for the Run All Tests target.  support_targets are the action/rule
    # targets used by GYP file targets, just kept for the assert check.
    ordinary_targets = []
    run_test_targets = []
    support_targets = []

    # targets is full list of targets in the project.
    targets = []

    # does the it define it's own "all"?
    has_custom_all = False

    # targets_for_all is the list of ordinary_targets that should be listed
    # in this project's "All" target.  It includes each non_runtest_target
    # that does not have suppress_wildcard set.
    targets_for_all = []

    for target in self.build_file_dict['targets']:
      target_name = target['target_name']
      toolset = target['toolset']
      qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
                                                    toolset)
      xcode_target = xcode_targets[qualified_target]
      # Make sure that the target being added to the sorted list is already in
      # the unsorted list.
      assert xcode_target in self.project._properties['targets']
      targets.append(xcode_target)
      ordinary_targets.append(xcode_target)
      if xcode_target.support_target:
        support_targets.append(xcode_target.support_target)
        targets.append(xcode_target.support_target)

      if not int(target.get('suppress_wildcard', False)):
        targets_for_all.append(xcode_target)

      if target_name.lower() == 'all':
        has_custom_all = True;

      # If this target has a 'run_as' attribute, add its target to the
      # targets, and add it to the test targets.
      if target.get('run_as'):
        # Make a target to run something.  It should have one
        # dependency, the parent xcode target.
        xccl = CreateXCConfigurationList(configurations)
        run_target = gyp.xcodeproj_file.PBXAggregateTarget({
              'name':        'Run ' + target_name,
              'productName': xcode_target.GetProperty('productName'),
              'buildConfigurationList': xccl,
            },
            parent=self.project)
        run_target.AddDependency(xcode_target)

        command = target['run_as']
        script = ''
        if command.get('working_directory'):
          script = script + 'cd "%s"\n' % \
                   gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
                       command.get('working_directory'))

        if command.get('environment'):
          script = script + "\n".join(
            ['export %s="%s"' %
             (key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
             for (key, val) in command.get('environment').iteritems()]) + "\n"

        # Some test end up using sockets, files on disk, etc. and can get
        # confused if more then one test runs at a time.  The generator
        # flag 'xcode_serialize_all_test_runs' controls the forcing of all
        # tests serially.  It defaults to True.  To get serial runs this
        # little bit of python does the same as the linux flock utility to
        # make sure only one runs at a time.
        command_prefix = ''
        if serialize_all_tests:
          command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """

        # If we were unable to exec for some reason, we want to exit
        # with an error, and fixup variable references to be shell
        # syntax instead of xcode syntax.
        script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
                 gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
                     gyp.common.EncodePOSIXShellList(command.get('action')))

        ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
              'shellScript':      script,
              'showEnvVarsInLog': 0,
            })
        run_target.AppendProperty('buildPhases', ssbp)

        # Add the run target to the project file.
        targets.append(run_target)
        run_test_targets.append(run_target)
        xcode_target.test_runner = run_target

    # Make sure that the list of targets being replaced is the same length as
    # the one replacing it, but allow for the added test runner targets.
    assert len(self.project._properties['targets']) == \
      len(ordinary_targets) + len(support_targets)

    self.project._properties['targets'] = targets

    # Get rid of unnecessary levels of depth in groups like the Source group.
    self.project.RootGroupsTakeOverOnlyChildren(True)

    # Sort the groups nicely.  Do this after sorting the targets, because the
    # Products group is sorted based on the order of the targets.
    self.project.SortGroups()

    # Create an "All" target if there's more than one target in this project
    # file and the project didn't define its own "All" target.  Put a generated
    # "All" target first so that people opening up the project for the first
    # time will build everything by default.
    if len(targets_for_all) > 1 and not has_custom_all:
      xccl = CreateXCConfigurationList(configurations)
      all_target = gyp.xcodeproj_file.PBXAggregateTarget(
          {
            'buildConfigurationList': xccl,
            'name':                   'All',
          },
          parent=self.project)

      for target in targets_for_all:
        all_target.AddDependency(target)

      # TODO(mark): This is evil because it relies on internal knowledge of
      # PBXProject._properties.  It's important to get the "All" target first,
      # though.
      self.project._properties['targets'].insert(0, all_target)

    # The same, but for run_test_targets.
    if len(run_test_targets) > 1:
      xccl = CreateXCConfigurationList(configurations)
      run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
          {
            'buildConfigurationList': xccl,
            'name':                   'Run All Tests',
          },
          parent=self.project)
      for run_test_target in run_test_targets:
        run_all_tests_target.AddDependency(run_test_target)

      # Insert after the "All" target, which must exist if there is more than
      # one run_test_target.
      self.project._properties['targets'].insert(1, run_all_tests_target)
  def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
    """Second finalization pass over the generated project.

    Args:
      xcode_targets: dict mapping qualified GYP target names to the XCTarget
          objects created for them.
      xcode_target_to_target_dict: dict mapping XCTarget objects back to their
          GYP spec dicts (accepted for interface symmetry; not read here).
    """
    # Finalize2 needs to happen in a separate step because the process of
    # updating references to other projects depends on the ordering of targets
    # within remote project files.  Finalize1 is responsible for sorting duty,
    # and once all project files are sorted, Finalize2 can come in and update
    # these references.

    # To support making a "test runner" target that will run all the tests
    # that are direct dependents of any given target, we look for
    # xcode_create_dependents_test_runner being set on an Aggregate target,
    # and generate a second target that will run the tests runners found under
    # the marked target.
    for bf_tgt in self.build_file_dict['targets']:
      if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
        tgt_name = bf_tgt['target_name']
        toolset = bf_tgt['toolset']
        qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
                                                      tgt_name, toolset)
        xcode_target = xcode_targets[qualified_target]
        # Only aggregate targets are eligible for a generated runner.
        if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
          # Collect all the run test targets.
          all_run_tests = []
          pbxtds = xcode_target.GetProperty('dependencies')
          for pbxtd in pbxtds:
            # Walk dependency -> targetProxy -> remote target, and pick up
            # any test runner Finalize1 hung off it (see xcode_target.test_runner
            # assignment during Finalize1).
            pbxcip = pbxtd.GetProperty('targetProxy')
            dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
            if hasattr(dependency_xct, 'test_runner'):
              all_run_tests.append(dependency_xct.test_runner)

          # Directly depend on all the runners as they depend on the target
          # that builds them.
          if len(all_run_tests) > 0:
            run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
                  'name': 'Run %s Tests' % tgt_name,
                  'productName': tgt_name,
                },
                parent=self.project)
            for run_test_target in all_run_tests:
              run_all_target.AddDependency(run_test_target)

            # Insert the test runner after the related target.
            idx = self.project._properties['targets'].index(xcode_target)
            self.project._properties['targets'].insert(idx + 1, run_all_target)

    # Update all references to other projects, to make sure that the lists of
    # remote products are complete.  Otherwise, Xcode will fill them in when
    # it opens the project file, which will result in unnecessary diffs.
    # TODO(mark): This is evil because it relies on internal knowledge of
    # PBXProject._other_pbxprojects.
    for other_pbxproject in self.project._other_pbxprojects.keys():
      self.project.AddOrGetProjectReference(other_pbxproject)

    self.project.SortRemoteProductReferences()

    # Give everything an ID.
    self.project_file.ComputeIDs()

    # Make sure that no two objects in the project file have the same ID.  If
    # multiple objects wind up with the same ID, upon loading the file, Xcode
    # will only recognize one object (the last one in the file?) and the
    # results are unpredictable.
    self.project_file.EnsureNoIDCollisions()
  def Write(self):
    """Writes the project file to disk, only replacing the old file when the
    content actually changed.

    The file is first written to a temporary name in the .xcodeproj directory,
    then compared against any existing project.pbxproj; identical content means
    the temp file is discarded, otherwise it is renamed into place.
    """
    # Write the project file to a temporary location first.  Xcode watches for
    # changes to the project file and presents a UI sheet offering to reload
    # the project when it does change.  However, in some cases, especially when
    # multiple projects are open or when Xcode is busy, things don't work so
    # seamlessly.  Sometimes, Xcode is able to detect that a project file has
    # changed but can't unload it because something else is referencing it.
    # To mitigate this problem, and to avoid even having Xcode present the UI
    # sheet when an open project is rewritten for inconsequential changes, the
    # project file is written to a temporary file in the xcodeproj directory
    # first.  The new temporary file is then compared to the existing project
    # file, if any.  If they differ, the new file replaces the old; otherwise,
    # the new project file is simply deleted.  Xcode properly detects a file
    # being renamed over an open project file as a change and so it remains
    # able to present the "project file changed" sheet under this system.
    # Writing to a temporary file first also avoids the possible problem of
    # Xcode rereading an incomplete project file.
    (output_fd, new_pbxproj_path) = \
        tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
                         dir=self.path)

    try:
      output_file = os.fdopen(output_fd, 'wb')
      self.project_file.Print(output_file)
      output_file.close()

      pbxproj_path = os.path.join(self.path, 'project.pbxproj')

      same = False
      try:
        # shallow=False: compare file contents, not just stat results.
        same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
      except OSError, e:
        # ENOENT simply means there is no existing project file yet; any
        # other error is unexpected and propagated.
        if e.errno != errno.ENOENT:
          raise

      if same:
        # The new file is identical to the old one, just get rid of the new
        # one.
        os.unlink(new_pbxproj_path)
      else:
        # The new file is different from the old one, or there is no old one.
        # Rename the new file to the permanent name.
        #
        # tempfile.mkstemp uses an overly restrictive mode, resulting in a
        # file that can only be read by the owner, regardless of the umask.
        # There's no reason to not respect the umask here, which means that
        # an extra hoop is required to fetch it and reset the new file's mode.
        #
        # No way to get the umask without setting a new one?  Set a safe one
        # and then set it back to the old value.
        umask = os.umask(077)
        os.umask(umask)

        os.chmod(new_pbxproj_path, 0666 & ~umask)
        os.rename(new_pbxproj_path, pbxproj_path)

    except Exception:
      # Don't leave turds behind.  In fact, if this code was responsible for
      # creating the xcodeproj directory, get rid of that too.
      os.unlink(new_pbxproj_path)
      if self.created_dir:
        shutil.rmtree(self.path, True)
      raise
# Module-level cache so that xcodebuild is only invoked once per process.
cached_xcode_version = None

def InstalledXcodeVersion():
  """Fetches the installed version of Xcode, returns empty string if it is
  unable to figure it out."""

  global cached_xcode_version
  # Idiomatic identity test; `not x is None` reads backwards and is flagged
  # by linters, so use `is not None`.
  if cached_xcode_version is not None:
    return cached_xcode_version

  # Default to an empty string
  cached_xcode_version = ''

  # Collect the xcodebuild's version information.
  try:
    import subprocess
    cmd = ['/usr/bin/xcodebuild', '-version']
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    xcodebuild_version_info = proc.communicate()[0]
    # Any error, return empty string
    if proc.returncode:
      xcodebuild_version_info = ''
  except OSError:
    # We failed to launch the tool
    xcodebuild_version_info = ''

  # Pull out the Xcode version itself.
  match_line = re.search('^Xcode (.*)$', xcodebuild_version_info, re.MULTILINE)
  if match_line:
    cached_xcode_version = match_line.group(1)
  # Done!
  return cached_xcode_version
def AddSourceToTarget(source, type, pbxp, xct):
  """Routes a source file into the appropriate build phase of target xct.

  Compilable sources go to the Sources phase, linkable files go to the
  Frameworks phase, and everything else (or anything on a type-'none' target)
  is merely added to the project's root group via pbxp.
  """
  # TODO(mark): Perhaps source_extensions and library_extensions can be made a
  # little bit fancier.
  compile_extensions = frozenset(['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's'])

  # .o is conceptually more of a "source" than a "library," but Xcode thinks
  # of "sources" as things to compile and "libraries" (or "frameworks") as
  # things to link with.  Adding an object file to an Xcode target's frameworks
  # phase works properly.
  link_extensions = frozenset(['a', 'dylib', 'framework', 'o'])

  extension = posixpath.splitext(posixpath.basename(source))[1]
  if extension:
    extension = extension[1:].lower()

  if type != 'none' and extension in compile_extensions:
    xct.SourcesPhase().AddFile(source)
  elif type != 'none' and extension in link_extensions:
    xct.FrameworksPhase().AddFile(source)
  else:
    # Files that aren't added to a sources or frameworks build phase can still
    # go into the project file, just not as part of a build phase.
    pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
  """Adds resource to the Resources build phase of Xcode target xct.

  pbxp is accepted only for signature parity with AddSourceToTarget.
  """
  # TODO(mark): Combine with AddSourceToTarget above?  Or just inline this call
  # where it's used.
  resources_phase = xct.ResourcesPhase()
  resources_phase.AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
  """Adds header to the Headers build phase of Xcode target xct, marking it
  Public or Private according to is_public.

  pbxp is accepted only for signature parity with AddSourceToTarget.
  """
  # TODO(mark): Combine with AddSourceToTarget above?  Or just inline this call
  # where it's used.
  visibility = 'Public' if is_public else 'Private'
  settings = '{ATTRIBUTES = (%s, ); }' % visibility
  xct.HeadersPhase().AddFile(header, settings)
# Matches one $(VAR) reference; group 1 is the whole "$(VAR)" text and
# group 2 is the bare variable name.  Raw string avoids invalid-escape
# warnings for "\$" and "\(".
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')

def ExpandXcodeVariables(string, expansions):
  """Expands Xcode-style $(VARIABLES) in string per the expansions dict.

  In some rare cases, it is appropriate to expand Xcode variables when a
  project file is generated.  For any substring $(VAR) in string, if VAR is a
  key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
  Any $(VAR) substring in string for which VAR is not a key in the expansions
  dict will remain in the returned string.
  """
  # findall always returns a list (possibly empty), never None, so no
  # None-check is needed.
  matches = _xcode_variable_re.findall(string)
  matches.reverse()
  for match in matches:
    (to_replace, variable) = match
    if variable not in expansions:
      continue
    # Use plain str.replace rather than re.sub: re.sub treats the replacement
    # argument as a template, so an expansion value containing backslashes
    # (e.g. "\1" or a Windows path) would be mangled or raise an error.
    string = string.replace(to_replace, expansions[variable])
  return string
def EscapeXCodeArgument(s):
  """We must escape the arguments that we give to XCode so that it knows not to
  split on spaces and to respect backslash and quote literals."""
  # Escape backslashes first so the quote-escaping backslashes added next are
  # not themselves doubled, then wrap the whole thing in double quotes.
  escaped = s.replace('\\', '\\\\').replace('"', '\\"')
  return '"%s"' % escaped
def PerformBuild(data, configurations, params):
  """Runs xcodebuild against each generated .xcodeproj, once per requested
  configuration.

  Args:
    data: dict mapping build file paths to their parsed contents; only keys
        ending in '.gyp' are built.
    configurations: list of configuration names to pass to xcodebuild.
    params: generator parameters; 'options' supplies the project suffix and
        the optional generator_output directory.
  """
  options = params['options']

  for build_file, build_file_dict in data.iteritems():
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    # Mirror the path construction used in GenerateOutput so the project
    # being built is the one that was written.
    xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
    if options.generator_output:
      xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)

    for config in configurations:
      arguments = ['xcodebuild', '-project', xcodeproj_path]
      arguments += ['-configuration', config]
      print "Building [%s]: %s" % (config, arguments)
      # Raises CalledProcessError on a non-zero exit, aborting the build.
      subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generates one Xcode project per .gyp build file and populates its targets.

  Args:
    target_list: list of qualified GYP target names to generate.
    target_dicts: dict mapping qualified target names to their spec dicts.
    data: dict mapping build file paths to their parsed contents.
    params: generator parameters; 'options' and 'generator_flags' are read.

  Raises:
    Exception: if any target uses a toolset other than 'target'.
  """
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  parallel_builds = generator_flags.get('xcode_parallel_builds', True)
  serialize_all_tests = \
      generator_flags.get('xcode_serialize_all_test_runs', True)
  project_version = generator_flags.get('xcode_project_version', None)
  skip_excluded_files = \
      not generator_flags.get('xcode_list_excluded_files', True)

  # First pass: create one XcodeProject wrapper per .gyp build file.
  xcode_projects = {}
  for build_file, build_file_dict in data.iteritems():
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
    if options.generator_output:
      xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
    xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
    xcode_projects[build_file] = xcp
    pbxp = xcp.project
    if parallel_builds:
      pbxp.SetProperty('attributes',
                       {'BuildIndependentTargetsInParallel': 'YES'})
    if project_version:
      xcp.project_file.SetXcodeVersion(project_version)

    # Add gyp/gypi files to project
    if not generator_flags.get('standalone'):
      main_group = pbxp.GetProperty('mainGroup')
      build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
      main_group.AppendChild(build_group)
      for included_file in build_file_dict['included_files']:
        build_group.AddOrGetFileByPath(included_file, False)

  # Second pass: create one XCTarget per GYP target and fill in its build
  # phases, dependencies, and settings.
  xcode_targets = {}
  xcode_target_to_target_dict = {}
  for qualified_target in target_list:
    [build_file, target_name, toolset] = \
        gyp.common.ParseQualifiedTarget(qualified_target)

    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      raise Exception(
          'Multiple toolsets not supported in xcode build (target %s)' %
          qualified_target)
    # The default configuration comes first so Xcode uses it by default.
    configuration_names = [spec['default_configuration']]
    for configuration_name in sorted(spec['configurations'].keys()):
      if configuration_name not in configuration_names:
        configuration_names.append(configuration_name)
    xcp = xcode_projects[build_file]
    pbxp = xcp.project

    # Set up the configurations for the target according to the list of names
    # supplied.
    xccl = CreateXCConfigurationList(configuration_names)

    # Create an XCTarget subclass object for the target.  The type with
    # "+bundle" appended will be used if the target has "mac_bundle" set.
    # loadable_modules not in a mac_bundle are mapped to
    # com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
    # to create a single-file mh_bundle.
    _types = {
      'executable': 'com.apple.product-type.tool',
      'loadable_module': 'com.googlecode.gyp.xcode.bundle',
      'shared_library': 'com.apple.product-type.library.dynamic',
      'static_library': 'com.apple.product-type.library.static',
      'executable+bundle': 'com.apple.product-type.application',
      'loadable_module+bundle': 'com.apple.product-type.bundle',
      'shared_library+bundle': 'com.apple.product-type.framework',
    }

    target_properties = {
      'buildConfigurationList': xccl,
      'name': target_name,
    }

    type = spec['type']
    is_bundle = int(spec.get('mac_bundle', 0))
    if type != 'none':
      type_bundle_key = type
      if is_bundle:
        type_bundle_key += '+bundle'
      xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
      try:
        target_properties['productType'] = _types[type_bundle_key]
      except KeyError, e:
        gyp.common.ExceptionAppend(e, "-- unknown product type while "
                                   "writing target %s" % target_name)
        raise
    else:
      xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
      assert not is_bundle, (
          'mac_bundle targets cannot have type none (target "%s")' %
          target_name)

    target_product_name = spec.get('product_name')
    if target_product_name is not None:
      target_properties['productName'] = target_product_name

    xct = xctarget_type(target_properties, parent=pbxp,
                        force_outdir=spec.get('product_dir'),
                        force_prefix=spec.get('product_prefix'),
                        force_extension=spec.get('product_extension'))
    pbxp.AppendProperty('targets', xct)
    xcode_targets[qualified_target] = xct
    xcode_target_to_target_dict[xct] = spec

    spec_actions = spec.get('actions', [])
    spec_rules = spec.get('rules', [])

    # Xcode has some "issues" with checking dependencies for the "Compile
    # sources" step with any source files/headers generated by actions/rules.
    # To work around this, if a target is building anything directly (not
    # type "none"), then a second target is used to run the GYP actions/rules
    # and is made a dependency of this target.  This way the work is done
    # before the dependency checks for what should be recompiled.
    support_xct = None
    if type != 'none' and (spec_actions or spec_rules):
      support_xccl = CreateXCConfigurationList(configuration_names);
      support_target_properties = {
        'buildConfigurationList': support_xccl,
        'name': target_name + ' Support',
      }
      if target_product_name:
        support_target_properties['productName'] = \
            target_product_name + ' Support'
      support_xct = \
          gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
                                                parent=pbxp)
      pbxp.AppendProperty('targets', support_xct)
      xct.AddDependency(support_xct)
      # Hang the support target off the main target so it can be tested/found
      # by the generator during Finalize.
      xct.support_target = support_xct

    prebuild_index = 0

    # Add custom shell script phases for "actions" sections.
    for action in spec_actions:
      # There's no need to write anything into the script to ensure that the
      # output directories already exist, because Xcode will look at the
      # declared outputs and automatically ensure that they exist for us.

      # Do we have a message to print when this action runs?
      message = action.get('message')
      if message:
        message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
      else:
        message = ''

      # Turn the list into a string that can be passed to a shell.
      action_string = gyp.common.EncodePOSIXShellList(action['action'])

      # Convert Xcode-type variable references to sh-compatible environment
      # variable references.
      message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
      action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
        action_string)

      script = ''
      # Include the optional message
      if message_sh:
        script += message_sh + '\n'
      # Be sure the script runs in exec, and that if exec fails, the script
      # exits signalling an error.
      script += 'exec ' + action_string_sh + '\nexit 1\n'
      ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
            'inputPaths': action['inputs'],
            'name': 'Action "' + action['action_name'] + '"',
            'outputPaths': action['outputs'],
            'shellScript': script,
            'showEnvVarsInLog': 0,
          })

      if support_xct:
        support_xct.AppendProperty('buildPhases', ssbp)
      else:
        # TODO(mark): this assumes too much knowledge of the internals of
        # xcodeproj_file; some of these smarts should move into xcodeproj_file
        # itself.
        xct._properties['buildPhases'].insert(prebuild_index, ssbp)
        prebuild_index = prebuild_index + 1

      # TODO(mark): Should verify that at most one of these is specified.
      if int(action.get('process_outputs_as_sources', False)):
        for output in action['outputs']:
          AddSourceToTarget(output, type, pbxp, xct)

      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        for output in action['outputs']:
          AddResourceToTarget(output, pbxp, xct)

    # tgt_mac_bundle_resources holds the list of bundle resources so
    # the rule processing can check against it.
    if is_bundle:
      tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
    else:
      tgt_mac_bundle_resources = []

    # Add custom shell script phases driving "make" for "rules" sections.
    #
    # Xcode's built-in rule support is almost powerful enough to use directly,
    # but there are a few significant deficiencies that render them unusable.
    # There are workarounds for some of its inadequacies, but in aggregate,
    # the workarounds added complexity to the generator, and some workarounds
    # actually require input files to be crafted more carefully than I'd like.
    # Consequently, until Xcode rules are made more capable, "rules" input
    # sections will be handled in Xcode output by shell script build phases
    # performed prior to the compilation phase.
    #
    # The following problems with Xcode rules were found.  The numbers are
    # Apple radar IDs.  I hope that these shortcomings are addressed, I really
    # liked having the rules handled directly in Xcode during the period that
    # I was prototyping this.
    #
    # 6588600 Xcode compiles custom script rule outputs too soon, compilation
    #         fails.  This occurs when rule outputs from distinct inputs are
    #         interdependent.  The only workaround is to put rules and their
    #         inputs in a separate target from the one that compiles the rule
    #         outputs.  This requires input file cooperation and it means that
    #         process_outputs_as_sources is unusable.
    # 6584932 Need to declare that custom rule outputs should be excluded from
    #         compilation.  A possible workaround is to lie to Xcode about a
    #         rule's output, giving it a dummy file it doesn't know how to
    #         compile.  The rule action script would need to touch the dummy.
    # 6584839 I need a way to declare additional inputs to a custom rule.
    #         A possible workaround is a shell script phase prior to
    #         compilation that touches a rule's primary input files if any
    #         would-be additional inputs are newer than the output.  Modifying
    #         the source tree - even just modification times - feels dirty.
    # 6564240 Xcode "custom script" build rules always dump all environment
    #         variables.  This is a low-prioroty problem and is not a
    #         show-stopper.
    rules_by_ext = {}
    for rule in spec_rules:
      rules_by_ext[rule['extension']] = rule

      # First, some definitions:
      #
      # A "rule source" is a file that was listed in a target's "sources"
      # list and will have a rule applied to it on the basis of matching the
      # rule's "extensions" attribute.  Rule sources are direct inputs to
      # rules.
      #
      # Rule definitions may specify additional inputs in their "inputs"
      # attribute.  These additional inputs are used for dependency tracking
      # purposes.
      #
      # A "concrete output" is a rule output with input-dependent variables
      # resolved.  For example, given a rule with:
      #   'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
      # if the target's "sources" list contained "one.ext" and "two.ext",
      # the "concrete output" for rule input "two.ext" would be "two.cc".  If
      # a rule specifies multiple outputs, each input file that the rule is
      # applied to will have the same number of concrete outputs.
      #
      # If any concrete outputs are outdated or missing relative to their
      # corresponding rule_source or to any specified additional input, the
      # rule action must be performed to generate the concrete outputs.

      # concrete_outputs_by_rule_source will have an item at the same index
      # as the rule['rule_sources'] that it corresponds to.  Each item is a
      # list of all of the concrete outputs for the rule_source.
      concrete_outputs_by_rule_source = []

      # concrete_outputs_all is a flat list of all concrete outputs that this
      # rule is able to produce, given the known set of input files
      # (rule_sources) that apply to it.
      concrete_outputs_all = []

      # messages & actions are keyed by the same indices as rule['rule_sources']
      # and concrete_outputs_by_rule_source.  They contain the message and
      # action to perform after resolving input-dependent variables.  The
      # message is optional, in which case None is stored for each rule source.
      messages = []
      actions = []

      for rule_source in rule.get('rule_sources', []):
        rule_source_dirname, rule_source_basename = \
            posixpath.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            posixpath.splitext(rule_source_basename)

        # These are the same variable names that Xcode uses for its own native
        # rule support.  Because Xcode's rule engine is not being used, they
        # need to be expanded as they are written to the makefile.
        rule_input_dict = {
          'INPUT_FILE_BASE': rule_source_root,
          'INPUT_FILE_SUFFIX': rule_source_ext,
          'INPUT_FILE_NAME': rule_source_basename,
          'INPUT_FILE_PATH': rule_source,
          'INPUT_FILE_DIRNAME': rule_source_dirname,
        }

        concrete_outputs_for_this_rule_source = []
        for output in rule.get('outputs', []):
          # Fortunately, Xcode and make both use $(VAR) format for their
          # variables, so the expansion is the only transformation necessary.
          # Any remaning $(VAR)-type variables in the string can be given
          # directly to make, which will pick up the correct settings from
          # what Xcode puts into the environment.
          concrete_output = ExpandXcodeVariables(output, rule_input_dict)
          concrete_outputs_for_this_rule_source.append(concrete_output)

          # Add all concrete outputs to the project.
          pbxp.AddOrGetFileInRootGroup(concrete_output)

        concrete_outputs_by_rule_source.append( \
            concrete_outputs_for_this_rule_source)
        concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)

        # TODO(mark): Should verify that at most one of these is specified.
        if int(rule.get('process_outputs_as_sources', False)):
          for output in concrete_outputs_for_this_rule_source:
            AddSourceToTarget(output, type, pbxp, xct)

        # If the file came from the mac_bundle_resources list or if the rule
        # is marked to process outputs as bundle resource, do so.
        was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
        if was_mac_bundle_resource or \
            int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          for output in concrete_outputs_for_this_rule_source:
            AddResourceToTarget(output, pbxp, xct)

        # Do we have a message to print when this rule runs?
        message = rule.get('message')
        if message:
          message = gyp.common.EncodePOSIXShellArgument(message)
          message = ExpandXcodeVariables(message, rule_input_dict)
        messages.append(message)

        # Turn the list into a string that can be passed to a shell.
        action_string = gyp.common.EncodePOSIXShellList(rule['action'])

        action = ExpandXcodeVariables(action_string, rule_input_dict)
        actions.append(action)

      if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibilty for collision here.  Consider
        # target "t" rule "A_r" and target "t_A" rule "r".
        makefile_name = '%s.make' % re.sub(
            '[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name']))
        makefile_path = os.path.join(xcode_projects[build_file].path,
                                     makefile_name)
        # TODO(mark): try/close?  Write to a temporary file and swap it only
        # if it's got changes?
        makefile = open(makefile_path, 'wb')

        # make will build the first target in the makefile by default.  By
        # convention, it's called "all".  List all (or at least one)
        # concrete output for each rule source as a prerequisite of the "all"
        # target.
        makefile.write('all: \\\n')
        for concrete_output_index in \
            xrange(0, len(concrete_outputs_by_rule_source)):
          # Only list the first (index [0]) concrete output of each input
          # in the "all" target.  Otherwise, a parallel make (-j > 1) would
          # attempt to process each input multiple times simultaneously.
          # Otherwise, "all" could just contain the entire list of
          # concrete_outputs_all.
          concrete_output = \
              concrete_outputs_by_rule_source[concrete_output_index][0]
          if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
            eol = ''
          else:
            eol = ' \\'
          makefile.write('    %s%s\n' % (concrete_output, eol))

        for (rule_source, concrete_outputs, message, action) in \
            zip(rule['rule_sources'], concrete_outputs_by_rule_source,
                messages, actions):
          makefile.write('\n')

          # Add a rule that declares it can build each concrete output of a
          # rule source.  Collect the names of the directories that are
          # required.
          concrete_output_dirs = []
          for concrete_output_index in xrange(0, len(concrete_outputs)):
            concrete_output = concrete_outputs[concrete_output_index]
            if concrete_output_index == 0:
              bol = ''
            else:
              bol = '    '
            makefile.write('%s%s \\\n' % (bol, concrete_output))

            concrete_output_dir = posixpath.dirname(concrete_output)
            if (concrete_output_dir and
                concrete_output_dir not in concrete_output_dirs):
              concrete_output_dirs.append(concrete_output_dir)

          makefile.write('    : \\\n')

          # The prerequisites for this rule are the rule source itself and
          # the set of additional rule inputs, if any.
          prerequisites = [rule_source]
          prerequisites.extend(rule.get('inputs', []))
          for prerequisite_index in xrange(0, len(prerequisites)):
            prerequisite = prerequisites[prerequisite_index]
            if prerequisite_index == len(prerequisites) - 1:
              eol = ''
            else:
              eol = ' \\'
            makefile.write('    %s%s\n' % (prerequisite, eol))

          # Make sure that output directories exist before executing the rule
          # action.
          if len(concrete_output_dirs) > 0:
            makefile.write('\t@mkdir -p "%s"\n' %
                           '" "'.join(concrete_output_dirs))

          # The rule message and action have already had the necessary variable
          # substitutions performed.
          if message:
            # Mark it with note: so Xcode picks it up in build output.
            makefile.write('\t@echo note: %s\n' % message)
          makefile.write('\t%s\n' % action)

        makefile.close()

        # It might be nice to ensure that needed output directories exist
        # here rather than in each target in the Makefile, but that wouldn't
        # work if there ever was a concrete output that had an input-dependent
        # variable anywhere other than in the leaf position.

        # Don't declare any inputPaths or outputPaths.  If they're present,
        # Xcode will provide a slight optimization by only running the script
        # phase if any output is missing or outdated relative to any input.
        # Unfortunately, it will also assume that all outputs are touched by
        # the script, and if the outputs serve as files in a compilation
        # phase, they will be unconditionally rebuilt.  Since make might not
        # rebuild everything that could be declared here as an output, this
        # extra compilation activity is unnecessary.  With inputPaths and
        # outputPaths not supplied, make will always be called, but it knows
        # enough to not do anything when everything is up-to-date.

        # To help speed things up, pass -j COUNT to make so it does some work
        # in parallel.  Don't use ncpus because Xcode will build ncpus targets
        # in parallel and if each target happens to have a rules step, there
        # would be ncpus^2 things going.  With a machine that has 2 quad-core
        # Xeons, a build can quickly run out of processes based on
        # scheduling/other tasks, and randomly failing builds are no good.
        script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
  JOB_COUNT=4
fi
exec "${DEVELOPER_BIN_DIR}/make" -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
        ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
              'name': 'Rule "' + rule['rule_name'] + '"',
              'shellScript': script,
              'showEnvVarsInLog': 0,
            })

        if support_xct:
          support_xct.AppendProperty('buildPhases', ssbp)
        else:
          # TODO(mark): this assumes too much knowledge of the internals of
          # xcodeproj_file; some of these smarts should move into xcodeproj_file
          # itself.
          xct._properties['buildPhases'].insert(prebuild_index, ssbp)
          prebuild_index = prebuild_index + 1

      # Extra rule inputs also go into the project file.  Concrete outputs were
      # already added when they were computed.
      groups = ['inputs', 'inputs_excluded']
      if skip_excluded_files:
        groups = [x for x in groups if not x.endswith('_excluded')]
      for group in groups:
        for item in rule.get(group, []):
          pbxp.AddOrGetFileInRootGroup(item)

    # Add "sources".
    for source in spec.get('sources', []):
      (source_root, source_extension) = posixpath.splitext(source)
      if source_extension[1:] not in rules_by_ext:
        # AddSourceToTarget will add the file to a root group if it's not
        # already there.
        AddSourceToTarget(source, type, pbxp, xct)
      else:
        pbxp.AddOrGetFileInRootGroup(source)

    # Add "mac_bundle_resources" and "mac_framework_private_headers" if
    # it's a bundle of any type.
    if is_bundle:
      for resource in tgt_mac_bundle_resources:
        (resource_root, resource_extension) = posixpath.splitext(resource)
        if resource_extension[1:] not in rules_by_ext:
          AddResourceToTarget(resource, pbxp, xct)
        else:
          pbxp.AddOrGetFileInRootGroup(resource)

      for header in spec.get('mac_framework_private_headers', []):
        AddHeaderToTarget(header, pbxp, xct, False)

    # Add "mac_framework_headers".  These can be valid for both frameworks
    # and static libraries.
    if is_bundle or type == 'static_library':
      for header in spec.get('mac_framework_headers', []):
        AddHeaderToTarget(header, pbxp, xct, True)

    # Add "copies".
    pbxcp_dict = {}
    for copy_group in spec.get('copies', []):
      dest = copy_group['destination']
      if dest[0] not in ('/', '$'):
        # Relative paths are relative to $(SRCROOT).
        dest = '$(SRCROOT)/' + dest

      # Coalesce multiple "copies" sections in the same target with the same
      # "destination" property into the same PBXCopyFilesBuildPhase, otherwise
      # they'll wind up with ID collisions.
      pbxcp = pbxcp_dict.get(dest, None)
      if pbxcp is None:
        pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
              'name': 'Copy to ' + copy_group['destination']
            },
            parent=xct)
        pbxcp.SetDestination(dest)

        # TODO(mark): The usual comment about this knowing too much about
        # gyp.xcodeproj_file internals applies.
        xct._properties['buildPhases'].insert(prebuild_index, pbxcp)

        pbxcp_dict[dest] = pbxcp

      for file in copy_group['files']:
        pbxcp.AddFile(file)

    # Excluded files can also go into the project file.
    if not skip_excluded_files:
      for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
                  'mac_framework_private_headers']:
        excluded_key = key + '_excluded'
        for item in spec.get(excluded_key, []):
          pbxp.AddOrGetFileInRootGroup(item)

    # So can "inputs" and "outputs" sections of "actions" groups.
    groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
    if skip_excluded_files:
      groups = [x for x in groups if not x.endswith('_excluded')]
    for action in spec.get('actions', []):
      for group in groups:
        for item in action.get(group, []):
          # Exclude anything in BUILT_PRODUCTS_DIR.  They're products, not
          # sources.
          if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
            pbxp.AddOrGetFileInRootGroup(item)

    for postbuild in spec.get('postbuilds', []):
      action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
      script = 'exec ' + action_string_sh + '\nexit 1\n'

      # Make the postbuild step depend on the output of ld or ar from this
      # target.  Apparently putting the script step after the link step isn't
      # sufficient to ensure proper ordering in all cases.  With an input
      # declared but no outputs, the script step should run every time, as
      # desired.
      ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
            'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
            'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
            'shellScript': script,
            'showEnvVarsInLog': 0,
          })
      xct.AppendProperty('buildPhases', ssbp)

    # Add dependencies before libraries, because adding a dependency may imply
    # adding a library.  It's preferable to keep dependencies listed first
    # during a link phase so that they can override symbols that would
    # otherwise be provided by libraries, which will usually include system
    # libraries.  On some systems, ld is finicky and even requires the
    # libraries to be ordered in such a way that unresolved symbols in
    # earlier-listed libraries may only be resolved by later-listed libraries.
    # The Mac linker doesn't work that way, but other platforms do, and so
    # their linker invocations need to be constructed in this way.  There's
    # no compelling reason for Xcode's linker invocations to differ.
    if 'dependencies' in spec:
      for dependency in spec['dependencies']:
        xct.AddDependency(xcode_targets[dependency])
        # The support project also gets the dependencies (in case they are
        # needed for the actions/rules to work).
        if support_xct:
          support_xct.AddDependency(xcode_targets[dependency])

    if 'libraries' in spec:
      for library in spec['libraries']:
        xct.FrameworksPhase().AddFile(library)
        # Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
        # I wish Xcode handled this automatically.
        library_dir = posixpath.dirname(library)
        if library_dir not in xcode_standard_library_dirs and (
            not xct.HasBuildSetting(_library_search_paths_var) or
            library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
          xct.AppendBuildSetting(_library_search_paths_var, library_dir)

    for configuration_name in configuration_names:
      configuration = spec['configurations'][configuration_name]
      xcbc = xct.ConfigurationNamed(configuration_name)
      for include_dir in configuration.get('mac_framework_dirs', []):
        xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
      for include_dir in configuration.get('include_dirs', []):
        xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
      if 'defines' in configuration:
        for define in configuration['defines']:
          set_define = EscapeXCodeArgument(define)
          xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
      if 'xcode_settings' in configuration:
        for xck, xcv in configuration['xcode_settings'].iteritems():
          xcbc.SetBuildSetting(xck, xcv)
      if 'xcode_config_file' in configuration:
        config_ref = pbxp.AddOrGetFileInRootGroup(
            configuration['xcode_config_file'])
        xcbc.SetBaseConfiguration(config_ref)

  # Finally, sort and write every project file.  Finalize1 must complete for
  # all projects before any Finalize2 runs (see Finalize2's comments).
  build_files = []
  for build_file, build_file_dict in data.iteritems():
    if build_file.endswith('.gyp'):
      build_files.append(build_file)

  for build_file in build_files:
    xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)

  for build_file in build_files:
    xcode_projects[build_file].Finalize2(xcode_targets,
                                         xcode_target_to_target_dict)

  for build_file in build_files:
    xcode_projects[build_file].Write()
# Copyright 2015 HGST
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_brick.initiator import connector
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import hgst
# Actual testing of the os_brick HGST driver done in the os_brick testcases
# Here we're concerned only with the small API shim that connects Nova
# so these will be pretty simple cases.
class LibvirtHGSTVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
    """Tests for the thin Nova API shim around the os_brick HGST connector.

    The real device handling is exercised in the os_brick test suite; here
    we only verify that connect/disconnect/config calls are delegated with
    the expected arguments.
    """

    def _make_driver(self):
        # Each test works on a freshly constructed driver instance.
        return hgst.LibvirtHGSTVolumeDriver(self.fake_conn)

    def test_libvirt_hgst_driver_type(self):
        """The driver must wire up an os_brick HGST connector."""
        driver = self._make_driver()
        self.assertIsInstance(driver.connector, connector.HGSTConnector)

    def test_libvirt_hgst_driver_connect(self):
        """connect_volume must record the discovered device path."""
        def fake_connect_volume(data):
            # Stand-in for the os_brick discovery call.
            return {'path': '/dev/space01'}
        driver = self._make_driver()
        driver.connector.connect_volume = fake_connect_volume
        disk_info = {'path': '/dev/space01', 'name': 'space01'}
        connection_info = {'data': disk_info}
        driver.connect_volume(connection_info, None)
        self.assertEqual('/dev/space01',
                         connection_info['data']['device_path'])

    def test_libvirt_hgst_driver_get_config(self):
        """get_config must describe the volume as a local block device."""
        driver = self._make_driver()
        disk_info = {'path': '/dev/space01', 'name': 'space01', 'type': 'raw',
                     'dev': 'vda1', 'bus': 'pci0',
                     'device_path': '/dev/space01'}
        connection_info = {'data': disk_info}
        conf = driver.get_config(connection_info, disk_info)
        self.assertEqual('block', conf.source_type)
        self.assertEqual('/dev/space01', conf.source_path)

    def test_libvirt_hgst_driver_disconnect(self):
        """disconnect_volume must delegate to the os_brick connector."""
        driver = self._make_driver()
        driver.connector.disconnect_volume = mock.MagicMock()
        disk_info = {'path': '/dev/space01', 'name': 'space01', 'type': 'raw',
                     'dev': 'vda1', 'bus': 'pci0',
                     'device_path': '/dev/space01'}
        connection_info = {'data': disk_info}
        driver.disconnect_volume(connection_info, disk_info)
        driver.connector.disconnect_volume.assert_called_once_with(
            disk_info, None)
# (c) 2016, Ansible by Red Hat <info@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import string_types
def pct_to_int(value, num_items, min_value=1):
    """Resolve *value* into an item count.

    A string of the form ``"x%"`` is interpreted as a percentage of
    ``num_items`` (floored, but never less than ``min_value``); any
    other value is coerced straight to ``int``.
    """
    is_percentage = isinstance(value, string_types) and value.endswith('%')
    if not is_percentage:
        return int(value)
    pct = int(value.replace("%", ""))
    # A zero result falls back to min_value via the `or`.
    return int((pct / 100.0) * num_items) or min_value
#!/usr/bin/env python
# coding=utf8
"""
Reference archive table from suite and add path to archive root
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import psycopg2
from daklib.dak_exceptions import DBUpdateError
from daklib.config import Config
################################################################################
def do_update(self):
print __doc__
try:
cnf = Config()
c = self.db.cursor()
archive_root = cnf["Dir::Root"]
c.execute("ALTER TABLE archive ADD COLUMN path TEXT NOT NULL DEFAULT %s", (archive_root,))
c.execute("ALTER TABLE archive ALTER COLUMN path DROP DEFAULT")
c.execute("ALTER TABLE archive ADD COLUMN mode CHAR(4) NOT NULL DEFAULT '0644' CHECK (mode SIMILAR TO '[0-7]{4}')")
c.execute("ALTER TABLE archive ADD COLUMN tainted BOOLEAN NOT NULL DEFAULT 'f'")
c.execute("ALTER TABLE archive ADD COLUMN use_morgue BOOLEAN NOT NULL DEFAULT 't'")
c.execute("SELECT id FROM archive")
(archive_id,) = c.fetchone()
if c.fetchone() is not None:
raise DBUpdateError("Cannot automatically upgrade form installation with multiple archives.")
c.execute("ALTER TABLE suite ADD COLUMN archive_id INT REFERENCES archive(id) NOT NULL DEFAULT %s", (archive_id,))
c.execute("ALTER TABLE suite ALTER COLUMN archive_id DROP DEFAULT")
c.execute("UPDATE config SET value = '73' WHERE name = 'db_revision'")
self.db.commit()
except psycopg2.ProgrammingError as msg:
self.db.rollback()
raise DBUpdateError('Unable to apply sick update 73, rollback issued. Error message : %s' % (str(msg))) | unknown | codeparrot/codeparrot-clean | ||
/*
@Copyright Barrett Adair 2015-2017
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_CLBL_TRTS_ADD_MEMBER_CV_HPP
#define BOOST_CLBL_TRTS_ADD_MEMBER_CV_HPP
#include <boost/callable_traits/detail/core.hpp>
namespace boost { namespace callable_traits {
//[ add_member_cv_hpp
/*`
[section:ref_add_member_cv add_member_cv]
[heading Header]
``#include <boost/callable_traits/add_member_cv.hpp>``
[heading Definition]
*/
template<typename T>
using add_member_cv_t = //see below
//<-
#ifdef BOOST_CLBL_TRTS_DISABLE_ABOMINABLE_FUNCTIONS
    // Abominable (cv/ref-qualified) function types are unusable on this
    // compiler: emit a descriptive error type rather than producing one.
    detail::sfinae_try<
        typename detail::traits<T>::add_member_cv,

        detail::fail_when_same<typename detail::traits<T>::add_member_cv,
            detail::abominable_functions_not_supported_on_this_compiler,
            this_compiler_doesnt_support_abominable_function_types>,

        detail::fail_if_invalid<typename detail::traits<T>::add_member_cv,
            member_qualifiers_are_illegal_for_this_type>>;
#else
    // Normal path: take the transformed type from the traits machinery and
    // turn an invalid result into a substitution failure.
    detail::try_but_fail_if_invalid<
        typename detail::traits<T>::add_member_cv,
        member_qualifiers_are_illegal_for_this_type>;
#endif // #ifdef BOOST_CLBL_TRTS_DISABLE_ABOMINABLE_FUNCTIONS

namespace detail {

    // SFINAE-friendly metafunction form: `type` is only defined when the
    // alias above is well-formed for T (specialization matches via dummy).
    template<typename T, typename = std::false_type>
    struct add_member_cv_impl {};

    template<typename T>
    struct add_member_cv_impl <T, typename std::is_same<
        add_member_cv_t<T>, detail::dummy>::type>
    {
        using type = add_member_cv_t<T>;
    };
}

//->

template<typename T>
struct add_member_cv : detail::add_member_cv_impl<T> {};
//<-
}} // namespace boost::callable_traits
//->
/*`
[heading Constraints]
* `T` must be a function type or a member function pointer type
* If `T` is a pointer, it may not be cv/ref qualified
[heading Behavior]
* A substitution failure occurs if the constraints are violated.
* Adds member `const` and `volatile` qualifiers to `T`, if not already present.
[heading Input/Output Examples]
[table
[[`T`] [`add_member_cv_t<T>`]]
[[`int()`] [`int() const volatile`]]
[[`int(foo::*)()`] [`int(foo::*)() const volatile`]]
[[`int(foo::*)() &`] [`int(foo::*)() const volatile &`]]
[[`int(foo::*)() &&`] [`int(foo::*)() const volatile &&`]]
[[`int(foo::*)() const`] [`int(foo::*)() const volatile`]]
[[`int(foo::*)() volatile`] [`int(foo::*)() const volatile`]]
[[`int(foo::*)() transaction_safe`] [`int(foo::*)() const volatile transaction_safe`]]
[[`int`] [(substitution failure)]]
[[`int (&)()`] [(substitution failure)]]
[[`int (*)()`] [(substitution failure)]]
[[`int foo::*`] [(substitution failure)]]
[[`int (foo::* const)()`] [(substitution failure)]]
]
[heading Example Program]
[import ../example/add_member_cv.cpp]
[add_member_cv]
[endsect]
*/
//]
#endif | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/callable_traits/add_member_cv.hpp |
#!/usr/bin/env python
"""
Example script to control a Scrapy server using its JSON-RPC web service.
It only provides a reduced functionality as its main purpose is to illustrate
how to write a web service client. Feel free to improve or write you own.
Also, keep in mind that the JSON-RPC API is not stable. The recommended way for
controlling a Scrapy server is through the execution queue (see the "queue"
command).
"""
from __future__ import print_function
import sys, optparse, urllib, json
from six.moves.urllib.parse import urljoin
from scrapy.utils.jsonrpc import jsonrpc_client_call, JsonRpcError
def get_commands():
    """Map command-line command names to their handler functions."""
    return dict([
        ('help', cmd_help),
        ('stop', cmd_stop),
        ('list-available', cmd_list_available),
        ('list-running', cmd_list_running),
        ('list-resources', cmd_list_resources),
        ('get-global-stats', cmd_get_global_stats),
        ('get-spider-stats', cmd_get_spider_stats),
    ])
def cmd_help(args, opts):
    """help - list available commands"""
    # Each handler's docstring doubles as its usage line.
    print("Available commands:")
    handlers = [func for _, func in sorted(get_commands().items())]
    for handler in handlers:
        print(" ", handler.__doc__)
def cmd_stop(args, opts):
    """stop <spider> - stop a running spider"""
    # Asks the engine to close the spider named by the first positional arg.
    jsonrpc_call(opts, 'crawler/engine', 'close_spider', args[0])
def cmd_list_running(args, opts):
    """list-running - list running spiders"""
    open_spiders = json_get(opts, 'crawler/engine/open_spiders')
    for spider in open_spiders:
        print(spider)
def cmd_list_available(args, opts):
    """list-available - list name of available spiders"""
    for spider_name in jsonrpc_call(opts, 'crawler/spiders', 'list'):
        print(spider_name)
def cmd_list_resources(args, opts):
    """list-resources - list available web service resources"""
    resources = json_get(opts, '')['resources']
    for resource in resources:
        print(resource)
def cmd_get_spider_stats(args, opts):
    """get-spider-stats <spider> - get stats of a running spider"""
    stats = jsonrpc_call(opts, 'stats', 'get_stats', args[0])
    for name in stats:
        print("%-40s %s" % (name, stats[name]))
def cmd_get_global_stats(args, opts):
    """get-global-stats - get global stats"""
    stats = jsonrpc_call(opts, 'stats', 'get_stats')
    for name in stats:
        print("%-40s %s" % (name, stats[name]))
def get_wsurl(opts, path):
    """Build the absolute web-service URL for *path* on the target server."""
    base = "http://%s:%s/" % (opts.host, opts.port)
    return urljoin(base, path)
def jsonrpc_call(opts, path, method, *args, **kwargs):
    """Invoke *method* on the JSON-RPC resource mounted at *path*."""
    return jsonrpc_client_call(get_wsurl(opts, path), method, *args, **kwargs)
def json_get(opts, path):
    """GET the web-service resource at *path* and decode its JSON body.

    Uses six.moves so the same code runs on Python 2 (``urllib.urlopen``)
    and Python 3 (``urllib.request.urlopen``); the bare ``urllib.urlopen``
    this previously relied on does not exist on Python 3.
    """
    from six.moves.urllib.request import urlopen
    url = get_wsurl(opts, path)
    return json.loads(urlopen(url).read())
def parse_opts():
    """Parse sys.argv; return (command_function, command_args, options).

    Exits with usage help when no command is given, and with an error
    message plus the command list for an unknown command.
    """
    usage = "%prog [options] <command> [arg] ..."
    description = "Scrapy web service control script. Use '%prog help' " \
        "to see the list of available commands."
    parser = optparse.OptionParser(usage=usage, description=description)
    parser.add_option("-H", dest="host", default="localhost",
                      help="Scrapy host to connect to")
    parser.add_option("-P", dest="port", type="int", default=6080,
                      help="Scrapy port to connect to")
    opts, args = parser.parse_args()
    if not args:
        parser.print_help()
        sys.exit(2)
    cmdname = args[0]
    cmdargs = args[1:]
    commands = get_commands()
    if cmdname not in commands:
        sys.stderr.write("Unknown command: %s\n\n" % cmdname)
        cmd_help(None, None)
        sys.exit(1)
    return commands[cmdname], cmdargs, opts
def main():
    """Entry point: dispatch the requested command, reporting RPC errors."""
    cmd, cmdargs, opts = parse_opts()
    try:
        cmd(cmdargs, opts)
    except JsonRpcError as err:
        print(str(err))
        if err.data:
            print("Server Traceback below:")
            print(err.data)
    except IndexError:
        # Command invoked with too few arguments: show its usage line.
        print(cmd.__doc__)


if __name__ == '__main__':
    main()
from __future__ import with_statement
import os
def vs9to8(src, dest):
    """Down-convert Visual Studio 2008 project files in *src* to VS 2005.

    Every ``.sln``/``.vcproj``/``.vsprops`` file found directly in *src*
    is rewritten into *dest* with version markers, product names, relative
    paths and macros adjusted for VS 8.0.  Output files use CRLF line
    endings, as Visual Studio expects.
    """
    for name in os.listdir(src):
        ext = os.path.splitext(name)[1]
        if ext.lower() not in ('.sln', '.vcproj', '.vsprops'):
            continue
        filename = os.path.normpath(os.path.join(src, name))
        destname = os.path.normpath(os.path.join(dest, name))
        print("%s -> %s" % (filename, destname))
        # Plain 'r' gives universal-newline reads on Python 3; the 'rU'
        # mode used previously was removed in Python 3.11.
        with open(filename, 'r') as fin:
            lines = fin.read()
        lines = lines.replace('Version="9,00"', 'Version="8.00"')
        lines = lines.replace('Version="9.00"', 'Version="8.00"')
        lines = lines.replace('Format Version 10.00', 'Format Version 9.00')
        lines = lines.replace('Visual Studio 2008', 'Visual Studio 2005')
        lines = lines.replace('wininst-9.0', 'wininst-8.0')
        # The VS8.0 tree lives one level deeper: push every relative path
        # down, then undo the over-correction for already-deep paths.
        lines = lines.replace('..\\', '..\\..\\')
        lines = lines.replace('..\\..\\..\\..\\', '..\\..\\..\\')
        # Bah. VS8.0 does not expand macros in file names.
        # Replace them here.
        lines = lines.replace('$(sqlite3Dir)', '..\\..\\..\\sqlite-3.6.21')
        lines = lines.replace('$(bsddbDir)\\..\\..', '..\\..\\..\\db-4.7.25.0\\build_windows\\..')
        lines = lines.replace('$(bsddbDir)', '..\\..\\..\\db-4.7.25.0\\build_windows')
        # newline='\r\n' translates '\n' to CRLF on write; this replaces
        # the old manual replace() into a binary file, which breaks on
        # Python 3 (str cannot be written to a 'wb' stream).
        with open(destname, 'w', newline='\r\n') as fout:
            fout.write(lines)


if __name__ == "__main__":
    vs9to8(src=".", dest="../PC/VS8.0")
name: Dairy
products:
  - name: cheese
    price: 5.5
  - name: milk
    price: 2.75
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
***************************************************************************
parse_dash_results.py
---------------------
Date : October 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
__author__ = 'Nyall Dawson'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
import os
import sys
import argparse
import urllib.request
import urllib.parse
import urllib.error
import re
import json
from PyQt5.QtCore import (Qt)
from PyQt5.QtGui import (
QImage, QColor, qRed, qBlue, qGreen, qAlpha, qRgb, QPixmap)
from PyQt5.QtWidgets import (QDialog,
QApplication,
QLabel,
QVBoxLayout,
QHBoxLayout,
QGridLayout,
QPushButton,
QDoubleSpinBox,
QWidget,
QScrollArea,
QLayout,
QDialogButtonBox,
QListWidget)
import termcolor
import struct
import glob
# Base URL of the cdash instance that all rendered-image links point at.
dash_url = 'https://cdash.orfeo-toolbox.org'


def error(msg):
    """Print *msg* in red and terminate the process with exit status 1."""
    print(termcolor.colored(msg, 'red'))
    sys.exit(1)
def colorDiff(c1, c2):
    """Return the largest per-channel difference between two ARGB pixels."""
    channels = (qRed, qGreen, qBlue, qAlpha)
    return max(abs(chan(c1) - chan(c2)) for chan in channels)
def imageFromPath(path):
    """Load a QImage from a local file path or an https://
    / file:// URL."""
    is_url = path.startswith('https://') or path.startswith('file://')
    if is_url:
        # fetch remote image
        print('Fetching remote ({})'.format(path))
        image = QImage()
        image.loadFromData(urllib.request.urlopen(path).read())
    else:
        print('Using local ({})'.format(path))
        image = QImage(path)
    return image
class SelectReferenceImageDialog(QDialog):
    """Modal dialog used when several control-image sets match one test:
    the user picks which directory to treat as the reference."""

    def __init__(self, parent, test_name, images):
        # images: iterable of candidate control-image directory paths.
        super().__init__(parent)
        self.setWindowTitle('Select reference image')
        self.setWindowFlags(Qt.Window)
        self.button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        self.button_box.accepted.connect(self.accept)
        self.button_box.rejected.connect(self.reject)
        layout = QVBoxLayout()
        layout.addWidget(QLabel('Found multiple matching reference images for {}'.format(test_name)))
        self.list = QListWidget()
        layout.addWidget(self.list, 1)
        layout.addWidget(self.button_box)
        self.setLayout(layout)
        for image in images:
            self.list.addItem(image)

    def selected_image(self):
        """Return the path currently highlighted in the list widget."""
        return self.list.currentItem().text()
class ResultHandler(QDialog):
    """Main dialog of the mask updater.

    Fetches the rendered images of a cdash test result, pairs each with the
    matching local control image, shows control / rendered / diff views and
    lets the user preview and save an updated mask image (or add the
    rendered image as a new reference set).
    """

    def __init__(self, parent=None):
        """Build the static widget layout; no images are loaded yet."""
        super().__init__(parent)
        self.setWindowTitle('Dash results')
        self.setWindowFlags(Qt.Window)
        self.control_label = QLabel()
        self.rendered_label = QLabel()
        self.diff_label = QLabel()
        self.mask_label = QLabel()
        self.new_mask_label = QLabel()
        self.scrollArea = QScrollArea()
        self.widget = QWidget()
        self.test_name_label = QLabel()
        # Row 0: test name; rows 1-2: control/rendered/diff; rows 3-4: masks.
        grid = QGridLayout()
        grid.addWidget(self.test_name_label, 0, 0)
        grid.addWidget(QLabel('Control'), 1, 0)
        grid.addWidget(QLabel('Rendered'), 1, 1)
        grid.addWidget(QLabel('Difference'), 1, 2)
        grid.addWidget(self.control_label, 2, 0)
        grid.addWidget(self.rendered_label, 2, 1)
        grid.addWidget(self.diff_label, 2, 2)
        grid.addWidget(QLabel('Current Mask'), 3, 0)
        grid.addWidget(QLabel('New Mask'), 3, 1)
        grid.addWidget(self.mask_label, 4, 0)
        grid.addWidget(self.new_mask_label, 4, 1)
        grid.setSizeConstraint(QLayout.SetFixedSize)
        self.widget.setLayout(grid)
        self.scrollArea.setWidget(self.widget)
        v_layout = QVBoxLayout()
        v_layout.addWidget(self.scrollArea, 1)
        next_image_button = QPushButton()
        next_image_button.setText('Skip')
        next_image_button.pressed.connect(self.load_next)
        # Multiplier applied to pixel differences when building a new mask.
        self.overload_spin = QDoubleSpinBox()
        self.overload_spin.setMinimum(1)
        self.overload_spin.setMaximum(255)
        self.overload_spin.setValue(1)
        # Changing the multiplier invalidates the previewed mask, so force
        # a fresh preview before allowing a save.
        self.overload_spin.valueChanged.connect(lambda: save_mask_button.setEnabled(False))
        preview_mask_button = QPushButton()
        preview_mask_button.setText('Preview New Mask')
        preview_mask_button.pressed.connect(self.preview_mask)
        preview_mask_button.pressed.connect(lambda: save_mask_button.setEnabled(True))
        save_mask_button = QPushButton()
        save_mask_button.setText('Save New Mask')
        save_mask_button.pressed.connect(self.save_mask)
        add_ref_image_button = QPushButton()
        add_ref_image_button.setText('Add Reference Image')
        add_ref_image_button.pressed.connect(self.add_reference_image)
        button_layout = QHBoxLayout()
        button_layout.addWidget(next_image_button)
        button_layout.addWidget(QLabel('Mask diff multiplier:'))
        button_layout.addWidget(self.overload_spin)
        button_layout.addWidget(preview_mask_button)
        button_layout.addWidget(save_mask_button)
        button_layout.addWidget(add_ref_image_button)
        button_layout.addStretch()
        v_layout.addLayout(button_layout)
        self.setLayout(v_layout)

    def closeEvent(self, event):
        """Treat a window close as a rejection of the dialog."""
        self.reject()

    def parse_url(self, url):
        """Fetch a cdash testDetails result and queue its rendered images.

        *url* is the human-facing testDetails.php URL; it is rewritten to
        the JSON API endpoint.  Ends by starting the interactive loop via
        load_next().
        """
        parts = urllib.parse.urlsplit(url)
        apiurl = urllib.parse.urlunsplit((parts.scheme, parts.netloc, '/api/v1/testDetails.php', parts.query, parts.fragment))
        print('Fetching dash results from api: {}'.format(apiurl))
        page = urllib.request.urlopen(apiurl)
        content = json.loads(page.read().decode('utf-8'))
        # build up list of rendered images
        measurement_img = [img for img in content['test']['images'] if img['role'].startswith('Rendered Image')]
        images = {}
        for img in measurement_img:
            # The test name is embedded in the image role string.
            m = re.search(r'Rendered Image (.*?)(\s|$)', img['role'])
            test_name = m.group(1)
            rendered_image = 'displayImage.php?imgid={}'.format(img['imgid'])
            images[test_name] = '{}/{}'.format(dash_url, rendered_image)
        if images:
            print('Found images:\n')
            for title, url in images.items():
                print(' ' + termcolor.colored(title, attrs=['bold']) + ' : ' + url)
        else:
            print(termcolor.colored('No images found\n', 'yellow'))
        self.images = images
        self.load_next()

    def load_next(self):
        """Pop the next queued test and display it; exit when none remain."""
        if not self.images:
            # all done
            self.accept()
            exit(0)
        test_name, rendered_image = self.images.popitem()
        self.test_name_label.setText(test_name)
        print(termcolor.colored('\n' + test_name, attrs=['bold']))
        control_image = self.get_control_image_path(test_name)
        if not control_image:
            # No usable reference found; skip straight to the next test.
            self.load_next()
            return
        # Mask lives next to the control image: foo.png -> foo_mask.png.
        self.mask_image_path = control_image[:-4] + '_mask.png'
        self.load_images(control_image, rendered_image, self.mask_image_path)

    def load_images(self, control_image_path, rendered_image_path, mask_image_path):
        """Load control/rendered/mask images, compute the diff and show all
        views.  Skips ahead when the images already match."""
        self.control_image = imageFromPath(control_image_path)
        if not self.control_image:
            error('Could not read control image {}'.format(control_image_path))
        self.rendered_image = imageFromPath(rendered_image_path)
        if not self.rendered_image:
            error(
                'Could not read rendered image {}'.format(rendered_image_path))
        if not self.rendered_image.width() == self.control_image.width() or not self.rendered_image.height() == self.control_image.height():
            print(
                'Size mismatch - control image is {}x{}, rendered image is {}x{}'.format(self.control_image.width(),
                                                                                         self.control_image.height(
                ),
                    self.rendered_image.width(
                ),
                    self.rendered_image.height()))
        # NOTE(review): max_width/max_height are computed here but not used
        # in this method (the per-pixel loops recompute them).
        max_width = min(
            self.rendered_image.width(), self.control_image.width())
        max_height = min(
            self.rendered_image.height(), self.control_image.height())

        # read current mask, if it exist
        self.mask_image = imageFromPath(mask_image_path)
        if self.mask_image.isNull():
            print(
                'Mask image does not exist, creating {}'.format(mask_image_path))
            self.mask_image = QImage(
                self.control_image.width(), self.control_image.height(), QImage.Format_ARGB32)
            self.mask_image.fill(QColor(0, 0, 0))

        self.diff_image = self.create_diff_image(
            self.control_image, self.rendered_image, self.mask_image)
        if not self.diff_image:
            # Images already match within the current mask: nothing to do.
            self.load_next()
            return

        self.control_label.setPixmap(QPixmap.fromImage(self.control_image))
        self.control_label.setFixedSize(self.control_image.size())
        self.rendered_label.setPixmap(QPixmap.fromImage(self.rendered_image))
        self.rendered_label.setFixedSize(self.rendered_image.size())
        self.mask_label.setPixmap(QPixmap.fromImage(self.mask_image))
        self.mask_label.setFixedSize(self.mask_image.size())
        self.diff_label.setPixmap(QPixmap.fromImage(self.diff_image))
        self.diff_label.setFixedSize(self.diff_image.size())
        self.preview_mask()

    def preview_mask(self):
        """Recompute the candidate mask with the current multiplier and
        show it in the 'New Mask' view."""
        self.new_mask_image = self.create_mask(
            self.control_image, self.rendered_image, self.mask_image, self.overload_spin.value())
        self.new_mask_label.setPixmap(QPixmap.fromImage(self.new_mask_image))
        self.new_mask_label.setFixedSize(self.new_mask_image.size())

    def save_mask(self):
        """Write the previewed mask next to the control image and advance."""
        self.new_mask_image.save(self.mask_image_path, "png")
        self.load_next()

    def add_reference_image(self):
        """Store the rendered image as an additional reference set
        (set2, set3, ...), moving a flat layout into set1 first."""
        if os.path.abspath(self.control_images_base_path) == os.path.abspath(self.found_control_image_path):
            # Currently only one reference set, stored directly in the base
            # folder: move it into a 'set1' subfolder first.
            images = glob.glob(os.path.join(self.found_control_image_path, '*.png'))
            default_path = os.path.join(self.found_control_image_path, 'set1')
            os.makedirs(default_path)
            for image in images:
                imgname = os.path.basename(image)
                os.rename(image, os.path.join(default_path, imgname))

        # First unused setN directory wins; the for/else raises if none of
        # set2..set99 is free.
        for i in range(2, 100):
            new_path = os.path.join(self.control_images_base_path, 'set' + str(i))
            if not os.path.exists(new_path):
                break
        else:
            raise RuntimeError('Could not find a suitable directory for another set of reference images')
        os.makedirs(new_path)
        control_image_name = os.path.basename(self.found_image)
        self.rendered_image.save(os.path.join(new_path, control_image_name))
        self.load_next()

    def create_mask(self, control_image, rendered_image, mask_image, overload=1):
        """Return a new mask image: per pixel, the larger of the existing
        tolerance and the (overload-scaled) control/rendered difference.
        A tolerance of 255 means 'always ignore this pixel'."""
        max_width = min(rendered_image.width(), control_image.width())
        max_height = min(rendered_image.height(), control_image.height())

        new_mask_image = QImage(
            control_image.width(), control_image.height(), QImage.Format_ARGB32)
        new_mask_image.fill(QColor(0, 0, 0))

        # loop through pixels in rendered image and compare
        mismatch_count = 0  # NOTE(review): counted but not reported here
        linebytes = max_width * 4  # 4 bytes per ARGB32 pixel
        for y in range(max_height):
            control_scanline = control_image.constScanLine(
                y).asstring(linebytes)
            rendered_scanline = rendered_image.constScanLine(
                y).asstring(linebytes)
            mask_scanline = mask_image.scanLine(y).asstring(linebytes)

            for x in range(max_width):
                currentTolerance = qRed(
                    struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])

                if currentTolerance == 255:
                    # ignore pixel
                    new_mask_image.setPixel(
                        x, y, qRgb(currentTolerance, currentTolerance, currentTolerance))
                    continue

                expected_rgb = struct.unpack(
                    'I', control_scanline[x * 4:x * 4 + 4])[0]
                rendered_rgb = struct.unpack(
                    'I', rendered_scanline[x * 4:x * 4 + 4])[0]
                difference = min(
                    255, int(colorDiff(expected_rgb, rendered_rgb) * overload))

                if difference > currentTolerance:
                    # update mask image
                    new_mask_image.setPixel(
                        x, y, qRgb(difference, difference, difference))
                    mismatch_count += 1
                else:
                    new_mask_image.setPixel(
                        x, y, qRgb(currentTolerance, currentTolerance, currentTolerance))
        return new_mask_image

    def get_control_image_path(self, test_name):
        """Locate the control image for *test_name* under the QGIS source
        tree; prompts when multiple reference sets match.  Returns the
        image path, or None when the test should be skipped.  Also records
        found_control_image_path / control_images_base_path / found_image
        on self for later use by add_reference_image()."""
        if os.path.isfile(test_name):
            return test_name

        # else try and find matching test image
        script_folder = os.path.dirname(os.path.realpath(sys.argv[0]))
        control_images_folder = os.path.join(
            script_folder, '../tests/testdata/control_images')

        matching_control_images = [x[0]
                                   for x in os.walk(control_images_folder) if test_name + '/' in x[0] or x[0].endswith(test_name)]
        self.control_images_base_path = os.path.commonprefix(matching_control_images)
        if len(matching_control_images) > 1:
            for item in matching_control_images:
                print(' - ' + item)
            dlg = SelectReferenceImageDialog(self, test_name, matching_control_images)
            if not dlg.exec_():
                return None
            self.found_control_image_path = dlg.selected_image()
        elif len(matching_control_images) == 0:
            print(termcolor.colored('No matching control images found for {}'.format(test_name), 'yellow'))
            return None
        else:
            self.found_control_image_path = matching_control_images[0]

        # check for a single matching expected image
        images = glob.glob(os.path.join(self.found_control_image_path, '*.png'))
        filtered_images = [i for i in images if not i[-9:] == '_mask.png']
        if len(filtered_images) > 1:
            error(
                'Found multiple matching control images for {}'.format(test_name))
        elif len(filtered_images) == 0:
            error('No matching control images found for {}'.format(test_name))
        self.found_image = filtered_images[0]
        print('Found matching control image: {}'.format(self.found_image))
        return self.found_image

    def create_diff_image(self, control_image, rendered_image, mask_image):
        """Return an image with mismatching pixels marked red, or None when
        every pixel is within the mask's tolerance."""
        # loop through pixels in rendered image and compare
        mismatch_count = 0
        max_width = min(rendered_image.width(), control_image.width())
        max_height = min(rendered_image.height(), control_image.height())
        linebytes = max_width * 4  # 4 bytes per ARGB32 pixel

        diff_image = QImage(
            control_image.width(), control_image.height(), QImage.Format_ARGB32)
        diff_image.fill(QColor(152, 219, 249))

        for y in range(max_height):
            control_scanline = control_image.constScanLine(
                y).asstring(linebytes)
            rendered_scanline = rendered_image.constScanLine(
                y).asstring(linebytes)
            mask_scanline = mask_image.scanLine(y).asstring(linebytes)

            for x in range(max_width):
                currentTolerance = qRed(
                    struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])

                if currentTolerance == 255:
                    # ignore pixel
                    continue

                expected_rgb = struct.unpack(
                    'I', control_scanline[x * 4:x * 4 + 4])[0]
                rendered_rgb = struct.unpack(
                    'I', rendered_scanline[x * 4:x * 4 + 4])[0]
                difference = colorDiff(expected_rgb, rendered_rgb)

                if difference > currentTolerance:
                    # update mask image
                    diff_image.setPixel(x, y, qRgb(255, 0, 0))
                    mismatch_count += 1

        if mismatch_count:
            return diff_image
        else:
            print(termcolor.colored('No mismatches', 'green'))
            return None
def main():
    """Parse the cdash URL from argv and run the interactive mask updater."""
    app = QApplication(sys.argv)
    parser = argparse.ArgumentParser(
        description='''A tool to automatically update test image masks based on results submitted to cdash.
It will take local control images from the QGIS source and rendered images from test results
on cdash to create a mask.
When using it, carefully check, that the rendered images from the test results are acceptable and
that the new masks will only mask regions on the image that indeed allow for variation.
If the resulting mask is too tolerant, consider adding a new control image next to the existing one.
''')
    parser.add_argument('dash_url', help='URL to a dash result with images. E.g. https://cdash.orfeo-toolbox.org/testDetails.php?test=15052561&build=27712')
    args = parser.parse_args()
    handler = ResultHandler()
    handler.parse_url(args.dash_url)
    handler.exec_()


if __name__ == '__main__':
    main()
# pylint: disable=E0601,W0622,W0611
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Wrappers around some builtins introduced in python 2.3, 2.4 and
2.5, making them available in for earlier versions of python.
See another compatibility snippets from other projects:
:mod:`lib2to3.fixes`
:mod:`coverage.backward`
:mod:`unittest2.compatibility`
"""
__docformat__ = "restructuredtext en"
import os
import sys
import types
from warnings import warn
# not used here, but imported to preserve API
from six.moves import builtins
# Byte/str helpers whose behaviour depends on the running major version.
if sys.version_info < (3, 0):
    # On py2 the native str type already is a byte string.
    str_to_bytes = str

    def str_encode(string, encoding):
        # Encode unicode explicitly; anything else is stringified as-is.
        if isinstance(string, unicode):
            return string.encode(encoding)
        return str(string)
else:
    def str_to_bytes(string):
        return str.encode(string)

    # we have to ignore the encoding in py3k to be able to write a string into a
    # TextIOWrapper or like object (which expect an unicode string)
    def str_encode(string, encoding):
        return str(string)

# See also http://bugs.python.org/issue11776
if sys.version_info[0] == 3:
    def method_type(callable, instance, klass):
        # api change. klass is no more considered
        return types.MethodType(callable, instance)
else:
    # alias types otherwise
    method_type = types.MethodType

# Pythons 2 and 3 differ on where to get StringIO
if sys.version_info < (3, 0):
    from cStringIO import StringIO
    FileIO = file
    BytesIO = StringIO
    # Re-export the py2 builtin so callers can import it from here.
    reload = reload
else:
    from io import FileIO, BytesIO, StringIO
    from imp import reload

from logilab.common.deprecation import deprecated

# Other projects import these from here, keep providing them for
# backwards compat
any = deprecated('use builtin "any"')(any)
all = deprecated('use builtin "all"')(all)
/*-------------------------------------------------------------------------
*
* nodeCtescan.c
* routines to handle CteScan nodes.
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/executor/nodeCtescan.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "executor/executor.h"
#include "executor/nodeCtescan.h"
#include "miscadmin.h"
static TupleTableSlot *CteScanNext(CteScanState *node);
/* ----------------------------------------------------------------
 *		CteScanNext
 *
 *		This is a workhorse for ExecCteScan
 *
 *		Fetches the next tuple in the current scan direction from the
 *		shared CTE tuplestore, running the underlying CTE plan to add
 *		more tuples when needed.  Returns an empty slot at end of scan.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
CteScanNext(CteScanState *node)
{
	EState	   *estate;
	ScanDirection dir;
	bool		forward;
	Tuplestorestate *tuplestorestate;
	bool		eof_tuplestore;
	TupleTableSlot *slot;
	/*
	 * get state info from node
	 */
	estate = node->ss.ps.state;
	dir = estate->es_direction;
	forward = ScanDirectionIsForward(dir);
	tuplestorestate = node->leader->cte_table;
	/* the tuplestore is shared; make our own read pointer the active one */
	tuplestore_select_read_pointer(tuplestorestate, node->readptr);
	slot = node->ss.ss_ScanTupleSlot;
	/*
	 * If we are not at the end of the tuplestore, or are going backwards, try
	 * to fetch a tuple from tuplestore.
	 */
	eof_tuplestore = tuplestore_ateof(tuplestorestate);
	if (!forward && eof_tuplestore)
	{
		if (!node->leader->eof_cte)
		{
			/*
			 * When reversing direction at tuplestore EOF, the first
			 * gettupleslot call will fetch the last-added tuple; but we want
			 * to return the one before that, if possible. So do an extra
			 * fetch.
			 */
			if (!tuplestore_advance(tuplestorestate, forward))
				return NULL;	/* the tuplestore must be empty */
		}
		eof_tuplestore = false;
	}
	/*
	 * If we can fetch another tuple from the tuplestore, return it.
	 *
	 * Note: we have to use copy=true in the tuplestore_gettupleslot call,
	 * because we are sharing the tuplestore with other nodes that might write
	 * into the tuplestore before we get called again.
	 */
	if (!eof_tuplestore)
	{
		if (tuplestore_gettupleslot(tuplestorestate, forward, true, slot))
			return slot;
		if (forward)
			eof_tuplestore = true;
	}
	/*
	 * If necessary, try to fetch another row from the CTE query.
	 *
	 * Note: the eof_cte state variable exists to short-circuit further calls
	 * of the CTE plan.  It's not optional, unfortunately, because some plan
	 * node types are not robust about being called again when they've already
	 * returned NULL.
	 */
	if (eof_tuplestore && !node->leader->eof_cte)
	{
		TupleTableSlot *cteslot;
		/*
		 * We can only get here with forward==true, so no need to worry about
		 * which direction the subplan will go.
		 */
		cteslot = ExecProcNode(node->cteplanstate);
		if (TupIsNull(cteslot))
		{
			node->leader->eof_cte = true;
			return NULL;
		}
		/*
		 * There are corner cases where the subplan could change which
		 * tuplestore read pointer is active, so be sure to reselect ours
		 * before storing the tuple we got.
		 */
		tuplestore_select_read_pointer(tuplestorestate, node->readptr);
		/*
		 * Append a copy of the returned tuple to tuplestore.  NOTE: because
		 * our read pointer is certainly in EOF state, its read position will
		 * move forward over the added tuple.  This is what we want.  Also,
		 * any other readers will *not* move past the new tuple, which is what
		 * they want.
		 */
		tuplestore_puttupleslot(tuplestorestate, cteslot);
		/*
		 * We MUST copy the CTE query's output tuple into our own slot. This
		 * is because other CteScan nodes might advance the CTE query before
		 * we are called again, and our output tuple must stay stable over
		 * that.
		 */
		return ExecCopySlot(slot, cteslot);
	}
	/*
	 * Nothing left ...
	 */
	return ExecClearTuple(slot);
}
/*
 * CteScanRecheck -- access method routine to recheck a tuple in EvalPlanQual
 *
 * Always returns true: the tuple from the CTE tuplestore is accepted as-is,
 * with no scan-level condition re-evaluated here.
 */
static bool
CteScanRecheck(CteScanState *node, TupleTableSlot *slot)
{
	/* nothing to check */
	return true;
}
/* ----------------------------------------------------------------
 *		ExecCteScan(node)
 *
 *		Scans the CTE sequentially and returns the next qualifying tuple.
 *		We call the ExecScan() routine and pass it the appropriate
 *		access method functions.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecCteScan(PlanState *pstate)
{
	CteScanState *node = castNode(CteScanState, pstate);
	/* ExecScan applies quals and projection around CteScanNext fetches */
	return ExecScan(&node->ss,
					(ExecScanAccessMtd) CteScanNext,
					(ExecScanRecheckMtd) CteScanRecheck);
}
/* ----------------------------------------------------------------
 *		ExecInitCteScan
 *
 *		Builds and returns the CteScanState for a CteScan plan node.
 *		The first CteScan initialized for a given CTE becomes the
 *		"leader" that owns the shared tuplestore; later CteScans for
 *		the same CTE attach their own read pointers to it.
 * ----------------------------------------------------------------
 */
CteScanState *
ExecInitCteScan(CteScan *node, EState *estate, int eflags)
{
	CteScanState *scanstate;
	ParamExecData *prmdata;
	/* check for unsupported flags */
	Assert(!(eflags & EXEC_FLAG_MARK));
	/*
	 * For the moment we have to force the tuplestore to allow REWIND, because
	 * we might be asked to rescan the CTE even though upper levels didn't
	 * tell us to be prepared to do it efficiently.  Annoying, since this
	 * prevents truncation of the tuplestore.  XXX FIXME
	 *
	 * Note: if we are in an EPQ recheck plan tree, it's likely that no access
	 * to the tuplestore is needed at all, making this even more annoying.
	 * It's not worth improving that as long as all the read pointers would
	 * have REWIND anyway, but if we ever improve this logic then that aspect
	 * should be considered too.
	 */
	eflags |= EXEC_FLAG_REWIND;
	/*
	 * CteScan should not have any children.
	 */
	Assert(outerPlan(node) == NULL);
	Assert(innerPlan(node) == NULL);
	/*
	 * create new CteScanState for node
	 */
	scanstate = makeNode(CteScanState);
	scanstate->ss.ps.plan = (Plan *) node;
	scanstate->ss.ps.state = estate;
	scanstate->ss.ps.ExecProcNode = ExecCteScan;
	scanstate->eflags = eflags;
	scanstate->cte_table = NULL;
	scanstate->eof_cte = false;
	/*
	 * Find the already-initialized plan for the CTE query.
	 */
	scanstate->cteplanstate = (PlanState *) list_nth(estate->es_subplanstates,
													 node->ctePlanId - 1);
	/*
	 * The Param slot associated with the CTE query is used to hold a pointer
	 * to the CteState of the first CteScan node that initializes for this
	 * CTE. This node will be the one that holds the shared state for all the
	 * CTEs, particularly the shared tuplestore.
	 */
	prmdata = &(estate->es_param_exec_vals[node->cteParam]);
	Assert(prmdata->execPlan == NULL);
	Assert(!prmdata->isnull);
	scanstate->leader = castNode(CteScanState, DatumGetPointer(prmdata->value));
	if (scanstate->leader == NULL)
	{
		/* I am the leader */
		prmdata->value = PointerGetDatum(scanstate);
		scanstate->leader = scanstate;
		scanstate->cte_table = tuplestore_begin_heap(true, false, work_mem);
		tuplestore_set_eflags(scanstate->cte_table, scanstate->eflags);
		/* the leader keeps the tuplestore's original read pointer 0 */
		scanstate->readptr = 0;
	}
	else
	{
		/* Not the leader */
		/* Create my own read pointer, and ensure it is at start */
		scanstate->readptr =
			tuplestore_alloc_read_pointer(scanstate->leader->cte_table,
										  scanstate->eflags);
		tuplestore_select_read_pointer(scanstate->leader->cte_table,
									   scanstate->readptr);
		tuplestore_rescan(scanstate->leader->cte_table);
	}
	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &scanstate->ss.ps);
	/*
	 * The scan tuple type (ie, the rowtype we expect to find in the work
	 * table) is the same as the result rowtype of the CTE query.
	 */
	ExecInitScanTupleSlot(estate, &scanstate->ss,
						  ExecGetResultType(scanstate->cteplanstate),
						  &TTSOpsMinimalTuple);
	/*
	 * Initialize result type and projection.
	 */
	ExecInitResultTypeTL(&scanstate->ss.ps);
	ExecAssignScanProjectionInfo(&scanstate->ss);
	/*
	 * initialize child expressions
	 */
	scanstate->ss.ps.qual =
		ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate);
	return scanstate;
}
/* ----------------------------------------------------------------
 *		ExecEndCteScan
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndCteScan(CteScanState *node)
{
	/*
	 * If I am the leader, free the tuplestore.  (Non-leader nodes only hold
	 * read pointers into the leader's tuplestore, so they release nothing.)
	 */
	if (node->leader == node)
	{
		tuplestore_end(node->cte_table);
		node->cte_table = NULL;
	}
}
/* ----------------------------------------------------------------
 *		ExecReScanCteScan
 *
 *		Rescans the relation.
 * ----------------------------------------------------------------
 */
void
ExecReScanCteScan(CteScanState *node)
{
	Tuplestorestate *tuplestorestate = node->leader->cte_table;
	/* forget any tuple we were holding in the result slot */
	if (node->ss.ps.ps_ResultTupleSlot)
		ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
	ExecScanReScan(&node->ss);
	/*
	 * Clear the tuplestore if a new scan of the underlying CTE is required.
	 * This implicitly resets all the tuplestore's read pointers.  Note that
	 * multiple CTE nodes might redundantly clear the tuplestore; that's OK,
	 * and not unduly expensive.  We'll stop taking this path as soon as
	 * somebody has attempted to read something from the underlying CTE
	 * (thereby causing its chgParam to be cleared).
	 */
	if (node->leader->cteplanstate->chgParam != NULL)
	{
		tuplestore_clear(tuplestorestate);
		node->leader->eof_cte = false;
	}
	else
	{
		/*
		 * Else, just rewind my own pointer.  Either the underlying CTE
		 * doesn't need a rescan (and we can re-read what's in the tuplestore
		 * now), or somebody else already took care of it.
		 */
		tuplestore_select_read_pointer(tuplestorestate, node->readptr);
		tuplestore_rescan(tuplestorestate);
	}
}
#
# This program creates a Deck of Cards
#
from collections import namedtuple
from random import choice
# What is the card composed of? A card is made up of a rank and suit
Card = namedtuple('Card', ['rank', 'suit'])
suit_values = dict(spades=3, hearts=2, diamonds=1, clubs=0)
def spades_high(card):
    """Return a sortable value for *card*: rank is primary, suit breaks ties."""
    suit_count = len(suit_values)
    return FrenchDeck.ranks.index(card.rank) * suit_count + suit_values[card.suit]
class FrenchDeck:
    """A 52-card deck supporting len(), indexing, slicing and iteration."""
    # Ranks 2-10 plus the face cards, regardless of suit.
    ranks = [str(n) for n in range(2, 11)] + ['Jack', 'Queen', 'King', 'Ace']
    # ranks = [str(n) for n in range(2,11)] + list('JQKA')
    # The four French suits, regardless of rank.
    suits = 'spades hearts diamonds clubs'.split()
    def __init__(self):
        # Build every rank/suit combination (rank-major order).
        self._cards = [
            Card(rank, suit) for rank in self.ranks for suit in self.suits
        ]
    def __len__(self):
        # Number of cards in the deck.
        return len(self._cards)
    def __getitem__(self, position):
        # Delegate indexing/slicing to the underlying list.
        return self._cards[position]
    def __str__(self):
        listing = '\n'.join(str(card) for card in self._cards)
        return "Deck contains: %s" % listing
if __name__ == '__main__':
    # Demo of the special-method protocol (__len__, __getitem__) that
    # FrenchDeck implements.
    new_deck = FrenchDeck()
    # deck responds to the len() function
    print('I bought a new deck of cards. It has %i cards in it' % len(new_deck))
    # random.choice works because the deck supports __len__ and __getitem__
    random_card = choice(new_deck)
    print('A random card is: %s of %s' % (random_card.rank, random_card.suit))
    # reading specific cards is possible because of the __getitem__ api
    last_card = new_deck[-1].rank + ' of ' + new_deck[-1].suit
    first_card = new_deck[0].rank + ' of ' + new_deck[0].suit
    print('Last card in the deck is "%s" and first card is "%s"' % (last_card, first_card))
    # slicing is possible with deck thanks to __getitem__
    top_3_cards = new_deck[:3]
    print('Top 3 cards in the deck are %s' % [str(card) for card in top_3_cards])
    # NOTE(review): with this deck's rank-major ordering (4 cards per rank),
    # [12::13] does NOT select the aces — the aces are the last four cards.
    # The [12::13] trick assumes suit-major ordering; verify intent.
    picking_aces = new_deck[12::13]
    print('Picking the aces in the deck: %s' % [str(ace) for ace in picking_aces])
    # deck is iterable thanks to __getitem__
    print('Here is the entire deck:')
    for card in new_deck:
        print(card)
    # If a collection has no __contains__ method, the in operator does a sequential scan
    queen_of_hearts = Card('Queen', 'Hearts')
    print('Does the deck contain %s: %s' % (queen_of_hearts, queen_of_hearts in new_deck))
    joker = Card('Joker', '')
    print('Does the deck contain %s: %s' % (joker, joker in new_deck))
    print('After sorting, the cards will look as follows: ')
    for card in sorted(new_deck, key=spades_high):
        print(card)
# It's most useful to run these tests with ThreadSanitizer enabled.
import sys
import functools
import threading
import time
import unittest
import _testinternalcapi
import warnings
from test.support import threading_helper
class TestBase(unittest.TestCase):
    """Common base class for the race-test cases in this module."""
    pass
def do_race(func1, func2):
    """Run func1() and func2() repeatedly in separate threads."""
    iterations = 1000
    # Both workers wait at the barrier so their loops start simultaneously,
    # maximising the chance of interleaving.
    barrier = threading.Barrier(2)
    def repeat(func):
        barrier.wait()
        for _ in range(iterations):
            func()
    workers = [
        threading.Thread(target=functools.partial(repeat, f))
        for f in (func1, func2)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
@threading_helper.requires_working_threading()
class TestRaces(TestBase):
    """Reproducers for data races in interpreter internals.

    Each test pits an "access" function against a "mutate" function via
    do_race(); the races are most useful when run under ThreadSanitizer.
    """
    def test_racing_cell_set(self):
        """Test cell object gettr/settr properties."""
        def nested_func():
            x = 0
            def inner():
                nonlocal x
                x += 1
        # This doesn't race because LOAD_DEREF and STORE_DEREF on the
        # cell object use critical sections.
        do_race(nested_func, nested_func)
        def nested_func2():
            x = 0
            def inner():
                y = x
                frame = sys._getframe(1)
                frame.f_locals["x"] = 2
            return inner
        def mutate_func2():
            inner = nested_func2()
            cell = inner.__closure__[0]
            old_value = cell.cell_contents
            cell.cell_contents = 1000
            time.sleep(0)
            cell.cell_contents = old_value
            time.sleep(0)
        # This revealed a race with cell_set_contents() since it was missing
        # the critical section.
        do_race(nested_func2, mutate_func2)
    def test_racing_cell_cmp_repr(self):
        """Test cell object compare and repr methods."""
        def nested_func():
            x = 0
            y = 0
            def inner():
                return x + y
            return inner.__closure__
        cell_a, cell_b = nested_func()
        def mutate():
            cell_a.cell_contents += 1
        def access():
            cell_a == cell_b
            s = repr(cell_a)
        # cell_richcompare() and cell_repr used to have data races
        do_race(mutate, access)
    def test_racing_load_super_attr(self):
        """Test (un)specialization of LOAD_SUPER_ATTR opcode."""
        class C:
            def __init__(self):
                try:
                    super().__init__
                    super().__init__()
                except RuntimeError:
                    pass # happens if __class__ is replaced with non-type
        def access():
            C()
        def mutate():
            # Swap out the super() global with a different one
            real_super = super
            globals()["super"] = lambda s=1: s
            time.sleep(0)
            globals()["super"] = real_super
            time.sleep(0)
            # Swap out the __class__ closure value with a non-type
            cell = C.__init__.__closure__[0]
            real_class = cell.cell_contents
            cell.cell_contents = 99
            time.sleep(0)
            cell.cell_contents = real_class
        # The initial PR adding specialized opcodes for LOAD_SUPER_ATTR
        # had some races (one with the super() global changing and one
        # with the cell binding being changed).
        do_race(access, mutate)
    def test_racing_to_bool(self):
        """Race truth-testing of an object against rebinding it."""
        seq = [1]
        class C:
            def __bool__(self):
                return False
        def access():
            if seq:
                return 1
            else:
                return 2
        def mutate():
            nonlocal seq
            seq = [1]
            time.sleep(0)
            seq = C()
            time.sleep(0)
        do_race(access, mutate)
    def test_racing_store_attr_slot(self):
        """Race STORE_ATTR on a __slots__ attribute against type mutation."""
        class C:
            __slots__ = ['x', '__dict__']
        c = C()
        def set_slot():
            for i in range(10):
                c.x = i
            time.sleep(0)
        def change_type():
            def set_x(self, x):
                pass
            def get_x(self):
                pass
            C.x = property(get_x, set_x)
            time.sleep(0)
            del C.x
            time.sleep(0)
        do_race(set_slot, change_type)
        def set_getattribute():
            C.__getattribute__ = lambda self, x: x
            time.sleep(0)
            del C.__getattribute__
            time.sleep(0)
        do_race(set_slot, set_getattribute)
    def test_racing_store_attr_instance_value(self):
        """Race specialized instance-value attribute stores against
        (de)specialization triggered by adding/removing a property."""
        class C:
            pass
        c = C()
        def set_value():
            for i in range(100):
                c.x = i
        set_value()
        def read():
            x = c.x
        def mutate():
            # Adding a property for 'x' should unspecialize it.
            C.x = property(lambda self: None, lambda self, x: None)
            time.sleep(0)
            del C.x
            time.sleep(0)
        do_race(read, mutate)
    def test_racing_store_attr_with_hint(self):
        """Same as above, but with enough attributes that the instance
        dict is materialized (STORE_ATTR_WITH_HINT path)."""
        class C:
            pass
        c = C()
        for i in range(29):
            setattr(c, f"_{i}", None)
        def set_value():
            for i in range(100):
                c.x = i
        set_value()
        def read():
            x = c.x
        def mutate():
            # Adding a property for 'x' should unspecialize it.
            C.x = property(lambda self: None, lambda self, x: None)
            time.sleep(0)
            del C.x
            time.sleep(0)
        do_race(read, mutate)
    def make_shared_key_dict(self):
        """Return an instance __dict__ that uses a shared-key (split) table."""
        class C:
            pass
        a = C()
        a.x = 1
        return a.__dict__
    def test_racing_store_attr_dict(self):
        """Test STORE_ATTR with various dictionary types."""
        class C:
            pass
        c = C()
        def set_value():
            for i in range(20):
                c.x = i
        def mutate():
            nonlocal c
            c.x = 1
            self.assertTrue(_testinternalcapi.has_inline_values(c))
            for i in range(30):
                setattr(c, f"_{i}", None)
            self.assertFalse(_testinternalcapi.has_inline_values(c.__dict__))
            c.__dict__ = self.make_shared_key_dict()
            self.assertTrue(_testinternalcapi.has_split_table(c.__dict__))
            c.__dict__[1] = None
            self.assertFalse(_testinternalcapi.has_split_table(c.__dict__))
            c = C()
        do_race(set_value, mutate)
    def test_racing_recursion_limit(self):
        """Race recursive calls against concurrent setrecursionlimit()."""
        def something_recursive():
            def count(n):
                if n > 0:
                    return count(n - 1) + 1
                return 0
            count(50)
        def set_recursion_limit():
            for limit in range(100, 200):
                sys.setrecursionlimit(limit)
        do_race(something_recursive, set_recursion_limit)
@threading_helper.requires_working_threading()
class TestWarningsRaces(TestBase):
    """Races between warnings.warn() and mutation of warnings.filters."""
    def setUp(self):
        # Preserve and reset the global filter list so tests are isolated.
        self.saved_filters = warnings.filters[:]
        warnings.resetwarnings()
        # Add multiple filters to the list to increase odds of race.
        for lineno in range(20):
            warnings.filterwarnings('ignore', message='not matched', category=Warning, lineno=lineno)
        # Override showwarning() so that we don't actually show warnings.
        def showwarning(*args):
            pass
        warnings.showwarning = showwarning
    def tearDown(self):
        # Restore the global warnings state mutated in setUp().
        warnings.filters[:] = self.saved_filters
        warnings.showwarning = warnings._showwarning_orig
    def test_racing_warnings_filter(self):
        # Modifying the warnings.filters list while another thread is using
        # warn() should not crash or race.
        def modify_filters():
            time.sleep(0)
            warnings.filters[:] = [('ignore', None, UserWarning, None, 0)]
            time.sleep(0)
            warnings.filters[:] = self.saved_filters
        def emit_warning():
            warnings.warn('dummy message', category=UserWarning)
        do_race(modify_filters, emit_warning)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
import PyQt4.uic
import os
import PyQt4
from PyQt4.QtGui import (QLabel, QDialog, QFileDialog,
QPixmap, QGridLayout, QLayout,
QWidget)
from PyQt4.QtCore import (QByteArray, QBuffer,
QIODevice, QEvent, QObject, pyqtSignal)
import logging
import images_rc
# Locate the Qt Designer .ui file next to this module and generate the
# (form class, base class) pair from it at import time.
basepath = os.path.dirname(__file__)
uipath = os.path.join(basepath,'imagewidget.ui')
widgetForm, baseClass= PyQt4.uic.loadUiType(uipath)
class QMapImageWidget(baseClass, widgetForm):
    """Image picker widget: shows a placeholder until the user selects a
    JPEG; clicking a loaded image emits openRequest with its pixmap."""
    # Emitted with the current pixmap when the user asks to open the image.
    openRequest = pyqtSignal(QPixmap)
    def __init__(self, data=None, parent=None):
        # data: optional JPEG bytes to preload; falsy data shows the placeholder.
        super(QMapImageWidget, self).__init__(parent)
        self.setupUi(self)
        self.setStyleSheet(":hover {background-color: #dddddd;}")
        self.selectbutton.setVisible(False)
        self.deletebutton.setVisible(False)
        self.selectbutton.clicked.connect(self.selectImage)
        self.deletebutton.clicked.connect(self.removeImage)
        # isDefault tracks whether the placeholder (not a user image) is shown.
        self.isDefault = True
        self.loadImage(data)
        self.image.mouseReleaseEvent = self.imageClick
        self.installEventFilter(self)
    def eventFilter(self, parent, event):
        """ Handle mouse click events for disabled widget state """
        if event.type() == QEvent.MouseButtonRelease:
            if self.isDefault:
                return QObject.eventFilter(self, parent, event)
            self.openRequest.emit(self.image.pixmap())
        return QObject.eventFilter(self, parent, event)
    def selectImage(self):
        """Prompt for a JPEG file and load it into the widget."""
        # Show the file picker
        image = QFileDialog.getOpenFileName(self, "Select Image", "", "Images (*.jpg)")
        if image is None or not image:
            return
        pix = QPixmap(image)
        self.loadFromPixMap(pix)
    def removeImage(self):
        """Replace the current image with the default placeholder."""
        pix = QPixmap(":/images/images/add.png")
        self.loadFromPixMap(pix)
        self.image.setScaledContents(False)
        self.isDefault = True
    def imageClick(self, event):
        """Clicking the placeholder opens the picker; clicking a real
        image requests it to be opened."""
        if self.isDefault:
            self.selectImage()
        else:
            self.openRequest.emit(self.image.pixmap())
    def loadFromPixMap(self, pixmap):
        """Display *pixmap* scaled to the widget and mark it as user content."""
        self.image.setScaledContents(True)
        self.image.setPixmap(pixmap)
        self.isDefault = False
    def loadImage(self, data):
        """
        Load the image into the widget using a bytearray
        An empty picture will result in the default placeholder
        image.
        """
        if data is None or not data:
            self.removeImage()
            return
        pix = QPixmap()
        r = pix.loadFromData(data, 'JPG')
        self.image.setScaledContents(True)
        self.image.setPixmap(pix)
        self.isDefault = False
    def getImage(self):
        """ Return the loaded image """
        # Returns a QByteArray of JPEG data, or None while the placeholder
        # is shown.
        if self.isDefault:
            return None
        pix = self.image.pixmap()
        by = QByteArray()
        buf = QBuffer(by)
        buf.open(QIODevice.WriteOnly)
        pix.save(buf, "JPG")
        return by
    def enterEvent(self, event):
        # Don't show the image controls if we are on the default image
        # or in a disabled state
        if self.isDefault or not self.isEnabled():
            return
        self.selectbutton.setVisible(True)
        self.deletebutton.setVisible(True)
    def leaveEvent(self, event):
        """Hide the select/delete controls when the pointer leaves."""
        self.selectbutton.setVisible(False)
        self.deletebutton.setVisible(False)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-7-22 上午12:41
# @Author : tom.lee
# @docs : http://old.sebug.net/paper/books/scipydoc/numpy_intro.html
# @File : study_numpy.py
# @Software: PyCharm
"""
numpy
Numpy是Python的一个科学计算的库,提供了矩阵运算的功能,其一般与Scipy,matplotlib一起使用.
NumPy提供了两种基本的对象:
ndarray(N-dimensional array object)ndarray(数组)是存储单一数据类型的多维数组;
ufunc(universal function object)而 ufunc则是能够对数组进行处理的函数。
"""
import numpy as np
def split_line():
    """Print a separator row of 36 (6 ** 2) asterisks."""
    print '*' * 6 ** 2
def np_version():
    """
    Print the installed numpy version.
    :return:
    """
    print np.version.version
def np_list():
    """
    numpy arrays:
    hold a single data type only,
    are created with "numpy.array()",
    and the dtype can be given explicitly via "dtype=numpy.<type>".
    :return:
    """
    # creation
    l = np.array([1, 2, 3], dtype=np.int8)
    a = np.array([1, 2, 3, 4])
    b = np.array((5, 6, 7, 8))
    c = np.array([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]])
    print 'l:', l
    print 'a:', a
    print 'b:', b
    print 'c:', c
    split_line()
    # element type
    print l.dtype, c.dtype
    split_line()
    # shape: a's shape has one element, so a is one-dimensional.
    # c's shape has two elements, so c is two-dimensional, with axis 0 of
    # length 3 and axis 1 of length 4.
    print l.shape, c.shape
    split_line()
    # change the length of each axis: only the axis sizes change; the
    # element layout in memory is untouched.
    c.shape = 4, 3
    print c
    split_line()
    # when an axis length is given as -1 it is computed automatically from
    # the element count, so this reshapes c to (2, 6).
    c.shape = 2, -1
    print c
    split_line()
    # the reshape method returns a new array with a different shape while
    # the original array's shape stays unchanged.
    # note: a and d share the same underlying data buffer.
    d = a.reshape((2, 2))
    print 'a:', a
    print 'd:', d
    split_line()
def np_list_create():
    # build a 1-D array like xrange: [start, end) with the given step;
    # the end point is excluded, giving (end-start)/step elements.
    np_lst = np.arange(0, 10, 1)
    print np_lst
    print '大小:%d' % np_lst.shape
    split_line()
    # arithmetic progression:
    # linspace(start, end, size) covers [start, end] inclusive with `size`
    # evenly spaced samples; the endpoint keyword controls whether the end
    # value is included.
    print np.linspace(0, 1, 12)
    split_line()
    # geometric progression:
    # logspace(start exponent, end exponent, count, base defaults to 10)
    print np.logspace(0, 2, 20)
    split_line()
def np_list_by_byte():
    """
    frombuffer, fromstring, fromfile and friends build arrays from byte
    sequences; always pass the dtype argument explicitly.
    A Python 2 str is really a byte sequence (one byte per character), so
    building an 8-bit integer array from a string yields exactly the ASCII
    codes of its characters.
    :return:
    """
    s = 'abcdefg'
    print np.frombuffer(s, dtype=np.int8)
    split_line()
    print np.fromstring(s, dtype=np.int8)
    split_line()
    # with a 16-bit dtype every two adjacent bytes form one integer:
    # bytes 98 and 97 become 98*256+97 = 25185, which shows the data is
    # stored little endian (low byte first).  The string length must
    # therefore be even.
    print np.fromstring('abcdefgh', dtype=np.int16)
    split_line()
def np_list_by_func():
    """
    Create arrays from a function.
    :return:
    """
    # fromfunction takes a function plus an iterable (tuple/list) giving the
    # size of each dimension:
    # (10,) means a 1-D array of 10 elements, and the function gets one arg;
    # (5, 6) means a 5x6 2-D array, and the function gets two args.
    print np.fromfunction(lambda x: x + 1, (10,))
    print np.fromfunction(lambda x, y: (x + 1) * (y + 1), (5, 6))
    split_line()
def np_list_opt():
    """
    numpy array element access largely mirrors Python lists.
    :return:
    """
    l = np.arange(10, 1, -1)
    print l
    print '做小值:', l.min()
    print '最大值:', l.max()
    print '下标0的元素:', l[0]
    split_line()
    # advanced (fancy) indexing below returns copies that do not share
    # memory, unlike the basic indexing above which does.
    print l[np.array([1, 5, 3])]  # index with an array of positions
    print l[[1, 5, 3]]  # index with a list of positions
    split_line()
    # boolean filtering directly on the array
    print l[l > 3]  # select the values greater than 3
    print l > 3  # elementwise comparison returning a boolean array
    split_line()
if __name__ == '__main__':
    # Demo entry point: uncomment a call to run the corresponding example.
    # np_version()
    # np_list()
    np_list_create()
    # np_list_by_byte()
    # np_list_by_func()
    # np_list_opt()
    print np.fromfunction(lambda x: x, (10,))
#ifndef SRC_COMPILE_CACHE_H_
#define SRC_COMPILE_CACHE_H_
#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#include <cinttypes>
#include <memory>
#include <string>
#include <string_view>
#include <unordered_map>
#include "v8.h"
namespace node {
class Environment;
// X-macro enumerating the kinds of code a cache entry can hold, together
// with their stable on-disk values.
#define CACHED_CODE_TYPES(V) \
  V(kCommonJS, 0) \
  V(kESM, 1) \
  V(kStrippedTypeScript, 2) \
  V(kTransformedTypeScript, 3) \
  V(kTransformedTypeScriptWithSourceMaps, 4)
enum class CachedCodeType : uint8_t {
#define V(type, value) type = value,
  CACHED_CODE_TYPES(V)
#undef V
};
// One in-memory record of a cached compilation artifact.
struct CompileCacheEntry {
  std::unique_ptr<v8::ScriptCompiler::CachedData> cache{nullptr};
  uint32_t cache_key;
  uint32_t code_hash;
  uint32_t code_size;
  std::string cache_filename;
  std::string source_filename;
  CachedCodeType type;
  // Whether the cache was refreshed during this run (reset per process).
  bool refreshed = false;
  // Whether the cache has been written back to disk.
  bool persisted = false;
  // Copy the cache into a new store for V8 to consume. Caller takes
  // ownership.
  v8::ScriptCompiler::CachedData* CopyCache() const;
  const char* type_name() const;
};
// Possible outcomes of attempting to enable the compile cache.
#define COMPILE_CACHE_STATUS(V) \
  V(FAILED) /* Failed to enable the cache */ \
  V(ENABLED) /* Was not enabled before, and now enabled. */ \
  V(ALREADY_ENABLED) /* Was already enabled. */ \
  V(DISABLED) /* Has been disabled by NODE_DISABLE_COMPILE_CACHE. */
enum class CompileCacheEnableStatus : uint8_t {
#define V(status) status,
  COMPILE_CACHE_STATUS(V)
#undef V
};
struct CompileCacheEnableResult {
  CompileCacheEnableStatus status;
  std::string cache_directory;
  std::string message; // Set in case of failure.
};
enum class EnableOption : uint8_t { DEFAULT, PORTABLE };
// Manages the on-disk compile cache: lookup/insertion of entries keyed by
// source hash, and persistence of V8 code cache data.
class CompileCacheHandler {
 public:
  explicit CompileCacheHandler(Environment* env);
  CompileCacheEnableResult Enable(Environment* env,
                                  const std::string& dir,
                                  EnableOption option = EnableOption::DEFAULT);
  void Persist();
  CompileCacheEntry* GetOrInsert(v8::Local<v8::String> code,
                                 v8::Local<v8::String> filename,
                                 CachedCodeType type);
  void MaybeSave(CompileCacheEntry* entry,
                 v8::Local<v8::Function> func,
                 bool rejected);
  void MaybeSave(CompileCacheEntry* entry,
                 v8::Local<v8::Module> mod,
                 bool rejected);
  void MaybeSave(CompileCacheEntry* entry, std::string_view transpiled);
  std::string_view cache_dir() { return compile_cache_dir_; }
 private:
  void ReadCacheFile(CompileCacheEntry* entry);
  template <typename T>
  void MaybeSaveImpl(CompileCacheEntry* entry,
                     v8::Local<T> func_or_mod,
                     bool rejected);
  template <typename... Args>
  inline void Debug(const char* format, Args&&... args) const;
  // Indices of the uint32 header slots in an on-disk cache file.
  static constexpr size_t kMagicNumberOffset = 0;
  static constexpr size_t kCodeSizeOffset = 1;
  static constexpr size_t kCacheSizeOffset = 2;
  static constexpr size_t kCodeHashOffset = 3;
  static constexpr size_t kCacheHashOffset = 4;
  static constexpr size_t kHeaderCount = 5;
  v8::Isolate* isolate_ = nullptr;
  bool is_debug_ = false;
  std::string compile_cache_dir_;
  std::string normalized_compile_cache_dir_;
  EnableOption portable_ = EnableOption::DEFAULT;
  std::unordered_map<uint32_t, std::unique_ptr<CompileCacheEntry>>
      compiler_cache_store_;
};
} // namespace node
#endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#endif // SRC_COMPILE_CACHE_H_ | c | github | https://github.com/nodejs/node | src/compile_cache.h |
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package securitycontext
import (
"os"
"runtime"
"strconv"
"strings"
"sync"
)
// possibleCPUs returns the identifiers of all possible CPUs on this host,
// preferring the kernel-reported set from sysfs and falling back to
// runtime.NumCPU() when it is unavailable.
func possibleCPUs() []int {
	if parsed := possibleCPUsParsed(); parsed != nil {
		return parsed
	}
	n := runtime.NumCPU()
	ids := make([]int, 0, n)
	for i := 0; i < n; i++ {
		ids = append(ids, i)
	}
	return ids
}
// possibleCPUsParsed parses the set of possible CPUs from
// /sys/devices/system/cpu/possible — a comma-separated list of single CPU
// numbers and inclusive "start-end" ranges. It returns nil on any read or
// parse error, and the result is computed at most once.
var possibleCPUsParsed = sync.OnceValue(func() []int {
	raw, err := os.ReadFile("/sys/devices/system/cpu/possible")
	if err != nil {
		return nil
	}
	var cpus []int
	for _, field := range strings.Split(strings.TrimSpace(string(raw)), ",") {
		first, last, isRange := strings.Cut(field, "-")
		lo, err := strconv.Atoi(first)
		if err != nil {
			return nil
		}
		if !isRange {
			// A bare CPU number, e.g. "3".
			cpus = append(cpus, lo)
			continue
		}
		// An inclusive range, e.g. "0-7".
		hi, err := strconv.Atoi(last)
		if err != nil {
			return nil
		}
		for cpu := lo; cpu <= hi; cpu++ {
			cpus = append(cpus, cpu)
		}
	}
	return cpus
})
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/memory-controllers/ingenic,nemc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ingenic SoCs NAND / External Memory Controller (NEMC)
maintainers:
- Paul Cercueil <paul@crapouillou.net>
properties:
$nodename:
pattern: "^memory-controller@[0-9a-f]+$"
compatible:
oneOf:
- enum:
- ingenic,jz4740-nemc
- ingenic,jz4780-nemc
- items:
- const: ingenic,jz4725b-nemc
- const: ingenic,jz4740-nemc
"#address-cells":
const: 2
"#size-cells":
const: 1
ranges: true
reg:
maxItems: 1
clocks:
maxItems: 1
patternProperties:
".*@[0-9]+$":
type: object
$ref: mc-peripheral-props.yaml#
additionalProperties: true
required:
- compatible
- "#address-cells"
- "#size-cells"
- ranges
- reg
- clocks
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/ingenic,jz4780-cgu.h>
#include <dt-bindings/gpio/gpio.h>
nemc: memory-controller@13410000 {
compatible = "ingenic,jz4780-nemc";
reg = <0x13410000 0x10000>;
#address-cells = <2>;
#size-cells = <1>;
ranges = <1 0 0x1b000000 0x1000000>,
<2 0 0x1a000000 0x1000000>,
<3 0 0x19000000 0x1000000>,
<4 0 0x18000000 0x1000000>,
<5 0 0x17000000 0x1000000>,
<6 0 0x16000000 0x1000000>;
clocks = <&cgu JZ4780_CLK_NEMC>;
ethernet@6 {
compatible = "davicom,dm9000";
davicom,no-eeprom;
pinctrl-names = "default";
pinctrl-0 = <&pins_nemc_cs6>;
reg = <6 0 1>, /* addr */
<6 2 1>; /* data */
ingenic,nemc-tAS = <15>;
ingenic,nemc-tAH = <10>;
ingenic,nemc-tBP = <20>;
ingenic,nemc-tAW = <50>;
ingenic,nemc-tSTRV = <100>;
reset-gpios = <&gpf 12 GPIO_ACTIVE_HIGH>;
vcc-supply = <ð0_power>;
interrupt-parent = <&gpe>;
interrupts = <19 4>;
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml |
import colorsys
import datetime
class ColourScheme(object):
    """Deterministically map hashable keys to colours from a fixed palette.

    The palette combines the Android "holo" colours with a solarized-style
    terminal palette.  ``get_colour`` picks a palette entry from the key's
    hash plus a seed; ``get_colour_exp`` synthesises an arbitrary fully
    saturated RGB colour from the hash instead.

    NOTE: on Python 3, ``hash()`` of strings is randomized per process
    unless PYTHONHASHSEED is set, so key->colour mappings are only stable
    within a single run.
    """
    # Android "holo" palette (hex RGB strings, no leading '#').
    cols_android = {
        'light_blue' : '33b5e5',
        'blue' : '0099cc',
        'light_purple' : 'aa66cc',
        'purple' : '9933cc',
        'light_green' : '99cc00',
        'green' : '669900',
        'light_yellow' : 'ffbb33',
        'yellow' : 'ff8800',
        'light_red' : 'ff4444',
        'red' : 'cc0000'}
    # Solarized-style palette; the commented-out entries are deliberately
    # excluded from the pool.
    cols_solarized = {
        #'brblack' : '1c1c1c',
        'black' : '262626',
        #'brgreen' : '585858',
        'bryellow' : '626262',
        #'brblue' : '808080',
        #'brcyan' : '8a8a8a',
        #'white' : 'e4e4e4',
        'brwhite' : 'ffffd7',
        'yellow' : 'af8700',
        'brred' : 'd75f00',
        'red' : 'd70000',
        'magenta' : 'af005f',
        'brmagenta' : '5f5faf',
        'blue' : '0087ff',
        'cyan' : '00afaf',
        'green' : '5f8700'}

    def __init__(self, seed=None):
        """Build the combined palette.

        :param seed: optional integer offsetting key->colour assignments;
            a falsy value selects the default, today's day of the month
            (so the mapping rotates daily).
        """
        self.colours = {}
        self.colours.update(self.cols_android)
        self.colours.update(self.cols_solarized)
        if seed:
            self.seed = seed
        else:
            self.seed = datetime.date.today().day

    def get_colour_exp(self, key):
        """Return an ``rgb(r,g,b)`` CSS string with hue derived from *key*."""
        hue = (abs(hash(key)) % 1024) / 1024.0
        (r, g, b) = colorsys.hsv_to_rgb(hue, 1.0, 1.0)
        return 'rgb({0},{1},{2})'.format(int(r * 255), int(g * 255), int(b * 255))

    def get_colour(self, key):
        """Return a palette hex string chosen by *key*'s hash and the seed."""
        index = (abs(hash(key)) + self.seed) % len(self.colours)
        # BUG FIX: dict.keys() is not indexable on Python 3; list(d) yields
        # the keys in the same order on both Python 2 and 3.
        return self.colours[list(self.colours)[index]]
from django.test import TestCase
from regressiontests.select_related_regress.models import *
class SelectRelatedRegressTests(TestCase):
    def test_regression_7110(self):
        """
        Regression test for bug #7110.
        When using select_related(), we must query the
        Device and Building tables using two different aliases (each) in order to
        differentiate the start and end Connection fields. The net result is that
        both the "connections = ..." queries here should give the same results
        without pulling in more than the absolute minimum number of tables
        (history has shown that it's easy to make a mistake in the implementation
        and include some unnecessary bonus joins).
        """
        # Fixture: one building with three devices connected in a chain
        # (router -> switch -> server) through ports.
        b=Building.objects.create(name='101')
        dev1=Device.objects.create(name="router", building=b)
        dev2=Device.objects.create(name="switch", building=b)
        dev3=Device.objects.create(name="server", building=b)
        port1=Port.objects.create(port_number='4',device=dev1)
        port2=Port.objects.create(port_number='7',device=dev2)
        port3=Port.objects.create(port_number='1',device=dev3)
        c1=Connection.objects.create(start=port1, end=port2)
        c2=Connection.objects.create(start=port2, end=port3)
        connections=Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
        self.assertEqual([(c.id, unicode(c.start), unicode(c.end)) for c in connections],
            [(c1.id, u'router/4', u'switch/7'), (c2.id, u'switch/7', u'server/1')])
        connections=Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
        self.assertEqual([(c.id, unicode(c.start), unicode(c.end)) for c in connections],
            [(c1.id, u'router/4', u'switch/7'), (c2.id, u'switch/7', u'server/1')])
        # This final query should only join seven tables (port, device and building
        # twice each, plus connection once).
        self.assertEqual(connections.query.count_active_tables(), 7)
def test_regression_8106(self):
"""
Regression test for bug #8106.
Same sort of problem as the previous test, but this time there are
more extra tables to pull in as part of the select_related() and some
of them could potentially clash (so need to be kept separate).
"""
us = TUser.objects.create(name="std")
usp = Person.objects.create(user=us)
uo = TUser.objects.create(name="org")
uop = Person.objects.create(user=uo)
s = Student.objects.create(person = usp)
o = Organizer.objects.create(person = uop)
c = Class.objects.create(org=o)
e = Enrollment.objects.create(std=s, cls=c)
e_related = Enrollment.objects.all().select_related()[0]
self.assertEqual(e_related.std.person.user.name, u"std")
self.assertEqual(e_related.cls.org.person.user.name, u"org")
def test_regression_8036(self):
"""
Regression test for bug #8036
the first related model in the tests below
("state") is empty and we try to select the more remotely related
state__country. The regression here was not skipping the empty column results
for country before getting status.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
client = Client.objects.create(name='client', status=active)
self.assertEqual(client.status, active)
self.assertEqual(Client.objects.select_related()[0].status, active)
self.assertEqual(Client.objects.select_related('state')[0].status, active)
self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('status')[0].status, active)
def test_multi_table_inheritance(self):
""" Exercising select_related() with multi-table model inheritance. """
c1 = Child.objects.create(name="child1", value=42)
i1 = Item.objects.create(name="item1", child=c1)
i2 = Item.objects.create(name="item2")
self.assertQuerysetEqual(
Item.objects.select_related("child").order_by("name"),
["<Item: item1>", "<Item: item2>"]
)
def test_regression_12851(self):
"""
Regression for #12851
Deferred fields are used correctly if you select_related a subset
of fields.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
c1 = Client.objects.create(name='Brian Burke', state=wa, status=active)
burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
self.assertEqual(burke.name, u'Brian Burke')
self.assertEqual(burke.state.name, u'Western Australia')
# Still works if we're dealing with an inherited class
sc1 = SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, u'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, u'Western Australia')
# Still works if we defer an attribute on the inherited class
troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, u'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, u'Western Australia')
# Also works if you use only, rather than defer
troy = SpecialClient.objects.select_related('state').only('name').get(name='Troy Buswell')
self.assertEqual(troy.name, u'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, u'Western Australia') | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import re
# Verbose output so synthesis logs show exactly what the generator did.
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
# Generate the v3 monitoring client from the artman config.
v3_library = gapic.ruby_library(
    'monitoring', 'v3',
    config_path='/google/monitoring/artman_monitoring.yaml',
    artman_output_name='google-cloud-ruby/google-cloud-monitoring'
)
# Copy the generated pieces into this repository.
s.copy(v3_library / 'acceptance')
s.copy(v3_library / 'lib')
s.copy(v3_library / 'test')
s.copy(v3_library / 'README.md')
s.copy(v3_library / 'LICENSE')
s.copy(v3_library / '.gitignore')
s.copy(v3_library / '.yardopts')
# The gemspec is merged (not overwritten) so hand-maintained fields survive.
s.copy(v3_library / 'google-cloud-monitoring.gemspec', merge=ruby.merge_gemspec)
# PERMANENT: Use a compatible version of googleapis-common-protos-types
s.replace(
    'google-cloud-monitoring.gemspec',
    '\n gem\\.add_dependency "google-gax", "~> ([\\d\\.]+)"',
    '\n gem.add_dependency "google-gax", "~> \\1"\n gem.add_dependency "googleapis-common-protos-types", ">= 1.0.2"')
# https://github.com/googleapis/gapic-generator/issues/2242
def escape_braces(match):
    """Escape ``{word}`` sequences found in a doc-comment span.

    *match* is an ``re`` match object covering the comment text; every
    ``{...}`` that is not already escaped and not inside a back-ticked code
    span gets a ``\\\\`` prefix.  Repeats until no further substitution is
    made, then returns the rewritten text.
    """
    pattern = re.compile('^([^`]*(`[^`]*`[^`]*)*)([^`#\\$\\\\])\\{([\\w,]+)\\}')
    text = match.group(0)
    changed = 1
    while changed:
        text, changed = pattern.subn('\\1\\3\\\\\\\\{\\4}', text)
    return text
# Apply escape_braces to every generated doc comment containing `{word}`.
s.replace(
    'lib/google/cloud/**/*.rb',
    '\n(\\s+)#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
    escape_braces)
# https://github.com/googleapis/gapic-generator/issues/2243
# Hide the generated *_stub accessors from the public YARD docs.
s.replace(
    'lib/google/cloud/monitoring/*/*_client.rb',
    '(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
    '\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
# Ensure a blank line separates the license header from the first code line.
s.replace(
    'lib/**/*.rb',
    '\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
    '\\1\n\\6')
# https://github.com/googleapis/gapic-generator/issues/2323
# Point repository links at the renamed googleapis GitHub org.
s.replace(
    [
        'lib/**/*.rb',
        'README.md'
    ],
    'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
    'https://github.com/googleapis/google-cloud-ruby'
)
# ...and the matching GitHub Pages documentation site.
s.replace(
    [
        'lib/**/*.rb',
        'README.md'
    ],
    'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
    'https://googleapis.github.io/google-cloud-ruby'
)
# django imports
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
from django.utils.translation import ugettext_lazy as _
class CriteriaObjects(models.Model):
    """Assigns arbitrary criteria to arbitrary content objects.

    Both ends of the association are generic foreign keys: ``criterion``
    points at any criterion model instance and ``content`` points at the
    object the criterion is attached to.
    """
    class Meta:
        ordering = ["position"]
        verbose_name_plural = "Criteria objects"
        app_label = "criteria"

    # Generic FK identifying the criterion instance.
    criterion_type = models.ForeignKey(ContentType, verbose_name=_(u"Criterion type"), related_name="criterion")
    # NOTE(review): verbose name reads "Content id" — likely copy/pasted from
    # content_id below; confirm before changing (it is user-visible text).
    criterion_id = models.PositiveIntegerField(_(u"Content id"))
    criterion = generic.GenericForeignKey(ct_field="criterion_type", fk_field="criterion_id")

    # Generic FK identifying the object the criterion applies to.
    content_type = models.ForeignKey(ContentType, verbose_name=_(u"Content type"), related_name="content_type")
    content_id = models.PositiveIntegerField(_(u"Content id"))
    content = generic.GenericForeignKey(ct_field="content_type", fk_field="content_id")

    # Ordering position among the criteria attached to one content object.
    position = models.PositiveIntegerField(_(u"Position"), default=999)
import json
import urllib
import openerp
from openerp import http
from openerp.http import request
import openerp.addons.web.controllers.main as webmain
from openerp.addons.web.http import SessionExpiredException
from werkzeug.exceptions import BadRequest
import werkzeug.utils
class google_auth(http.Controller):
    """Controller that receives Google's OAuth2 consent-screen redirect."""

    @http.route('/google_account/authentication', type='http', auth="none")
    def oauth2callback(self, **kw):
        """This route/function is called by Google when user Accept/Refuse the consent of Google.

        The ``state`` query parameter is the JSON blob we built before
        redirecting the user to Google; it carries the database name
        (``d``), the Google service involved (``s``) and the URL to send
        the user back to (``f``).  On success the authorization ``code``
        is exchanged for tokens; on failure the error code is propagated
        back to ``url_return`` as a query parameter.
        """
        state = json.loads(kw['state'])
        dbname = state.get('d')
        service = state.get('s')
        url_return = state.get('f')
        registry = openerp.modules.registry.RegistryManager.get(dbname)
        with registry.cursor() as cr:
            if kw.get('code', False):
                registry.get('google.%s' % service).set_all_tokens(cr, request.session.uid, kw['code'])
                return werkzeug.utils.redirect(url_return)
            elif kw.get('error'):
                # The error code comes from an untrusted query string:
                # percent-encode it before splicing it into the redirect URL.
                return werkzeug.utils.redirect("%s?error=%s" % (url_return, urllib.quote_plus(kw.get('error'))))
            else:
                return werkzeug.utils.redirect("%s?error=Unknown_error" % url_return)
from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, Unicode, DateTime, ForeignKey, and_, Index
from sqlalchemy.orm import relation
from flexget import db_schema, options, plugin
from flexget.event import event
from flexget.logger import console
from flexget.manager import Session
from flexget.utils.sqlalchemy_utils import table_columns, table_add_column
from flexget.utils.tools import parse_timedelta
# Module logger; name is truncated to fit FlexGet's log-column width.
log = logging.getLogger('remember_rej')
# Declarative base tied to schema version 3 of the 'remember_rejected' plugin.
Base = db_schema.versioned_base('remember_rejected', 3)
@db_schema.upgrade('remember_rejected')
def upgrade(ver, session):
    """Step the remember_rejected schema from *ver* up to version 3."""
    table = 'remember_rejected_entry'
    if ver is None:
        # A version-less database that already has a `uid` column predates
        # the versioned schema and cannot be migrated automatically.
        if 'uid' in table_columns(table, session):
            raise db_schema.UpgradeImpossible
        ver = 0
    if ver == 0:
        log.info('Adding reason column to remember_rejected_entry table.')
        table_add_column(table, 'reason', String, session)
        ver = 1
    if ver == 1:
        log.info('Adding `added` column to remember_rejected_entry table.')
        table_add_column(table, 'added', DateTime, session, default=datetime.now)
        ver = 2
    if ver == 2:
        log.info('Adding expires column to remember_rejected_entry table.')
        table_add_column(table, 'expires', DateTime, session)
        ver = 3
    return ver
class RememberTask(Base):
    """One row per FlexGet task that has remembered rejections."""
    __tablename__ = 'remember_rejected_feeds'

    id = Column(Integer, primary_key=True)
    # Task name as configured in FlexGet.
    name = Column(Unicode)
    # Rejections recorded for this task; deleted along with the task row.
    entries = relation('RememberEntry', backref='task', cascade='all, delete, delete-orphan')
class RememberEntry(Base):
    """A single remembered rejection (title/url pair) belonging to a task."""
    __tablename__ = 'remember_rejected_entry'

    id = Column(Integer, primary_key=True)
    added = Column(DateTime, default=datetime.now)
    # When the rejection stops applying; NULL means remember indefinitely.
    expires = Column(DateTime)
    title = Column(Unicode)
    url = Column(String)
    # Plugin that requested the rejection, and the reason it gave.
    rejected_by = Column(String)
    reason = Column(String)
    # Physical column is still named feed_id for backward compatibility.
    task_id = Column('feed_id', Integer, ForeignKey('remember_rejected_feeds.id'), nullable=False)

# Speeds up the (task, title, url) lookups done while filtering.
Index('remember_feed_title_url', RememberEntry.task_id, RememberEntry.title, RememberEntry.url)
class FilterRememberRejected(object):
    """Internal.
    Rejects entries which have been rejected in the past.
    This is enabled when item is rejected with remember=True flag.
    Example::
        entry.reject('message', remember=True)
    """
    @plugin.priority(0)
    def on_task_start(self, task, config):
        """Purge remembered entries if the config has changed."""
        with Session() as session:
            # See if the task has changed since last run
            old_task = session.query(RememberTask).filter(RememberTask.name == task.name).first()
            if not task.is_rerun and old_task and task.config_modified:
                log.debug('Task config has changed since last run, purging remembered entries.')
                session.delete(old_task)
                old_task = None
            if not old_task:
                # Create this task in the db if not present
                session.add(RememberTask(name=task.name))
            elif not task.is_rerun:
                # Delete expired items if this is not a rerun
                deleted = session.query(RememberEntry).filter(RememberEntry.task_id == old_task.id).\
                    filter(RememberEntry.expires < datetime.now()).delete()
                if deleted:
                    log.debug('%s entries have expired from remember_rejected table.' % deleted)
                    # Expired entries may now be accepted, so force reprocessing.
                    task.config_changed()

    @plugin.priority(-255)
    def on_task_input(self, task, config):
        # Hook every entry's reject() so remember/remember_time kwargs reach us.
        for entry in task.all_entries:
            entry.on_reject(self.on_entry_reject)

    @plugin.priority(255)
    def on_task_filter(self, task, config):
        """Reject any remembered entries from previous runs"""
        with Session() as session:
            (task_id,) = session.query(RememberTask.id).filter(RememberTask.name == task.name).first()
            reject_entries = session.query(RememberEntry).filter(RememberEntry.task_id == task_id)
            if reject_entries.count():
                # Reject all the remembered entries
                for entry in task.entries:
                    if not entry.get('url'):
                        # We don't record or reject any entries without url
                        continue
                    reject_entry = reject_entries.filter(and_(RememberEntry.title == entry['title'],
                                                              RememberEntry.url == entry['original_url'])).first()
                    if reject_entry:
                        entry.reject('Rejected on behalf of %s plugin: %s' %
                                     (reject_entry.rejected_by, reject_entry.reason))

    def on_entry_reject(self, entry, remember=None, remember_time=None, **kwargs):
        """Mark *entry* for persistence when a plugin rejects it with remember."""
        # We only remember rejections that specify the remember keyword argument
        if not (remember or remember_time):
            return
        if not entry.get('title') or not entry.get('original_url'):
            log.debug('Can\'t remember rejection for entry without title or url.')
            return
        if remember_time:
            # Accept either a timedelta or an interval string like '1 hour'.
            if isinstance(remember_time, basestring):
                remember_time = parse_timedelta(remember_time)
        message = 'Remembering rejection of `%s`' % entry['title']
        if remember_time:
            message += ' for %i minutes' % (remember_time.seconds / 60)
        log.info(message)
        # Picked up later by on_task_learn, which writes the DB row.
        entry['remember_rejected'] = remember_time or remember

    @plugin.priority(-255)
    def on_task_learn(self, task, config):
        """Persist the rejections flagged during this run."""
        with Session() as session:
            for entry in task.all_entries:
                if not entry.get('remember_rejected'):
                    continue
                expires = None
                if isinstance(entry['remember_rejected'], timedelta):
                    expires = datetime.now() + entry['remember_rejected']
                (remember_task_id,) = session.query(RememberTask.id).filter(RememberTask.name == task.name).first()
                session.add(RememberEntry(title=entry['title'], url=entry['original_url'], task_id=remember_task_id,
                                          rejected_by=entry.get('rejected_by'), reason=entry.get('reason'),
                                          expires=expires))
def do_cli(manager, options):
    """Dispatch the `rejected` CLI subcommand (list or clear)."""
    action = options.rejected_action
    if action == 'list':
        list_rejected()
    elif action == 'clear':
        clear_rejected(manager)
def list_rejected():
    """Print every remembered rejection to the console."""
    with Session() as session:
        entries = session.query(RememberEntry).all()
        if not entries:
            console('No rejected entries recorded by remember_rejected')
            return
        console('Rejections remembered by remember_rejected:')
        for entry in entries:
            console('%s from %s by %s because %s' % (entry.title, entry.task.name, entry.rejected_by, entry.reason))
def clear_rejected(manager):
    """Delete all remembered rejections; notify the manager if any existed."""
    with Session() as session:
        removed = session.query(RememberEntry).delete()
        console('Cleared %i items.' % removed)
        session.commit()
        if removed:
            # Cleared entries may become eligible again; trigger reprocessing.
            manager.config_changed()
@event('manager.db_cleanup')
def db_cleanup(manager, session):
    """Drop remembered rejections that are more than 30 days old."""
    cutoff = datetime.now() - timedelta(days=30)
    removed = session.query(RememberEntry).filter(RememberEntry.added < cutoff).delete()
    if removed:
        log.verbose('Removed %d entries from remember rejected table.' % removed)
@event('plugin.register')
def register_plugin():
    # Registered as builtin: active on every task without explicit config.
    plugin.register(FilterRememberRejected, 'remember_rejected', builtin=True, api_ver=2)
@event('options.register')
def register_parser_arguments():
    """Register the `rejected` CLI command and its subcommands."""
    cmd = options.register_command('rejected', do_cli, help='list or clear remembered rejections')
    actions = cmd.add_subparsers(dest='rejected_action', metavar='<action>')
    actions.add_parser('list', help='list all the entries that have been rejected')
    actions.add_parser('clear', help='clear all rejected entries from database, so they can be retried')
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/ti,am62-audio-refclk.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: TI Audio Reference Clock

maintainers:
  - Jai Luthra <j-luthra@ti.com>

properties:
  compatible:
    items:
      - const: ti,am62-audio-refclk

  reg:
    maxItems: 1

  # Zero clock-specifier cells: consumers reference the node directly.
  "#clock-cells":
    const: 0

  # Single parent clock feeding the audio reference output.
  clocks:
    maxItems: 1

required:
  - compatible
  - reg
  - "#clock-cells"
  - clocks

additionalProperties: false

examples:
  - |
    audio_refclk0: clock@82e0 {
        compatible = "ti,am62-audio-refclk";
        reg = <0x82e0 0x4>;
        clocks = <&k3_clks 157 0>;
        assigned-clocks = <&k3_clks 157 0>;
        assigned-clock-parents = <&k3_clks 157 8>;
        #clock-cells = <0>;
    };
import { flushSync } from 'svelte';
import { test } from '../../test';
/**
 * Verifies that `bind:value` inside an `{#each}` block stays wired to the
 * right array slot: typing into an input mutates the corresponding item,
 * and reassigning an item updates the corresponding input.
 */
export default test({
	get props() {
		return { items: ['one', 'two', 'three'] };
	},

	html: `
		<div>
			<input><p>one</p>
		</div>
		<div>
			<input><p>two</p>
		</div>
		<div>
			<input><p>three</p>
		</div>
	`,

	ssrHtml: `
		<div>
			<input value=one><p>one</p>
		</div>
		<div>
			<input value=two><p>two</p>
		</div>
		<div>
			<input value=three><p>three</p>
		</div>
	`,

	test({ assert, component, target, window }) {
		const inputs = [...target.querySelectorAll('input')];
		const items = component.items;
		const event = new window.Event('input');

		assert.equal(inputs[0].value, 'one');

		// DOM -> state: editing the second input updates items[1].
		inputs[1].value = 'four';
		inputs[1].dispatchEvent(event);
		flushSync();

		assert.equal(items[1], 'four');
		assert.htmlEqual(
			target.innerHTML,
			`
			<div>
				<input><p>one</p>
			</div>
			<div>
				<input><p>four</p>
			</div>
			<div>
				<input><p>three</p>
			</div>
		`
		);

		// state -> DOM: reassigning items updates the third input.
		items[2] = 'five';
		component.items = items;

		assert.equal(inputs[2].value, 'five');
		assert.htmlEqual(
			target.innerHTML,
			`
			<div>
				<input><p>one</p>
			</div>
			<div>
				<input><p>four</p>
			</div>
			<div>
				<input><p>five</p>
			</div>
		`
		);
	}
});
#
# Copyright 2005,2006,2012-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from fm_emph import fm_deemph
import math
try:
from gnuradio import analog
except ImportError:
import analog_swig as analog
class wfm_rcv_pll(gr.hier_block2):
    """PLL-based broadcast WFM stereo receiver hierarchical block."""

    def __init__(self, demod_rate, audio_decimation):
        """
        Hierarchical block for demodulating a broadcast FM signal.
        The input is the downconverted complex baseband signal (gr_complex).
        The output is two streams of the demodulated audio (float) 0=Left, 1=Right.
        Args:
            demod_rate: input sample rate of complex baseband input. (float)
            audio_decimation: how much to decimate demod_rate to get to audio. (integer)
        """
        gr.hier_block2.__init__(self, "wfm_rcv_pll",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(2, 2, gr.sizeof_float))      # Output signature
        bandwidth = 250e3  # NOTE(review): unused in this constructor
        audio_rate = demod_rate / audio_decimation
        # We assign to self so that outsiders can grab the demodulator
        # if they need to. E.g., to plot its output.
        #
        # input: complex; output: float
        loop_bw = 2*math.pi/100.0
        max_freq = 2.0*math.pi*90e3/demod_rate
        self.fm_demod = analog.pll_freqdet_cf(loop_bw, max_freq,-max_freq)
        # input: float; output: float
        self.deemph_Left = fm_deemph(audio_rate)
        self.deemph_Right = fm_deemph(audio_rate)
        # compute FIR filter taps for audio filter
        width_of_transition_band = audio_rate / 32
        audio_coeffs = filter.firdes.low_pass(1.0 ,         # gain
                                              demod_rate,   # sampling rate
                                              15000 ,
                                              width_of_transition_band,
                                              filter.firdes.WIN_HAMMING)
        # input: float; output: float
        self.audio_filter = filter.fir_filter_fff(audio_decimation, audio_coeffs)
        if 1:
            # Pick off the stereo carrier/2 with this filter. It attenuated 10 dB so apply 10 dB gain
            # We pick off the negative frequency half because we want to base band by it!
            ## NOTE THIS WAS HACKED TO OFFSET INSERTION LOSS DUE TO DEEMPHASIS
            stereo_carrier_filter_coeffs = \
                filter.firdes.complex_band_pass(10.0,
                                                demod_rate,
                                                -19020,
                                                -18980,
                                                width_of_transition_band,
                                                filter.firdes.WIN_HAMMING)
            #print "len stereo carrier filter = ",len(stereo_carrier_filter_coeffs)
            #print "stereo carrier filter ", stereo_carrier_filter_coeffs
            #print "width of transition band = ",width_of_transition_band, " audio rate = ", audio_rate
            # Pick off the double side band suppressed carrier Left-Right audio. It is attenuated 10 dB so apply 10 dB gain
            stereo_dsbsc_filter_coeffs = \
                filter.firdes.complex_band_pass(20.0,
                                                demod_rate,
                                                38000-15000/2,
                                                38000+15000/2,
                                                width_of_transition_band,
                                                filter.firdes.WIN_HAMMING)
            #print "len stereo dsbsc filter = ",len(stereo_dsbsc_filter_coeffs)
            #print "stereo dsbsc filter ", stereo_dsbsc_filter_coeffs
            # construct overlap add filter system from coefficients for stereo carrier
            self.stereo_carrier_filter = \
                filter.fir_filter_fcc(audio_decimation, stereo_carrier_filter_coeffs)
            # carrier is twice the picked off carrier so arrange to do a commplex multiply
            self.stereo_carrier_generator = blocks.multiply_cc();
            # Pick off the rds signal
            stereo_rds_filter_coeffs = \
                filter.firdes.complex_band_pass(30.0,
                                                demod_rate,
                                                57000 - 1500,
                                                57000 + 1500,
                                                width_of_transition_band,
                                                filter.firdes.WIN_HAMMING)
            #print "len stereo dsbsc filter = ",len(stereo_dsbsc_filter_coeffs)
            #print "stereo dsbsc filter ", stereo_dsbsc_filter_coeffs
            # construct overlap add filter system from coefficients for stereo carrier
            self.rds_signal_filter = \
                filter.fir_filter_fcc(audio_decimation, stereo_rds_filter_coeffs)
            self.rds_carrier_generator = blocks.multiply_cc();
            self.rds_signal_generator = blocks.multiply_cc();
            # NOTE(review): local name uses an underscore (self_...), not an
            # attribute (self....).  It still works because the local is wired
            # into the graph below, but it looks like a typo — confirm intent.
            self_rds_signal_processor = blocks.null_sink(gr.sizeof_gr_complex);
            loop_bw = 2*math.pi/100.0
            max_freq = -2.0*math.pi*18990/audio_rate;
            min_freq = -2.0*math.pi*19010/audio_rate;
            self.stereo_carrier_pll_recovery = \
                analog.pll_refout_cc(loop_bw, max_freq, min_freq);
            #self.stereo_carrier_pll_recovery.squelch_enable(False) #pll_refout does not have squelch yet, so disabled for now
            # set up mixer (multiplier) to get the L-R signal at baseband
            self.stereo_basebander = blocks.multiply_cc();
            # pick off the real component of the basebanded L-R signal. The imaginary SHOULD be zero
            self.LmR_real = blocks.complex_to_real();
            self.Make_Left = blocks.add_ff();
            self.Make_Right = blocks.sub_ff();
            self.stereo_dsbsc_filter = \
                filter.fir_filter_fcc(audio_decimation, stereo_dsbsc_filter_coeffs)
        if 1:
            # send the real signal to complex filter to pick off the carrier and then to one side of a multiplier
            self.connect(self, self.fm_demod, self.stereo_carrier_filter,
                         self.stereo_carrier_pll_recovery, (self.stereo_carrier_generator,0))
            # send the already filtered carrier to the otherside of the carrier
            self.connect(self.stereo_carrier_pll_recovery, (self.stereo_carrier_generator,1))
            # the resulting signal from this multiplier is the carrier with correct phase but at -38000 Hz.
            # send the new carrier to one side of the mixer (multiplier)
            self.connect(self.stereo_carrier_generator, (self.stereo_basebander,0))
            # send the demphasized audio to the DSBSC pick off filter, the complex
            # DSBSC signal at +38000 Hz is sent to the other side of the mixer/multiplier
            self.connect(self.fm_demod,self.stereo_dsbsc_filter, (self.stereo_basebander,1))
            # the result is BASEBANDED DSBSC with phase zero!
            # Pick off the real part since the imaginary is theoretically zero and then to one side of a summer
            self.connect(self.stereo_basebander, self.LmR_real, (self.Make_Left,0))
            #take the same real part of the DSBSC baseband signal and send it to negative side of a subtracter
            self.connect(self.LmR_real,(self.Make_Right,1))
            # Make rds carrier by taking the squared pilot tone and multiplying by pilot tone
            self.connect(self.stereo_basebander,(self.rds_carrier_generator,0))
            self.connect(self.stereo_carrier_pll_recovery,(self.rds_carrier_generator,1))
            # take signal, filter off rds, send into mixer 0 channel
            self.connect(self.fm_demod,self.rds_signal_filter,(self.rds_signal_generator,0))
            # take rds_carrier_generator output and send into mixer 1 channel
            self.connect(self.rds_carrier_generator,(self.rds_signal_generator,1))
            # send basebanded rds signal and send into "processor" which for now is a null sink
            self.connect(self.rds_signal_generator,self_rds_signal_processor)
        if 1:
            # pick off the audio, L+R that is what we used to have and send it to the summer
            self.connect(self.fm_demod, self.audio_filter, (self.Make_Left, 1))
            # take the picked off L+R audio and send it to the PLUS side of the subtractor
            self.connect(self.audio_filter,(self.Make_Right, 0))
            # The result of Make_Left gets (L+R) + (L-R) and results in 2*L
            # The result of Make_Right gets (L+R) - (L-R) and results in 2*R
            self.connect(self.Make_Left , self.deemph_Left, (self, 0))
            self.connect(self.Make_Right, self.deemph_Right, (self, 1))
            # NOTE: mono support will require variable number of outputs in hier_block2s
            # See ticket:174 in Trac database
            #else:
            #    self.connect (self.fm_demod, self.audio_filter, self)
#ifndef TABLE_INCLUDED
#define TABLE_INCLUDED
/* Copyright (c) 2000, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include <assert.h>
#include <string.h>
#include <sys/types.h>
#include <string>
#include "field_types.h"
#include "lex_string.h"
#include "map_helpers.h"
#include "mem_root_deque.h"
#include "my_alloc.h"
#include "my_base.h"
#include "my_bitmap.h"
#include "my_compiler.h"
#include "mysql/binlog/event/table_id.h" // Table_id
#include "my_inttypes.h"
#include "my_sys.h"
#include "my_table_map.h"
#include "mysql/components/services/bits/mysql_mutex_bits.h"
#include "mysql/components/services/bits/psi_table_bits.h"
#include "mysql/strings/m_ctype.h"
#include "sql/auth/auth_acls.h" // Access_bitmask
#include "sql/dd/types/foreign_key.h" // dd::Foreign_key::enum_rule
#include "sql/enum_query_type.h" // enum_query_type
#include "sql/json_duality_view/dml.h"
#include "sql/key.h"
#include "sql/key_spec.h"
#include "sql/mdl.h" // MDL_wait_for_subgraph
#include "sql/mem_root_array.h"
#include "sql/mysqld_cs.h"
#include "sql/opt_costmodel.h" // Cost_model_table
#include "sql/partition_info.h"
#include "sql/record_buffer.h" // Record_buffer
#include "sql/sql_bitmap.h" // Bitmap
#include "sql/sql_const.h"
#include "sql/sql_list.h"
#include "sql/sql_plist.h"
#include "sql/sql_plugin_ref.h"
#include "sql/sql_sort.h" // Sort_result
#include "sql/tablesample.h"
#include "thr_lock.h"
#include "typelib.h"
class Field;
class Field_longlong;
namespace histograms {
class Histogram;
} // namespace histograms
class ACL_internal_schema_access;
class ACL_internal_table_access;
class Field_json;
/* Structs that defines the TABLE */
class File_parser;
class Value_generator;
class GRANT_TABLE;
class Handler_share;
class Index_hint;
class Item;
class Item_ident;
class Item_field;
class Json_diff_vector;
class Json_seekable_path;
class Json_wrapper;
class MaterializedPathCache;
class Name_string;
class Opt_hints_qb;
class Opt_hints_table;
class Query_result_union;
class Query_block;
class Query_expression;
class Security_context;
class SortingIterator;
class String;
class THD;
class Table_cache_element;
class Table_histograms;
class Table_histograms_collection;
class Table_ref;
class Table_trigger_dispatcher;
class Temp_table_param;
class Trigger;
class handler;
class partition_info;
enum enum_stats_auto_recalc : int;
enum Value_generator_source : short;
enum row_type : int;
struct AccessPath;
struct BytesPerTableRow;
struct COND_EQUAL;
struct HA_CREATE_INFO;
struct LEX;
struct NESTED_JOIN;
struct Partial_update_info;
struct TABLE;
struct TABLE_SHARE;
struct handlerton;
struct Name_resolution_context;
using plan_idx = int;
namespace dd {
class Table;
class View;
enum class enum_table_type;
} // namespace dd
class Common_table_expr;
class Sql_table_check_constraint;
using Sql_table_check_constraint_list =
Mem_root_array<Sql_table_check_constraint>;
class Sql_check_constraint_share;
using Sql_check_constraint_share_list =
Mem_root_array<Sql_check_constraint_share>;
namespace jdv {
class Content_tree_node;
} // namespace jdv
typedef Mem_root_array_YY<LEX_CSTRING> Create_col_name_list;
typedef int64 query_id_t;
enum class enum_json_diff_operation;
bool assert_ref_count_is_locked(const TABLE_SHARE *);
bool assert_invalid_dict_is_locked(const TABLE *);
bool assert_invalid_stats_is_locked(const TABLE *);
[[nodiscard]] const Table_ref *jdv_root_base_table(const Table_ref *);
// Record-buffer helpers: copy or compare the s->reclength bytes of
// record[0] against another record-buffer member B of TABLE *A.
#define store_record(A, B) \
  memcpy((A)->B, (A)->record[0], (size_t)(A)->s->reclength)
#define restore_record(A, B) \
  memcpy((A)->record[0], (A)->B, (size_t)(A)->s->reclength)
#define cmp_record(A, B) \
  memcmp((A)->record[0], (A)->B, (size_t)(A)->s->reclength)

#define tmp_file_prefix "#sql" /**< Prefix for tmp tables */
#define tmp_file_prefix_length 4
// NOTE(review): exact use of these constants is defined where they are
// consumed (key construction / row estimation) — not visible in this header.
#define TMP_TABLE_KEY_EXTRA 8
#define PLACEHOLDER_TABLE_ROW_ESTIMATE 2
/**
  Enumerate possible types of a table from re-execution
  standpoint.
  Table_ref class has a member of this type.
  At prepared statement prepare, this member is assigned a value
  as of the current state of the database. Before (re-)execution
  of a prepared statement, we check that the value recorded at
  prepare matches the type of the object we obtained from the
  table definition cache.
  @sa check_and_update_table_version()
  @sa Execute_observer
  @sa Prepared_statement::reprepare()
*/
enum enum_table_ref_type {
  /** Initial value set by the parser */
  TABLE_REF_NULL = 0,
  TABLE_REF_VIEW,       ///< View
  TABLE_REF_BASE_TABLE, ///< Base table
  TABLE_REF_I_S_TABLE,  ///< INFORMATION_SCHEMA table
  TABLE_REF_TMP_TABLE   ///< Temporary table
};
/**
  Enumerate possible status of an identifier name while determining
  its validity: valid, malformed, or exceeding the length limit.
*/
enum class Ident_name_check { OK, WRONG, TOO_LONG };
/*************************************************************************/
/**
  Object_creation_ctx -- interface for creation context of database objects
  (views, stored routines, events, triggers). Creation context -- is a set
  of attributes, that should be fixed at the creation time and then be used
  each time the object is parsed or executed.
*/
class Object_creation_ctx {
 public:
  /// Apply this context to *thd*, returning a backup of the previous state.
  Object_creation_ctx *set_n_backup(THD *thd);
  /// Restore *thd* from a backup obtained via set_n_backup().
  void restore_env(THD *thd, Object_creation_ctx *backup_ctx);

 protected:
  Object_creation_ctx() = default;
  /// Capture the relevant state of *thd* into a new backup context.
  virtual Object_creation_ctx *create_backup_ctx(THD *thd) const = 0;
  /// Dispose of a backup context created by create_backup_ctx().
  virtual void delete_backup_ctx() = 0;
  /// Install this context's attributes onto *thd*.
  virtual void change_env(THD *thd) const = 0;

 public:
  virtual ~Object_creation_ctx() = default;
};
/*************************************************************************/
/**
  Default_object_creation_ctx -- default implementation of
  Object_creation_ctx. Stores the client character set and the
  connection collation (see member documentation below).
*/
class Default_object_creation_ctx : public Object_creation_ctx {
 public:
  const CHARSET_INFO *get_client_cs() { return m_client_cs; }
  const CHARSET_INFO *get_connection_cl() { return m_connection_cl; }

 protected:
  /// Capture the session character set/collation from *thd*.
  Default_object_creation_ctx(THD *thd);
  /// Build a context from explicit charset/collation values.
  Default_object_creation_ctx(const CHARSET_INFO *client_cs,
                              const CHARSET_INFO *connection_cl);

 protected:
  Object_creation_ctx *create_backup_ctx(THD *thd) const override;
  void delete_backup_ctx() override;
  void change_env(THD *thd) const override;

 protected:
  /**
    client_cs stores the value of character_set_client session variable.
    The only character set attribute is used.
    Client character set is included into query context, because we save
    query in the original character set, which is client character set. So,
    in order to parse the query properly we have to switch client character
    set on parsing.
  */
  const CHARSET_INFO *m_client_cs;
  /**
    connection_cl stores the value of collation_connection session
    variable. Both character set and collation attributes are used.
    Connection collation is included into query context, because it defines
    the character set and collation of text literals in internal
    representation of query (item-objects).
  */
  const CHARSET_INFO *m_connection_cl;
};
/**
  View_creation_ctx -- creation context of view objects.
*/
class View_creation_ctx : public Default_object_creation_ctx {
 public:
  /// Create a creation context from the current session state.
  static View_creation_ctx *create(THD *thd);

  /// Create a creation context for the given view.
  static View_creation_ctx *create(THD *thd, Table_ref *view);

 private:
  View_creation_ctx(THD *thd) : Default_object_creation_ctx(thd) {}
};
/*************************************************************************/
/** Order clause list element */

class Item_rollup_group_item;

struct ORDER {
  /// Construct an empty element; item_initial is filled in later.
  ORDER() {}
  /// Construct an element ordering/grouping on the given expression.
  explicit ORDER(Item *grouped_expr) : item_initial(grouped_expr) {}

  /// @returns true if item pointer is same as original
  bool is_item_original() const { return item[0] == item_initial; }

  /// Next element of the singly-linked ORDER BY / GROUP BY list.
  ORDER *next{nullptr};

  /**
    If the query block includes non-primitive grouping, then these modifiers are
    represented as grouping sets. The variable 'grouping_set_info' functions as
    a bitvector, containing the grouping set details. If the 'ith' bit of the
    variable is set, then the corresponding element is included in the 'ith'
    grouping set. */
  MY_BITMAP *grouping_set_info{nullptr};
  /**
    The initial ordering expression. Usually substituted during resolving
    and must not be used during optimization and execution.
  */
  Item *item_initial{nullptr}; /* Storage for initial item */

 public:
  /**
    Points at the item in the select fields. Note that this means that
    after resolving, it points into a slice (see JOIN::ref_items),
    even though the item is not of type Item_ref!
  */
  Item **item{&item_initial};

  /// Set for ROLLUP group items — NOTE(review): exact linkage to the rollup
  /// wrapper is not visible in this header; confirm at use sites.
  Item_rollup_group_item *rollup_item{nullptr};

  enum_order direction{
      ORDER_NOT_RELEVANT};   /* Requested direction of ordering */
  bool in_field_list{false}; /* true if in select field list */
  /**
    Tells whether this ORDER element was referenced with an alias or with an
    expression in the query, and what the alias was:
    SELECT a AS foo GROUP BY foo: "foo".
    SELECT a AS foo GROUP BY a: nullptr.
  */
  const char *used_alias{nullptr};
  /**
    When GROUP BY is implemented with a temporary table (i.e. the table takes
    care to store only unique group rows, table->group != nullptr), each GROUP
    BY expression is stored in a column of the table, which is
    'field_in_tmp_table'.
    Such field may point into table->record[0] (if we only use it to get its
    value from a tmp table's row), or into 'buff' (if we use it to do index
    lookup into the tmp table).
  */
  Field *field_in_tmp_table{nullptr};
  char *buff{nullptr}; /* If tmp-table group */
  // Table maps associated with this expression — presumably "tables used by"
  // and "tables depended on"; TODO confirm at the call sites that fill them.
  table_map used{0}, depend_map{0};
  bool is_explicit{false}; /* Whether ASC/DESC is explicitly specified */
};
/**
  State information for internal tables grants.
  This structure is part of the Table_ref, and is updated
  during the ACL check process.

  The lookups are performed lazily; the *_done flags record that a lookup
  already happened so it is not repeated.

  @sa GRANT_INFO
*/
struct GRANT_INTERNAL_INFO {
  /** True if the internal lookup by schema name was done. */
  bool m_schema_lookup_done{false};
  /** Cached internal schema access. */
  const ACL_internal_schema_access *m_schema_access{nullptr};
  /** True if the internal lookup by table name was done. */
  bool m_table_lookup_done{false};
  /** Cached internal table access. */
  const ACL_internal_table_access *m_table_access{nullptr};
};
/**
  @brief The current state of the privilege checking process for the current
  user, SQL statement and SQL object.

  @details The privilege checking process is divided into phases depending on
  the level of the privilege to be checked and the type of object to be
  accessed. Due to the mentioned scattering of privilege checking
  functionality, it is necessary to keep track of the state of the process.

  A GRANT_INFO also serves as a cache of the privilege hash tables. Relevant
  members are grant_table and version.
*/
struct GRANT_INFO {
  GRANT_INFO();
  /**
    @brief A copy of the privilege information regarding the current host,
    database, object and user.

    @details The version of this copy is found in GRANT_INFO::version.
  */
  GRANT_TABLE *grant_table{nullptr};
  /**
    @brief Used for cache invalidation when caching privilege information.

    @details The privilege information is stored on disk, with dedicated
    caches residing in memory: table-level and column-level privileges,
    respectively, have their own dedicated caches.

    The GRANT_INFO works as a level 1 cache with this member updated to the
    current value of the global variable @c grant_version (@c static variable
    in sql_acl.cc). It is updated whenever the GRANT_INFO is refreshed from
    the level 2 cache. The level 2 cache is the @c column_priv_hash structure
    (@c static variable in sql_acl.cc)

    @see grant_version
  */
  uint version{0};
  /**
    @brief The set of privileges that the current user has fulfilled for a
    certain host, database, and object.

    @details This field is continually updated throughout the access checking
    process. In each step the "wanted privilege" is checked against the
    fulfilled privileges. When/if the intersection of these sets is empty,
    access is granted.

    The set is implemented as a bitmap, with the bits defined in sql_acl.h.
  */
  Access_bitmask privilege{0};
  /** The grant state for internal tables. */
  GRANT_INTERNAL_INFO m_internal;
};
/**
  Degree of temporary-table-ness of a table. @sa TABLE_SHARE::tmp_table and
  the 'no_tmp_table' note in enum_table_category for known inconsistencies.
*/
enum tmp_table_type {
  NO_TMP_TABLE,                ///< Not a temporary table
  NON_TRANSACTIONAL_TMP_TABLE, ///< Temporary table, non-transactional engine
  TRANSACTIONAL_TMP_TABLE,     ///< Temporary table, transactional engine
  INTERNAL_TMP_TABLE,          ///< Internal temporary table (see the caveats
                               ///< documented in enum_table_category)
  SYSTEM_TMP_TABLE             ///< Maps to TABLE_REF_I_S_TABLE, see
                               ///< TABLE_SHARE::get_table_ref_type()
};
/**
  Category of table found in the table share.
*/
enum enum_table_category {
  /**
    Unknown value.
  */
  TABLE_UNKNOWN_CATEGORY = 0,

  /**
    Temporary table.
    The table is visible only in the session.
    Therefore,
    - FLUSH TABLES WITH READ LOCK
    - SET GLOBAL READ_ONLY = ON
    do not apply to this table.
    Note that LOCK TABLE t FOR READ/WRITE
    can be used on temporary tables.
    Temporary tables are not part of the table cache.

    2016-06-14 Contrary to what's written in these comments, the truth is:
    - tables created by CREATE TEMPORARY TABLE have TABLE_CATEGORY_USER
    - tables created by create_tmp_table() (internal ones) have
    TABLE_CATEGORY_TEMPORARY.
    ha_innodb.cc relies on this observation (so: grep it). If you clean this
    up, you may also want to look at 'no_tmp_table'; its enum values' meanings
    have degraded over time: INTERNAL_TMP_TABLE is not used for some internal
    tmp tables (derived tables). Unification of both enums would be
    great. Whatever the result, we need to be able to distinguish the two
    types of temporary tables above, as usage patterns are more restricted for
    the second type, and allow more optimizations.
  */
  TABLE_CATEGORY_TEMPORARY = 1,

  /**
    User table.
    These tables do honor:
    - LOCK TABLE t FOR READ/WRITE
    - FLUSH TABLES WITH READ LOCK
    - SET GLOBAL READ_ONLY = ON
    User tables are cached in the table cache.
  */
  TABLE_CATEGORY_USER = 2,

  /**
    System table, maintained by the server.
    These tables do honor:
    - LOCK TABLE t FOR READ/WRITE
    - FLUSH TABLES WITH READ LOCK
    - SET GLOBAL READ_ONLY = ON
    Typically, writes to system tables are performed by
    the server implementation, not explicitly by a user.
    System tables are cached in the table cache.
  */
  TABLE_CATEGORY_SYSTEM = 3,

  /**
    Information schema tables.
    These tables are an interface provided by the system
    to inspect the system metadata.
    These tables do *not* honor:
    - LOCK TABLE t FOR READ/WRITE
    - FLUSH TABLES WITH READ LOCK
    - SET GLOBAL READ_ONLY = ON
    as there is no point in locking explicitly
    an INFORMATION_SCHEMA table.
    Nothing is directly written to information schema tables.
    Note that this value is not used currently,
    since information schema tables are not shared,
    but implemented as session specific temporary tables.
  */
  /*
    TODO: Fixing the performance issues of I_S will lead
    to I_S tables in the table cache, which should use
    this table type.
  */
  TABLE_CATEGORY_INFORMATION = 4,

  /**
    Log tables.
    These tables are an interface provided by the system
    to inspect the system logs.
    These tables do *not* honor:
    - LOCK TABLE t FOR READ/WRITE
    - FLUSH TABLES WITH READ LOCK
    - SET GLOBAL READ_ONLY = ON
    as there is no point in locking explicitly
    a LOG table.
    An example of LOG tables are:
    - mysql.slow_log
    - mysql.general_log,
    which *are* updated even when there is either
    a GLOBAL READ LOCK or a GLOBAL READ_ONLY in effect.
    User queries do not write directly to these tables
    (there are exceptions for log tables).
    The server implementation performs writes.
    Log tables are cached in the table cache.
  */
  TABLE_CATEGORY_LOG = 5,

  /**
    Performance schema tables.
    These tables are an interface provided by the system
    to inspect the system performance data.
    These tables do *not* honor:
    - LOCK TABLE t FOR READ/WRITE
    - FLUSH TABLES WITH READ LOCK
    - SET GLOBAL READ_ONLY = ON
    as there is no point in locking explicitly
    a PERFORMANCE_SCHEMA table.
    An example of PERFORMANCE_SCHEMA tables are:
    - performance_schema.*
    which *are* updated (but not using the handler interface)
    even when there is either
    a GLOBAL READ LOCK or a GLOBAL READ_ONLY in effect.
    User queries do not write directly to these tables
    (there are exceptions for SETUP_* tables).
    The server implementation performs writes.
    Performance tables are cached in the table cache.
  */
  TABLE_CATEGORY_PERFORMANCE = 6,

  /**
    Replication Information Tables.
    These tables are used to store replication information.
    These tables do *not* honor:
    - LOCK TABLE t FOR READ/WRITE
    - FLUSH TABLES WITH READ LOCK
    - SET GLOBAL READ_ONLY = ON
    as there is no point in locking explicitly
    a Replication Information table.
    An example of replication tables are:
    - mysql.slave_master_info
    - mysql.slave_relay_log_info,
    which *are* updated even when there is either
    a GLOBAL READ LOCK or a GLOBAL READ_ONLY in effect.
    User queries do not write directly to these tables.
    Replication tables are cached in the table cache.
  */
  TABLE_CATEGORY_RPL_INFO = 7,

  /**
    Gtid Table.
    The table is used to store gtids.
    The table does *not* honor:
    - LOCK TABLE t FOR READ/WRITE
    - FLUSH TABLES WITH READ LOCK
    - SET GLOBAL READ_ONLY = ON
    as there is no point in locking explicitly
    a Gtid table.
    An example of gtid_executed table is:
    - mysql.gtid_executed,
    which is updated even when there is either
    a GLOBAL READ LOCK or a GLOBAL READ_ONLY in effect.
    Gtid table is cached in the table cache.
  */
  TABLE_CATEGORY_GTID = 8,

  /**
    A data dictionary table.
    Tables with this category will skip checking the
    TABLE_SHARE versions because these table structures
    are fixed upon server bootstrap.
  */
  TABLE_CATEGORY_DICTIONARY = 9,

  /**
    An ACL metadata table.
    For tables in this category we will skip row locks when SQL statement
    reads them.
  */
  TABLE_CATEGORY_ACL_TABLE = 10
};
/// Convenience alias for enum enum_table_category.
typedef enum enum_table_category TABLE_CATEGORY;

/// Global version counter; a TABLE_SHARE whose version differs from it is
/// considered old. @sa TABLE_SHARE::has_old_version().
extern ulong refresh_version;
/**
  Expected definition of a single table column: name, type and character
  set, all as strings. Used as an element of TABLE_FIELD_DEF.
*/
struct TABLE_FIELD_TYPE {
  LEX_CSTRING name;  ///< Column name
  LEX_CSTRING type;  ///< Column type description
  LEX_CSTRING cset;  ///< Column character set name
};
/**
  Expected structure of a table: column count plus per-column descriptions.
  @sa Table_check_intact::check().
*/
struct TABLE_FIELD_DEF {
  uint count;                     ///< Number of elements in 'field'
  const TABLE_FIELD_TYPE *field;  ///< Array of expected column definitions
};
/**
  Abstract base for checking that an opened table matches an expected
  definition; subclasses decide how discrepancies are reported.
*/
class Table_check_intact {
 protected:
  /// State flag available to subclasses; initialized to false.
  bool has_keys;

  /// Report a discrepancy; printf-style format string plus varargs.
  virtual void report_error(uint code, const char *fmt, ...) = 0;

 public:
  Table_check_intact() : has_keys(false) {}
  virtual ~Table_check_intact() = default;

  /**
    Checks whether a table is intact. Should be done *just* after the table has
    been opened.

    @param[in] thd       Thread handle
    @param[in] table     The table to check
    @param[in] table_def Expected structure of the table (column name
                         and type)

    @retval false OK
    @retval true There was an error.
  */
  bool check(THD *thd, TABLE *table, const TABLE_FIELD_DEF *table_def);
};
/**
  Class representing the fact that some thread waits for table
  share to be flushed. Is used to represent information about
  such waits in MDL deadlock detector.
*/
class Wait_for_flush : public MDL_wait_for_subgraph {
  /// MDL context of the waiting thread.
  MDL_context *m_ctx;
  /// The table share whose flush is being waited for.
  TABLE_SHARE *m_share;
  /// Weight assigned to this wait when resolving deadlocks.
  uint m_deadlock_weight;

 public:
  Wait_for_flush(MDL_context *ctx_arg, TABLE_SHARE *share_arg,
                 uint deadlock_weight_arg)
      : m_ctx(ctx_arg),
        m_share(share_arg),
        m_deadlock_weight(deadlock_weight_arg) {}

  /// MDL context of the waiting thread.
  MDL_context *get_ctx() const { return m_ctx; }

  bool accept_visitor(MDL_wait_for_graph_visitor *dvisitor) override;

  uint get_deadlock_weight() const override;

  /**
    Pointers for participating in the list of waiters for table share.
  */
  Wait_for_flush *next_in_share;
  Wait_for_flush **prev_in_share;
};
/// Intrusive list of Wait_for_flush tickets, linked through
/// next_in_share/prev_in_share, attached to a table share.
typedef I_P_List<
    Wait_for_flush,
    I_P_List_adapter<Wait_for_flush, &Wait_for_flush::next_in_share,
                     &Wait_for_flush::prev_in_share>>
    Wait_for_flush_list;
/**
  Cached description of a foreign key in which the owning table participates
  as the child (referencing) side. @sa TABLE_SHARE::foreign_key.
*/
typedef struct Table_share_foreign_key_info {
  LEX_CSTRING fk_name;                ///< Name of the foreign key
  LEX_CSTRING referenced_table_db;    ///< Schema of the parent table
  LEX_CSTRING referenced_table_name;  ///< Name of the parent table
  /**
    Name of unique key matching FK in parent table, "" if there is no
    unique key.
  */
  LEX_CSTRING unique_constraint_name;
  dd::Foreign_key::enum_rule update_rule, delete_rule;
  /// Number of columns in the foreign key.
  uint columns;
  /**
    Array with names of referencing columns of the FK.
  */
  LEX_CSTRING *referencing_column_names;
  /**
    Array with names of referenced columns of the FK.
  */
  LEX_CSTRING *referenced_column_names;
} TABLE_SHARE_FOREIGN_KEY_INFO;
/**
  Cached description of a foreign key in which the owning table participates
  as the parent (referenced) side. @sa TABLE_SHARE::foreign_key_parent.
*/
typedef struct Table_share_foreign_key_parent_info {
  /**
    Since referenced_column_names and referencing_column_names are already
    stored in TABLE_SHARE_FOREIGN_KEY_INFO, we avoid duplicating them here and
    only add fk_name, allowing check_all_child_fk_ref() to use fk_name to
    retrieve the column details from the child table share
  */
  LEX_CSTRING fk_name;
  LEX_CSTRING referencing_table_db;    ///< Schema of the child table
  LEX_CSTRING referencing_table_name;  ///< Name of the child table
  dd::Foreign_key::enum_rule update_rule, delete_rule;
} TABLE_SHARE_FOREIGN_KEY_PARENT_INFO;
/**
  Definition of name for generated keys, owned by TABLE_SHARE
*/
struct Key_name {
  // Fixed-size buffer for the key name. NOTE(review): whether a trailing
  // NUL is always present is not visible here — confirm at use sites.
  char name[NAME_CHAR_LEN];
};
/**
This structure is shared between different table objects. There is one
instance of table share per one table in the database.
*/
struct TABLE_SHARE {
TABLE_SHARE() = default;
/**
Create a new TABLE_SHARE with the given version number.
@param version the version of the TABLE_SHARE
@param secondary set to true if the TABLE_SHARE represents a table
in a secondary storage engine
*/
TABLE_SHARE(unsigned long version, bool secondary)
: m_version(version), m_secondary_engine(secondary) {}
/*
    Managed collection of reference-counted snapshots of histogram statistics
for the table. TABLE objects acquire/release pointers to histogram
statistics from this collection. A new statistics snapshot is inserted when
the share is initialized and when histograms are updated/dropped.
For temporary tables m_histograms should be nullptr since we do not support
histograms on temporary tables.
*/
Table_histograms_collection *m_histograms{nullptr};
/** Category of this table. */
TABLE_CATEGORY table_category{TABLE_UNKNOWN_CATEGORY};
MEM_ROOT mem_root;
/**
Used to allocate new handler for internal temporary table when the
size limitation of the primary storage engine is exceeded.
*/
MEM_ROOT *alloc_for_tmp_file_handler{nullptr};
TYPELIB keynames; /* Pointers to keynames */
TYPELIB *intervals{nullptr}; /* pointer to interval info */
mysql_mutex_t LOCK_ha_data; /* To protect access to ha_data */
TABLE_SHARE *next{nullptr}, **prev{nullptr}; /* Link to unused shares */
/**
Array of table_cache_instances pointers to elements of table caches
    representing this table in each of Table_cache instances.
Allocated along with the share itself in alloc_table_share().
Each element of the array is protected by Table_cache::m_lock in the
corresponding Table_cache. False sharing should not be a problem in
this case as elements of this array are supposed to be updated rarely.
*/
Table_cache_element **cache_element{nullptr};
/* The following is copied to each TABLE on OPEN */
Field **field{nullptr};
Field **found_next_number_field{nullptr};
KEY *key_info{nullptr}; /* data of keys defined for the table */
uint *blob_field{nullptr}; /* Index to blobs in Field array */
uchar *default_values{nullptr}; /* row with default values */
LEX_STRING comment{nullptr, 0}; /* Comment about table */
LEX_STRING compress{nullptr, 0}; /* Compression algorithm */
LEX_STRING encrypt_type{nullptr, 0}; /* encryption algorithm */
/** Secondary storage engine. */
LEX_CSTRING secondary_engine{nullptr, 0};
/** Secondary engine load status */
bool secondary_load{false};
const CHARSET_INFO *table_charset{
nullptr}; /* Default charset of string fields */
MY_BITMAP all_set;
/*
Key which is used for looking-up table in table cache and in the list
of thread's temporary tables. Has the form of:
"database_name\0table_name\0" + optional part for temporary tables.
Note that all three 'table_cache_key', 'db' and 'table_name' members
must be set (and be non-zero) for tables in table cache. They also
should correspond to each other.
To ensure this one can use set_table_cache() methods.
*/
LEX_CSTRING table_cache_key{nullptr, 0};
LEX_CSTRING db{nullptr, 0}; /* Pointer to db */
LEX_CSTRING table_name{nullptr, 0}; /* Table name (for open) */
LEX_STRING path{nullptr, 0}; /* Path to .frm file (from datadir) */
LEX_CSTRING normalized_path{nullptr, 0}; /* unpack_filename(path) */
LEX_STRING connect_string{nullptr, 0};
LEX_CSTRING engine_attribute = EMPTY_CSTR;
LEX_CSTRING secondary_engine_attribute = EMPTY_CSTR;
/**
The set of indexes that are not disabled for this table. I.e. it excludes
indexes disabled by `ALTER TABLE ... DISABLE KEYS`, however it does
include invisible indexes. The data dictionary populates this bitmap.
*/
Key_map keys_in_use;
/// The set of visible and enabled indexes for this table.
Key_map visible_indexes;
Key_map keys_for_keyread;
ha_rows min_rows{0}, max_rows{0}; /* create information */
ulong avg_row_length{0}; /* create information */
ulong mysql_version{0}; /* 0 if .frm is created before 5.0 */
ulong reclength{0}; /* Recordlength */
ulong stored_rec_length{0}; /* Stored record length
(no generated-only generated fields) */
ulonglong autoextend_size{0};
plugin_ref db_plugin{nullptr}; /* storage engine plugin */
/// Storage engine (handlerton) backing this table, or nullptr when no
/// storage-engine plugin has been assigned to the share yet.
inline handlerton *db_type() const /* table_type for handler */
{
  if (db_plugin == nullptr) return nullptr;
  return plugin_data<handlerton *>(db_plugin);
}
/**
Value of ROW_FORMAT option for the table as provided by user.
Can be different from the real row format used by the storage
engine. ROW_TYPE_DEFAULT value indicates that no explicit
ROW_FORMAT was specified for the table. @sa real_row_type.
*/
enum row_type row_type = {}; // Zero-initialized to ROW_TYPE_DEFAULT
/** Real row format used for the table by the storage engine. */
enum row_type real_row_type = {}; // Zero-initialized to ROW_TYPE_DEFAULT
tmp_table_type tmp_table{NO_TMP_TABLE};
/**
Only for internal temporary tables.
Count of TABLEs (having this TABLE_SHARE) which have a "handler"
(table->file!=nullptr) which is open (ha_open() has been called).
*/
uint tmp_handler_count{0};
/**
Only for internal temporary tables.
Count of TABLEs (having this TABLE_SHARE) which have opened this table.
*/
uint tmp_open_count{0};
// Can only be 1,2,4,8 or 16, but use uint32_t since that how it is
// represented in InnoDB
std::uint32_t key_block_size{0}; /* create key_block_size, if used */
uint stats_sample_pages{0}; /* number of pages to sample during
stats estimation, if used, otherwise 0. */
enum_stats_auto_recalc
stats_auto_recalc{}; /* Automatic recalc of stats.
Zero-initialized to HA_STATS_AUTO_RECALC_DEFAULT
*/
uint null_bytes{0}, last_null_bit_pos{0};
uint fields{0}; /* Number of fields */
uint rec_buff_length{0}; /* Size of table->record[] buffer */
uint keys{0}; /* Number of keys defined for the table*/
uint key_parts{0}; /* Number of key parts of all keys
defined for the table
*/
uint max_key_length{0}; /* Length of the longest key */
uint max_unique_length{0}; /* Length of the longest unique key */
uint total_key_length{0};
/**
Whether this is a temporary table that already has a UNIQUE index (removing
duplicate rows on insert), so that the optimizer does not need to run
DISTINCT itself. Also used for INTERSECT and EXCEPT as a fall-back if
hashing fails (secondary overflow of in-memory hash table, in which case
we revert to de-duplication using the unique key in the output table).
*/
bool is_distinct{false};
uint null_fields{0}; /* number of null fields */
uint blob_fields{0}; /* number of blob fields */
uint varchar_fields{0}; /* number of varchar fields */
/**
For materialized derived tables; @see add_derived_key().
'first' means: having the lowest position in key_info.
*/
uint first_unused_tmp_key{0};
/**
For materialized derived tables: allocated size of key_info array.
*/
uint max_tmp_keys{0};
/**
For materialized derived tables: allocated size of base_key_parts array of
all TABLE objects. Used for generated keys.
*/
uint max_tmp_key_parts{0};
/**
Array of names for generated keys, used for materialized derived tables.
Shared among all TABLE objects referring to this table share.
*/
Key_name *key_names{nullptr};
/**
Records per key array, used for materialized derived tables.
This is a contiguous array, with size given by max_tmp_key_parts.
The array is shared with all TABLE objects referring to this table share.
*/
ulong *base_rec_per_key{nullptr};
/**
Records per key array, float rep., used for materialized derived tables.
This is a contiguous array, with size given by max_tmp_key_parts.
The array is shared with all TABLE objects referring to this table share.
*/
rec_per_key_t *base_rec_per_key_float{nullptr};
/**
Bitmap with flags representing some of table options/attributes.
@sa HA_OPTION_PACK_RECORD, HA_OPTION_PACK_KEYS, ...
@note This is basically copy of HA_CREATE_INFO::table_options bitmap
at the time of table opening/usage.
*/
uint db_create_options{0};
/**
Bitmap with flags representing some of table options/attributes which
are in use by storage engine.
@note db_options_in_use is normally copy of db_create_options but can
be overridden by SE. E.g. MyISAM does this at handler::open() and
handler::info() time.
*/
uint db_options_in_use{0};
uint rowid_field_offset{0}; /* Field_nr +1 to rowid field */
// Primary key index number, used in TABLE::key_info[]. See
// is_missing_primary_key() for more details.
uint primary_key{0};
uint next_number_index{0}; /* autoincrement key number */
uint next_number_key_offset{0}; /* autoinc keypart offset in a key */
uint next_number_keypart{0}; /* autoinc keypart number in a key */
bool error{false}; /* error during open_table_def() */
uint column_bitmap_size{0};
/// Number of generated fields
uint vfields{0};
/// Number of fields having the default value generated
uint gen_def_field_count{0};
bool system{false}; /* Set if system table (one record) */
bool db_low_byte_first{false}; /* Portable row format */
bool crashed{false};
bool is_view{false};
/// Materialized view, materialized directly by a storage engine
bool is_mv_se_materialized{false};
bool m_open_in_progress{false}; /* True: alloc'ed, false: def opened */
mysql::binlog::event::Table_id table_map_id; /* for row-based replication */
/*
Cache for row-based replication table share checks that does not
need to be repeated. Possible values are: -1 when cache value is
not calculated yet, 0 when table *shall not* be replicated, 1 when
table *may* be replicated.
*/
int cached_row_logging_check{0};
/*
Storage media to use for this table (unless another storage
media has been specified on an individual column - in versions
where that is supported)
*/
ha_storage_media default_storage_media{HA_SM_DEFAULT};
/* Name of the tablespace used for this table */
const char *tablespace{nullptr};
/**
Partition meta data. Allocated from TABLE_SHARE::mem_root,
created when reading from the dd tables,
used as template for each TABLE instance.
The reason for having it on the TABLE_SHARE is to be able to reuse the
partition_elements containing partition names, values etc. instead of
allocating them for each TABLE instance.
TODO: Currently it is filled in and then only used for generating
the partition_info_str. The plan is to clone/copy/reference each
TABLE::part_info instance from it.
What is missing before it can be completed:
1) The partition expression, currently created only during parsing which
also needs the current TABLE instance as context for name resolution etc.
2) The partition values, currently the DD stores them as text so it needs
to be converted to field images (which is now done by first parsing the
value text into an Item, then saving the Item result/value into a field
and then finally copy the field image).
*/
partition_info *m_part_info{nullptr};
// TODO: Remove these four variables:
/**
Filled in when reading from frm.
This can simply be removed when removing the .frm support,
since it is already stored in the new DD.
*/
bool auto_partitioned{false};
/**
Storing the full partitioning clause (PARTITION BY ...) which is used
when creating new partition_info object for each new TABLE object by
parsing this string.
These two will be needed until the missing parts above is fixed.
*/
char *partition_info_str{nullptr};
uint partition_info_str_len{0};
/**
Cache the checked structure of this table.
The pointer data is used to describe the structure that
    an instance of the table must have. Each element of the
array specifies a field that must exist on the table.
The pointer is cached in order to perform the check only
once -- when the table is loaded from the disk.
*/
const TABLE_FIELD_DEF *table_field_def_cache{nullptr};
/** Main handler's share */
Handler_share *ha_share{nullptr};
/** Instrumentation for this table share. */
PSI_table_share *m_psi{nullptr};
/**
List of tickets representing threads waiting for the share to be flushed.
*/
Wait_for_flush_list m_flush_tickets;
/**
View object holding view definition read from DD. This object is not
cached, and is owned by the table share. We are not able to read it
on demand since we may then get a cache miss while holding LOCK_OPEN.
*/
const dd::View *view_object{nullptr};
/**
Data-dictionary object describing explicit temporary table represented
by this share. NULL for other table types (non-temporary tables, internal
temporary tables). This object is owned by TABLE_SHARE and should be
deleted along with it.
*/
dd::Table *tmp_table_def{nullptr};
/// For materialized derived tables; @see add_derived_key().
Query_block *owner_of_possible_tmp_keys{nullptr};
/**
Arrays with descriptions of foreign keys in which this table participates
as child or parent. We only cache in them information from dd::Table object
which is sufficient for use by prelocking algorithm/to check if table is
referenced by a foreign key.
*/
uint foreign_keys{0};
TABLE_SHARE_FOREIGN_KEY_INFO *foreign_key{nullptr};
uint foreign_key_parents{0};
TABLE_SHARE_FOREIGN_KEY_PARENT_INFO *foreign_key_parent{nullptr};
// List of check constraint share instances.
Sql_check_constraint_share_list *check_constraint_share_list{nullptr};
/**
List of trigger descriptions for the table loaded from the data-dictionary.
Is nullptr if the table doesn't have triggers.
@note The purpose of the Trigger objects in this list is to serve as
template for per-TABLE-object Trigger objects as well as to
store static metadata that may be shared between Trigger instances.
The triggers in this list can't be executed directly.
*/
List<Trigger> *triggers{nullptr};
/**
Schema's read only mode - ON (true) or OFF (false). This is filled in
when the share is initialized with meta data from DD. If the schema is
altered, the tables and share are removed. This can be done since
ALTER SCHEMA acquires exclusive meta data locks on the tables in the
schema. We set this only for non-temporary tables. Otherwise, the value
of the member below is 'NOT_SET'.
*/
enum class Schema_read_only { NOT_SET, RO_OFF, RO_ON };
Schema_read_only schema_read_only{Schema_read_only::NOT_SET};
/**
  Set share's table cache key and update its db and table name appropriately.

  @param key_buff    Buffer with already built table cache key to be
                     referenced from share.
  @param key_length  Key length.

  @note
    Since 'key_buff' buffer will be referenced from share it should have the
    same life-time as share itself.
    This method automatically ensures that TABLE_SHARE::table_name/db have
    appropriate values by using table cache key as their source.
*/
void set_table_cache_key(char *key_buff, size_t key_length) {
  table_cache_key.str = key_buff;
  table_cache_key.length = key_length;
  /*
    Let us use the fact that the key is "db/0/table_name/0" + optional
    part for temporary tables.
  */
  db.str = table_cache_key.str;
  db.length = strlen(db.str);
  table_name.str = db.str + db.length + 1;
  table_name.length = strlen(table_name.str);
}
/**
  Set share's table cache key and update its db and table name appropriately.

  @param key_buff    Buffer to be used as storage for table cache key
                     (should be at least key_length bytes).
  @param key         Value for table cache key.
  @param key_length  Key length.

  NOTE
    Since 'key_buff' buffer will be used as storage for table cache key
    it should have the same life-time as share itself.
*/
void set_table_cache_key(char *key_buff, const char *key, size_t key_length) {
  // Copy the key into the caller-provided storage, then delegate to the
  // two-argument overload which also derives db/table_name from it.
  memcpy(key_buff, key, key_length);
  set_table_cache_key(key_buff, key_length);
}
/// Table definition version; the same value as the table map id.
/// @sa get_table_ref_version().
ulonglong get_table_def_version() const { return table_map_id; }

/** Returns the version of this TABLE_SHARE. */
unsigned long version() const { return m_version; }

/**
  Set the version of this TABLE_SHARE to zero. This marks the
  TABLE_SHARE for automatic removal from the table definition cache
  once it is no longer referenced.
*/
void clear_version();

/** Is this table share being expelled from the table definition cache?
    True when our version no longer matches the global refresh_version. */
bool has_old_version() const { return version() != refresh_version; }
/**
  Convert unrelated members of TABLE_SHARE to one enum
  representing its type.

  @todo perhaps we need to have a member instead of a function.
*/
enum enum_table_ref_type get_table_ref_type() const {
  // A view takes precedence over any temporary-table classification.
  if (is_view) return TABLE_REF_VIEW;
  if (tmp_table == NO_TMP_TABLE) return TABLE_REF_BASE_TABLE;
  if (tmp_table == SYSTEM_TMP_TABLE) return TABLE_REF_I_S_TABLE;
  // Every remaining tmp_table value is some kind of temporary table.
  return TABLE_REF_TMP_TABLE;
}
/**
Return a table metadata version.
* for base tables and views, we return table_map_id.
It is assigned from a global counter incremented for each
new table loaded into the table definition cache (TDC).
* for temporary tables it's table_map_id again. But for
temporary tables table_map_id is assigned from
thd->query_id. The latter is assigned from a thread local
counter incremented for every new SQL statement. Since
temporary tables are thread-local, each temporary table
gets a unique id.
* for everything else (e.g. information schema tables),
the version id is zero.
This choice of version id is a large compromise
to have a working prepared statement validation in 5.1. In
future version ids will be persistent, as described in WL#4180.
Let's try to explain why and how this limited solution allows
to validate prepared statements.
Firstly, sets (in mathematical sense) of version numbers
never intersect for different table types. Therefore,
version id of a temporary table is never compared with
a version id of a view, and vice versa.
Secondly, for base tables and views, we know that each DDL flushes
the respective share from the TDC. This ensures that whenever
a table is altered or dropped and recreated, it gets a new
version id.
Unfortunately, since elements of the TDC are also flushed on
LRU basis, this choice of version ids leads to false positives.
E.g. when the TDC size is too small, we may have a SELECT
* FROM INFORMATION_SCHEMA.TABLES flush all its elements, which
in turn will lead to a validation error and a subsequent
reprepare of all prepared statements. This is
considered acceptable, since as long as prepared statements are
automatically reprepared, spurious invalidation is only
a performance hit. Besides, no better simple solution exists.
For temporary tables, using thd->query_id ensures that if
a temporary table was altered or recreated, a new version id is
assigned. This suits validation needs very well and will perhaps
never change.
Metadata of information schema tables never changes.
Thus we can safely assume 0 for a good enough version id.
Finally, by taking into account table type, we always
track that a change has taken place when a view is replaced
with a base table, a base table is replaced with a temporary
table and so on.
@retval 0 For schema tables, DD tables and system views.
non-0 For base tables, views and temporary tables.
@sa Table_ref::is_table_ref_id_equal()
*/
ulonglong get_table_ref_version() const;
/** Determine if the table is missing a PRIMARY KEY. */
bool is_missing_primary_key() const {
assert(primary_key <= MAX_KEY);
return primary_key == MAX_KEY;
}
  /// Find the first unused temporary-table key among @p k.
  /// NOTE(review): behavior inferred from the name; definition not in view.
  uint find_first_unused_tmp_key(const Key_map &k);
  /// Deadlock-graph visitor hook used while waiting for this share to be
  /// flushed (definition not in view).
  bool visit_subgraph(Wait_for_flush *waiting_ticket,
                      MDL_wait_for_graph_visitor *gvisitor);
  /// Wait until the old version of this share goes away, or @p abstime
  /// expires (definition not in view).
  bool wait_for_old_version(THD *thd, struct timespec *abstime,
                            uint deadlock_weight);
/**
The set of indexes that the optimizer may use when creating an execution
plan.
*/
Key_map usable_indexes(const THD *thd) const;
/** Release resources and free memory occupied by the table share. */
void destroy();
/**
How many TABLE objects use this TABLE_SHARE.
@return the reference count
*/
unsigned int ref_count() const {
assert(assert_ref_count_is_locked(this));
return m_ref_count;
}
/**
Increment the reference count by one.
@return the new reference count
*/
unsigned int increment_ref_count() {
assert(assert_ref_count_is_locked(this));
assert(!m_open_in_progress);
return ++m_ref_count;
}
/**
Decrement the reference count by one.
@return the new reference count
*/
unsigned int decrement_ref_count() {
assert(assert_ref_count_is_locked(this));
assert(!m_open_in_progress);
assert(m_ref_count > 0);
return --m_ref_count;
}
  /// Does this TABLE_SHARE represent a table in a primary storage engine?
  bool is_primary_engine() const { return !m_secondary_engine; }
  /// Does this TABLE_SHARE represent a table in a secondary storage engine?
  bool is_secondary_engine() const { return m_secondary_engine; }
  /**
    Does this TABLE_SHARE represent a primary table that has a shadow
    copy in a secondary storage engine?
    True only for primary-engine shares whose secondary engine name is set.
  */
  bool has_secondary_engine() const {
    return is_primary_engine() && secondary_engine.str != nullptr;
  }
  /** Returns whether this table is referenced by a foreign key. */
  bool is_referenced_by_foreign_key() const { return foreign_key_parents != 0; }
 private:
  /// How many TABLE objects use this TABLE_SHARE.
  unsigned int m_ref_count{0};
  /**
    TABLE_SHARE version, if changed the TABLE_SHARE must be reopened.
    NOTE(review): presumably the value behind get_table_ref_version() —
    confirm against its definition, which is not in view.
    NOTE: The TABLE_SHARE will not be reopened during LOCK TABLES in
    close_thread_tables!!!
  */
  unsigned long m_version{0};

 protected:  // To allow access from unit tests.
  /// Does this TABLE_SHARE represent a table in a secondary storage engine?
  bool m_secondary_engine{false};
};
/**
Class is used as a BLOB field value storage for
intermediate GROUP_CONCAT results. Used only for
GROUP_CONCAT with DISTINCT or ORDER BY options.
*/
class Blob_mem_storage {
 private:
  /// MEM_ROOT on which the copied values are allocated.
  MEM_ROOT storage;
  /**
    Sign that some values were cut
    during saving into the storage.
  */
  bool truncated_value;

 public:
  Blob_mem_storage();
  ~Blob_mem_storage();

  /// Free the stored values (keeping the MEM_ROOT ready for reuse) and
  /// clear the truncation flag.
  void reset() {
    storage.ClearForReuse();
    truncated_value = false;
  }
  /**
    Function creates duplicate of 'from'
    string in 'storage' MEM_ROOT.
    @param from   string to copy
    @param length string length
    @retval Pointer to the copied string.
    @retval 0 if an error occurred.
  */
  char *store(const char *from, size_t length) {
    return (char *)memdup_root(&storage, from, length);
  }
  /// Record whether some value was cut while being stored.
  void set_truncated_value(bool is_truncated_value) {
    truncated_value = is_truncated_value;
  }
  /// @return true if some stored value was cut during saving.
  bool is_truncated_value() const { return truncated_value; }
};
/**
Class that represents a single change to a column value in partial
update of a JSON column.
*/
class Binary_diff final {
  /// The offset (in bytes) of the start of the change.
  size_t m_offset;
  /// The size (in bytes) of the portion that is to be replaced.
  size_t m_length;

 public:
  /**
    Create a new Binary_diff object.
    @param offset the offset of the beginning of the change
    @param length the length of the section that is to be replaced
  */
  Binary_diff(size_t offset, size_t length)
      : m_offset(offset), m_length(length) {}

  /// @return the offset of the changed data
  size_t offset() const { return m_offset; }

  /// @return the length of the changed data
  size_t length() const { return m_length; }

  /**
    Get a pointer to the start of the replacement data.
    @param field the column that is updated
    @return a pointer to the start of the replacement data
  */
  const char *new_data(const Field *field) const;

  /**
    Get a pointer to the start of the old data to be replaced.
    @param field the column that is updated
    @return a pointer to the start of old data to be replaced.
  */
  const char *old_data(const Field *field) const;
};
/**
Vector of Binary_diff objects.
The Binary_diff objects in the vector should be ordered on offset, and none
of the diffs should be overlapping or adjacent.
*/
using Binary_diff_vector = Mem_root_array<Binary_diff>;
/**
  Flags for TABLE::m_status (maximum 8 bits).
  The flags define the state of the row buffer in TABLE::record[0].
  Note: bit values 4 and 8 are currently not assigned to any flag.
*/
/**
  STATUS_NOT_STARTED is set when table is not accessed yet.
  Neither STATUS_NOT_FOUND nor STATUS_NULL_ROW can be set when this flag is set.
*/
#define STATUS_NOT_STARTED 1
/**
  Means we were searching for a row and didn't find it. This is used by
  storage engines (@see handler::index_read_map()) and the executor, both
  when doing an exact row lookup and advancing a scan (no more rows in range).
*/
#define STATUS_NOT_FOUND 2
/// Reserved for use by multi-table update. Means the row has been updated.
#define STATUS_UPDATED 16
/**
  Means that table->null_row is set. This is an artificial NULL-filled row
  (one example: in outer join, if no match has been found in inner table).
*/
#define STATUS_NULL_ROW 32
/// Reserved for use by multi-table delete. Means the row has been deleted.
#define STATUS_DELETED 64
/* Information for one open table */
enum index_hint_type { INDEX_HINT_IGNORE, INDEX_HINT_USE, INDEX_HINT_FORCE };
/* Bitmap of table's fields */
typedef Bitmap<MAX_FIELDS> Field_map;
/*
NOTE: Despite being a struct (for historical reasons), TABLE has
a nontrivial destructor.
*/
struct TABLE {
TABLE_SHARE *s{nullptr};
handler *file{nullptr};
TABLE *next{nullptr}, *prev{nullptr};
private:
/**
Links for the lists of used/unused TABLE objects for the particular
table in the specific instance of Table_cache (in other words for
specific Table_cache_element object).
Declared as private to avoid direct manipulation with those objects.
One should use methods of I_P_List template instead.
*/
TABLE *cache_next{nullptr}, **cache_prev{nullptr};
/*
Give Table_cache_element access to the above two members to allow
using them for linking TABLE objects in a list.
*/
friend class Table_cache_element;
/**
Links for the LRU list of unused TABLE objects with fully loaded triggers
in the specific instance of Table_cache.
*/
TABLE *triggers_lru_next{nullptr}, **triggers_lru_prev{nullptr};
/*
Give Table_cache access to the above two members to allow using them
for linking TABLE objects in a list.
*/
friend class Table_cache;
public:
// Pointer to the histograms available on the table.
// Protected in the same way as the pointer to the share.
const Table_histograms *histograms{nullptr};
/**
A bitmap marking the hidden generated columns that exists for functional
indexes.
*/
MY_BITMAP fields_for_functional_indexes;
/**
The current session using this table object.
Should be NULL when object is not in use.
For an internal temporary table, it is NULL when the table is closed.
Used for two purposes:
- Signal that the object is in use, and by which session.
- Pass the thread handler to storage handlers.
The field should NOT be used as a general THD reference, instead use
a passed THD reference, or, if there is no such, current_thd.
The reason for this is that we cannot guarantee the field is not NULL.
*/
THD *in_use{nullptr};
Field **field{nullptr}; /* Pointer to fields */
/// Count of hidden fields, if internal temporary table; 0 otherwise.
uint hidden_field_count{0};
uchar *record[2]{nullptr, nullptr}; /* Pointer to records */
uchar *write_row_record{nullptr}; /* Used as optimisation in
THD::write_row */
uchar *insert_values{nullptr}; /* used by INSERT ... UPDATE */
/// Buffer for use in multi-row reads. Initially empty.
Record_buffer m_record_buffer{0, 0, nullptr};
/*
Map of keys that can be used to retrieve all data from this table needed by
the query without reading the row.
Note that the primary clustered key is treated as any other key, so for a
table t with a primary key column p and a second column c, the primary key
will be marked as covering for the query "SELECT p FROM t", but will not be
marked as covering for the query "SELECT p, c FROM t" (even though we can in
some sense retrieve the data from the index).
*/
Key_map covering_keys;
Key_map quick_keys;
/* Merge keys are all keys that had a column referred to in the query */
Key_map merge_keys;
/*
possible_quick_keys is a superset of quick_keys to use with EXPLAIN of
JOIN-less commands (single-table UPDATE and DELETE).
When explaining regular JOINs, we use JOIN_TAB::keys to output the
"possible_keys" column value. However, it is not available for
single-table UPDATE and DELETE commands, since they don't use JOIN
optimizer at the top level. OTOH they directly use the range optimizer,
that collects all keys usable for range access here.
*/
Key_map possible_quick_keys;
/*
A set of keys that can be used in the query that references this
table.
All indexes disabled on the table's TABLE_SHARE (see TABLE::s) will be
subtracted from this set upon instantiation. Thus for any TABLE t it holds
that t.keys_in_use_for_query is a subset of t.s.keys_in_use. Generally we
must not introduce any new keys here (see setup_tables).
The set is implemented as a bitmap.
*/
Key_map keys_in_use_for_query;
/* Map of keys that can be used to calculate GROUP BY without sorting */
Key_map keys_in_use_for_group_by;
/* Map of keys that can be used to calculate ORDER BY without sorting */
Key_map keys_in_use_for_order_by;
KEY *key_info{nullptr}; /* data of keys defined for the table */
/**
Key part array for generated keys, used for materialized derived tables.
This is a contiguous array, with size given by s->max_tmp_key_parts.
*/
KEY_PART_INFO *base_key_parts{nullptr};
Field *next_number_field{nullptr}; /* Set if next_number is activated */
Field *found_next_number_field{nullptr}; /* Set on open */
/// Pointer to generated columns
Field **vfield{nullptr};
/// Pointer to fields having the default value generated
Field **gen_def_fields_ptr{nullptr};
/// Field used by unique constraint
Field *hash_field{nullptr};
// ----------------------------------------------------------------------
// The next few members are used if this (temporary) file is used solely for
// the materialization/computation of an INTERSECT or EXCEPT set operation
// (in addition to hash_field above used to detect duplicate rows). For
// INTERSECT and EXCEPT, we always use the hash field and compute the shape
// of the result set using m_set_counter. The latter is a hidden field
// located between the hash field and the row proper, only present for
// INTERSECT or EXCEPT materialized in a temporary result table. The
// materialized table has no duplicate rows, relying instead of the embedded
// counter to produce the correct number of duplicates with ALL semantics. If
// we have distinct semantics, we squash duplicates. This all happens in the
// reading step of the tmp table (TableScanIterator::Read),
// cf. m_last_operation_is_distinct. For explanation if the logic of the set
// counter, see MaterializeIterator<Profiler>::MaterializeOperand.
//
/// A priori unlimited. We pass this on to TableScanIterator at construction
/// time, q.v., to limit the number of rows out of an EXCEPT or INTERSECT.
/// For these set operations, we do not know enough to enforce the limit at
/// materialize time (as for UNION): only when reading the rows with
/// TableScanIterator do we check the counters.
/// @todo: Ideally, this limit should be communicated to TableScanIterator in
/// some other way.
ha_rows m_limit_rows{HA_POS_ERROR};
private:
/// The set counter. It points to the field in the materialized table
/// holding the counter used to compute INTERSECT and EXCEPT, in record[0].
/// For EXCEPT [DISTINCT | ALL] and INTERSECT DISTINCT this is a simple 64
/// bits counter. For INTERSECT ALL, it is subdivided into two sub counters
/// cf. class HalfCounter, cf. MaterializeOperand. See set_counter().
Field_longlong *m_set_counter{nullptr};
/// If m_set_counter is set: true if last block has DISTINCT semantics,
/// either because it is marked as such, or because we have computed this
/// to give an equivalent answer. If false, we have ALL semantics.
/// It will be true if any DISTINCT is given in the merged N-ary set
/// operation. See is_distinct().
bool m_last_operation_is_distinct{false};
/// If false, any de-duplication happens via an index on this table
/// (e.g. SELECT DISTINCT, set operation). If true, this table represents the
/// output of a set operation, and de-duplication happens via an in-memory
/// hash map, in which case we do not use any index, unless we get secondary
/// overflow.
bool m_deduplicate_with_hash_map{false};
public:
/// True if character set conversions are always strict
bool m_charset_conversion_is_strict{false};
enum Set_operator_type {
SOT_NONE,
SOT_UNION_ALL,
SOT_UNION_DISTINCT,
SOT_INTERSECT_ALL,
SOT_INTERSECT_DISTINCT,
SOT_EXCEPT_ALL,
SOT_EXCEPT_DISTINCT
};
private:
/// Holds the set operation type
Set_operator_type m_set_op_type{SOT_NONE};
public:
  /// Test if this tmp table stores the result of a UNION set operation or
  /// a single table (i.e. no set counter field is present).
  /// @return true if so, else false.
  bool is_union_or_table() const { return m_set_counter == nullptr; }
  /// Select hash-map based de-duplication for this table (vs. index based).
  void set_use_hash_map(bool use_hash_map) {
    m_deduplicate_with_hash_map = use_hash_map;
  }
  /// @return true if de-duplication uses an in-memory hash map.
  bool uses_hash_map() const { return m_deduplicate_with_hash_map; }
/// Returns the set operation type
Set_operator_type set_op_type() {
if (m_set_op_type == SOT_NONE) {
assert(is_union_or_table()); // EXCEPT and INTERSECT are already set up
m_set_op_type = is_distinct() ? SOT_UNION_DISTINCT : SOT_UNION_ALL;
}
return m_set_op_type;
}
bool is_intersect() const {
return m_set_op_type == SOT_INTERSECT_ALL ||
m_set_op_type == SOT_INTERSECT_DISTINCT;
}
bool is_except() const {
return m_set_op_type == SOT_EXCEPT_ALL ||
m_set_op_type == SOT_EXCEPT_DISTINCT;
}
  /// @return true if the last operation block has DISTINCT semantics.
  bool is_distinct() const { return m_last_operation_is_distinct; }
/**
Initialize the set counter field pointer and the type of set operation
*other than UNION*.
@param set_counter the field in the materialized table that holds the
counter we use to compute intersect or except
@param except if true, EXCEPT, else INTERSECT
@param distinct if true, the set operation is DISTINCT, else ALL
*/
void set_set_op(Field_longlong *set_counter, bool except, bool distinct) {
m_set_counter = set_counter;
m_last_operation_is_distinct = distinct;
assert(m_set_op_type == SOT_NONE);
m_set_op_type = except ? (distinct ? SOT_EXCEPT_DISTINCT : SOT_EXCEPT_ALL)
: distinct ? SOT_INTERSECT_DISTINCT
: SOT_INTERSECT_ALL;
}
  /// @return the hidden set-operation counter field, or nullptr for a
  /// UNION / single-table materialization (cf. is_union_or_table()).
  Field_longlong *set_counter() { return m_set_counter; }
//
// end of INTERSECT and EXCEPT specific members
// ----------------------------------------------------------------------
Field *fts_doc_id_field{nullptr}; /* Set if FTS_DOC_ID field is present */
/* Table's triggers, 0 if there are no of them */
Table_trigger_dispatcher *triggers{nullptr};
Table_ref *pos_in_table_list{nullptr}; /* Element referring to this table */
/* Position in thd->locked_table_list under LOCK TABLES */
Table_ref *pos_in_locked_tables{nullptr};
ORDER *group{nullptr};
const char *alias{nullptr}; ///< alias or table name
/* foreign key name for which handle is open */
const char *open_for_fk_name{nullptr};
uchar *null_flags{nullptr}; ///< Pointer to the null flags of record[0]
uchar *null_flags_saved{
nullptr}; ///< Saved null_flags while null_row is true
/* containers */
MY_BITMAP def_read_set, def_write_set, tmp_set, pack_row_tmp_set;
/*
Bitmap of fields that one or more query condition refers to. Only
used if optimizer_condition_fanout_filter is turned 'on'.
Currently, only the WHERE clause and ON clause of inner joins is
taken into account but not ON conditions of outer joins.
Furthermore, HAVING conditions apply to groups and are therefore
not useful as table condition filters.
*/
MY_BITMAP cond_set;
/**
Bitmap of table fields (columns), which are explicitly set in the
INSERT INTO statement. It is declared here to avoid memory allocation
on MEM_ROOT).
@sa fields_set_during_insert.
*/
MY_BITMAP def_fields_set_during_insert;
/**
The read set contains the set of columns that the execution engine needs to
process the query. In particular, it is used to tell the storage engine
which columns are needed. For virtual generated columns, the underlying base
columns are also added, since they are required in order to calculate the
virtual generated columns.
Internal operations in the execution engine that need to move rows between
buffers, such as aggregation, sorting, hash join and set operations, should
rather use read_set_internal, since the virtual generated columns have
already been calculated when the row was read from the storage engine.
Set during resolving; every field that gets resolved, sets its own bit
in the read set. In some cases, we switch the read set around during
various phases; note that it is a pointer.
In addition, for binary logging purposes, the bitmaps are set according
to the settings of @@binlog_row_image. Therefore, for logging purposes,
some additional fields, to those specified by the optimizer, may be
flagged in the read and write sets.
@c TABLE::mark_columns_per_binlog_row_image for additional details.
*/
MY_BITMAP *read_set{nullptr};
MY_BITMAP *write_set{nullptr};
/**
A bitmap of fields that are explicitly referenced by the query. This is
mostly the same as read_set, but it does not include base columns of
referenced virtual generated columns unless the base columns are referenced
explicitly in the query.
This is the read set that should be used for determining which columns to
store in join buffers, aggregation buffers, sort buffers, or similar
operations internal to the execution engine. Both because it is unnecessary
to store the implicitly read base columns in the buffer, since they won't
ever be read out of the buffer anyways, and because the base columns may not
even be possible to read, if a covering index scan is used and the index
only contains the virtual column and not all its base columns.
*/
MY_BITMAP read_set_internal;
/**
A pointer to the bitmap of table fields (columns), which are explicitly set
in the INSERT INTO statement.
fields_set_during_insert points to def_fields_set_during_insert
for base (non-temporary) tables. In other cases, it is NULL.
Triggers can not be defined for temporary tables, so this bitmap does not
matter for temporary tables.
@sa def_fields_set_during_insert.
*/
MY_BITMAP *fields_set_during_insert{nullptr};
/*
The ID of the query that opened and is using this table. Has different
meanings depending on the table type.
Temporary tables:
table->query_id is set to thd->query_id for the duration of a statement
and is reset to 0 once it is closed by the same statement. A non-zero
table->query_id means that a statement is using the table even if it's
not the current statement (table is in use by some outer statement).
Non-temporary tables:
Under pre-locked or LOCK TABLES mode: query_id is set to thd->query_id
for the duration of a statement and is reset to 0 once it is closed by
the same statement. A non-zero query_id is used to control which tables
in the list of pre-opened and locked tables are actually being used.
*/
query_id_t query_id{0};
/*
For each key that has quick_keys.is_set(key) == true: estimate of #records
and max #key parts that range access would use.
*/
ha_rows quick_rows[MAX_KEY]{0};
/* Bitmaps of key parts that =const for the entire join. */
key_part_map const_key_parts[MAX_KEY]{0};
uint quick_key_parts[MAX_KEY]{0};
uint quick_n_ranges[MAX_KEY]{0};
/*
Estimate of number of records that satisfy SARGable part of the table
condition, or table->file->records if no SARGable condition could be
constructed.
This value is used by join optimizer as an estimate of number of records
that will pass the table condition (condition that depends on fields of
this table and constants)
*/
ha_rows quick_condition_rows{0};
uint lock_position{0}; /* Position in MYSQL_LOCK.table */
uint lock_data_start{0}; /* Start pos. in MYSQL_LOCK.locks */
uint lock_count{0}; /* Number of locks */
uint db_stat{0}; /* mode of file as in handler.h */
int current_lock{0}; /* Type of lock on table */
// List of table check constraints.
Sql_table_check_constraint_list *table_check_constraint_list{nullptr};
private:
/**
If true, this table is inner w.r.t. some outer join operation, all columns
are nullable (in the query), and null_row may be true.
*/
bool nullable{false};
uint8 m_status{0}; /* What's in record[0] */
public:
/*
If true, the current table row is considered to have all columns set to
NULL, including columns declared as "not null" (see nullable).
@todo make it private, currently join buffering changes it through a pointer
*/
bool null_row{false};
bool copy_blobs{false}; /* copy_blobs when storing */
/*
TODO: Each of the following flags take up 8 bits. They can just as easily
be put into one single unsigned long and instead of taking up 18
bytes, it would take up 4.
*/
bool force_index{false};
/**
Flag set when the statement contains FORCE INDEX FOR ORDER BY
See Table_ref::process_index_hints().
*/
bool force_index_order{false};
/**
Flag set when the statement contains FORCE INDEX FOR GROUP BY
See Table_ref::process_index_hints().
*/
bool force_index_group{false};
bool const_table{false};
/// True if writes to this table should not write rows and just write keys.
bool no_rows{false};
/**
If set, the optimizer has found that row retrieval should access index
tree only.
*/
bool key_read{false};
/**
Certain statements which need the full row, set this to ban index-only
access.
*/
bool no_keyread{false};
/**
If set, indicate that the table is not replicated by the server.
*/
bool no_replicate{false};
/* To signal that the table is associated with a HANDLER statement */
bool open_by_handler{false};
/**
To indicate that value of the auto_increment field was provided
explicitly by the user or from some other source (e.g. in case of
INSERT ... SELECT, ALTER TABLE or LOAD DATA) and not as default
or result of conversion from NULL value.
@note Since auto_increment fields are always non-NULL we can't find
out using methods of Field class if 0 value stored in such field
was provided explicitly or is result of applying default/conversion
from NULL value. In the former case no new auto_increment value
needs to be generated in MODE_NO_AUTO_VALUE_ON_ZERO mode, while
the latter cases require new value generation. Hence the need
for this flag.
@note Used only in the MODE_NO_AUTO_VALUE_ON_ZERO mode and only
by handler::write_row().
*/
bool autoinc_field_has_explicit_non_null_value{false};
bool alias_name_used{false}; /* true if table_name is alias */
bool get_fields_in_item_tree{false}; /* Signal to fix_field */
private:
/**
This TABLE object is invalid and cannot be reused. TABLE object might have
inconsistent info or handler might not allow some operations.
For example, TABLE might have inconsistent info about partitioning.
We also use this flag to avoid calling handler::reset() for partitioned
InnoDB tables after in-place ALTER TABLE API commit phase and to force
closing table after REPAIR TABLE has failed during its prepare phase as
well.
@note This member can be set only by thread that owns/has opened the
table and while holding its THD::LOCK_thd_data lock.
It can be read without locking by this owner thread, or by some other
thread concurrently after acquiring owner's THD::LOCK_thd_data.
@note The TABLE will not be reopened under LOCK TABLES in
close_thread_tables().
*/
bool m_invalid_dict{false};
/**
This TABLE object is invalid and cannot be reused as it has outdated
rec_per_key and handler stats.
@note This member is protected from concurrent access to it by lock of
Table Cache's partition to which this TABLE object belongs,
*/
bool m_invalid_stats{false};
/**
For tmp tables. true <=> tmp table has been instantiated.
Also indicates that table was successfully opened since
we immediately delete tmp tables which we fail to open.
*/
bool created{false};
public:
/// For a materializable derived or SJ table: true if has been materialized
bool materialized{false};
struct /* field connections */
{
class JOIN_TAB *join_tab{nullptr};
class QEP_TAB *qep_tab{nullptr};
thr_lock_type lock_type{TL_UNLOCK}; /* How table is used */
bool not_exists_optimize{false};
/*
true <=> range optimizer found that there is no rows satisfying
table conditions.
*/
bool impossible_range{false};
} reginfo;
/**
@todo This member should not be declared in-line. That makes it
impossible for any function that does memory allocation to take a const
reference to a TABLE object.
*/
MEM_ROOT mem_root;
/**
Initialized in Item_func_group_concat::setup for appropriate
temporary table if GROUP_CONCAT is used with ORDER BY | DISTINCT
and BLOB field count > 0.
*/
Blob_mem_storage *blob_storage{nullptr};
/**
Not owned by the TABLE; used only from filesort_free_buffers().
See comments on SortingIterator::CleanupAfterQuery().
*/
SortingIterator *sorting_iterator{nullptr};
SortingIterator *duplicate_removal_iterator{nullptr};
/**
The result of applying a unique operation (by row ID) to the table, if done.
In particular, this is done in some forms of index merge.
*/
Sort_result unique_result;
partition_info *part_info{nullptr}; /* Partition related information */
/* If true, all partitions have been pruned away */
bool all_partitions_pruned_away{false};
MDL_ticket *mdl_ticket{nullptr};
private:
/// Cost model object for operations on this table
Cost_model_table m_cost_model;
/// Estimate for the amount of data to read per row fetched from this table.
/// The estimate is only calculated when using the hypergraph optimizer.
const BytesPerTableRow *m_bytes_per_row{nullptr};
#ifndef NDEBUG
/**
Internal tmp table sequential number. Increased in the order of
creation. Used for debugging purposes when many tmp tables are used
during execution (e.g several windows with window functions)
*/
uint tmp_table_seq_id{0};
#endif
public:
void reset();
void init(THD *thd, Table_ref *tl);
bool init_tmp_table(THD *thd, TABLE_SHARE *share, MEM_ROOT *m_root,
CHARSET_INFO *charset, const char *alias, Field **fld,
uint *blob_fld, bool is_virtual);
bool fill_item_list(mem_root_deque<Item *> *item_list) const;
void clear_column_bitmaps(void);
void prepare_for_position(void);
void mark_column_used(Field *field, enum enum_mark_columns mark);
void mark_columns_used_by_index_no_reset(uint index, MY_BITMAP *map,
uint key_parts = 0) const;
void mark_columns_used_by_index(uint index);
void mark_auto_increment_column(void);
void mark_columns_needed_for_update(THD *thd, bool mark_binlog_columns);
void mark_columns_needed_for_delete(THD *thd);
void mark_columns_needed_for_insert(THD *thd);
void mark_columns_per_binlog_row_image(THD *thd);
void mark_generated_columns(bool is_update);
void mark_gcol_in_maps(const Field *field);
void mark_check_constraint_columns(bool is_update);
  /// Set the active read and write bitmap pointers. Defined out of line;
  /// presumably does extra signalling compared to the _no_signal variant —
  /// confirm against its definition.
  void column_bitmaps_set(MY_BITMAP *read_set_arg, MY_BITMAP *write_set_arg);
  /// Set the active read and write bitmap pointers, with no further action.
  inline void column_bitmaps_set_no_signal(MY_BITMAP *read_set_arg,
                                           MY_BITMAP *write_set_arg) {
    read_set = read_set_arg;
    write_set = write_set_arg;
  }
  /// Use the share's all-set bitmap for both reading and writing.
  inline void use_all_columns() {
    column_bitmaps_set(&s->all_set, &s->all_set);
  }
  /// Point the active bitmaps back at this table's default bitmaps.
  inline void default_column_bitmaps() {
    read_set = &def_read_set;
    write_set = &def_write_set;
  }
void invalidate_dict();
void invalidate_stats();
  /**
    @note Can be called by thread owning table without additional locking, and
    by any other thread which has acquired owner's THD::LOCK_thd_data lock.
  */
  inline bool has_invalid_dict() const {
    assert(assert_invalid_dict_is_locked(this));
    // A table without open mode flags (db_stat == 0) counts as invalid too.
    return !db_stat || m_invalid_dict;
  }
  /// @note Can be called by thread owning Table_cache::m_lock
  inline bool has_invalid_stats() {
    assert(assert_invalid_stats_is_locked(this));
    return m_invalid_stats;
  }
  /// @returns first non-hidden column (hidden fields precede visible ones
  /// in the field array).
  Field **visible_field_ptr() const { return field + hidden_field_count; }
  /// @returns count of visible fields
  uint visible_field_count() const { return s->fields - hidden_field_count; }
bool alloc_tmp_keys(uint new_key_count, uint new_key_part_count,
bool modify_share);
bool add_tmp_key(Field_map *key_parts, bool invisible, bool modify_share);
void move_tmp_key(int old_idx, bool modify_share);
void drop_unused_tmp_keys(bool modify_share);
void set_keyread(bool flag);
  /**
    Check whether the given index has a virtual generated columns.
    @param index_no the given index to check
    @returns true if index is defined over at least one virtual generated
    column
  */
  inline bool index_contains_some_virtual_gcol(uint index_no) const {
    assert(index_no < s->keys);
    return key_info[index_no].flags & HA_VIRTUAL_GEN_KEY;
  }
void update_const_key_parts(Item *conds);
bool check_read_removal(uint index);
ptrdiff_t default_values_offset() const {
return (ptrdiff_t)(s->default_values - record[0]);
}
/// @returns true if a storage engine handler object is assigned to table
bool has_storage_handler() const { return file != nullptr; }
/// Set storage handler for temporary table
void set_storage_handler(handler *file_arg) {
// Ensure consistent call order
assert((file == nullptr && file_arg != nullptr) ||
(file != nullptr && file_arg == nullptr));
assert(!is_created());
assert(file_arg->inited == handler::NONE);
file = file_arg;
}
  /// Return true if table is instantiated, and false otherwise.
  bool is_created() const { return created; }
  /**
    Set the table as "created", and enable flags in storage engine
    that could not be enabled without an instantiated table.
  */
  void set_created();
  /**
    Set the contents of table to be "deleted", ie "not created", after having
    deleted the contents. Also clears the "materialized" flag.
  */
  void set_deleted() { created = materialized = false; }
  /// Set table as nullable, ie it is inner wrt some outer join
  void set_nullable() { nullable = true; }
  /// Return whether table is nullable
  bool is_nullable() const { return nullable; }
  /// @return true if table contains one or more generated columns
  /// (implicit pointer-to-bool conversion of the vfield array pointer)
  bool has_gcol() const { return vfield; }
/**
Life cycle of the row buffer is as follows:
- The initial state is "not started".
- When reading a row through the storage engine handler, the status is set
as "has row" or "no row", depending on whether a row was found or not.
The "not started" state is cleared, as well as the "null row" state,
the updated state and the deleted state.
- When making a row available in record[0], make sure to update row status
similarly to how the storage engine handler does it.
- If a NULL-extended row is needed in join execution, the "null row" state
is set. Note that this can be combined with "has row" if a row was read
but condition on it was evaluated to false (happens for single-row
lookup), or "no row" if no more rows could be read.
Note also that for the "null row" state, the NULL bits inside the
row are set to one, so the row inside the row buffer is no longer usable,
unless the NULL bits are saved in a separate buffer.
- The "is updated" and "is deleted" states are set when row is updated or
deleted, respectively.
*/
  /// Set status for row buffer as "not started"; also clears "null row".
  void set_not_started() {
    m_status = STATUS_NOT_STARTED | STATUS_NOT_FOUND;
    null_row = false;
  }
  /// @return true if a row operation has been done ("not started" cleared)
  bool is_started() const { return !(m_status & STATUS_NOT_STARTED); }
  /// Set status for row buffer: contains row (all status flags cleared)
  void set_found_row() {
    m_status = 0;
    null_row = false;
  }
  /**
    Set status for row buffer: contains no row. This is set when
    - A lookup operation finds no row
    - A scan operation scans past the last row of the range.
    - An error in generating key values before calling storage engine.
  */
  void set_no_row() {
    m_status = STATUS_NOT_FOUND;
    null_row = false;
  }
/**
Set "row found" status from handler result
@param status 0 if row was found, <> 0 if row was not found
*/
void set_row_status_from_handler(int status) {
m_status = status ? STATUS_NOT_FOUND : 0;
null_row = false;
}
  /**
    Set current row as "null row", for use in null-complemented outer join.
    The row buffer may or may not contain a valid row.
    set_null_row() and reset_null_row() are used by the join executor to
    signal the presence or absence of a NULL-extended row for an outer joined
    table. Null rows may also be used to specify rows that are all NULL in
    grouping operations.
    @note this is a destructive operation since the NULL value bit vector
          is overwritten. Caching operations must be aware of this.
  */
  void set_null_row() {
    null_row = true;
    m_status |= STATUS_NULL_ROW;
    // Destructive: every NULL indicator bit in the row buffer is set.
    if (s->null_bytes > 0) memset(null_flags, 255, s->null_bytes);
  }
  /// Clear "null row" status for the current row
  void reset_null_row() {
    null_row = false;
    m_status &= ~STATUS_NULL_ROW;
  }
  /// Set "updated" property for the current row
  void set_updated_row() {
    // Only valid when a row operation has been done and a row was found.
    assert(is_started() && has_row());
    m_status |= STATUS_UPDATED;
  }
  /// Set "deleted" property for the current row
  void set_deleted_row() {
    // Only valid when a row operation has been done and a row was found.
    assert(is_started() && has_row());
    m_status |= STATUS_DELETED;
  }
  /// @return true if there is a row in row buffer
  bool has_row() const { return !(m_status & STATUS_NOT_FOUND); }
  /// @return true if current row is null-extended
  bool has_null_row() const { return null_row; }
  /// @return true if current row has been updated (multi-table update)
  bool has_updated_row() const { return m_status & STATUS_UPDATED; }
  /// @return true if current row has been deleted (multi-table delete)
  bool has_deleted_row() const { return m_status & STATUS_DELETED; }
  /// Save the NULL flags of the current row into the designated buffer.
  /// This should be done before null-complementing a table accessed
  /// with EQRefIterator or a const table, as they need to be able to
  /// restore the original contents of the record buffer before
  /// reading the next row. This is necessary because of their special
  /// code for avoiding table access if the same row should be
  /// accessed by the next read.
  void save_null_flags() {
    if (s->null_bytes > 0) memcpy(null_flags_saved, null_flags, s->null_bytes);
  }
  /// Restore the NULL flags of the current row from the designated buffer
  void restore_null_flags() {
    if (s->null_bytes > 0) memcpy(null_flags, null_flags_saved, s->null_bytes);
  }
  /// Empties internal temporary table (deletes rows, closes scan)
  bool empty_result_table();
  /**
    Initialize the optimizer cost model.

    This function should be called each time a new query is started.

    @param cost_model_server the main cost model object for the query
  */
  void init_cost_model(const Cost_model_server *cost_model_server) {
    m_cost_model.init(cost_model_server, this);
  }
  /**
    Return the cost model object for this table.
  */
  const Cost_model_table *cost_model() const { return &m_cost_model; }
  /// Set the estimate for the number of bytes to read per row in this table.
  void set_bytes_per_row(const BytesPerTableRow *bytes_per_row) {
    m_bytes_per_row = bytes_per_row;
  }
  /// Get the estimate for the number of bytes to read per row in this table.
  const BytesPerTableRow *bytes_per_row() const { return m_bytes_per_row; }
  /**
    Bind all the table's value generator columns in all the forms:
    stored/virtual GC, default expressions and checked constraints.

    @details When a table is opened from the dictionary, the Value Generator
    expressions are bound during opening (see fix_value_generator_fields()).
    After query execution, Item::cleanup() is called on them
    (see cleanup_value_generator_items()). When the table is opened from the
    table cache, the Value Generator(s) need to be bound again and this
    function does that.
  */
  void bind_value_generators_to_fields();
  /**
    Clean any state in items associated with generated columns to be ready for
    the next statement.
  */
  void cleanup_value_generator_items();
#ifndef NDEBUG
  /// Set the debug-only sequence id used to identify this temporary table.
  void set_tmp_table_seq_id(uint arg) { tmp_table_seq_id = arg; }
#endif
  /**
    Update covering keys depending on max read key length.

    Update available covering keys for the table, based on a constrained field
    and the identified covering prefix keys: If the matched part of field is
    longer than the index prefix,
    the prefix index cannot be used as a covering index.

    @param[in] field                Pointer to field object
    @param[in] key_read_length      Max read key length
    @param[in] covering_prefix_keys Covering prefix keys
  */
  void update_covering_prefix_keys(Field *field, uint16 key_read_length,
                                   Key_map *covering_prefix_keys);
  /**
    Returns the primary engine handler for the table.
    If none exist, nullptr is returned.
  */
  handler *get_primary_handler() const;
 private:
  /**
    Bitmap that tells which columns are eligible for partial update in an
    update statement.

    The bitmap is lazily allocated in the TABLE's mem_root when
    #mark_column_for_partial_update() is called.
  */
  MY_BITMAP *m_partial_update_columns{nullptr};
  /**
    Object which contains execution time state used for partial update
    of JSON columns.

    It is allocated in the execution mem_root by #setup_partial_update() if
    there are columns that have been marked as eligible for partial update.
  */
  Partial_update_info *m_partial_update_info{nullptr};
  /**
    This flag decides whether or not we should log the drop temporary table
    command.
  */
  bool should_binlog_drop_if_temp_flag{false};
 public:
  /**
    Does this table have any columns that can be updated using partial update
    in the current row?

    @return whether any columns in the current row can be updated using partial
            update
  */
  bool has_binary_diff_columns() const;
  /**
    Get the list of binary diffs that have been collected for a given column in
    the current row, or `nullptr` if partial update cannot be used for that
    column.

    @param field the column to get binary diffs for
    @return the list of binary diffs for the column, or `nullptr` if the column
            cannot be updated using partial update
  */
  const Binary_diff_vector *get_binary_diffs(const Field *field) const;
  /**
    Mark a given column as one that can potentially be updated using
    partial update during execution of an update statement.

    Whether it is actually updated using partial update, is not
    determined until execution time, since that depends both on the
    data that is in the column and the new data that is written to the
    column.

    This function should be called during preparation of an update
    statement.

    @param field a column which is eligible for partial update
    @retval false on success
    @retval true on out-of-memory
  */
  bool mark_column_for_partial_update(const Field *field);
  /**
    Has this column been marked for partial update?

    Note that this only tells if the column satisfies the syntactical
    requirements for being partially updated. Use #is_binary_diff_enabled() or
    #is_logical_diff_enabled() instead to see if partial update should be used
    on the column.

    @param field the column to check
    @return whether the column has been marked for partial update
  */
  bool is_marked_for_partial_update(const Field *field) const;
  /**
    Does this table have any columns that were marked with
    #mark_column_for_partial_update()?

    Note that this only tells if any of the columns satisfy the syntactical
    requirements for being partially updated. Use
    #has_binary_diff_columns(), #is_binary_diff_enabled() or
    #is_logical_diff_enabled() instead to see if partial update should be used
    on a column.
  */
  bool has_columns_marked_for_partial_update() const;
  /**
    Enable partial update of JSON columns in this table. It is only
    enabled for the columns that have previously been marked for
    partial update using #mark_column_for_partial_update().

    @param logical_diffs should logical JSON diffs be collected in addition
                         to the physical binary diffs?

    This function should be called once per statement execution, when
    the update statement is optimized.

    @retval false on success
    @retval true on out-of-memory
  */
  bool setup_partial_update(bool logical_diffs);
  /**
    @see setup_partial_update(bool)

    This is a wrapper that auto-computes the value of the parameter
    logical_diffs.

    @retval false on success
    @retval true on out-of-memory
  */
  bool setup_partial_update();
  /**
    Add a binary diff for a column that is updated using partial update.

    @param field  the column that is being updated
    @param offset the offset of the changed portion
    @param length the length of the changed portion
    @retval false on success
    @retval true on out-of-memory
  */
  bool add_binary_diff(const Field *field, size_t offset, size_t length);
  /**
    Clear the diffs that have been collected for partial update of
    JSON columns, and re-enable partial update for any columns where
    partial update was temporarily disabled for the current row.
    Should be called between each row that is updated.
  */
  void clear_partial_update_diffs();
  /**
    Clean up state used for partial update of JSON columns.

    This function should be called at the end of each statement
    execution.
  */
  void cleanup_partial_update();
  /**
    Temporarily disable collection of binary diffs for a column in the current
    row.

    This function is called during execution to disable partial update of a
    column that was previously marked as eligible for partial update with
    #mark_column_for_partial_update() during preparation.

    Partial update of this column will be re-enabled when we go to the next
    row.

    @param field the column to stop collecting binary diffs for
  */
  void disable_binary_diffs_for_current_row(const Field *field);
  /**
    Temporarily disable collection of Json_diff objects describing the
    logical changes of a JSON column in the current row.

    Collection of logical JSON diffs is re-enabled when we go to the next row.

    @param field the column to stop collecting logical JSON diffs for
  */
  void disable_logical_diffs_for_current_row(const Field *field) const;
  /**
    Get a buffer that can be used to hold the partially updated column value
    while performing partial update.
  */
  String *get_partial_update_buffer();
  /**
    Add a logical JSON diff describing a logical change to a JSON column in
    partial update.

    @param field     the column that is updated
    @param path      the JSON path that is changed
    @param operation the operation to perform
    @param new_value the new value in the path

    @throws std::bad_alloc if memory cannot be allocated
  */
  void add_logical_diff(const Field_json *field, const Json_seekable_path &path,
                        enum_json_diff_operation operation,
                        const Json_wrapper *new_value);
  /**
    Get the list of JSON diffs that have been collected for a given column in
    the current row, or `nullptr` if partial update cannot be used for that
    column.

    @param field the column to get JSON diffs for
    @return the list of JSON diffs for the column, or `nullptr` if the column
            cannot be updated using partial update
  */
  const Json_diff_vector *get_logical_diffs(const Field_json *field) const;
  /**
    Is partial update using binary diffs enabled on this JSON column?

    @param field the column to check
    @return whether the column can be updated with binary diffs
  */
  bool is_binary_diff_enabled(const Field *field) const;
  /**
    Is partial update using logical diffs enabled on this JSON column?

    @param field the column to check
    @return whether the column can be updated with JSON diffs
  */
  bool is_logical_diff_enabled(const Field *field) const;
  /**
    Virtual fields of type BLOB have a flag m_keep_old_value. This flag is set
    to false for all such fields in this table.
  */
  void blobs_need_not_keep_old_value();
  /**
    Set the variable should_binlog_drop_if_temp_flag, so that
    the logging of temporary tables can be decided.

    @param should_binlog the value to set flag should_binlog_drop_if_temp_flag
  */
  void set_binlog_drop_if_temp(bool should_binlog);
  /**
    @return whether should_binlog_drop_if_temp_flag flag is
            set or not
  */
  bool should_binlog_drop_if_temp(void) const;
  /**
    Find the histogram for the given field index.

    @note If this is called on a TABLE object that belongs to a secondary
    engine, it will take a round-trip through the handler in order to obtain
    the histogram from the TABLE object associated with the primary engine.
    This is done to avoid storing histograms on both the primary and secondary
    TABLE_SHARE.

    @param field_index The index of the field we want to find a histogram for.
    @retval nullptr if no histogram is found.
    @retval Pointer to a histogram if one is found.
  */
  const histograms::Histogram *find_histogram(uint field_index) const;
};
/**
  Reset a table's record buffer to its default values and set all NULL
  indicator bits, making record[0] represent an "empty" row.

  @param table  table whose record buffer is reset
*/
static inline void empty_record(TABLE *table) {
  restore_record(table, s->default_values);
  const uint null_byte_count = table->s->null_bytes;
  if (null_byte_count > 0) memset(table->null_flags, 255, null_byte_count);
}
/* Flags for ST_FIELD_INFO::field_flags (columns are NOT NULL and SIGNED by
   default). */
#define MY_I_S_MAYBE_NULL 1
#define MY_I_S_UNSIGNED 2
/**
  Describes one column of an INFORMATION_SCHEMA table.
*/
struct ST_FIELD_INFO {
  /**
    This is used as column name.
  */
  const char *field_name;
  /**
    For string-type columns, this is the maximum number of
    characters. Otherwise, it is the 'display-length' for the column.
    For the data type MYSQL_TYPE_DATETIME this field specifies the
    number of digits in the fractional part of time value.
  */
  uint field_length;
  /**
    This denotes data type for the column. For the most part, there seems to
    be one entry in the enum for each SQL data type, although there seem to
    be a number of additional entries in the enum.
  */
  enum_field_types field_type;
  int value;
  /**
    This is used to set column attributes. By default, columns are @c NOT
    @c NULL and @c SIGNED, and you can deviate from the default
    by setting the appropriate flags. You can use either one of the flags
    @c MY_I_S_MAYBE_NULL and @c MY_I_S_UNSIGNED or
    combine them using the bitwise or operator @c |. Both flags are
    defined in table.h.
  */
  uint field_flags;  // Field attributes (maybe_null, signed, unsigned etc.)
  const char *old_name;
  uint open_method;  // Not used
};
/**
  Describes one INFORMATION_SCHEMA table: its name, columns and the callbacks
  used to populate it.
*/
struct ST_SCHEMA_TABLE {
  const char *table_name;
  /// Array describing the table's columns.
  ST_FIELD_INFO *fields_info;
  /* Fill table with data */
  int (*fill_table)(THD *thd, Table_ref *tables, Item *cond);
  /* Handle fields for old SHOW */
  int (*old_format)(THD *thd, ST_SCHEMA_TABLE *schema_table);
  int (*process_table)(THD *thd, Table_ref *tables, TABLE *table, bool res,
                       LEX_CSTRING db_name, LEX_CSTRING table_name);
  /// True if the table is hidden from SHOW / I_S listings.
  bool hidden;
};
/**
  Strategy for how to process a view or derived table (merge or
  materialization).
*/
enum enum_view_algorithm {
  VIEW_ALGORITHM_UNDEFINED = 0,
  VIEW_ALGORITHM_TEMPTABLE = 1,
  VIEW_ALGORITHM_MERGE = 2
};
/// Kind of view object.
enum class enum_view_type {
  UNDEFINED,
  SQL_VIEW,          // Traditional SQL VIEW
  JSON_DUALITY_VIEW  // JSON Duality view
};
/* view SQL SECURITY (SUID) parameter options */
#define VIEW_SUID_INVOKER 0
#define VIEW_SUID_DEFINER 1
#define VIEW_SUID_DEFAULT 2
/* view WITH CHECK OPTION parameter options */
#define VIEW_CHECK_NONE 0
#define VIEW_CHECK_LOCAL 1
#define VIEW_CHECK_CASCADED 2
/* result of view WITH CHECK OPTION parameter check */
#define VIEW_CHECK_OK 0
#define VIEW_CHECK_ERROR 1
#define VIEW_CHECK_SKIP 2
/** The threshold size a blob field buffer before it is freed */
#define MAX_TDC_BLOB_SIZE 65536
/**
  Struct that describes an expression selected from a derived table or view.
*/
struct Field_translator {
  /**
    Points to an item that represents the expression.
    If the item is determined to be unused, the pointer is set to NULL.
  */
  Item *item;
  /// Name of selected expression
  const char *name;
};
/*
  Column reference of a NATURAL/USING join. Since column references in
  joins can be both from views and stored tables, may point to either a
  Field (for tables), or a Field_translator (for views).
*/
class Natural_join_column {
 public:
  Field_translator *view_field; /* Column reference of merge view. */
  Item_field *table_field;      /* Column reference of table or temp view. */
  Table_ref *table_ref;         /* Original base table/view reference. */
  /*
    True if a common join column of two NATURAL/USING join operands. Notice
    that when we have a hierarchy of nested NATURAL/USING joins, a column can
    be common at some level of nesting but it may not be common at higher
    levels of nesting. Thus this flag may change depending on at which level
    we are looking at some column.
  */
  bool is_common;

 public:
  Natural_join_column(Field_translator *field_param, Table_ref *tab);
  Natural_join_column(Item_field *field_param, Table_ref *tab);
  const char *name();
  Item_ident *create_item(THD *thd);
  Field *field();
  const char *table_name();
  const char *db_name();
  GRANT_INFO *grant();
};
/**
  This is a generic enum. It may be reused in the ACL statements
  for clauses that can map to the values defined in this enum.
*/
enum class Lex_acl_attrib_udyn {
  UNCHANGED, /* The clause is not specified */
  DEFAULT,   /* Default value of clause is specified */
  YES,       /* Value that maps to True is specified */
  NO         /* Value that maps to False is specified */
};
/**
  Parser state for one multi-factor authentication (MFA) factor of a
  CREATE/ALTER USER statement.
*/
struct LEX_MFA {
  LEX_CSTRING plugin;
  LEX_CSTRING auth;
  LEX_CSTRING generated_password;
  LEX_CSTRING challenge_response;
  LEX_CSTRING client_plugin;
  /// Which authentication factor this describes (1-based).
  uint nth_factor;
  /*
    The following flags are indicators for the SQL syntax used while
    parsing CREATE/ALTER user. While other members are self-explanatory,
    'uses_authentication_string_clause' signifies if the password is in
    hash form (if the var was set to true) or not.
  */
  bool uses_identified_by_clause;
  bool uses_authentication_string_clause;
  bool uses_identified_with_clause;
  bool has_password_generator;
  /* flag set during CREATE USER .. INITIAL AUTHENTICATION BY */
  bool passwordless;
  /* flag set during ALTER USER .. ADD nth FACTOR */
  bool add_factor;
  /* flag set during ALTER USER .. MODIFY nth FACTOR */
  bool modify_factor;
  /* flag set during ALTER USER .. DROP nth FACTOR */
  bool drop_factor;
  /*
    flag used during authentication and to decide if server should
    be in sandbox mode or not
  */
  bool requires_registration;
  /* flag set during ALTER USER .. nth FACTOR UNREGISTER */
  bool unregister;
  /* flag set during ALTER USER .. INITIATE REGISTRATION */
  bool init_registration;
  /* flag set during ALTER USER .. FINISH REGISTRATION */
  bool finish_registration;

  LEX_MFA() { reset(); }
  /// Restore all members to their "not specified" defaults.
  void reset() {
    plugin = EMPTY_CSTR;
    auth = NULL_CSTR;
    generated_password = NULL_CSTR;
    challenge_response = NULL_CSTR;
    client_plugin = NULL_CSTR;
    nth_factor = 1;
    uses_identified_by_clause = false;
    uses_authentication_string_clause = false;
    uses_identified_with_clause = false;
    has_password_generator = false;
    passwordless = false;
    add_factor = false;
    drop_factor = false;
    modify_factor = false;
    requires_registration = false;
    unregister = false;
    init_registration = false;
    finish_registration = false;
  }
  /// Deep-copy *m into this object, allocating strings on *alloc.
  void copy(LEX_MFA *m, MEM_ROOT *alloc);
};
/*
  This structure holds the specifications relating to
  ALTER user ... PASSWORD EXPIRE ...
*/
struct LEX_ALTER {
  bool update_password_expired_fields;
  bool update_password_expired_column;
  bool use_default_password_lifetime;
  uint16 expire_after_days;
  bool update_account_locked_column;
  bool account_locked;
  uint32 password_history_length;
  bool use_default_password_history;
  bool update_password_history;
  uint32 password_reuse_interval;
  bool use_default_password_reuse_interval;
  bool update_password_reuse_interval;
  uint failed_login_attempts;
  bool update_failed_login_attempts;
  int password_lock_time;
  bool update_password_lock_time;
  /* Holds the specification of 'PASSWORD REQUIRE CURRENT' clause. */
  Lex_acl_attrib_udyn update_password_require_current;
  /// Reset every member to its default, "nothing to update" state.
  void cleanup() {
    update_password_expired_fields = false;
    update_password_expired_column = false;
    use_default_password_lifetime = true;
    expire_after_days = 0;
    update_account_locked_column = false;
    account_locked = false;
    use_default_password_history = true;
    update_password_history = false;
    use_default_password_reuse_interval = true;
    update_password_reuse_interval = false;
    update_password_require_current = Lex_acl_attrib_udyn::UNCHANGED;
    password_history_length = 0;
    password_reuse_interval = 0;
    update_password_lock_time = false;
    update_failed_login_attempts = false;
    failed_login_attempts = 0;
    password_lock_time = 0;
  }
};
/*
  This structure holds the specifications related to
  mysql user and the associated auth details.
*/
struct LEX_USER {
  LEX_CSTRING user;
  LEX_CSTRING host;
  LEX_CSTRING current_auth;
  bool uses_replace_clause;
  bool retain_current_password;
  bool discard_old_password;
  LEX_ALTER alter_status;
  /* restrict MFA methods to at most 3 authentication plugins */
  LEX_MFA first_factor_auth_info;
  List<LEX_MFA> mfa_list;
  bool with_initial_auth;

  /// Reset all members to their defaults.
  void init() {
    user = NULL_CSTR;
    host = NULL_CSTR;
    current_auth = NULL_CSTR;
    uses_replace_clause = false;
    retain_current_password = false;
    discard_old_password = false;
    // NOTE(review): unlike LEX_ALTER::cleanup(), this does not reset
    // alter_status.update_password_history,
    // alter_status.use_default_password_reuse_interval or
    // alter_status.update_password_reuse_interval -- confirm this asymmetry
    // is intentional.
    alter_status.account_locked = false;
    alter_status.expire_after_days = 0;
    alter_status.update_account_locked_column = false;
    alter_status.update_password_expired_column = false;
    alter_status.update_password_expired_fields = false;
    alter_status.use_default_password_lifetime = true;
    alter_status.use_default_password_history = true;
    alter_status.update_password_require_current =
        Lex_acl_attrib_udyn::UNCHANGED;
    alter_status.password_history_length = 0;
    alter_status.password_reuse_interval = 0;
    alter_status.failed_login_attempts = 0;
    alter_status.password_lock_time = 0;
    alter_status.update_failed_login_attempts = false;
    alter_status.update_password_lock_time = false;
    first_factor_auth_info.reset();
    mfa_list.clear();
    with_initial_auth = false;
  }

  LEX_USER() { init(); }

  bool add_mfa_identifications(LEX_MFA *factor2, LEX_MFA *factor3 = nullptr);

  /*
    Allocates the memory in the THD mem pool and initialize the members of
    this struct. It is preferable to use this method to create a LEX_USER
    rather allocating the memory in the THD and initializing the members
    explicitly.
  */
  static LEX_USER *alloc(THD *thd);
  static LEX_USER *alloc(THD *thd, LEX_STRING *user, LEX_STRING *host);
  /*
    Initialize the members of this struct. It is preferable to use this method
    to initialize a LEX_USER rather initializing the members explicitly.
  */
  static LEX_USER *init(LEX_USER *to_init, THD *thd, LEX_STRING *user,
                        LEX_STRING *host);
};
/**
  Derive the type of metadata lock to be requested for a table used by a DML
  statement from the type of THR_LOCK lock requested for this table.

  @param lock_type  THR_LOCK lock type requested for the table
  @return matching metadata lock type
*/
inline enum enum_mdl_type mdl_type_for_dml(enum thr_lock_type lock_type) {
  // Read locks map to a shared read MDL.
  if (lock_type < TL_WRITE_ALLOW_WRITE) return MDL_SHARED_READ;
  // Write locks map to a shared write MDL, low-priority when requested.
  if (lock_type == TL_WRITE_LOW_PRIORITY) return MDL_SHARED_WRITE_LOW_PRIO;
  return MDL_SHARED_WRITE;
}
/**
  Type of table which can be open for an element of table list.
*/
enum enum_open_type {
  OT_TEMPORARY_OR_BASE = 0,  // Temporary table preferred, base table otherwise
  OT_TEMPORARY_ONLY,         // Only a temporary table may be opened
  OT_BASE_ONLY               // Only a base table may be opened
};
/**
  This structure is used to keep info about possible key for the result table
  of a derived table/view.
  The 'referenced_by' is the table map of tables to which this possible
  key corresponds.
  The 'used_field' is a map of fields of which this key consists of.
  See also the comment for the Table_ref::update_derived_keys function.
*/
class Derived_key {
 public:
  table_map referenced_by;
  Field_map used_fields;
  uint key_part_count{0};
};

class Table_function;
/*
Table reference in the FROM clause.
These table references can be of several types that correspond to
different SQL elements. Below we list all types of TABLE_LISTs with
the necessary conditions to determine when a Table_ref instance
belongs to a certain type.
1) table (Table_ref::view == NULL)
- base table
(Table_ref::derived == NULL)
- subquery - Table_ref::table is a temp table
(Table_ref::derived != NULL)
- information schema table
(Table_ref::schema_table != NULL)
NOTICE: for schema tables Table_ref::field_translation may be != NULL
2) view (Table_ref::view != NULL)
- merge (Table_ref::effective_algorithm == VIEW_ALGORITHM_MERGE)
also (Table_ref::field_translation != NULL)
- temptable(Table_ref::effective_algorithm == VIEW_ALGORITHM_TEMPTABLE)
also (Table_ref::field_translation == NULL)
3) nested table reference (Table_ref::nested_join != NULL)
- table sequence - e.g. (t1, t2, t3)
TODO: how to distinguish from a JOIN?
- general JOIN
TODO: how to distinguish from a table sequence?
- NATURAL JOIN
(Table_ref::natural_join != NULL)
- JOIN ... USING
(Table_ref::join_using_fields != NULL)
    - semi-join
*/
class Table_ref {
public:
  Table_ref() = default;

  /**
    Only to be used by legacy code that temporarily needs a Table_ref,
    more specifically: Query_result_create::binlog_show_create_table().
  */
  explicit Table_ref(TABLE *table_arg) : table(table_arg) {}

  /// Constructor that can be used when the strings are null terminated.
  /// The alias defaults to the table name.
  Table_ref(const char *db_name, const char *table_name,
            enum thr_lock_type lock_type)
      : Table_ref(db_name, strlen(db_name), table_name, strlen(table_name),
                  table_name, lock_type) {}
  /**
    Creates a Table_ref object with pre-allocated strings for database,
    table and alias. The MDL request is initialized for a transactional
    TABLE lock whose type is derived from the THR_LOCK type.
  */
  Table_ref(TABLE *table_arg, const char *db_name_arg, size_t db_length_arg,
            const char *table_name_arg, size_t table_name_length_arg,
            const char *alias_arg, enum thr_lock_type lock_type_arg)
      : db(db_name_arg),
        table_name(table_name_arg),
        alias(alias_arg),
        m_map(1),
        table(table_arg),
        m_lock_descriptor{lock_type_arg},
        db_length(db_length_arg),
        table_name_length(table_name_length_arg) {
    MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, db, table_name,
                     mdl_type_for_dml(m_lock_descriptor.type), MDL_TRANSACTION);
  }

  /// Constructor that can be used when the strings are null terminated.
  Table_ref(const char *db_name, const char *table_name, const char *alias,
            enum thr_lock_type lock_type)
      : Table_ref(db_name, strlen(db_name), table_name, strlen(table_name),
                  alias, lock_type) {}

  /**
    This constructor can be used when a Table_ref is needed for an
    existing temporary table. These typically have very long table names, since
    it is a fully qualified path. For this reason, the table is set to the
    alias. The database name is left blank. The lock descriptor is set to
    TL_READ.
  */
  Table_ref(TABLE *table_arg, const char *alias_arg)
      : db(""),
        table_name(alias_arg),
        alias(alias_arg),
        m_map(1),
        table(table_arg),
        m_lock_descriptor{TL_READ},
        db_length(0),
        table_name_length(strlen(alias_arg)) {
    MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, db, table_name,
                     mdl_type_for_dml(m_lock_descriptor.type), MDL_TRANSACTION);
  }
  /**
    Sets an explicit enum_mdl_type value, without initializing
    m_lock_descriptor. Database and table names are taken from the TABLE
    share.
  */
  Table_ref(TABLE *table_arg, const char *alias_arg, enum_mdl_type mdl_type)
      : db(table_arg->s->db.str),
        table_name(table_arg->s->table_name.str),
        alias(alias_arg),
        m_map(1),
        table(table_arg),
        db_length(table_arg->s->db.length),
        table_name_length(table_arg->s->table_name.length) {
    MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, db, table_name, mdl_type,
                     MDL_TRANSACTION);
  }

  /// Constructor for null-terminated strings, with an explicit MDL request
  /// type overriding the one derived from the THR_LOCK type.
  Table_ref(const char *db_name, const char *table_name_arg,
            enum thr_lock_type lock_type_arg,
            enum enum_mdl_type mdl_request_type)
      : db(db_name),
        table_name(table_name_arg),
        alias(table_name_arg),
        m_map(1),
        m_lock_descriptor{lock_type_arg},
        db_length(strlen(db_name)),
        table_name_length(strlen(table_name_arg)) {
    MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, db, table_name,
                     mdl_type_for_dml(m_lock_descriptor.type), MDL_TRANSACTION);
    mdl_request.set_type(mdl_request_type);
  }

  /// Constructor for pre-measured strings, with an explicit MDL request type
  /// overriding the one derived from the THR_LOCK type.
  Table_ref(const char *db_name, size_t db_length_arg,
            const char *table_name_arg, size_t table_name_length_arg,
            enum thr_lock_type lock_type_arg,
            enum enum_mdl_type mdl_request_type)
      : db(db_name),
        table_name(table_name_arg),
        alias(table_name_arg),
        m_map(1),
        m_lock_descriptor{lock_type_arg},
        db_length(db_length_arg),
        table_name_length(table_name_length_arg) {
    MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, db, table_name,
                     mdl_type_for_dml(m_lock_descriptor.type), MDL_TRANSACTION);
    mdl_request.set_type(mdl_request_type);
  }
  // NOTE(review): unlike the other constructors, this one does not call
  // MDL_REQUEST_INIT, so mdl_request is left default-initialized -- confirm
  // callers initialize it themselves.
  Table_ref(const char *db_name, size_t db_length_arg,
            const char *table_name_arg, size_t table_name_length_arg,
            enum thr_lock_type lock_type_arg)
      : db(db_name),
        table_name(table_name_arg),
        alias(table_name_arg),
        m_map(1),
        m_lock_descriptor{lock_type_arg},
        db_length(db_length_arg),
        table_name_length(table_name_length_arg) {}

  /**
    Sets an explicit enum_mdl_type value, without initializing
    m_lock_descriptor.
  */
  Table_ref(const char *db_name, size_t db_length_arg,
            const char *table_name_arg, size_t table_name_length_arg,
            const char *alias_arg, enum enum_mdl_type mdl_request_type)
      : db(db_name),
        table_name(table_name_arg),
        alias(alias_arg),
        m_map(1),
        db_length(db_length_arg),
        table_name_length(table_name_length_arg) {
    MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, db, table_name,
                     mdl_type_for_dml(m_lock_descriptor.type), MDL_TRANSACTION);
    mdl_request.set_type(mdl_request_type);
  }

  /// Constructor with explicit alias, THR_LOCK type and MDL request type.
  Table_ref(const char *db_name, size_t db_length_arg,
            const char *table_name_arg, size_t table_name_length_arg,
            const char *alias_arg, enum thr_lock_type lock_type_arg,
            enum enum_mdl_type mdl_request_type)
      : db(db_name),
        table_name(table_name_arg),
        alias(alias_arg),
        m_map(1),
        m_lock_descriptor{lock_type_arg},
        db_length(db_length_arg),
        table_name_length(table_name_length_arg) {
    MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, db, table_name,
                     mdl_type_for_dml(m_lock_descriptor.type), MDL_TRANSACTION);
    mdl_request.set_type(mdl_request_type);
  }

  /// Constructor with explicit alias; MDL type is derived from the THR_LOCK
  /// type.
  Table_ref(const char *db_name_arg, size_t db_length_arg,
            const char *table_name_arg, size_t table_name_length_arg,
            const char *alias_arg, enum thr_lock_type lock_type_arg)
      : db(db_name_arg),
        table_name(table_name_arg),
        alias(alias_arg),
        m_map(1),
        m_lock_descriptor{lock_type_arg},
        db_length(db_length_arg),
        table_name_length(table_name_length_arg) {
    MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, db, table_name,
                     mdl_type_for_dml(m_lock_descriptor.type), MDL_TRANSACTION);
  }
  /// Create a Table_ref object representing a nested join
  static Table_ref *new_nested_join(MEM_ROOT *allocator, const char *alias,
                                    Table_ref *embedding,
                                    mem_root_deque<Table_ref *> *belongs_to,
                                    Query_block *select);

  /// @return pointer to the join condition slot (for in-place replacement)
  Item **join_cond_ref() { return &m_join_cond; }
  /// @return the join condition of this table reference
  Item *join_cond() const { return m_join_cond; }
  void set_join_cond(Item *val) {
    // If optimization has started, it's too late to change m_join_cond.
    assert(m_join_cond_optim == nullptr || m_join_cond_optim == (Item *)1);
    m_join_cond = val;
  }
  /// @return the optimized join condition
  Item *join_cond_optim() const { return m_join_cond_optim; }
  void set_join_cond_optim(Item *cond) {
    /*
      Either we are setting to "empty", or there must pre-exist a
      permanent condition.
    */
    assert(cond == nullptr || cond == (Item *)1 || m_join_cond != nullptr);
    m_join_cond_optim = cond;
  }
  /// @return pointer to the optimized join condition slot
  Item **join_cond_optim_ref() { return &m_join_cond_optim; }

  /// @returns true if semi-join nest
  bool is_sj_nest() const { return m_is_sj_or_aj_nest && !m_join_cond; }
  /// @returns true if anti-join nest
  bool is_aj_nest() const { return m_is_sj_or_aj_nest && m_join_cond; }
  /// @returns true if anti/semi-join nest
  bool is_sj_or_aj_nest() const { return m_is_sj_or_aj_nest; }
  /// Makes the nest a semi/antijoin nest
  void set_sj_or_aj_nest() {
    assert(!m_is_sj_or_aj_nest);
    m_is_sj_or_aj_nest = true;
  }
  /// Merge tables from a query block into a nested join structure
  bool merge_underlying_tables(Query_block *select);
  /// Reset table
  void reset();
  /// Evaluate the check option of a view
  int view_check_option(THD *thd) const;
  /// Produce a textual identification of this object
  void print(const THD *thd, String *str, enum_query_type query_type) const;
  /// Check which single table inside a view that matches a table map
  bool check_single_table(Table_ref **table_ref, table_map map);
  /// Allocate a buffer for inserted column values
  bool set_insert_values(MEM_ROOT *mem_root);

  Table_ref *first_leaf_for_name_resolution();
  /**
    Retrieve the last (right-most) leaf in a nested join tree with
    respect to name resolution.

    Given that 'this' is a nested table reference, recursively walk
    down the right-most children of 'this' until we reach a leaf
    table reference with respect to name resolution.

    The right-most child of a nested table reference is the first
    element in the list of children because the children are inserted
    in reverse order.

    @return
      - If 'this' is a nested table reference - the right-most child
        of the tree rooted in 'this',
      - else - 'this'
  */
  Table_ref *last_leaf_for_name_resolution();
  bool is_leaf_for_name_resolution() const;

  /// Return the outermost view this table belongs to, or itself
  inline const Table_ref *top_table() const {
    return belong_to_view ? belong_to_view : this;
  }
  /// Non-const overload of top_table(), implemented via the const overload.
  inline Table_ref *top_table() {
    return const_cast<Table_ref *>(
        const_cast<const Table_ref *>(this)->top_table());
  }
/// Prepare check option for a view
bool prepare_check_option(THD *thd, bool is_cascaded = false);
/// Merge WHERE condition of view or derived table into outer query
bool merge_where(THD *thd);
/// Prepare replace filter for a view (used for REPLACE command)
bool prepare_replace_filter(THD *thd);
/// Return true if this represents a named view
bool is_view() const { return view != nullptr; }
/// Return true if this represents a derived table (an unnamed view)
bool is_derived() const { return derived != nullptr && view == nullptr; }
/// Return true if this represents a named view or a derived table
bool is_view_or_derived() const { return derived != nullptr; }
/// Return true if this represents a non-materialized view or a derived table
bool is_non_materialized_view_or_derived() const {
return is_view_or_derived() && !is_mv_se_available();
}
/// Return true if this represents a table function
bool is_table_function() const { return table_function != nullptr; }
/**
@returns true if this is a recursive reference inside the definition of a
recursive CTE.
@note that it starts its existence as a dummy derived table, until the
end of resolution when it's not a derived table anymore, just a reference
to the materialized temporary table. Whereas a non-recursive
reference to the recursive CTE is a derived table.
*/
bool is_recursive_reference() const { return m_is_recursive_reference; }
/// @returns true if this is a base table (permanent or temporary),
/// i.e. none of: view/derived table, table function, recursive reference
bool is_base_table() const {
  // De Morgan form of !(a || b || c): a base table is none of the
  // placeholder kinds below.
  return !is_view_or_derived() && !is_table_function() &&
         !is_recursive_reference();
}
/**
@see is_recursive_reference().
@returns true if error
*/
bool set_recursive_reference();
/**
@returns true for a table that represents an optimizer internal table,
is a derived table, a recursive reference, a table function.
Internal tables are only visible inside a query expression, and is hence
not visible in any schema, or need any kind of privilege checking.
*/
bool is_internal() const {
return is_derived() || is_recursive_reference() || is_table_function();
}
/**
@returns true for a table that is a placeholder, ie a derived table,
a view, a recursive reference, a table function or a schema table.
A table is also considered to be a placeholder if it does not have a
TABLE object for some other reason.
*/
bool is_placeholder() const {
return is_view_or_derived() || is_recursive_reference() ||
is_table_function() || schema_table || table == nullptr;
}
/// Return true if view or derived table and can be merged
bool is_mergeable() const;
/**
Checks if this is a table that contains zero rows or one row, and that can
be materialized during optimization.
@returns true if materializable table contains one or zero rows, and
materialization during optimization is permitted
Returning true, if the hypergraph optimizer is not active, implies that the
table is materialized during optimization, so it need not be optimized
during execution. The hypergraph optimizer does not care about const tables,
so such tables are not executed during optimization time when it is active.
*/
bool materializable_is_const(THD *thd) const;
/// @returns true if this is a derived table containing a stored function.
bool has_stored_program() const;
/// Return true if this is a derived table or view that is merged
bool is_merged() const { return effective_algorithm == VIEW_ALGORITHM_MERGE; }
/// Set table to be merged
void set_merged() {
assert(effective_algorithm == VIEW_ALGORITHM_UNDEFINED);
effective_algorithm = VIEW_ALGORITHM_MERGE;
}
/// Return true if this is a materializable derived table/view
bool uses_materialization() const {
return effective_algorithm == VIEW_ALGORITHM_TEMPTABLE;
}
/// Set table to be materialized
void set_uses_materialization() {
// @todo We should do this only once, but currently we cannot:
// assert(effective_algorithm == VIEW_ALGORITHM_UNDEFINED);
assert(effective_algorithm != VIEW_ALGORITHM_MERGE);
effective_algorithm = VIEW_ALGORITHM_TEMPTABLE;
}
/// Return true if table is updatable
bool is_updatable() const { return m_updatable; }
/// Set table as updatable. (per default, a table is non-updatable)
void set_updatable() { m_updatable = true; }
/// Return true if table is insertable-into
bool is_insertable() const { return m_insertable; }
/// Set table as insertable-into. (per default, a table is not insertable)
void set_insertable() { m_insertable = true; }
/// Return true if table is being updated
bool is_updated() const { return m_updated; }
/// Set table and all referencing views as being updated
void set_updated() {
// Mark this table and every view on the referencing_view chain above it.
for (Table_ref *tr = this; tr != nullptr; tr = tr->referencing_view)
tr->m_updated = true;
}
/// Return true if table is being inserted into
bool is_inserted() const { return m_inserted; }
/// Set table and all referencing views as being inserted into
void set_inserted() {
// Same referencing_view chain walk as set_updated(), for the insert flag.
for (Table_ref *tr = this; tr != nullptr; tr = tr->referencing_view)
tr->m_inserted = true;
}
/// Return true if table is being deleted from
bool is_deleted() const { return m_deleted; }
/// Set table and all referencing views as being deleted from
void set_deleted() {
// Same referencing_view chain walk as set_updated(), for the delete flag.
for (Table_ref *tr = this; tr != nullptr; tr = tr->referencing_view)
tr->m_deleted = true;
}
/// Set table as full-text search (default is not fulltext searched)
void set_fulltext_searched() { m_fulltext_searched = true; }
/// Returns true if a MATCH function references this table.
bool is_fulltext_searched() const { return m_fulltext_searched; }
/// Is this table only available in an external storage engine?
bool is_external() const;
/**
  Set table as readonly, ie it is neither updatable, insertable nor
  deletable during this statement.

  NOTE(review): only m_updatable and m_insertable are cleared here; there
  is no separate "deletable" flag, so deletability is presumably derived
  from m_updatable -- confirm.
*/
void set_readonly() {
m_updatable = false;
m_insertable = false;
}
/**
  Return true if this is a view or derived table that is defined over
  more than one base table, and false otherwise.
*/
bool is_multiple_tables() const {
if (is_view_or_derived()) {
// Only a merged view/derived table can expose multiple leaf tables;
// a materialized one acts as a single table.
assert(is_merged());  // Cannot be a materialized view
return leaf_tables_count() > 1;
} else {
assert(nested_join == nullptr);  // Must be a base table
return false;
}
}
/// Return no. of base tables a merged view or derived table is defined over.
uint leaf_tables_count() const;
/// Return first leaf table of a base table or a view/derived table
Table_ref *first_leaf_table() {
  // Descend through the underlying table lists of merged views/derived
  // tables until a leaf table (one without an underlying list) is reached.
  Table_ref *leaf = this;
  for (; leaf->merge_underlying_list != nullptr;
       leaf = leaf->merge_underlying_list) {
  }
  return leaf;
}
/// Return any leaf table that is not an inner table of an outer join
/// @todo WL#6570 with prepare-once, replace with first_leaf_table()
/// when WL#6059 is merged in (it really converts RIGHT JOIN to
/// LEFT JOIN so the first leaf is part of a LEFT JOIN,
/// guaranteed).
Table_ref *any_outer_leaf_table() {
Table_ref *tr = this;
while (tr->merge_underlying_list) {
tr = tr->merge_underlying_list;
/*
"while" is used, however, an "if" might be sufficient since there is
no more than one inner table in a join nest (with outer_join true).
*/
// Skip leaves that are inner tables of an outer join by advancing
// along the local table list.
while (tr->outer_join) tr = tr->next_local;
}
return tr;
}
/**
Set the LEX object of a view (will also define this as a view).
@note: The value 1 is used to indicate a view but without a valid
query object. Use only if the LEX object is not going to
be used in later processing.
*/
void set_view_query(LEX *lex) { view = lex; }
/// Return the valid LEX object for a view.
LEX *view_query() const {
assert(view != nullptr && view != (LEX *)1);
return view;
}
/**
Set the query expression of a derived table or view.
(Will also define this as a derived table, unless it is a named view.)
*/
void set_derived_query_expression(Query_expression *query_expr) {
derived = query_expr;
}
/// Return the query expression of a derived table or view.
Query_expression *derived_query_expression() const {
assert(derived);
return derived;
}
/// Resolve a derived table or view reference
bool resolve_derived(THD *thd, bool apply_semijoin);
/// Optimize the query expression representing a derived table/view
bool optimize_derived(THD *thd);
/// Create result table for a materialized derived table/view
bool create_materialized_table(THD *thd);
/// Materialize derived table
bool materialize_derived(THD *thd);
/// Check if we can push outer where condition to this derived table
bool can_push_condition_to_derived(THD *thd);
/// Return the number of hidden fields added for the temporary table
/// created for this derived table.
uint get_hidden_field_count_for_derived() const;
/// Prepare security context for a view
bool prepare_security(THD *thd);
Security_context *find_view_security_context(THD *thd);
bool prepare_view_security_context(THD *thd);
/**
Compiles the tagged hints list and fills up TABLE::keys_in_use_for_query,
TABLE::keys_in_use_for_group_by, TABLE::keys_in_use_for_order_by,
TABLE::force_index and TABLE::covering_keys.
*/
bool process_index_hints(const THD *thd, TABLE *table);
/**
Compare the version of metadata from the previous execution
(if any) with values obtained from the current table
definition cache element.
@sa check_and_update_table_version()
*/
bool is_table_ref_id_equal(TABLE_SHARE *s) const {
return (m_table_ref_type == s->get_table_ref_type() &&
m_table_ref_version == s->get_table_ref_version());
}
/**
Record the value of metadata version of the corresponding
table definition cache element in this parse tree node.
@sa check_and_update_table_version()
*/
void set_table_ref_id(TABLE_SHARE *s) {
set_table_ref_id(s->get_table_ref_type(), s->get_table_ref_version());
}
void set_table_ref_id(enum_table_ref_type table_ref_type_arg,
ulonglong table_ref_version_arg) {
m_table_ref_type = table_ref_type_arg;
m_table_ref_version = table_ref_version_arg;
}
/**
If a derived table, returns query block id of first underlying query block.
Zero if not derived.
*/
uint query_block_id() const;
/**
This is for showing in EXPLAIN.
If a derived table, returns query block id of first underlying query block
of first materialized Table_ref instance. Zero if not derived.
*/
uint query_block_id_for_explain() const;
/**
@brief Returns the name of the database that the referenced table belongs
to.
*/
const char *get_db_name() const { return db; }
/**
@brief Returns the name of the table that this Table_ref represents.
@details The unqualified table name or view name for a table or view,
respectively.
*/
const char *get_table_name() const { return table_name; }
int fetch_number_of_rows(
ha_rows fallback_estimate = PLACEHOLDER_TABLE_ROW_ESTIMATE);
bool update_derived_keys(THD *, Field *, Item **, uint, bool *);
bool generate_keys(THD *thd);
/// Setup a derived table to use materialization
bool setup_materialized_derived(THD *thd);
bool setup_materialized_derived_tmp_table(THD *thd);
/// Setup a table function to use materialization
bool setup_table_function(THD *thd);
bool create_field_translation(THD *thd);
/**
  @brief Returns the outer join nest that this Table_ref belongs to, if
  any.

  @details There are two kinds of join nests, outer-join nests and semi-join
  nests. This function returns non-NULL in the following cases:
    @li 1. If this table/nest is embedded in a nest and this nest IS NOT a
           semi-join nest. (In other words, it is an outer-join nest.)
    @li 2. If this table/nest is embedded in a nest and this nest IS a
           semi-join nest, but this semi-join nest is embedded in another
           nest. (This other nest will be an outer-join nest, since all inner
           joined nested semi-join nests have been merged in
           @c simplify_joins() ).
  Note: This function assumes that @c simplify_joins() has been performed.
  Before that, join nests will be present for all types of join.

  @return outer join nest, or NULL if none.
*/
Table_ref *outer_join_nest() const {
  if (embedding == nullptr) return nullptr;
  // Skip over an immediately enclosing semi-join nest: its own embedding
  // (if any) is the outer-join nest we want.
  return embedding->is_sj_nest() ? embedding->embedding : embedding;
}
/**
Return true if this table is an inner table of some outer join.
Examine all the embedding join nests of the table.
@note This function works also before redundant join nests have been
eliminated.
@return true if table is an inner table of some outer join, false otherwise.
*/
bool is_inner_table_of_outer_join() const {
if (outer_join) return true;
for (Table_ref *emb = embedding; emb; emb = emb->embedding) {
if (emb->outer_join) return true;
}
return false;
}
/**
  Return the base table entry of an updatable table.
  In DELETE and UPDATE, a view used as a target table must be mergeable,
  updatable and defined over a single table.
*/
const Table_ref *updatable_base_table() const {
const Table_ref *tbl = this;
// For JDVs we return the root (outermost) base table
if (tbl->is_json_duality_view()) {
return jdv_root_base_table(tbl);
}
assert(tbl->is_updatable() && !tbl->is_multiple_tables());
// Descend through merged views/derived tables; every level on the way
// down must itself be updatable and defined over a single table.
while (tbl->is_view_or_derived()) {
tbl = tbl->merge_underlying_list;
assert(tbl->is_updatable() && !tbl->is_multiple_tables());
}
return tbl;
}
/// Non-const overload: delegates to the const version and casts back.
Table_ref *updatable_base_table() {
return const_cast<Table_ref *>(
static_cast<const Table_ref *>(this)->updatable_base_table());
}
/**
Mark that there is a NATURAL JOIN or JOIN ... USING between two tables.
This function marks that table b should be joined with a either via
a NATURAL JOIN or via JOIN ... USING. Both join types are special
cases of each other, so we treat them together. The function
setup_conds() creates a list of equal condition between all fields
of the same name for NATURAL JOIN or the fields in
Table_ref::join_using_fields for JOIN ... USING.
The list of equality conditions is stored
either in b->join_cond(), or in JOIN::conds, depending on whether there
was an outer join.
EXAMPLE
@verbatim
SELECT * FROM t1 NATURAL LEFT JOIN t2
<=>
SELECT * FROM t1 LEFT JOIN t2 ON (t1.i=t2.i and t1.j=t2.j ... )
SELECT * FROM t1 NATURAL JOIN t2 WHERE <some_cond>
<=>
SELECT * FROM t1, t2 WHERE (t1.i=t2.i and t1.j=t2.j and <some_cond>)
SELECT * FROM t1 JOIN t2 USING(j) WHERE <some_cond>
<=>
SELECT * FROM t1, t2 WHERE (t1.j=t2.j and <some_cond>)
@endverbatim
@param b Right join argument.
*/
void add_join_natural(Table_ref *b) { b->natural_join = this; }
/**
Set granted privileges for a table.
Can be used when generating temporary tables that are also used in
resolver process, such as when generating a UNION table
@param privilege Privileges granted for this table.
*/
void set_privileges(Access_bitmask privilege) {
grant.privilege |= privilege;
}
bool save_properties();
void restore_properties();
/*
List of tables local to a subquery or the top-level SELECT (used by
SQL_I_List). Considers views as leaves (unlike 'next_leaf' below).
Created at parse time in Query_block::add_table_to_list() ->
table_list.link_in_list().
*/
Table_ref *next_local{nullptr};
/* link in a global list of all queries tables */
Table_ref *next_global{nullptr}, **prev_global{nullptr};
const char *db{nullptr}, *table_name{nullptr}, *alias{nullptr};
/*
Target tablespace name: When creating or altering tables, this
member points to the tablespace_name in the HA_CREATE_INFO struct.
*/
LEX_CSTRING target_tablespace_name{nullptr, 0};
char *option{nullptr}; /* Used by cache index */
/** Table level optimizer hints for this table. */
Opt_hints_table *opt_hints_table{nullptr};
/* Hints for query block of this table. */
Opt_hints_qb *opt_hints_qb{nullptr};
void set_lock(const Lock_descriptor &descriptor) {
m_lock_descriptor = descriptor;
}
const Lock_descriptor &lock_descriptor() const { return m_lock_descriptor; }
bool is_derived_unfinished_materialization() const;
private:
/**
The members below must be kept aligned so that (1 << m_tableno) == m_map.
A table that takes part in a join operation must be assigned a unique
table number.
*/
uint m_tableno{0}; ///< Table number within query block
table_map m_map{0}; ///< Table map, derived from m_tableno
/**
If this table or join nest is the Y in "X [LEFT] JOIN Y ON C", this
member points to C. May also be generated from JOIN ... USING clause.
It may be modified only by permanent transformations (permanent = done
once for all executions of a prepared statement).
*/
Item *m_join_cond{nullptr};
bool m_is_sj_or_aj_nest{false};
public:
/*
(Valid only for semi-join nests) Bitmap of tables that are within the
semi-join (this is different from bitmap of all nest's children because
tables that were pulled out of the semi-join nest remain listed as
nest's children).
*/
table_map sj_inner_tables{0};
/*
During parsing - left operand of NATURAL/USING join where 'this' is
the right operand. After parsing (this->natural_join == this) iff
'this' represents a NATURAL or USING join operation. Thus after
parsing 'this' is a NATURAL/USING join iff (natural_join != NULL).
*/
Table_ref *natural_join{nullptr};
/*
True if 'this' represents a nested join that is a NATURAL JOIN.
For one of the operands of 'this', the member 'natural_join' points
to the other operand of 'this'.
*/
bool is_natural_join{false};
/* Field names in a USING clause for JOIN ... USING. */
List<String> *join_using_fields{nullptr};
/*
Explicitly store the result columns of either a NATURAL/USING join or
an operand of such a join.
*/
List<Natural_join_column> *join_columns{nullptr};
/* true if join_columns contains all columns of this table reference. */
bool is_join_columns_complete{false};
/*
List of nodes in a nested join tree, that should be considered as
leaves with respect to name resolution. The leaves are: views,
top-most nodes representing NATURAL/USING joins, subqueries, and
base tables. All of these Table_ref instances contain a
materialized list of columns. The list is local to a subquery.
*/
Table_ref *next_name_resolution_table{nullptr};
/* Index names in a "... JOIN ... USE/IGNORE INDEX ..." clause. */
List<Index_hint> *index_hints{nullptr};
TABLE *table{nullptr}; /* opened table */
mysql::binlog::event::Table_id
table_id{}; /* table id (from binlog) for opened table */
/*
Query_result for derived table to pass it from table creation to table
filling procedure
*/
Query_result_union *derived_result{nullptr};
/*
Reference from aux_tables to local list entry of main select of
multi-delete statement:
delete t1 from t2,t1 where t1.a<'B' and t2.b=t1.b;
here it will be reference of first occurrence of t1 to second (as you
can see this lists can't be merged)
*/
Table_ref *correspondent_table{nullptr};
/*
Holds the function used as the table function
*/
Table_function *table_function{nullptr};
Item *sampling_percentage{nullptr};
/**
For a view or derived table: Add materialize_path and table_path to
m_materialized_path_cache.
*/
void AddMaterializedPathToCache(THD *thd, AccessPath *materialize_path,
const AccessPath *table_path);
/**
Search m_materialized_path_cache for a materialization path for
'table_path'. Return that materialization path, or nullptr if none
is found.
*/
AccessPath *GetCachedMaterializedPath(const AccessPath *table_path);
/// Empty m_materialized_path_cache.
void ClearMaterializedPathCache() { m_materialized_path_cache = nullptr; }
private:
/// Sampling information.
tablesample_type sampling_type{
tablesample_type::UNSPECIFIED_TABLESAMPLE_TYPE};
double sampling_percentage_val{0};
/**
This field is set to non-null for derived tables and views. It points
to the Query_expression representing the derived table/view.
E.g. for a query
@verbatim SELECT * FROM (SELECT a FROM t1) b @endverbatim
*/
Query_expression *derived{nullptr}; /* Query_expression of derived table */
/// If non-NULL, the CTE which this table is derived from.
Common_table_expr *m_common_table_expr{nullptr};
/**
If the user has specified column names with the syntaxes "table name
parenthesis column names":
WITH qn(column names) AS (select...)
or
FROM (select...) dt(column names)
or
CREATE VIEW v(column_names) AS ...
then this points to the list of column names. NULL otherwise.
*/
const Create_col_name_list *m_derived_column_names{nullptr};
/**
If we've previously made an access path for “derived”, it is cached here.
This is useful if we need to plan the query block twice (the hypergraph
optimizer can do so, with and without in2exists predicates), both saving
work and avoiding issues when we try to throw away the old items_to_copy
for a new (identical) one.
*/
MaterializedPathCache *m_materialized_path_cache{nullptr};
public:
ST_SCHEMA_TABLE *schema_table{nullptr}; /* Information_schema table */
Query_block *schema_query_block{nullptr};
/*
True when the view field translation table is used to convert
schema table fields for backwards compatibility with SHOW command.
*/
bool schema_table_reformed{false};
/* link to query_block where this table was used */
Query_block *query_block{nullptr};
private:
LEX *view{nullptr}; /* link on VIEW lex for merging */
/// m_mv_se_materialized true indicates that the view is a materialized view
/// that is materialized by a storage engine directly.
bool m_mv_se_materialized{false};
/// m_mv_se_name is the name of the storage engine that might do the
/// materialization.
LEX_CSTRING m_mv_se_name{.str = nullptr, .length = 0};
/// m_mv_se_available indicates that the current Table_ref is using
/// the materialized view. A Table_ref can be a materialized view (as
/// indicated by m_mv_se_materialized), which is determined by its definition,
/// yet the materialization might not be used during the current lifetime of
/// this object, if the SE does not make it available for some reason.
bool m_mv_se_available{false};
public:
/// Array of selected expressions from a derived table or view.
Field_translator *field_translation{nullptr};
/// pointer to element after last one in translation table above
Field_translator *field_translation_end{nullptr};
/*
List (based on next_local) of underlying tables of this view. I.e. it
does not include the tables of subqueries used in the view. Is set only
for merged views.
*/
Table_ref *merge_underlying_list{nullptr};
/*
- 0 for base tables
- in case of the view it is the list of all (not only underlying
tables but also used in subquery ones) tables of the view.
*/
mem_root_deque<Table_ref *> *view_tables{nullptr};
/* most upper view this table belongs to */
Table_ref *belong_to_view{nullptr};
/*
The view directly referencing this table
(non-zero only for merged underlying tables of a view).
*/
Table_ref *referencing_view{nullptr};
/* Ptr to parent MERGE table list item. See top comment in ha_myisammrg.cc */
Table_ref *parent_l{nullptr};
/*
Security context (non-zero only for tables which belong
to view with SQL SECURITY DEFINER)
*/
Security_context *security_ctx{nullptr};
/*
This view security context (non-zero only for views with
SQL SECURITY DEFINER)
*/
Security_context *view_sctx{nullptr};
/*
List of all base tables local to a subquery including all view
tables. Unlike 'next_local', this in this list views are *not*
leaves. Created in setup_tables() -> make_leaf_tables().
*/
Table_ref *next_leaf{nullptr};
Item *derived_where_cond{nullptr}; ///< WHERE condition from derived table
Item *check_option{nullptr}; ///< WITH CHECK OPTION condition
Item *replace_filter{nullptr}; ///< Filter for REPLACE command
LEX_STRING select_stmt{nullptr, 0}; ///< text of (CREATE/SELECT) statement
LEX_STRING source{nullptr, 0}; ///< source of CREATE VIEW
LEX_STRING timestamp{nullptr, 0}; ///< GMT time stamp of last operation
LEX_USER definer; ///< definer of view
void set_tablesample(tablesample_type sampling_type_arg,
Item *sampling_percentage_arg) {
sampling_type = sampling_type_arg;
sampling_percentage = sampling_percentage_arg;
}
bool has_tablesample() const {
return sampling_type != tablesample_type::UNSPECIFIED_TABLESAMPLE_TYPE;
}
bool update_sampling_percentage();
double get_sampling_percentage() const;
bool validate_tablesample_clause(THD *thd);
tablesample_type get_sampling_type() const { return sampling_type; }
/**
@note: This field is currently not reliable when read from dictionary:
If an underlying view is changed, updatable_view is not changed,
due to lack of dependency checking in dictionary implementation.
Prefer to use is_updatable() during preparation and optimization.
*/
ulonglong updatable_view{0}; ///< VIEW can be updated
bool is_mv_se_available() const { return m_mv_se_available; }
void set_mv_se_available(bool mv_available) {
m_mv_se_available = mv_available;
}
bool is_mv_se_materialized() const { return m_mv_se_materialized; }
void set_mv_se_materialized(bool is_mv) { m_mv_se_materialized = is_mv; }
const LEX_CSTRING &get_mv_se_name() const { return m_mv_se_name; }
void set_mv_se_name(const char *engine_name) {
m_mv_se_name.str = engine_name;
m_mv_se_name.length = strlen(engine_name);
}
void set_mv_se_name(const LEX_CSTRING &engine_name) {
m_mv_se_name = engine_name;
}
/**
@brief The declared algorithm, if this is a view.
@details One of
- VIEW_ALGORITHM_UNDEFINED
- VIEW_ALGORITHM_TEMPTABLE
- VIEW_ALGORITHM_MERGE
@todo Replace with an enum
*/
ulonglong algorithm{0};
ulonglong view_suid{0}; ///< view is suid (true by default)
ulonglong with_check{0}; ///< WITH CHECK OPTION
private:
/// The view algorithm that is actually used, if this is a view.
enum_view_algorithm effective_algorithm{VIEW_ALGORITHM_UNDEFINED};
Lock_descriptor m_lock_descriptor;
public:
GRANT_INFO grant;
public:
/// True if right argument of LEFT JOIN; false in other cases (i.e. if left
/// argument of LEFT JOIN, if argument of INNER JOIN; RIGHT JOINs are
/// converted to LEFT JOIN during contextualization).
bool outer_join{false};
/// True if was originally the left argument of a RIGHT JOIN, before we
/// made it the right argument of a LEFT JOIN.
bool join_order_swapped{false};
uint shared{0}; /* Used in multi-upd */
size_t db_length{0};
size_t table_name_length{0};
private:
/// True if VIEW/TABLE is updatable, based on analysis of query (SQL rules).
bool m_updatable{false};
/// True if VIEW/TABLE is insertable, based on analysis of query (SQL rules).
bool m_insertable{false};
/// True if table is target of UPDATE statement, or updated in IODKU stmt.
bool m_updated{false};
/// True if table is target of INSERT statement.
bool m_inserted{false};
/// True if table is target of DELETE statement, or deleted in REPLACE stmt.
bool m_deleted{false};
bool m_fulltext_searched{false}; ///< True if fulltext searched
public:
bool straight{false}; /* optimize with prev table */
/**
True for tables and views being changed in a data change statement.
Also true for tables subject to a SELECT ... FOR UPDATE.
Also used by replication to filter out statements that can be ignored,
especially important for multi-table UPDATE and DELETE.
*/
bool updating{false};
/// preload only non-leaf nodes (IS THIS USED???)
bool ignore_leaves{false};
/**
The set of tables in the query block that this table depends on.
Can be set due to outer join, join order hints or NOT EXISTS relationship.
*/
table_map dep_tables{0};
/// The outer tables that an outer join's join condition depends on
table_map join_cond_dep_tables{0};
/**
Is non-NULL if this table reference is a nested join, ie it represents
the inner tables of an outer join, the tables contained in the
parentheses of an inner join (eliminated during resolving), the tables
referenced in a derived table or view, in a semi-join nest, the tables
from the subquery.
*/
NESTED_JOIN *nested_join{nullptr};
/// The nested join containing this table reference.
Table_ref *embedding{nullptr};
/// The join list immediately containing this table reference
mem_root_deque<Table_ref *> *join_list{nullptr};
/// stop PS caching
bool cacheable_table{false};
/**
Used to store foreign key name to identify correct table handle from
thd->open_tables during find_fk_table_from_open_tables() call
*/
const char *open_for_fk_name{nullptr};
/**
Specifies which kind of table should be open for this element
of table list.
*/
enum_open_type open_type{OT_TEMPORARY_OR_BASE};
/* true if this merged view contain auto_increment field */
bool contain_auto_increment{false};
/// true <=> VIEW CHECK OPTION condition is processed (also for prep. stmts)
bool check_option_processed{false};
/// true <=> Filter condition is processed
bool replace_filter_processed{false};
dd::enum_table_type required_type{};
char timestamp_buffer[20]{0}; /* buffer for timestamp (19+1) */
/*
This Table_ref object is just placeholder for prelocking, it will be
used for implicit LOCK TABLES only and won't be used in real statement.
*/
bool prelocking_placeholder{false};
/**
  Indicates whether this Table_ref object corresponds to a table/view
  which requires special handling when being opened.
*/
enum {
/* Normal open. */
OPEN_NORMAL = 0,
/* Associate a table share only if the table exists. */
OPEN_IF_EXISTS,
/*
Associate a table share only if the table exists.
Also upgrade metadata lock to exclusive if table doesn't exist.
*/
OPEN_FOR_CREATE,
/* Don't associate a table share. */
OPEN_STUB
} open_strategy{OPEN_NORMAL};
bool internal_tmp_table{false};
/** true if an alias for this table was specified in the SQL. */
bool is_alias{false};
/** true if the table is referred to in the statement using a fully
qualified name (@<db_name@>.@<table_name@>).
*/
bool is_fqtn{false};
/**
If true, this table is a derived (materialized) table which was created
from a scalar subquery, cf.
Query_block::transform_scalar_subqueries_to_join_with_derived
*/
bool m_was_scalar_subquery{false};
/* View creation context. */
View_creation_ctx *view_creation_ctx{nullptr};
/*
Attributes to save/load view creation context in/from frm-file.
They are required only to be able to use existing parser to load
view-definition file. As soon as the parser parsed the file, view
creation context is initialized and the attributes become redundant.
These attributes MUST NOT be used for any purposes but the parsing.
*/
LEX_CSTRING view_client_cs_name{nullptr, 0};
LEX_CSTRING view_connection_cl_name{nullptr, 0};
/*
View definition (SELECT-statement) in the UTF-form.
*/
LEX_STRING view_body_utf8{nullptr, 0};
// True, If this is a system view
bool is_system_view{false};
/// If view, then type of a view.
enum_view_type view_type{enum_view_type::UNDEFINED};
/// If json duality view, then represents duality view content tree node.
jdv::Content_tree_node *jdv_content_tree{nullptr};
/*
Set to 'true' if this is a DD table being opened in the context of a
dictionary operation. Note that when 'false', this may still be a DD
table when opened in a non-DD context, e.g. as part of an I_S view
query.
*/
bool is_dd_ctx_table{false};
/* End of view definition context. */
/* List of possible keys. Valid only for materialized derived tables/views. */
List<Derived_key> derived_key_list;
/**
Indicates what triggers we need to pre-load for this Table_ref
when opening an associated TABLE. This is filled after
the parsed tree is created.
*/
uint8 trg_event_map{0};
bool schema_table_filled{false};
MDL_request mdl_request;
/// if true, EXPLAIN can't explain view due to insufficient rights.
bool view_no_explain{false};
/* List to carry partition names from PARTITION (...) clause in statement */
List<String> *partition_names{nullptr};
/// Set table number
void set_tableno(uint tableno) {
assert(tableno < MAX_TABLES);
m_tableno = tableno;
m_map = (table_map)1 << tableno;
}
/// Return table number
uint tableno() const { return m_tableno; }
/// Return table map derived from table number
table_map map() const {
assert(((table_map)1 << m_tableno) == m_map);
return m_map;
}
/// If non-NULL, the CTE which this table is derived from.
Common_table_expr *common_table_expr() const { return m_common_table_expr; }
void set_common_table_expr(Common_table_expr *c) { m_common_table_expr = c; }
/// @see m_derived_column_names
const Create_col_name_list *derived_column_names() const {
return m_derived_column_names;
}
void set_derived_column_names(const Create_col_name_list *d) {
m_derived_column_names = d;
}
/**
* @brief If view, then check if view is JSON duality view.
*
* @return true If view is JSON duality view.
* @return false Otherwise.
*/
bool is_json_duality_view() const {
return (view_type == enum_view_type::JSON_DUALITY_VIEW);
}
private:
/*
A group of members set and used only during JOIN::optimize().
*/
/**
Optimized copy of m_join_cond (valid for one single
execution). Initialized by Query_block::get_optimizable_conditions().
*/
Item *m_join_cond_optim{nullptr};
public:
COND_EQUAL *cond_equal{nullptr}; ///< Used with outer join
/// true <=> this table is a const one and was optimized away.
bool optimized_away{false};
/**
true <=> all possible keys for a derived table were collected and
could be re-used while statement re-execution.
*/
bool derived_keys_ready{false};
private:
/// If a recursive reference inside the definition of a CTE.
bool m_is_recursive_reference{false};
// End of group for optimization
/** See comments for set_metadata_id() */
enum_table_ref_type m_table_ref_type{TABLE_REF_NULL};
/** See comments for TABLE_SHARE::get_table_ref_version() */
ulonglong m_table_ref_version{0};
/*
All members whose names are suffixed with "_saved" are duplicated in
class TABLE but actually belong in this class. They are saved from class
TABLE when preparing a statement and restored when executing the statement.
They are not required for a regular (non-prepared) statement.
*/
Key_map covering_keys_saved;
Key_map merge_keys_saved;
Key_map keys_in_use_for_query_saved;
Key_map keys_in_use_for_group_by_saved;
Key_map keys_in_use_for_order_by_saved;
bool nullable_saved{false};
bool force_index_saved{false};
bool force_index_order_saved{false};
bool force_index_group_saved{false};
MY_BITMAP lock_partitions_saved;
MY_BITMAP read_set_saved;
MY_BITMAP write_set_saved;
MY_BITMAP read_set_internal_saved;
};
/*
  Abstract iterator over the fields (columns) of a generic table reference.
  Concrete subclasses implement iteration for base tables, merged views and
  NATURAL/USING joins.
*/
class Field_iterator {
 public:
  virtual ~Field_iterator() = default;
  /// Position the iterator on the first field of the given table reference.
  virtual void set(Table_ref *) = 0;
  /// Advance to the next field.
  virtual void next() = 0;
  virtual bool end_of_fields() = 0; /* Return 1 at end of list */
  /// Name of the current field.
  virtual const char *name() = 0;
  /// Create an Item referencing the current field.
  virtual Item_ident *create_item(THD *) = 0;
  /// Underlying Field object (may be nullptr for non-base-table columns).
  virtual Field *field() = 0;
};
/*
  Iterator over the fields of a base table, view with temporary
  table, or subquery.
*/
class Field_iterator_table : public Field_iterator {
  Field **ptr;  // cursor into the nullptr-terminated TABLE::field array

 public:
  Field_iterator_table() : ptr(nullptr) {}
  void set(Table_ref *table) override { ptr = table->table->field; }
  /// Start iterating directly over a TABLE (bypassing the Table_ref).
  void set_table(TABLE *table) { ptr = table->field; }
  void next() override { ptr++; }
  // The field array is terminated by a nullptr entry.
  bool end_of_fields() override { return *ptr == nullptr; }
  const char *name() override;
  Item_ident *create_item(THD *thd) override;
  Field *field() override { return *ptr; }
};
/**
  Iterator over the fields of a merged derived table or view.
*/
class Field_iterator_view : public Field_iterator {
  // Cursor and end sentinel into the view's field translation array.
  Field_translator *ptr, *array_end;
  Table_ref *view;

 public:
  Field_iterator_view() : ptr(nullptr), array_end(nullptr) {}
  void set(Table_ref *table) override;
  void next() override { ptr++; }
  bool end_of_fields() override { return ptr == array_end; }
  const char *name() override;
  Item_ident *create_item(THD *thd) override;
  /// Address of the current translation item (for in-place substitution).
  Item **item_ptr() { return &ptr->item; }
  // View columns are expressions, not base fields.
  Field *field() override { return nullptr; }
  inline Item *item() { return ptr->item; }
  Field_translator *field_translator() { return ptr; }
};
/*
  Field_iterator interface to the list of materialized fields of a
  NATURAL/USING join.
*/
class Field_iterator_natural_join : public Field_iterator {
  List_iterator_fast<Natural_join_column> column_ref_it;
  Natural_join_column *cur_column_ref;  // nullptr once the list is exhausted

 public:
  Field_iterator_natural_join() : cur_column_ref(nullptr) {}
  ~Field_iterator_natural_join() override = default;
  void set(Table_ref *table) override;
  void next() override;
  bool end_of_fields() override { return !cur_column_ref; }
  const char *name() override { return cur_column_ref->name(); }
  Item_ident *create_item(THD *thd) override {
    return cur_column_ref->create_item(thd);
  }
  Field *field() override { return cur_column_ref->field(); }
  /// The join column currently under the cursor.
  Natural_join_column *column_ref() { return cur_column_ref; }
};
/**
  Generic iterator over the fields of an arbitrary table reference.

  This class unifies the various ways of iterating over the columns
  of a table reference depending on the type of SQL entity it
  represents. If such an entity represents a nested table reference,
  this iterator encapsulates the iteration over the columns of the
  members of the table reference.

  The implementation assumes that all underlying NATURAL/USING table
  references already contain their result columns and are linked into
  the list Table_ref::next_name_resolution_table.
*/
class Field_iterator_table_ref : public Field_iterator {
  // Current table reference plus the leaf range being traversed.
  Table_ref *table_ref, *first_leaf, *last_leaf;
  // One concrete iterator per kind; field_it points at the active one.
  Field_iterator_table table_field_it;
  Field_iterator_view view_field_it;
  Field_iterator_natural_join natural_join_it;
  Field_iterator *field_it;
  /// Pick the concrete iterator matching the current table_ref's kind.
  void set_field_iterator();

 public:
  Field_iterator_table_ref() : field_it(nullptr) {}
  void set(Table_ref *table) override;
  void next() override;
  // Done when positioned on the last leaf and its own fields are exhausted.
  bool end_of_fields() override {
    return (table_ref == last_leaf && field_it->end_of_fields());
  }
  const char *name() override { return field_it->name(); }
  const char *get_table_name();
  const char *get_db_name();
  GRANT_INFO *grant();
  Item_ident *create_item(THD *thd) override {
    return field_it->create_item(thd);
  }
  Field *field() override { return field_it->field(); }
  Natural_join_column *get_or_create_column_ref(THD *thd,
                                                Table_ref *parent_table_ref);
  Natural_join_column *get_natural_column_ref();
};
/// One entry in the result of SHOW OPEN TABLES: table identity plus usage
/// counters. Plain singly-linked list node.
struct OPEN_TABLE_LIST {
  OPEN_TABLE_LIST *next;  ///< Next entry in the list, or nullptr.
  char *db, *table;       ///< Schema and table name.
  uint32 in_use, locked;  ///< Reference/lock counters for the table.
};
/**
  Temporarily make all columns of the table readable/writable by pointing
  the given column bitmap at the share's all-set bitmap.

  @param table   Table whose share provides the all-set bitmap.
  @param bitmap  Column map to override (e.g. read_set or write_set).
  @returns the previous bitmap words, to be passed to
           tmp_restore_column_map() later.
*/
static inline my_bitmap_map *tmp_use_all_columns(TABLE *table,
                                                 MY_BITMAP *bitmap) {
  my_bitmap_map *old = bitmap->bitmap;
  bitmap->bitmap = table->s->all_set.bitmap;  // does not repoint last_word_ptr
  return old;
}
/// Undo tmp_use_all_columns(): restore the bitmap words saved by it.
static inline void tmp_restore_column_map(MY_BITMAP *bitmap,
                                          my_bitmap_map *old) {
  bitmap->bitmap = old;
}
/* The following is only needed for debugging */

/// Debug-build-only variant of tmp_use_all_columns(); a no-op returning
/// nullptr when NDEBUG is defined.
static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table
                                                      [[maybe_unused]],
                                                      MY_BITMAP *bitmap
                                                      [[maybe_unused]]) {
#ifndef NDEBUG
  return tmp_use_all_columns(table, bitmap);
#else
  return nullptr;
#endif
}
/// Debug-build-only counterpart of dbug_tmp_use_all_columns(); no-op when
/// NDEBUG is defined.
static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap
                                               [[maybe_unused]],
                                               my_bitmap_map *old
                                               [[maybe_unused]]) {
#ifndef NDEBUG
  tmp_restore_column_map(bitmap, old);
#endif
}
/*
  Variant of the above : handle both read and write sets.
  Provide for the possibility of the read set being the same as the write set.
  The two saved pointers are written to save[0] (read) and save[1] (write);
  pass the same array to dbug_tmp_restore_column_maps().
*/
static inline void dbug_tmp_use_all_columns(
    TABLE *table [[maybe_unused]], my_bitmap_map **save [[maybe_unused]],
    MY_BITMAP *read_set [[maybe_unused]],
    MY_BITMAP *write_set [[maybe_unused]]) {
#ifndef NDEBUG
  save[0] = read_set->bitmap;
  save[1] = write_set->bitmap;
  (void)tmp_use_all_columns(table, read_set);
  (void)tmp_use_all_columns(table, write_set);
#endif
}
/// Restore both read and write column maps saved by the two-set
/// dbug_tmp_use_all_columns() overload; no-op when NDEBUG is defined.
static inline void dbug_tmp_restore_column_maps(
    MY_BITMAP *read_set [[maybe_unused]], MY_BITMAP *write_set [[maybe_unused]],
    my_bitmap_map **old [[maybe_unused]]) {
#ifndef NDEBUG
  tmp_restore_column_map(read_set, old[0]);
  tmp_restore_column_map(write_set, old[1]);
#endif
}
/// Initialize the MDL requests of all Table_ref elements in the given list.
void init_mdl_requests(Table_ref *table_list);

/**
  Unpacks the definition of a value generator in all its forms: generated
  column, default expression or checked constraint.

  The function parses the text definition of this expression, resolves its
  items and runs validation and calculates the base_columns_map which is used
  for tracking the columns the expression depends on.

  @param[in] thd               Thread handler
  @param[in] table             Table having the value generator to be unpacked
  @param[in,out] val_generator Contains the expression in string format, and,
                               if successful will be replaced by the parser
                               with a new one having the unpacked expression.
  @param[in] source            Source of value generator(a generated column,
                               a regular column with generated default value or
                               a check constraint).
  @param[in] source_name       Name of the source (generated column, a regular
                               column with generated default value or a check
                               constraint).
  @param[in] field             The column the value generator depends on. Can
                               be null for checked constraints which do not
                               depend on a single column.
  @param[in] is_create_table   Indicates that table is opened as part
                               of CREATE or ALTER and does not yet exist in SE
  @param[out] error_reported   updated flag for the caller that no other error
                               messages are to be generated.

  @retval true Failure.
  @retval false Success.
*/
bool unpack_value_generator(THD *thd, TABLE *table,
                            Value_generator **val_generator,
                            Value_generator_source source,
                            const char *source_name, Field *field,
                            bool is_create_table, bool *error_reported);
/**
  Unpack the partition expression. Parse the partition expression
  to produce an Item.

  @param[in] thd             Thread handler
  @param[in] outparam        Table object
  @param[in] share           TABLE_SHARE object
  @param[in] engine_type     Engine type of the partitions.
  @param[in] is_create_table Indicates that table is opened as part of
                             CREATE or ALTER and does not yet exist in SE

  @retval true Failure.
  @retval false Success.
*/
bool unpack_partition_info(THD *thd, TABLE *outparam, TABLE_SHARE *share,
                           handlerton *engine_type, bool is_create_table);

/// Open a TABLE object for the given share, filling in *outparam.
/// Returns 0 on success, non-zero error code on failure.
int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
                          uint db_stat, uint prgflag, uint ha_open_flags,
                          TABLE *outparam, bool is_create_table,
                          const dd::Table *table_def_param);
/// Allocate and minimally initialize a TABLE_SHARE for (db, table_name).
TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
                               const char *key, size_t key_length,
                               bool open_secondary);
/// Initialize an existing TABLE_SHARE for use as an internal tmp table share.
void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key,
                          size_t key_length, const char *table_name,
                          const char *path, MEM_ROOT *mem_root);
/// Release a TABLE_SHARE previously allocated/initialized above.
void free_table_share(TABLE_SHARE *share);
void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form);

// Identifier validation helpers.
Ident_name_check check_db_name(const char *name, size_t length);
Ident_name_check check_and_convert_db_name(LEX_STRING *db,
                                           bool preserve_lettercase);
bool check_column_name(const Name_string &namestring);
Ident_name_check check_table_name(const char *name, size_t length);

int rename_file_ext(const char *from, const char *to, const char *ext);
// Two overloads: return the field value as a C string, or copy it into *res.
char *get_field(MEM_ROOT *mem, Field *field);
bool get_field(MEM_ROOT *mem, Field *field, class String *res);
int closefrm(TABLE *table, bool free_share);
void free_blobs(TABLE *table);
void free_blob_buffers_and_reset(TABLE *table, uint32 size);
/// Clamp nr into [min_zone, max_zone].
int set_zone(int nr, int min_zone, int max_zone);
void append_unescaped(String *res, const char *pos, size_t length);
char *fn_rext(char *name);
TABLE_CATEGORY get_table_category(const LEX_CSTRING &db,
                                  const LEX_CSTRING &name);

/* performance schema */
extern LEX_CSTRING PERFORMANCE_SCHEMA_DB_NAME;

extern LEX_CSTRING GENERAL_LOG_NAME;
extern LEX_CSTRING SLOW_LOG_NAME;

/* information schema */
extern LEX_CSTRING INFORMATION_SCHEMA_NAME;

/* mysql schema name and DD ID */
extern LEX_CSTRING MYSQL_SCHEMA_NAME;
static const uint MYSQL_SCHEMA_DD_ID = 1;

/* mysql tablespace name and DD ID */
extern LEX_CSTRING MYSQL_TABLESPACE_NAME;
static const uint MYSQL_TABLESPACE_DD_ID = 1;

/* replication's tables */
extern LEX_CSTRING RLI_INFO_NAME;
extern LEX_CSTRING MI_INFO_NAME;
extern LEX_CSTRING WORKER_INFO_NAME;
/// True iff the length-delimited name is INFORMATION_SCHEMA
/// (case-insensitive, using the system character set).
inline bool is_infoschema_db(const char *name, size_t len) {
  if (INFORMATION_SCHEMA_NAME.length != len) return false;
  return my_strcasecmp(system_charset_info, INFORMATION_SCHEMA_NAME.str,
                       name) == 0;
}
/// True iff the NUL-terminated name is INFORMATION_SCHEMA (case-insensitive).
inline bool is_infoschema_db(const char *name) {
  return my_strcasecmp(system_charset_info, INFORMATION_SCHEMA_NAME.str,
                       name) == 0;
}
/// True iff the length-delimited name is performance_schema
/// (case-insensitive, using the system character set).
inline bool is_perfschema_db(const char *name, size_t len) {
  if (PERFORMANCE_SCHEMA_DB_NAME.length != len) return false;
  return my_strcasecmp(system_charset_info, PERFORMANCE_SCHEMA_DB_NAME.str,
                       name) == 0;
}
/// True iff the NUL-terminated name is performance_schema (case-insensitive).
inline bool is_perfschema_db(const char *name) {
  return my_strcasecmp(system_charset_info, PERFORMANCE_SCHEMA_DB_NAME.str,
                       name) == 0;
}
/**
  Check if the table belongs to the P_S, excluding setup and threads tables.

  @note Performance Schema tables must be accessible independently of the
        LOCK TABLE mode. This function is needed to handle the special case
        of P_S tables being used under LOCK TABLE mode.
*/
inline bool belongs_to_p_s(Table_ref *tl) {
  // NOTE(review): strstr() matches "setup_" anywhere in the table name, not
  // only as a prefix — presumably all P_S setup tables begin with "setup_";
  // confirm that a substring match is intended here.
  return (!strcmp("performance_schema", tl->db) &&
          strcmp(tl->table_name, "threads") &&
          strstr(tl->table_name, "setup_") == nullptr);
}
/**
  Check whether the table was created explicitly by a user, i.e. it is not
  an internal temporary table (whose name starts with tmp_file_prefix).

  @param table  Table to check; reads table->s->table_name.
  @retval true  The name does not start with the internal tmp-file prefix.
  @retval false The table is an internal temporary table.
*/
inline bool is_user_table(TABLE *table) {
  const char *name = table->s->table_name.str;
  // Explicit comparison instead of relying on strncmp()'s implicit int->bool
  // conversion: non-zero means the prefix does NOT match, i.e. a user table.
  return strncmp(name, tmp_file_prefix, tmp_file_prefix_length) != 0;
}
bool is_simple_order(ORDER *order);

// Key construction helpers used when opening/creating tables.
uint add_pk_parts_to_sk(KEY *sk, uint sk_n, KEY *pk, uint pk_n,
                        TABLE_SHARE *share, handler *handler_file,
                        uint *usable_parts, bool use_extended_sk);
void setup_key_part_field(TABLE_SHARE *share, handler *handler_file,
                          uint primary_key_n, KEY *keyinfo, uint key_n,
                          uint key_part_n, uint *usable_parts,
                          bool part_of_key_not_extended);

const uchar *get_field_name(const uchar *arg, size_t *length);

void repoint_field_to_record(TABLE *table, uchar *old_rec, uchar *new_rec);
// Recompute generated columns for write/read paths.
bool update_generated_write_fields(const MY_BITMAP *bitmap, TABLE *table);
bool update_generated_read_fields(uchar *buf, TABLE *table,
                                  uint active_index = MAX_KEY);
/**
Check if a Table_ref instance represents a pre-opened temporary table.
*/
inline bool is_temporary_table(const Table_ref *tl) {
if (tl->is_view() || tl->schema_table) return false;
if (!tl->table) return false;
/*
NOTE: 'table->s' might be NULL for specially constructed TABLE
instances. See SHOW TRIGGERS for example.
*/
if (!tl->table->s) return false;
return tl->table->s->tmp_table != NO_TMP_TABLE;
}
/**
  After parsing, a Common Table Expression is accessed through a
  Table_ref. This class contains all information about the CTE which the
  Table_ref needs.

  @note that before and during parsing, the CTE is described by a
  PT_common_table_expr.
*/
class Common_table_expr {
 public:
  Common_table_expr(MEM_ROOT *mem_root)
      : references(mem_root), recursive(false), tmp_tables(mem_root) {}
  /// Clone the materialization tmp table for an additional reference.
  TABLE *clone_tmp_table(THD *thd, Table_ref *tl);
  bool substitute_recursive_reference(THD *thd, Query_block *sl);
  /// Remove one table reference.
  void remove_table(Table_ref *tr);
  /// Empties the materialized CTE and informs all of its clones.
  bool clear_all_references();
  /**
    All references to this CTE in the statement, except those inside the
    query expression defining this CTE.
    In other words, all non-recursive references.
  */
  Mem_root_array<Table_ref *> references;
  /// True if it's a recursive CTE.
  bool recursive;
  /**
    List of all TABLE_LISTSs reading/writing to the tmp table created to
    materialize this CTE. Due to shared materialization, only the first one
    has a TABLE generated by create_tmp_table(); other ones have a TABLE
    generated by open_table_from_share().
  */
  Mem_root_array<Table_ref *> tmp_tables;
  /// Name of the WITH block. Used only for EXPLAIN FORMAT=tree.
  LEX_STRING name;
};
/**
  This iterates on those references to a derived table / view / CTE which are
  materialized. If a recursive CTE, this includes recursive references.
  Upon construction it is passed a non-recursive materialized reference
  to the derived table (Table_ref*).
  For a CTE it may return more than one reference; for a derived table or a
  view, there is only one (as references to a same view are treated as
  independent objects).
  References are returned as TABLE*.
*/
class Derived_refs_iterator {
  const Table_ref *start;  ///< The reference provided in construction.
  size_t ref_idx{0};       ///< Current index in cte->tmp_tables
  bool m_is_first{true};   ///< True when at first reference in list

 public:
  explicit Derived_refs_iterator(const Table_ref *start_arg)
      : start(start_arg) {}
  /// @returns the next materialized reference, or nullptr when exhausted.
  TABLE *get_next() {
    const Common_table_expr *cte = start->common_table_expr();
    m_is_first = ref_idx == 0;
    // Derived tables and views have a single reference.
    if (cte == nullptr) {
      return ref_idx++ == 0 ? start->table : nullptr;
    }
    /*
      CTEs may have multiple references. Return the next one, but notice that
      some references may have been deleted.
    */
    while (ref_idx < cte->tmp_tables.size()) {
      TABLE *table = cte->tmp_tables[ref_idx++]->table;
      if (table != nullptr) return table;
    }
    return nullptr;
  }
  /// Restart iteration from the first reference.
  void rewind() {
    ref_idx = 0;
    m_is_first = true;
  }
  /// @returns true if the last get_next() returned the first element.
  bool is_first() const {
    // Call after get_next() has been called:
    assert(ref_idx > 0);
    return m_is_first;
  }
};
/**
  RAII class to reset TABLE::autoinc_field_has_explicit_non_null_value after
  processing individual row in INSERT or LOAD DATA statements.
*/
class Autoinc_field_has_explicit_non_null_value_reset_guard {
 public:
  Autoinc_field_has_explicit_non_null_value_reset_guard(TABLE *table)
      : m_table(table) {}
  // Clear the flag unconditionally on scope exit (normal path or exception).
  ~Autoinc_field_has_explicit_non_null_value_reset_guard() {
    m_table->autoinc_field_has_explicit_non_null_value = false;
  }

 private:
  TABLE *m_table;  ///< The table whose flag is reset; not owned.
};
// Whether we can ask the storage engine for the row ID of the last row read.
//
// Some operations needs a row ID to operate correctly (i.e. weedout). Normally,
// the row ID is provided by the storage engine by calling handler::position().
// But there are cases when position() should not be called:
//
// 1. If we have a const table (rows are fetched during optimization), we
// should not call position().
// 2. If we have a NULL-complemented row, calling position() would give a
// random row ID back, as there has not been any row read.
//
// Operations that needs the row ID must also check the value of
// QEP_TAB::rowid_status to see whether they actually need a row ID.
// See QEP_TAB::rowid_status for more details.
/// True when handler::position() may safely be called for this table;
/// see the conditions documented in the comment block above.
inline bool can_call_position(const TABLE *table) {
  // Rows of a const table were fetched during optimization.
  if (table->const_table) return false;
  // A NULL-complemented row has no underlying storage-engine row.
  return !(table->is_nullable() && table->null_row);
}
//////////////////////////////////////////////////////////////////////////
/*
NOTE:
These structures are added to read .frm file in upgrade scenario.
They should not be used any where else in the code.
They will be removed in future release.
Any new code should not be added in this section.
*/
/**
  These members were removed from TABLE_SHARE as they are not used in
  in the code. open_binary_frm() uses these members while reading
  .frm files.
*/
class FRM_context {
 public:
  FRM_context()
      : default_part_db_type(nullptr),
        null_field_first(false),
        stored_fields(0),
        view_def(nullptr),
        frm_version(0),
        fieldnames() {}
  handlerton *default_part_db_type;  ///< Default engine of the partitions.
  bool null_field_first;
  uint stored_fields; /* Number of stored fields
                         (i.e. without generated-only ones) */

  // Legacy .frm field "unireg" type codes, preserved only for upgrade.
  enum utype {
    NONE,
    DATE,
    SHIELD,
    NOEMPTY,
    CASEUP,
    PNR,
    BGNR,
    PGNR,
    YES,
    NO,
    REL,
    CHECK,
    EMPTY_VAL,  // EMPTY_VAL rather than EMPTY since EMPTY can conflict with
                // system headers.
    UNKNOWN_FIELD,
    CASEDN,
    NEXT_NUMBER,
    INTERVAL_FIELD,
    BIT_FIELD,
    TIMESTAMP_OLD_FIELD,
    CAPITALIZE,
    BLOB_FIELD,
    TIMESTAMP_DN_FIELD,
    TIMESTAMP_UN_FIELD,
    TIMESTAMP_DNUN_FIELD,
    GENERATED_FIELD = 128
  };

  /**
    For shares representing views File_parser object with view
    definition read from .FRM file.
  */
  const File_parser *view_def;
  uchar frm_version;   ///< Version byte read from the .frm header.
  TYPELIB fieldnames;  /* Pointer to fieldnames */
};
/**
  Create TABLE_SHARE from .frm file.

  FRM_context object is used to store the value removed from
  TABLE_SHARE. These values are used only for .frm file parsing.

  @param[in]  thd                       Thread handle.
  @param[in]  path                      Path of the frm file.
  @param[out] share                     TABLE_SHARE to be populated.
  @param[out] frm_context               FRM_context object.
  @param[in]  db                        Database name.
  @param[in]  table                     Table name.
  @param[in]  is_fix_view_cols_and_deps Fix view column data, table
                                        and routine dependency.

  @retval 0            ON SUCCESS
  @retval -1           ON FAILURE
  @retval -2           ON LESS SEVERE FAILURE (see read_frm_file)
*/
int create_table_share_for_upgrade(THD *thd, const char *path,
                                   TABLE_SHARE *share, FRM_context *frm_context,
                                   const char *db, const char *table,
                                   bool is_fix_view_cols_and_deps);
//////////////////////////////////////////////////////////////////////////
/**
  Create a copy of the key_info from TABLE_SHARE object to TABLE object.

  Wherever a prefix key is present, allocate a new Field object, having its
  field_length set to the prefix key length, and point the table's matching
  key_part->field to this new Field object.

  This ensures that unpack_partition_info() reads the correct prefix length
  of partitioned fields.

  @param table  Table for which key_info is to be allocated
  @param root   MEM_ROOT in which to allocate key_info

  @retval false Success
  @retval true  Failed to allocate memory for table.key_info in root
*/
bool create_key_part_field_with_prefix_length(TABLE *table, MEM_ROOT *root);
#endif /* TABLE_INCLUDED */ | c | github | https://github.com/mysql/mysql-server | sql/table.h |
// Build-time CPU-feature probe: compiles only when POPCNT intrinsics are
// available for the target, mapping them to CV_POPCNT_U32/U64 macros.
#ifdef _MSC_VER
// MSVC exposes the POPCNT intrinsics via <nmmintrin.h>.
#  include <nmmintrin.h>
#  if defined(_M_X64)
// The 64-bit form exists only on x64 targets.
#    define CV_POPCNT_U64 _mm_popcnt_u64
#  endif
#  define CV_POPCNT_U32 _mm_popcnt_u32
#elif defined(__POPCNT__)
// GCC/Clang: __POPCNT__ is predefined when -mpopcnt (or equivalent) is set.
#  include <popcntintrin.h>
#  if defined(__x86_64__)
#    define CV_POPCNT_U64 __builtin_popcountll
#  endif
#  define CV_POPCNT_U32 __builtin_popcount
#else
// Fail the probe: the configuring build system treats this as "unsupported".
#  error "__POPCNT__ is not defined by compiler"
#endif

int main()
{
#ifdef CV_POPCNT_U64
    int i = CV_POPCNT_U64(1);  // only referenced to force intrinsic codegen
#endif
    int j = CV_POPCNT_U32(1);
    return 0;
} | cpp | github | https://github.com/opencv/opencv | cmake/checks/cpu_popcnt.cpp
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_IFRT_IFRT_TYPES_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_IFRT_IFRT_TYPES_H_
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace ifrt_serving {
// Value type pairing a TensorFlow element dtype with a tensor shape.
struct DtypeAndShape {
  tensorflow::DataType dtype;     // element type
  tensorflow::TensorShape shape;  // tensor shape
  // Equal iff both the dtype and the shape compare equal.
  bool operator==(const DtypeAndShape& other) const {
    return dtype == other.dtype && shape == other.shape;
  }
};
} // namespace ifrt_serving
} // namespace tensorflow
#endif // TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_IFRT_IFRT_TYPES_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: drop the obsolete ``template`` column."""
    # Deleting field 'Template.template'
    db.delete_column('popcorn_template', 'template')
def backwards(self, orm):
# Adding field 'Template.template'
db.add_column('popcorn_template', 'template',
self.gf('django.db.models.fields.files.FileField')(default='', max_length=100),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'popcorn.project': {
'Meta': {'ordering': "('is_featured', '-modified')", 'object_name': 'Project'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['popcorn.ProjectCategory']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_forkable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metadata': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['popcorn.Project']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['popcorn.Template']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'}),
'views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'popcorn.projectcategory': {
'Meta': {'object_name': 'ProjectCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
'popcorn.projectcategorymembership': {
'Meta': {'unique_together': "(('user', 'project_category'),)", 'object_name': 'ProjectCategoryMembership'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'project_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['popcorn.ProjectCategory']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"})
},
'popcorn.template': {
'Meta': {'ordering': "('-is_featured', 'name')", 'object_name': 'Template'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['popcorn.TemplateCategory']", 'symmetrical': 'False', 'blank': 'True'}),
'config': ('django.db.models.fields.TextField', [], {'default': '{}', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metadata': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'template_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'popcorn.templatecategory': {
'Meta': {'object_name': 'TemplateCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['popcorn.ProjectCategory']", 'through': "orm['popcorn.ProjectCategoryMembership']", 'symmetrical': 'False'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['popcorn'] | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata: community-supported module in preview status.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_alertconfig
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of AlertConfig Avi RESTful Object
description:
- This module is used to configure AlertConfig object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
action_group_ref:
description:
- The alert config will trigger the selected alert action, which can send notifications and execute a controlscript.
- It is a reference to an object of type actiongroupconfig.
alert_rule:
description:
- List of filters matching on events or client logs used for triggering alerts.
required: true
autoscale_alert:
description:
- This alert config applies to auto scale alerts.
category:
description:
- Determines whether an alert is raised immediately when event occurs (realtime) or after specified number of events occurs within rolling time
- window.
- Enum options - REALTIME, ROLLINGWINDOW, WATERMARK.
- Default value when not specified in API or module is interpreted by Avi Controller as REALTIME.
required: true
description:
description:
- A custom description field.
enabled:
description:
- Enable or disable this alert config from generating new alerts.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
expiry_time:
description:
- An alert is expired and deleted after the expiry time has elapsed.
- The original event triggering the alert remains in the event's log.
- Allowed values are 1-31536000.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
name:
description:
- Name of the alert configuration.
required: true
obj_uuid:
description:
- Uuid of the resource for which alert was raised.
object_type:
description:
- The object type to which the alert config is associated with.
- Valid object types are - virtual service, pool, service engine.
- Enum options - VIRTUALSERVICE, POOL, HEALTHMONITOR, NETWORKPROFILE, APPLICATIONPROFILE, HTTPPOLICYSET, DNSPOLICY, IPADDRGROUP, STRINGGROUP,
- SSLPROFILE, SSLKEYANDCERTIFICATE, NETWORKSECURITYPOLICY, APPLICATIONPERSISTENCEPROFILE, ANALYTICSPROFILE, VSDATASCRIPTSET, TENANT, PKIPROFILE,
- AUTHPROFILE, CLOUD, SERVERAUTOSCALEPOLICY, AUTOSCALELAUNCHCONFIG, MICROSERVICEGROUP, IPAMPROFILE, HARDWARESECURITYMODULEGROUP, POOLGROUP,
- PRIORITYLABELS, POOLGROUPDEPLOYMENTPOLICY, GSLBSERVICE, GSLBSERVICERUNTIME, SCHEDULER, GSLBGEODBPROFILE, GSLBAPPLICATIONPERSISTENCEPROFILE,
- TRAFFICCLONEPROFILE, VSVIP, SERVICEENGINE, DEBUGSERVICEENGINE, DEBUGCONTROLLER, DEBUGVIRTUALSERVICE, SERVICEENGINEGROUP, SEPROPERTIES, NETWORK,
- CONTROLLERNODE, CONTROLLERPROPERTIES, SYSTEMCONFIGURATION, VRFCONTEXT, USER, ALERTCONFIG, ALERTSYSLOGCONFIG, ALERTEMAILCONFIG, ALERTTYPECONFIG,
- APPLICATION, ROLE, CLOUDPROPERTIES, SNMPTRAPPROFILE, ACTIONGROUPPROFILE, MICROSERVICE, ALERTPARAMS, ACTIONGROUPCONFIG, CLOUDCONNECTORUSER, GSLB,
- GSLBDNSUPDATE, GSLBSITEOPS, GLBMGRWARMSTART, IPAMDNSRECORD, GSLBDNSGSSTATUS, GSLBDNSGEOFILEOPS, GSLBDNSGEOUPDATE, GSLBDNSGEOCLUSTEROPS,
- GSLBDNSCLEANUP, TCPSTATRUNTIME, UDPSTATRUNTIME, IPSTATRUNTIME, ARPSTATRUNTIME, MBSTATRUNTIME, IPSTKQSTATSRUNTIME, MALLOCSTATRUNTIME,
- SHMALLOCSTATRUNTIME, CPUUSAGERUNTIME, L7GLOBALSTATSRUNTIME, L7VIRTUALSERVICESTATSRUNTIME, SEAGENTVNICDBRUNTIME, SEAGENTGRAPHDBRUNTIME,
- SEAGENTSTATERUNTIME, INTERFACERUNTIME, ARPTABLERUNTIME, DISPATCHERSTATRUNTIME, DISPATCHERSTATCLEARRUNTIME, DISPATCHERTABLEDUMPRUNTIME,
- DISPATCHERREMOTETIMERLISTDUMPRUNTIME, METRICSAGENTMESSAGE, HEALTHMONITORSTATRUNTIME, METRICSENTITYRUNTIME, PERSISTENCEINTERNAL,
- HTTPPOLICYSETINTERNAL, DNSPOLICYINTERNAL, CONNECTIONDUMPRUNTIME, SHAREDDBSTATS, SHAREDDBSTATSCLEAR, ICMPSTATRUNTIME, ROUTETABLERUNTIME,
- VIRTUALMACHINE, POOLSERVER, SEVSLIST, MEMINFORUNTIME, RTERINGSTATRUNTIME, ALGOSTATRUNTIME, HEALTHMONITORRUNTIME, CPUSTATRUNTIME, SEVM, HOST,
- PORTGROUP, CLUSTER, DATACENTER, VCENTER, HTTPPOLICYSETSTATS, DNSPOLICYSTATS, METRICSSESTATS, RATELIMITERSTATRUNTIME, NETWORKSECURITYPOLICYSTATS,
- TCPCONNRUNTIME, POOLSTATS, CONNPOOLINTERNAL, CONNPOOLSTATS, VSHASHSHOWRUNTIME, SELOGSTATSRUNTIME, NETWORKSECURITYPOLICYDETAIL, LICENSERUNTIME,
- SERVERRUNTIME, METRICSRUNTIMESUMMARY, METRICSRUNTIMEDETAIL, DISPATCHERSEHMPROBETEMPDISABLERUNTIME, POOLDEBUG, VSLOGMGRMAP, SERUMINSERTIONSTATS,
- HTTPCACHE, HTTPCACHESTATS, SEDOSSTATRUNTIME, VSDOSSTATRUNTIME, SERVERUPDATEREQ, VSSCALEOUTLIST, SEMEMDISTRUNTIME, TCPCONNRUNTIMEDETAIL,
- SEUPGRADESTATUS, SEUPGRADEPREVIEW, SEFAULTINJECTEXHAUSTM, SEFAULTINJECTEXHAUSTMCL, SEFAULTINJECTEXHAUSTMCLSMALL, SEFAULTINJECTEXHAUSTCONN,
- SEHEADLESSONLINEREQ, SEUPGRADE, SEUPGRADESTATUSDETAIL, SERESERVEDVS, SERESERVEDVSCLEAR, VSCANDIDATESEHOSTLIST, SEGROUPUPGRADE, REBALANCE,
- SEGROUPREBALANCE, SEAUTHSTATSRUNTIME, AUTOSCALESTATE, VIRTUALSERVICEAUTHSTATS, NETWORKSECURITYPOLICYDOS, KEYVALINTERNAL, KEYVALSUMMARYINTERNAL,
- SERVERSTATEUPDATEINFO, CLTRACKINTERNAL, CLTRACKSUMMARYINTERNAL, MICROSERVICERUNTIME, SEMICROSERVICE, VIRTUALSERVICEANALYSIS, CLIENTINTERNAL,
- CLIENTSUMMARYINTERNAL, MICROSERVICEGROUPRUNTIME, BGPRUNTIME, REQUESTQUEUERUNTIME, MIGRATEALL, MIGRATEALLSTATUSSUMMARY, MIGRATEALLSTATUSDETAIL,
- INTERFACESUMMARYRUNTIME, INTERFACELACPRUNTIME, DNSTABLE, GSLBSERVICEDETAIL, GSLBSERVICEINTERNAL, GSLBSERVICEHMONSTAT, SETROLESREQUEST,
- TRAFFICCLONERUNTIME, GEOLOCATIONINFO, SEVSHBSTATRUNTIME, GEODBINTERNAL, GSLBSITEINTERNAL, SERESOURCEPROTO, SECONSUMERPROTO, SECREATEPENDINGPROTO,
- PLACEMENTSTATS, SEVIPPROTO, RMVRFPROTO, VCENTERMAP, VIMGRVCENTERRUNTIME, INTERESTEDVMS, INTERESTEDHOSTS, VCENTERSUPPORTEDCOUNTERS, ENTITYCOUNTERS,
- TRANSACTIONSTATS, SEVMCREATEPROGRESS, PLACEMENTSTATUS, VISUBFOLDERS, VIDATASTORE, VIHOSTRESOURCES, CLOUDCONNECTOR, VINETWORKSUBNETVMS,
- VIDATASTORECONTENTS, VIMGRVCENTERCLOUDRUNTIME, VIVCENTERPORTGROUPS, VIVCENTERDATACENTERS, VIMGRHOSTRUNTIME, PLACEMENTGLOBALS, APICCONFIGURATION,
- CIFTABLE, APICTRANSACTION, VIRTUALSERVICESTATEDBCACHESUMMARY, POOLSTATEDBCACHESUMMARY, SERVERSTATEDBCACHESUMMARY, APICAGENTINTERNAL,
- APICTRANSACTIONFLAP, APICGRAPHINSTANCES, APICEPGS, APICEPGEPS, APICDEVICEPKGVER, APICTENANTS, APICVMMDOMAINS, NSXCONFIGURATION, NSXSGTABLE,
- NSXAGENTINTERNAL, NSXSGINFO, NSXSGIPS, NSXAGENTINTERNALCLI, MAXOBJECTS.
recommendation:
description:
- Recommendation of alertconfig.
rolling_window:
description:
- Only if the number of events is reached or exceeded within the time window will an alert be generated.
- Allowed values are 1-31536000.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
source:
description:
- Signifies system events or the type of client logsused in this alert configuration.
- Enum options - CONN_LOGS, APP_LOGS, EVENT_LOGS, METRICS.
required: true
summary:
description:
- Summary of reason why alert is generated.
tenant_ref:
description:
- It is a reference to an object of type tenant.
threshold:
description:
- An alert is created only when the number of events meets or exceeds this number within the chosen time frame.
- Allowed values are 1-65536.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
throttle:
description:
- Alerts are suppressed (throttled) for this duration of time since the last alert was raised for this alert config.
- Allowed values are 0-31536000.
- Default value when not specified in API or module is interpreted by Avi Controller as 600.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create AlertConfig object
avi_alertconfig:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_alertconfig
"""
RETURN = '''
obj:
description: AlertConfig (api/alertconfig) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: build the AlertConfig argument spec and hand control
    to the shared Avi Ansible API helper."""
    argument_specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'action_group_ref': dict(type='str'),
        'alert_rule': dict(type='dict', required=True),
        'autoscale_alert': dict(type='bool'),
        'category': dict(type='str', required=True),
        'description': dict(type='str'),
        'enabled': dict(type='bool'),
        'expiry_time': dict(type='int'),
        'name': dict(type='str', required=True),
        'obj_uuid': dict(type='str'),
        'object_type': dict(type='str'),
        'recommendation': dict(type='str'),
        'rolling_window': dict(type='int'),
        'source': dict(type='str', required=True),
        'summary': dict(type='str'),
        'tenant_ref': dict(type='str'),
        'threshold': dict(type='int'),
        'throttle': dict(type='int'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    # Fold in the connection options (controller, username, password, ...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=argument_specs,
                           supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'alertconfig',
                           set([]))


if __name__ == '__main__':
    main()
// Expected compiler output (presumably a transform-test fixture — confirm
// before editing): destructuring `ho` with a default value is lowered to an
// explicit `void 0` check on a temporary before falling back to `[]`.
var value = {
    ho: [
        1,
        2
    ]
};
var tmp = value.ho, hey = tmp === void 0 ? [] : tmp;
console.log(hey);
import os
from flask import Flask, request, Response
from flask import render_template, url_for, redirect, send_from_directory
from flask import send_file, make_response, abort
from angular_flask import app
# routing for API endpoints, generated from the models designated as API_MODELS
from angular_flask.core import api_manager
from angular_flask.models import *
# Register a GET/POST REST endpoint for every model declared in API_MODELS.
for model_name, model_class in app.config['API_MODELS'].items():
    api_manager.create_api(model_class, methods=['GET', 'POST'])
session = api_manager.session
# routing for basic pages (pass routing onto the Angular app)
@app.route('/')
@app.route('/about')
@app.route('/blog')
def basic_pages(**kwargs):
    """Serve the Angular single-page-app shell for client-routed pages.

    Fix: open the template with a context manager so the file handle is
    closed instead of being leaked on every request.
    """
    with open('angular_flask/templates/index.html') as f:
        return make_response(f.read())
# routing for CRUD-style endpoints
# passes routing onto the angular frontend if the requested resource exists
from sqlalchemy.sql import exists
# Map of URL-visible model names to their SQLAlchemy model classes.
crud_url_models = app.config['CRUD_URL_MODELS']


@app.route('/<model_name>/')
@app.route('/<model_name>/<item_id>')
def rest_pages(model_name, item_id=None):
    """Serve the Angular shell for CRUD-style URLs.

    Routing is handed to the frontend only when the requested model (and,
    if given, the item id) exists; otherwise a 404 is returned.

    Fix: open the template with a context manager so the file handle is
    closed instead of being leaked on every request.
    """
    if model_name in crud_url_models:
        model_class = crud_url_models[model_name]
        if item_id is None or session.query(exists().where(
                model_class.id == item_id)).scalar():
            with open('angular_flask/templates/index.html') as f:
                return make_response(f.read())
    abort(404)
# special file handlers and error handlers
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the static image directory."""
    static_dir = os.path.join(app.root_path, 'static')
    return send_from_directory(static_dir, 'img/favicon.ico')
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 template together with the 404 status code."""
    body = render_template('404.html')
    return body, 404
"""
Sync Media to S3
================
Django command that scans all files in your settings.MEDIA_ROOT and
settings.STATIC_ROOT folders and uploads them to S3 with the same directory
structure.
This command can optionally do the following but it is off by default:
* gzip compress any CSS and Javascript files it finds and adds the appropriate
'Content-Encoding' header.
* set a far future 'Expires' header for optimal caching.
* upload only media or static files.
* use any other provider compatible with Amazon S3.
* set other than 'public-read' ACL.
Note: This script requires the Python boto library and valid Amazon Web
Services API keys.
Required settings.py variables:
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
AWS_BUCKET_NAME = ''
When you call this command with the `--renamegzip` param, it will add
the '.gz' extension to the file name. But Safari just doesn't recognize
'.gz' files and your site won't work on it! To fix this problem, you can
set any other extension (like .jgz) in the `SYNC_S3_RENAME_GZIP_EXT`
variable.
Command options are:
-p PREFIX, --prefix=PREFIX
The prefix to prepend to the path on S3.
--gzip Enables gzipping CSS and Javascript files.
--expires Enables setting a far future expires header.
--force Skip the file mtime check to force upload of all
files.
--filter-list Override default directory and file exclusion
filters. (enter as comma separated line)
--renamegzip Enables renaming of gzipped files by appending '.gz'.
to the original file name. This way your original
assets will not be replaced by the gzipped ones.
You can change the extension setting the
`SYNC_S3_RENAME_GZIP_EXT` var in your settings.py
file.
--invalidate Invalidates the objects in CloudFront after uploading
stuff to s3.
--media-only Only MEDIA_ROOT files will be uploaded to S3.
--static-only Only STATIC_ROOT files will be uploaded to S3.
--s3host Override default s3 host.
--acl Override default ACL settings ('public-read' if
settings.AWS_DEFAULT_ACL is not defined).
TODO:
* Use fnmatch (or regex) to allow more complex FILTER_LIST rules.
"""
import datetime
import email
import mimetypes
from optparse import make_option
import os
import time
import gzip
try:
from cStringIO import StringIO
assert StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
# Make sure boto is available
try:
import boto
import boto.exception
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class Command(BaseCommand):
# Extra variables to avoid passing these around
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
AWS_BUCKET_NAME = ''
AWS_CLOUDFRONT_DISTRIBUTION = ''
SYNC_S3_RENAME_GZIP_EXT = ''
DIRECTORIES = ''
FILTER_LIST = ['.DS_Store', '.svn', '.hg', '.git', 'Thumbs.db']
GZIP_CONTENT_TYPES = (
'text/css',
'application/javascript',
'application/x-javascript',
'text/javascript'
)
uploaded_files = []
upload_count = 0
skip_count = 0
option_list = BaseCommand.option_list + (
make_option('-p', '--prefix',
dest='prefix',
default=getattr(settings, 'SYNC_S3_PREFIX', ''),
help="The prefix to prepend to the path on S3."),
make_option('-d', '--dir',
dest='dir',
help="Custom static root directory to use"),
make_option('--s3host',
dest='s3host',
default=getattr(settings, 'AWS_S3_HOST', ''),
help="The s3 host (enables connecting to other providers/regions)"),
make_option('--acl',
dest='acl',
default=getattr(settings, 'AWS_DEFAULT_ACL', 'public-read'),
help="Enables to override default acl (public-read)."),
make_option('--gzip',
action='store_true', dest='gzip', default=False,
help="Enables gzipping CSS and Javascript files."),
make_option('--renamegzip',
action='store_true', dest='renamegzip', default=False,
help="Enables renaming of gzipped assets to have '.gz' appended to the filename."),
make_option('--expires',
action='store_true', dest='expires', default=False,
help="Enables setting a far future expires header."),
make_option('--force',
action='store_true', dest='force', default=False,
help="Skip the file mtime check to force upload of all files."),
make_option('--filter-list', dest='filter_list',
action='store', default='',
help="Override default directory and file exclusion filters. (enter as comma seperated line)"),
make_option('--invalidate', dest='invalidate', default=False,
action='store_true',
help='Invalidates the associated objects in CloudFront'),
make_option('--media-only', dest='media_only', default='',
action='store_true',
help="Only MEDIA_ROOT files will be uploaded to S3"),
make_option('--static-only', dest='static_only', default='',
action='store_true',
help="Only STATIC_ROOT files will be uploaded to S3"),
)
help = 'Syncs the complete MEDIA_ROOT structure and files to S3 into the given bucket name.'
args = 'bucket_name'
can_import_settings = True
    def handle(self, *args, **options):
        """Validate settings and options, then sync media/static to S3.

        Raises CommandError when required settings are missing, performs the
        sync, and optionally issues a CloudFront invalidation.
        """
        if not HAS_BOTO:
            raise ImportError("The boto Python library is not installed.")
        # Check for AWS keys in settings
        if not hasattr(settings, 'AWS_ACCESS_KEY_ID') or not hasattr(settings, 'AWS_SECRET_ACCESS_KEY'):
            raise CommandError('Missing AWS keys from settings file. Please supply both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.')
        else:
            self.AWS_ACCESS_KEY_ID = settings.AWS_ACCESS_KEY_ID
            self.AWS_SECRET_ACCESS_KEY = settings.AWS_SECRET_ACCESS_KEY
        if not hasattr(settings, 'AWS_BUCKET_NAME'):
            raise CommandError('Missing bucket name from settings file. Please add the AWS_BUCKET_NAME to your settings file.')
        else:
            if not settings.AWS_BUCKET_NAME:
                raise CommandError('AWS_BUCKET_NAME cannot be empty.')
            self.AWS_BUCKET_NAME = settings.AWS_BUCKET_NAME
        if not hasattr(settings, 'MEDIA_ROOT'):
            raise CommandError('MEDIA_ROOT must be set in your settings.')
        else:
            if not settings.MEDIA_ROOT:
                raise CommandError('MEDIA_ROOT must be set in your settings.')
        # Optional settings fall back to defaults ('' / '.gz').
        self.AWS_CLOUDFRONT_DISTRIBUTION = getattr(settings, 'AWS_CLOUDFRONT_DISTRIBUTION', '')
        self.SYNC_S3_RENAME_GZIP_EXT = \
            getattr(settings, 'SYNC_S3_RENAME_GZIP_EXT', '.gz')
        # Copy command-line options onto the instance for the other methods.
        self.verbosity = int(options.get('verbosity'))
        self.prefix = options.get('prefix')
        self.do_gzip = options.get('gzip')
        self.rename_gzip = options.get('renamegzip')
        self.do_expires = options.get('expires')
        self.do_force = options.get('force')
        self.invalidate = options.get('invalidate')
        self.DIRECTORIES = options.get('dir')
        self.s3host = options.get('s3host')
        self.default_acl = options.get('acl')
        self.FILTER_LIST = getattr(settings, 'FILTER_LIST', self.FILTER_LIST)
        filter_list = options.get('filter_list')
        if filter_list:
            # command line option overrides default filter_list and
            # settings.filter_list
            self.FILTER_LIST = filter_list.split(',')
        self.media_only = options.get('media_only')
        self.static_only = options.get('static_only')
        # Get directories
        if self.media_only and self.static_only:
            raise CommandError("Can't use --media-only and --static-only together. Better not use anything...")
        elif self.media_only:
            self.DIRECTORIES = [settings.MEDIA_ROOT]
        elif self.static_only:
            self.DIRECTORIES = [settings.STATIC_ROOT]
        elif self.DIRECTORIES:
            self.DIRECTORIES = [self.DIRECTORIES]
        else:
            self.DIRECTORIES = [settings.MEDIA_ROOT, settings.STATIC_ROOT]
        # Now call the syncing method to walk the MEDIA_ROOT directory and
        # upload all files found.
        self.sync_s3()
        # Sending the invalidation request to CloudFront if the user
        # requested this action
        if self.invalidate:
            self.invalidate_objects_cf()
        print("")
        print("%d files uploaded." % self.upload_count)
        print("%d files skipped." % self.skip_count)
def open_cf(self):
"""
Returns an open connection to CloudFront
"""
return boto.connect_cloudfront(
self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY)
def invalidate_objects_cf(self):
"""
Split the invalidation request in groups of 1000 objects
"""
if not self.AWS_CLOUDFRONT_DISTRIBUTION:
raise CommandError(
'An object invalidation was requested but the variable '
'AWS_CLOUDFRONT_DISTRIBUTION is not present in your settings.')
# We can't send more than 1000 objects in the same invalidation
# request.
chunk = 1000
# Connecting to CloudFront
conn = self.open_cf()
# Splitting the object list
objs = self.uploaded_files
chunks = [objs[i:i + chunk] for i in range(0, len(objs), chunk)]
# Invalidation requests
for paths in chunks:
conn.create_invalidation_request(
self.AWS_CLOUDFRONT_DISTRIBUTION, paths)
    def sync_s3(self):
        """
        Walks the media/static directories and syncs files to S3
        """
        # NOTE(review): os.path.walk is Python-2-only (removed in Python 3);
        # upload_s3 is invoked as its per-directory visitor callback with the
        # (bucket, key, bucket_name, root_dir) tuple as its first argument.
        bucket, key = self.open_s3()
        for directory in self.DIRECTORIES:
            os.path.walk(directory, self.upload_s3, (bucket, key, self.AWS_BUCKET_NAME, directory))
def compress_string(self, s):
"""Gzip a given string."""
zbuf = StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def get_s3connection_kwargs(self):
"""Returns connection kwargs as a dict"""
kwargs = {}
if self.s3host:
kwargs['host'] = self.s3host
return kwargs
def open_s3(self):
"""
Opens connection to S3 returning bucket and key
"""
conn = boto.connect_s3(
self.AWS_ACCESS_KEY_ID,
self.AWS_SECRET_ACCESS_KEY,
**self.get_s3connection_kwargs())
try:
bucket = conn.get_bucket(self.AWS_BUCKET_NAME)
except boto.exception.S3ResponseError:
bucket = conn.create_bucket(self.AWS_BUCKET_NAME)
return bucket, boto.s3.key.Key(bucket)
def upload_s3(self, arg, dirname, names):
"""
This is the callback to os.path.walk and where much of the work happens
"""
bucket, key, bucket_name, root_dir = arg
# Skip directories we don't want to sync
if os.path.basename(dirname) in self.FILTER_LIST:
# prevent walk from processing subfiles/subdirs below the ignored one
del names[:]
return
# Later we assume the MEDIA_ROOT ends with a trailing slash
if not root_dir.endswith(os.path.sep):
root_dir = root_dir + os.path.sep
for file in names:
headers = {}
if file in self.FILTER_LIST:
continue # Skip files we don't want to sync
filename = os.path.join(dirname, file)
if os.path.isdir(filename):
continue # Don't try to upload directories
file_key = filename[len(root_dir):]
if self.prefix:
file_key = '%s/%s' % (self.prefix, file_key)
# Check if file on S3 is older than local file, if so, upload
if not self.do_force:
s3_key = bucket.get_key(file_key)
if s3_key:
s3_datetime = datetime.datetime(*time.strptime(
s3_key.last_modified, '%a, %d %b %Y %H:%M:%S %Z')[0:6])
local_datetime = datetime.datetime.utcfromtimestamp(
os.stat(filename).st_mtime)
if local_datetime < s3_datetime:
self.skip_count += 1
if self.verbosity > 1:
print("File %s hasn't been modified since last being uploaded" % file_key)
continue
# File is newer, let's process and upload
if self.verbosity > 0:
print("Uploading %s..." % file_key)
content_type = mimetypes.guess_type(filename)[0]
if content_type:
headers['Content-Type'] = content_type
else:
headers['Content-Type'] = 'application/octet-stream'
file_obj = open(filename, 'rb')
file_size = os.fstat(file_obj.fileno()).st_size
filedata = file_obj.read()
if self.do_gzip:
# Gzipping only if file is large enough (>1K is recommended)
# and only if file is a common text type (not a binary file)
if file_size > 1024 and content_type in self.GZIP_CONTENT_TYPES:
filedata = self.compress_string(filedata)
if self.rename_gzip:
# If rename_gzip is True, then rename the file
# by appending an extension (like '.gz)' to
# original filename.
file_key = '%s.%s' % (
file_key, self.SYNC_S3_RENAME_GZIP_EXT)
headers['Content-Encoding'] = 'gzip'
if self.verbosity > 1:
print("\tgzipped: %dk to %dk" % (file_size / 1024, len(filedata) / 1024))
if self.do_expires:
# HTTP/1.0
headers['Expires'] = '%s GMT' % (email.Utils.formatdate(time.mktime((datetime.datetime.now() + datetime.timedelta(days=365 * 2)).timetuple())))
# HTTP/1.1
headers['Cache-Control'] = 'max-age %d' % (3600 * 24 * 365 * 2)
if self.verbosity > 1:
print("\texpires: %s" % headers['Expires'])
print("\tcache-control: %s" % headers['Cache-Control'])
try:
key.name = file_key
key.set_contents_from_string(filedata, headers, replace=True,
policy=self.default_acl)
except boto.exception.S3CreateError as e:
print("Failed: %s" % e)
except Exception as e:
print(e)
raise
else:
self.upload_count += 1
self.uploaded_files.append(file_key)
file_obj.close() | unknown | codeparrot/codeparrot-clean | ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"os"
"regexp"
"strings"
"github.com/hashicorp/cli"
"github.com/hashicorp/vault/api"
"github.com/posener/complete"
"github.com/tink-crypto/tink-go/v2/kwp/subtle"
)
var (
_ cli.Command = (*TransitImportCommand)(nil)
_ cli.CommandAutocomplete = (*TransitImportCommand)(nil)
keyPath = regexp.MustCompile("^(.*)/keys/([^/]*)$")
)
type TransitImportCommand struct {
*BaseCommand
}
func (c *TransitImportCommand) Synopsis() string {
return "Import a key into the Transit secrets engines."
}
func (c *TransitImportCommand) Help() string {
helpText := `
Usage: vault transit import PATH KEY [options...]
Using the Transit key wrapping system, imports key material from
the base64 encoded KEY (either directly on the CLI or via @path notation),
into a new key whose API path is PATH. To import a new version into an
existing key, use import_version. The remaining options after KEY (key=value
style) are passed on to the Transit create key endpoint. If your
system or device natively supports the RSA AES key wrap mechanism (such as
the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it directly
rather than this command.
` + c.Flags().Help()
return strings.TrimSpace(helpText)
}
func (c *TransitImportCommand) Flags() *FlagSets {
return c.flagSet(FlagSetHTTP)
}
func (c *TransitImportCommand) AutocompleteArgs() complete.Predictor {
return nil
}
func (c *TransitImportCommand) AutocompleteFlags() complete.Flags {
return c.Flags().Completions()
}
func (c *TransitImportCommand) Run(args []string) int {
return ImportKey(c.BaseCommand, "import", transitImportKeyPath, c.Flags(), args)
}
func transitImportKeyPath(s string, operation string) (path string, apiPath string, err error) {
parts := keyPath.FindStringSubmatch(s)
if len(parts) != 3 {
return "", "", errors.New("expected transit path and key name in the form :path:/keys/:name:")
}
path = parts[1]
keyName := parts[2]
apiPath = path + "/keys/" + keyName + "/" + operation
return path, apiPath, nil
}
type ImportKeyFunc func(s string, operation string) (path string, apiPath string, err error)
// error codes: 1: user error, 2: internal computation error, 3: remote api call error
func ImportKey(c *BaseCommand, operation string, pathFunc ImportKeyFunc, flags *FlagSets, args []string) int {
// Parse and validate the arguments.
if err := flags.Parse(args); err != nil {
c.UI.Error(err.Error())
return 1
}
args = flags.Args()
if len(args) < 2 {
c.UI.Error(fmt.Sprintf("Incorrect argument count (expected 2+, got %d). Wanted PATH to import into and KEY material.", len(args)))
return 1
}
client, err := c.Client()
if err != nil {
c.UI.Error(err.Error())
return 2
}
ephemeralAESKey := make([]byte, 32)
_, err = rand.Read(ephemeralAESKey)
if err != nil {
c.UI.Error(fmt.Sprintf("failed to generate ephemeral key: %v", err))
}
path, apiPath, err := pathFunc(args[0], operation)
if err != nil {
c.UI.Error(err.Error())
return 1
}
keyMaterial := args[1]
if keyMaterial[0] == '@' {
keyMaterialBytes, err := os.ReadFile(keyMaterial[1:])
if err != nil {
c.UI.Error(fmt.Sprintf("error reading key material file: %v", err))
return 1
}
keyMaterial = string(keyMaterialBytes)
}
key, err := base64.StdEncoding.DecodeString(keyMaterial)
if err != nil {
c.UI.Error(fmt.Sprintf("error base64 decoding source key material: %v", err))
return 1
}
// Fetch the wrapping key
c.UI.Output("Retrieving wrapping key.")
wrappingKey, err := fetchWrappingKey(client, path)
if err != nil {
c.UI.Error(fmt.Sprintf("failed to fetch wrapping key: %v", err))
return 3
}
c.UI.Output("Wrapping source key with ephemeral key.")
wrapKWP, err := subtle.NewKWP(ephemeralAESKey)
if err != nil {
c.UI.Error(fmt.Sprintf("failure building key wrapping key: %v", err))
return 2
}
wrappedTargetKey, err := wrapKWP.Wrap(key)
if err != nil {
c.UI.Error(fmt.Sprintf("failure wrapping source key: %v", err))
return 2
}
c.UI.Output("Encrypting ephemeral key with wrapping key.")
wrappedAESKey, err := rsa.EncryptOAEP(
sha256.New(),
rand.Reader,
wrappingKey,
ephemeralAESKey,
[]byte{},
)
if err != nil {
c.UI.Error(fmt.Sprintf("failure encrypting wrapped key: %v", err))
return 2
}
combinedCiphertext := append(wrappedAESKey, wrappedTargetKey...)
importCiphertext := base64.StdEncoding.EncodeToString(combinedCiphertext)
// Parse all the key options
data, err := parseArgsData(os.Stdin, args[2:])
if err != nil {
c.UI.Error(fmt.Sprintf("Failed to parse extra K=V data: %s", err))
return 1
}
if data == nil {
data = make(map[string]interface{}, 1)
}
data["ciphertext"] = importCiphertext
c.UI.Output("Submitting wrapped key.")
// Finally, call import
_, err = client.Logical().Write(apiPath, data)
if err != nil {
c.UI.Error(fmt.Sprintf("failed to call import:%v", err))
return 3
} else {
c.UI.Output("Success!")
return 0
}
}
func fetchWrappingKey(client *api.Client, path string) (*rsa.PublicKey, error) {
resp, err := client.Logical().Read(path + "/wrapping_key")
if err != nil {
return nil, fmt.Errorf("error fetching wrapping key: %w", err)
}
if resp == nil {
return nil, fmt.Errorf("no mount found at %s: %v", path, err)
}
key, ok := resp.Data["public_key"]
if !ok {
return nil, fmt.Errorf("missing public_key field in response")
}
keyBlock, _ := pem.Decode([]byte(key.(string)))
if keyBlock == nil {
return nil, fmt.Errorf("failed to decode PEM information from public_key response field")
}
parsedKey, err := x509.ParsePKIXPublicKey(keyBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("error parsing wrapping key: %w", err)
}
rsaKey, ok := parsedKey.(*rsa.PublicKey)
if !ok {
return nil, fmt.Errorf("returned value was not an RSA public key but a %T", rsaKey)
}
return rsaKey, nil
} | go | github | https://github.com/hashicorp/vault | command/transit_import_key.go |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.support;
/**
* @author Juergen Hoeller
* @since 29.07.2004
*/
class ProtectedBaseBean {
private String someProperty;
public void setSomeProperty(String someProperty) {
this.someProperty = someProperty;
}
public String getSomeProperty() {
return someProperty;
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-beans/src/test/java/org/springframework/beans/support/ProtectedBaseBean.java |
# Generated by h2py from \mssdk\include\shlobj.h and shellapi.h
WM_USER = 1024
# OLE drag-and-drop effect codes (also aliased below as SFGAO_CANCOPY etc.)
DROPEFFECT_NONE = 0
DROPEFFECT_COPY = 1
DROPEFFECT_MOVE = 2
DROPEFFECT_LINK = 4
DROPEFFECT_SCROLL = -2147483648
# SHFileOperation function codes (wFunc member)
FO_MOVE = 1
FO_COPY = 2
FO_DELETE = 3
FO_RENAME = 4
## File operation flags used with shell.SHFileOperation
FOF_MULTIDESTFILES = 1
FOF_CONFIRMMOUSE = 2
FOF_SILENT = 4
FOF_RENAMEONCOLLISION = 8
FOF_NOCONFIRMATION = 16
FOF_WANTMAPPINGHANDLE = 32
FOF_ALLOWUNDO = 64
FOF_FILESONLY = 128
FOF_SIMPLEPROGRESS = 256
FOF_NOCONFIRMMKDIR = 512
FOF_NOERRORUI = 1024
FOF_NOCOPYSECURITYATTRIBS = 2048
FOF_NORECURSION = 4096
FOF_NO_CONNECTED_ELEMENTS = 8192
FOF_WANTNUKEWARNING = 16384
FOF_NORECURSEREPARSE = 32768
FOF_NO_UI = FOF_SILENT | FOF_NOCONFIRMATION | FOF_NOERRORUI | FOF_NOCONFIRMMKDIR
## Extended file operation flags, used with IFileOperation
FOFX_NOSKIPJUNCTIONS =0x00010000
FOFX_PREFERHARDLINK =0x00020000
FOFX_SHOWELEVATIONPROMPT =0x00040000
FOFX_EARLYFAILURE =0x00100000
FOFX_PRESERVEFILEEXTENSIONS =0x00200000
FOFX_KEEPNEWERFILE =0x00400000
FOFX_NOCOPYHOOKS =0x00800000
FOFX_NOMINIMIZEBOX =0x01000000
FOFX_MOVEACLSACROSSVOLUMES =0x02000000
FOFX_DONTDISPLAYSOURCEPATH =0x04000000
FOFX_DONTDISPLAYDESTPATH =0x08000000
FOFX_REQUIREELEVATION =0x10000000
FOFX_COPYASDOWNLOAD =0x40000000
FOFX_DONTDISPLAYLOCATIONS =0x80000000
# Printer-related operation codes
PO_DELETE = 19
PO_RENAME = 20
PO_PORTCHANGE = 32
PO_REN_PORT = 52
# ShellExecute error return codes (SE_ERR_*)
SE_ERR_FNF = 2
SE_ERR_PNF = 3
SE_ERR_ACCESSDENIED = 5
SE_ERR_OOM = 8
SE_ERR_DLLNOTFOUND = 32
SE_ERR_SHARE = 26
SE_ERR_ASSOCINCOMPLETE = 27
SE_ERR_DDETIMEOUT = 28
SE_ERR_DDEFAIL = 29
SE_ERR_DDEBUSY = 30
SE_ERR_NOASSOC = 31
# SHELLEXECUTEINFO.fMask flags for ShellExecuteEx
SEE_MASK_CLASSNAME = 1
SEE_MASK_CLASSKEY = 3
SEE_MASK_IDLIST = 4
SEE_MASK_INVOKEIDLIST = 12
SEE_MASK_ICON = 16
SEE_MASK_HOTKEY = 32
SEE_MASK_NOCLOSEPROCESS = 64
SEE_MASK_CONNECTNETDRV = 128
SEE_MASK_FLAG_DDEWAIT = 256
SEE_MASK_DOENVSUBST = 512
SEE_MASK_FLAG_NO_UI = 1024
SEE_MASK_UNICODE = 16384
SEE_MASK_NO_CONSOLE = 32768
SEE_MASK_ASYNCOK = 1048576
SEE_MASK_HMONITOR = 2097152
# NOTE: duplicate SEE_MASK_* run emitted by h2py; values are identical
# to the run above, so the re-assignment is harmless.
SEE_MASK_CLASSNAME = 1
SEE_MASK_CLASSKEY = 3
SEE_MASK_IDLIST = 4
SEE_MASK_INVOKEIDLIST = 12
SEE_MASK_ICON = 16
SEE_MASK_HOTKEY = 32
SEE_MASK_NOCLOSEPROCESS = 64
SEE_MASK_CONNECTNETDRV = 128
SEE_MASK_FLAG_DDEWAIT = 256
SEE_MASK_DOENVSUBST = 512
SEE_MASK_FLAG_NO_UI = 1024
SEE_MASK_UNICODE = 16384
SEE_MASK_NO_CONSOLE = 32768
SEE_MASK_ASYNCOK = 1048576
SEE_MASK_HMONITOR = 2097152
# SHEmptyRecycleBin flags
SHERB_NOCONFIRMATION = 1
SHERB_NOPROGRESSUI = 2
SHERB_NOSOUND = 4
# Shell_NotifyIcon messages (NIM_*) and NOTIFYICONDATA flags (NIF_*)
NIM_ADD = 0
NIM_MODIFY = 1
NIM_DELETE = 2
NIF_MESSAGE = 1
NIF_ICON = 2
NIF_TIP = 4
# SHGetFileInfo flags
SHGFI_ICON = 256
SHGFI_DISPLAYNAME = 512
SHGFI_TYPENAME = 1024
SHGFI_ATTRIBUTES = 2048
SHGFI_ICONLOCATION = 4096
SHGFI_EXETYPE = 8192
SHGFI_SYSICONINDEX = 16384
SHGFI_LINKOVERLAY = 32768
SHGFI_SELECTED = 65536
SHGFI_ATTR_SPECIFIED = 131072
SHGFI_LARGEICON = 0
SHGFI_SMALLICON = 1
SHGFI_OPENICON = 2
SHGFI_SHELLICONSIZE = 4
SHGFI_PIDL = 8
SHGFI_USEFILEATTRIBUTES = 16
# SHGetNewLinkInfo flags
SHGNLI_PIDL = 1
SHGNLI_PREFIXNAME = 2
SHGNLI_NOUNIQUE = 4
# SHInvokePrinterCommand action codes
PRINTACTION_OPEN = 0
PRINTACTION_PROPERTIES = 1
PRINTACTION_NETINSTALL = 2
PRINTACTION_NETINSTALLLINK = 3
PRINTACTION_TESTPAGE = 4
PRINTACTION_OPENNETPRN = 5
PRINTACTION_DOCUMENTDEFAULTS = 6
PRINTACTION_SERVERPROPERTIES = 7
# Flags used with IContextMenu.QueryContextMenu
CMF_NORMAL = 0
CMF_DEFAULTONLY = 1
CMF_VERBSONLY = 2
CMF_EXPLORE = 4
CMF_NOVERBS = 8
CMF_CANRENAME = 16
CMF_NODEFAULT = 32
CMF_INCLUDESTATIC = 64
CMF_ITEMMENU = 128
CMF_EXTENDEDVERBS = 256
CMF_DISABLEDVERBS = 512
CMF_ASYNCVERBSTATE = 1024
CMF_OPTIMIZEFORINVOKE = 2048
CMF_SYNCCASCADEMENU = 4096
CMF_DONOTPICKDEFAULT = 8192
CMF_RESERVED = 4294901760 # 0xffff0000
# IContextMenu.GetCommandString flags (ANSI/Unicode variants)
GCS_VERBA = 0
GCS_HELPTEXTA = 1
GCS_VALIDATEA = 2
GCS_VERBW = 4
GCS_HELPTEXTW = 5
GCS_VALIDATEW = 6
GCS_UNICODE = 4
GCS_VERB = GCS_VERBW
GCS_HELPTEXT = GCS_HELPTEXTW
GCS_VALIDATE = GCS_VALIDATEW
# NOTE: h2py emitted both arms of the C "#ifdef UNICODE" block, so the
# ANSI assignments below overwrite the Unicode ones above; the effective
# values of GCS_VERB/GCS_HELPTEXT/GCS_VALIDATE are the *A variants.
GCS_VERB = GCS_VERBA
GCS_HELPTEXT = GCS_HELPTEXTA
GCS_VALIDATE = GCS_VALIDATEA
# Canonical shell verb strings
CMDSTR_NEWFOLDERA = "NewFolder"
CMDSTR_VIEWLISTA = "ViewList"
CMDSTR_VIEWDETAILSA = "ViewDetails"
CMDSTR_NEWFOLDER = CMDSTR_NEWFOLDERA
CMDSTR_VIEWLIST = CMDSTR_VIEWLISTA
CMDSTR_VIEWDETAILS = CMDSTR_VIEWDETAILSA
# CMINVOKECOMMANDINFO.fMask flags (aliases of the SEE_MASK_* values)
CMIC_MASK_HOTKEY = SEE_MASK_HOTKEY
CMIC_MASK_ICON = SEE_MASK_ICON
CMIC_MASK_FLAG_NO_UI = SEE_MASK_FLAG_NO_UI
CMIC_MASK_UNICODE = SEE_MASK_UNICODE
CMIC_MASK_NO_CONSOLE = SEE_MASK_NO_CONSOLE
CMIC_MASK_ASYNCOK = SEE_MASK_ASYNCOK
CMIC_MASK_PTINVOKE = 536870912
# IExtractIcon flags (GIL_* input and output values)
GIL_OPENICON = 1
GIL_FORSHELL = 2
GIL_ASYNC = 32
GIL_DEFAULTICON = 64
GIL_FORSHORTCUT = 128
GIL_CHECKSHIELD = 512
GIL_SIMULATEDOC = 1
GIL_PERINSTANCE = 2
GIL_PERCLASS = 4
GIL_NOTFILENAME = 8
GIL_DONTCACHE = 16
GIL_SHIELD = 512
GIL_FORCENOSHIELD = 1024
# IShellIconOverlayIdentifier flags
ISIOI_ICONFILE = 1
ISIOI_ICONINDEX = 2
ISIOI_SYSIMAGELISTINDEX = 4
# NOTE: duplicate FO_*/FOF_*/PO_* runs emitted by h2py; identical to the
# values defined earlier in this module.
FO_MOVE = 1
FO_COPY = 2
FO_DELETE = 3
FO_RENAME = 4
FOF_MULTIDESTFILES = 1
FOF_CONFIRMMOUSE = 2
FOF_SILENT = 4
FOF_RENAMEONCOLLISION = 8
FOF_NOCONFIRMATION = 16
FOF_WANTMAPPINGHANDLE = 32
FOF_ALLOWUNDO = 64
FOF_FILESONLY = 128
FOF_SIMPLEPROGRESS = 256
FOF_NOCONFIRMMKDIR = 512
FOF_NOERRORUI = 1024
FOF_NOCOPYSECURITYATTRIBS = 2048
PO_DELETE = 19
PO_RENAME = 20
PO_PORTCHANGE = 32
PO_REN_PORT = 52
# File viewer flags (FVSHOWINFO.dwFlags)
FVSIF_RECT = 1
FVSIF_PINNED = 2
FVSIF_NEWFAILED = 134217728
FVSIF_NEWFILE = -2147483648
FVSIF_CANVIEWIT = 1073741824
# Folder/browser command-id ranges and standard menu ids (FCIDM_*)
FCIDM_SHVIEWFIRST = 0
FCIDM_SHVIEWLAST = 32767
FCIDM_BROWSERFIRST = 40960
FCIDM_BROWSERLAST = 48896
FCIDM_GLOBALFIRST = 32768
FCIDM_GLOBALLAST = 40959
FCIDM_MENU_FILE = (FCIDM_GLOBALFIRST+0)
FCIDM_MENU_EDIT = (FCIDM_GLOBALFIRST+64)
FCIDM_MENU_VIEW = (FCIDM_GLOBALFIRST+128)
FCIDM_MENU_VIEW_SEP_OPTIONS = (FCIDM_GLOBALFIRST+129)
FCIDM_MENU_TOOLS = (FCIDM_GLOBALFIRST+192)
FCIDM_MENU_TOOLS_SEP_GOTO = (FCIDM_GLOBALFIRST+193)
FCIDM_MENU_HELP = (FCIDM_GLOBALFIRST+256)
FCIDM_MENU_FIND = (FCIDM_GLOBALFIRST+320)
FCIDM_MENU_EXPLORE = (FCIDM_GLOBALFIRST+336)
FCIDM_MENU_FAVORITES = (FCIDM_GLOBALFIRST+368)
FCIDM_TOOLBAR = (FCIDM_BROWSERFIRST + 0)
FCIDM_STATUS = (FCIDM_BROWSERFIRST + 1)
IDC_OFFLINE_HAND = 103
# IShellBrowser.BrowseObject flags
SBSP_DEFBROWSER = 0
SBSP_SAMEBROWSER = 1
SBSP_NEWBROWSER = 2
SBSP_DEFMODE = 0
SBSP_OPENMODE = 16
SBSP_EXPLOREMODE = 32
SBSP_ABSOLUTE = 0
SBSP_RELATIVE = 4096
SBSP_PARENT = 8192
SBSP_NAVIGATEBACK = 16384
SBSP_NAVIGATEFORWARD = 32768
SBSP_ALLOW_AUTONAVIGATE = 65536
SBSP_INITIATEDBYHLINKFRAME = -2147483648
SBSP_REDIRECT = 1073741824
SBSP_WRITENOHISTORY = 134217728
SBSP_NOAUTOSELECT = 67108864
# IShellBrowser.GetControlWindow / SendControlMsg control ids
FCW_STATUS = 1
FCW_TOOLBAR = 2
FCW_TREE = 3
FCW_INTERNETBAR = 6
FCW_PROGRESS = 8
# IShellBrowser.SetToolbarItems flags
FCT_MERGE = 1
FCT_CONFIGABLE = 2
FCT_ADDTOEND = 4
# ICommDlgBrowser.OnStateChange codes
CDBOSC_SETFOCUS = 0
CDBOSC_KILLFOCUS = 1
CDBOSC_SELCHANGE = 2
CDBOSC_RENAME = 3
# IShellView.SelectItem flags
SVSI_DESELECT = 0
SVSI_SELECT = 1
SVSI_EDIT = 3
SVSI_DESELECTOTHERS = 4
SVSI_ENSUREVISIBLE = 8
SVSI_FOCUSED = 16
SVSI_TRANSLATEPT = 32
# IShellView.GetItemObject item sets (SVGIO_*)
SVGIO_BACKGROUND = 0
SVGIO_SELECTION = 1
SVGIO_ALLVIEW = 2
# FIX: h2py carried over the trailing commas from the C enum declaration,
# which in Python made SVGIO_CHECKED and SVGIO_TYPE_MASK one-element
# tuples ((3,) and (15,)) instead of integers.  They are bit values for
# IShellView.GetItemObject and must be plain ints so bitwise operations
# (e.g. "flags & SVGIO_TYPE_MASK") work.
SVGIO_CHECKED = 0x3
SVGIO_TYPE_MASK = 0xf
SVGIO_FLAG_VIEWORDER = -2147483648 # 0x80000000
# STRRET.uType values (string-return format from IShellFolder.GetDisplayNameOf)
STRRET_WSTR = 0
STRRET_OFFSET = 1
STRRET_CSTR = 2
# CSIDL special-folder ids (SHGetFolderPath / SHGetSpecialFolderLocation)
CSIDL_DESKTOP = 0
CSIDL_INTERNET = 1
CSIDL_PROGRAMS = 2
CSIDL_CONTROLS = 3
CSIDL_PRINTERS = 4
CSIDL_PERSONAL = 5
CSIDL_FAVORITES = 6
CSIDL_STARTUP = 7
CSIDL_RECENT = 8
CSIDL_SENDTO = 9
CSIDL_BITBUCKET = 10
CSIDL_STARTMENU = 11
CSIDL_MYDOCUMENTS = 12
CSIDL_MYMUSIC = 13
CSIDL_MYVIDEO = 14
CSIDL_DESKTOPDIRECTORY = 16
CSIDL_DRIVES = 17
CSIDL_NETWORK = 18
CSIDL_NETHOOD = 19
CSIDL_FONTS = 20
CSIDL_TEMPLATES = 21
CSIDL_COMMON_STARTMENU = 22
CSIDL_COMMON_PROGRAMS = 23
CSIDL_COMMON_STARTUP = 24
CSIDL_COMMON_DESKTOPDIRECTORY = 25
CSIDL_APPDATA = 26
CSIDL_PRINTHOOD = 27
CSIDL_LOCAL_APPDATA = 28
CSIDL_ALTSTARTUP = 29
CSIDL_COMMON_ALTSTARTUP = 30
CSIDL_COMMON_FAVORITES = 31
CSIDL_INTERNET_CACHE = 32
CSIDL_COOKIES = 33
CSIDL_HISTORY = 34
CSIDL_COMMON_APPDATA = 35
CSIDL_WINDOWS = 36
CSIDL_SYSTEM = 37
CSIDL_PROGRAM_FILES = 38
CSIDL_MYPICTURES = 39
CSIDL_PROFILE = 40
CSIDL_SYSTEMX86 = 41
CSIDL_PROGRAM_FILESX86 = 42
CSIDL_PROGRAM_FILES_COMMON = 43
CSIDL_PROGRAM_FILES_COMMONX86 = 44
CSIDL_COMMON_TEMPLATES = 45
CSIDL_COMMON_DOCUMENTS = 46
CSIDL_COMMON_ADMINTOOLS = 47
CSIDL_ADMINTOOLS = 48
CSIDL_CONNECTIONS = 49
CSIDL_COMMON_MUSIC = 53
CSIDL_COMMON_PICTURES = 54
CSIDL_COMMON_VIDEO = 55
CSIDL_RESOURCES = 56
CSIDL_RESOURCES_LOCALIZED = 57
CSIDL_COMMON_OEM_LINKS = 58
CSIDL_CDBURN_AREA = 59
# 60 unused
CSIDL_COMPUTERSNEARME = 61
# SHBrowseForFolder BROWSEINFO.ulFlags
BIF_RETURNONLYFSDIRS = 1
BIF_DONTGOBELOWDOMAIN = 2
BIF_STATUSTEXT = 4
BIF_RETURNFSANCESTORS = 8
BIF_EDITBOX = 16
BIF_VALIDATE = 32
BIF_BROWSEFORCOMPUTER = 4096
BIF_BROWSEFORPRINTER = 8192
BIF_BROWSEINCLUDEFILES = 16384
# SHBrowseForFolder callback messages (BFFM_*)
BFFM_INITIALIZED = 1
BFFM_SELCHANGED = 2
BFFM_VALIDATEFAILEDA = 3
BFFM_VALIDATEFAILEDW = 4
BFFM_SETSTATUSTEXTA = (WM_USER + 100)
BFFM_ENABLEOK = (WM_USER + 101)
BFFM_SETSELECTIONA = (WM_USER + 102)
BFFM_SETSELECTIONW = (WM_USER + 103)
BFFM_SETSTATUSTEXTW = (WM_USER + 104)
BFFM_SETSTATUSTEXT = BFFM_SETSTATUSTEXTW
BFFM_SETSELECTION = BFFM_SETSELECTIONW
BFFM_VALIDATEFAILED = BFFM_VALIDATEFAILEDW
# NOTE: h2py emitted both arms of "#ifdef UNICODE"; the ANSI assignments
# below overwrite the Unicode ones, so the unsuffixed BFFM_* names resolve
# to the *A values.
BFFM_SETSTATUSTEXT = BFFM_SETSTATUSTEXTA
BFFM_SETSELECTION = BFFM_SETSELECTIONA
BFFM_VALIDATEFAILED = BFFM_VALIDATEFAILEDA
# Shell item attribute flags (SFGAO_*, IShellFolder.GetAttributesOf)
SFGAO_CANCOPY = DROPEFFECT_COPY
SFGAO_CANMOVE = DROPEFFECT_MOVE
SFGAO_CANLINK = DROPEFFECT_LINK
SFGAO_CANRENAME = 16
SFGAO_CANDELETE = 32
SFGAO_HASPROPSHEET = 64
SFGAO_DROPTARGET = 256
SFGAO_CAPABILITYMASK = 375
SFGAO_LINK = 65536
SFGAO_SHARE = 131072
SFGAO_READONLY = 262144
SFGAO_GHOSTED = 524288
SFGAO_HIDDEN = 524288
SFGAO_DISPLAYATTRMASK = 983040
SFGAO_FILESYSANCESTOR = 268435456
SFGAO_FOLDER = 536870912
SFGAO_FILESYSTEM = 1073741824
SFGAO_HASSUBFOLDER = (-2147483648)
SFGAO_CONTENTSMASK = (-2147483648)
SFGAO_VALIDATE = 16777216
SFGAO_REMOVABLE = 33554432
SFGAO_COMPRESSED = 67108864
SFGAO_BROWSABLE = 134217728
SFGAO_NONENUMERATED = 1048576
SFGAO_NEWCONTENT = 2097152
SFGAO_STORAGE = 8
# Desk band constants (DWFRF_/DWFAF_/DBIM_/DBIMF_/DBIF_*)
DWFRF_NORMAL = 0
DWFRF_DELETECONFIGDATA = 1
DWFAF_HIDDEN = 1
DBIM_MINSIZE = 1
DBIM_MAXSIZE = 2
DBIM_INTEGRAL = 4
DBIM_ACTUAL = 8
DBIM_TITLE = 16
DBIM_MODEFLAGS = 32
DBIM_BKCOLOR = 64
DBIMF_NORMAL = 0
DBIMF_VARIABLEHEIGHT = 8
DBIMF_DEBOSSED = 32
DBIMF_BKCOLOR = 64
DBIF_VIEWMODE_NORMAL = 0
DBIF_VIEWMODE_VERTICAL = 1
DBIF_VIEWMODE_FLOATING = 2
DBIF_VIEWMODE_TRANSPARENT = 4
# Active Desktop component constants
COMPONENT_TOP = (2147483647)
COMP_TYPE_HTMLDOC = 0
COMP_TYPE_PICTURE = 1
COMP_TYPE_WEBSITE = 2
COMP_TYPE_CONTROL = 3
COMP_TYPE_CFHTML = 4
COMP_TYPE_MAX = 4
# IActiveDesktop.ApplyChanges flags
AD_APPLY_SAVE = 1
AD_APPLY_HTMLGEN = 2
AD_APPLY_REFRESH = 4
AD_APPLY_ALL = (AD_APPLY_SAVE | AD_APPLY_HTMLGEN | AD_APPLY_REFRESH)
AD_APPLY_FORCE = 8
AD_APPLY_BUFFERED_REFRESH = 16
# Desktop wallpaper styles
WPSTYLE_CENTER = 0
WPSTYLE_TILE = 1
WPSTYLE_STRETCH = 2
WPSTYLE_MAX = 3
# COMPONENT structure field-valid flags
COMP_ELEM_TYPE = 1
COMP_ELEM_CHECKED = 2
COMP_ELEM_DIRTY = 4
COMP_ELEM_NOSCROLL = 8
COMP_ELEM_POS_LEFT = 16
COMP_ELEM_POS_TOP = 32
COMP_ELEM_SIZE_WIDTH = 64
COMP_ELEM_SIZE_HEIGHT = 128
COMP_ELEM_POS_ZINDEX = 256
COMP_ELEM_SOURCE = 512
COMP_ELEM_FRIENDLYNAME = 1024
COMP_ELEM_SUBSCRIBEDURL = 2048
ADDURL_SILENT = 0X0001
# Shell clipboard-format names (registered via RegisterClipboardFormat)
CFSTR_SHELLIDLIST = "Shell IDList Array"
CFSTR_SHELLIDLISTOFFSET = "Shell Object Offsets"
CFSTR_NETRESOURCES = "Net Resource"
CFSTR_FILEDESCRIPTORA = "FileGroupDescriptor"
CFSTR_FILEDESCRIPTORW = "FileGroupDescriptorW"
CFSTR_FILECONTENTS = "FileContents"
CFSTR_FILENAMEA = "FileName"
CFSTR_FILENAMEW = "FileNameW"
CFSTR_PRINTERGROUP = "PrinterFriendlyName"
CFSTR_FILENAMEMAPA = "FileNameMap"
CFSTR_FILENAMEMAPW = "FileNameMapW"
CFSTR_SHELLURL = "UniformResourceLocator"
CFSTR_INETURLA = CFSTR_SHELLURL
CFSTR_INETURLW = "UniformResourceLocatorW"
CFSTR_PREFERREDDROPEFFECT = "Preferred DropEffect"
CFSTR_PERFORMEDDROPEFFECT = "Performed DropEffect"
CFSTR_PASTESUCCEEDED = "Paste Succeeded"
CFSTR_INDRAGLOOP = "InShellDragLoop"
CFSTR_DRAGCONTEXT = "DragContext"
CFSTR_MOUNTEDVOLUME = "MountedVolume"
CFSTR_PERSISTEDDATAOBJECT = "PersistedDataObject"
CFSTR_TARGETCLSID = "TargetCLSID"
CFSTR_LOGICALPERFORMEDDROPEFFECT = "Logical Performed DropEffect"
CFSTR_AUTOPLAY_SHELLIDLISTS = "Autoplay Enumerated IDList Array"
CFSTR_FILEDESCRIPTOR = CFSTR_FILEDESCRIPTORW
CFSTR_FILENAME = CFSTR_FILENAMEW
CFSTR_FILENAMEMAP = CFSTR_FILENAMEMAPW
# NOTE: h2py emitted both arms of "#ifdef UNICODE"; the ANSI aliases
# below win, so the unsuffixed CFSTR_* names hold the ANSI format names.
CFSTR_FILEDESCRIPTOR = CFSTR_FILEDESCRIPTORA
CFSTR_FILENAME = CFSTR_FILENAMEA
CFSTR_FILENAMEMAP = CFSTR_FILENAMEMAPA
DVASPECT_SHORTNAME = 2
# SHChangeNotify event ids (SHCNE_*)
SHCNE_RENAMEITEM = 1
SHCNE_CREATE = 2
SHCNE_DELETE = 4
SHCNE_MKDIR = 8
SHCNE_RMDIR = 16
SHCNE_MEDIAINSERTED = 32
SHCNE_MEDIAREMOVED = 64
SHCNE_DRIVEREMOVED = 128
SHCNE_DRIVEADD = 256
SHCNE_NETSHARE = 512
SHCNE_NETUNSHARE = 1024
SHCNE_ATTRIBUTES = 2048
SHCNE_UPDATEDIR = 4096
SHCNE_UPDATEITEM = 8192
SHCNE_SERVERDISCONNECT = 16384
SHCNE_UPDATEIMAGE = 32768
SHCNE_DRIVEADDGUI = 65536
SHCNE_RENAMEFOLDER = 131072
SHCNE_FREESPACE = 262144
SHCNE_EXTENDED_EVENT = 67108864
SHCNE_ASSOCCHANGED = 134217728
SHCNE_DISKEVENTS = 145439
SHCNE_GLOBALEVENTS = 201687520
SHCNE_ALLEVENTS = 2147483647
SHCNE_INTERRUPT = -2147483648
SHCNEE_ORDERCHANGED = 2
# SHChangeNotify uFlags (SHCNF_*) describing the item-parameter format
SHCNF_IDLIST = 0
SHCNF_PATHA = 1
SHCNF_PRINTERA = 2
SHCNF_DWORD = 3
SHCNF_PATHW = 5
SHCNF_PRINTERW = 6
SHCNF_TYPE = 255
SHCNF_FLUSH = 4096
SHCNF_FLUSHNOWAIT = 8192
SHCNF_PATH = SHCNF_PATHW
SHCNF_PRINTER = SHCNF_PRINTERW
# NOTE: h2py emitted both arms of "#ifdef UNICODE"; the ANSI aliases
# below win, so SHCNF_PATH/SHCNF_PRINTER resolve to the *A values.
SHCNF_PATH = SHCNF_PATHA
SHCNF_PRINTER = SHCNF_PRINTERA
QIF_CACHED = 1
QIF_DONTEXPANDFOLDER = 2
# SHARD enum for SHAddToRecentDocs
SHARD_PIDL = 1
SHARD_PATHA = 2
SHARD_PATHW = 3
SHARD_APPIDINFO = 4
SHARD_APPIDINFOIDLIST = 5
SHARD_LINK = 6
SHARD_APPIDINFOLINK = 7
SHARD_SHELLITEM = 8
## SHARD_PATH = SHARD_PATHW
SHARD_PATH = SHARD_PATHA
# SHGetDataFromIDList format codes
SHGDFIL_FINDDATA = 1
SHGDFIL_NETRESOURCE = 2
SHGDFIL_DESCRIPTIONID = 3
# SHDESCRIPTIONID.dwDescriptionId values
SHDID_ROOT_REGITEM = 1
SHDID_FS_FILE = 2
SHDID_FS_DIRECTORY = 3
SHDID_FS_OTHER = 4
SHDID_COMPUTER_DRIVE35 = 5
SHDID_COMPUTER_DRIVE525 = 6
SHDID_COMPUTER_REMOVABLE = 7
SHDID_COMPUTER_FIXED = 8
SHDID_COMPUTER_NETDRIVE = 9
SHDID_COMPUTER_CDROM = 10
SHDID_COMPUTER_RAMDISK = 11
SHDID_COMPUTER_OTHER = 12
SHDID_NET_DOMAIN = 13
SHDID_NET_SERVER = 14
SHDID_NET_SHARE = 15
SHDID_NET_RESTOFNET = 16
SHDID_NET_OTHER = 17
# Internet-shortcut property ids (PID_IS_*)
PID_IS_URL = 2
PID_IS_NAME = 4
PID_IS_WORKINGDIR = 5
PID_IS_HOTKEY = 6
PID_IS_SHOWCMD = 7
PID_IS_ICONINDEX = 8
PID_IS_ICONFILE = 9
PID_IS_WHATSNEW = 10
PID_IS_AUTHOR = 11
PID_IS_DESCRIPTION = 12
PID_IS_COMMENT = 13
# Internet-site property ids (PID_INTSITE_*)
PID_INTSITE_WHATSNEW = 2
PID_INTSITE_AUTHOR = 3
PID_INTSITE_LASTVISIT = 4
PID_INTSITE_LASTMOD = 5
PID_INTSITE_VISITCOUNT = 6
PID_INTSITE_DESCRIPTION = 7
PID_INTSITE_COMMENT = 8
PID_INTSITE_FLAGS = 9
PID_INTSITE_CONTENTLEN = 10
PID_INTSITE_CONTENTCODE = 11
PID_INTSITE_RECURSE = 12
PID_INTSITE_WATCH = 13
PID_INTSITE_SUBSCRIPTION = 14
PID_INTSITE_URL = 15
PID_INTSITE_TITLE = 16
PID_INTSITE_CODEPAGE = 18
PID_INTSITE_TRACKING = 19
PIDISF_RECENTLYCHANGED = 1
PIDISF_CACHEDSTICKY = 2
PIDISF_CACHEIMAGES = 16
PIDISF_FOLLOWALLLINKS = 32
PIDISM_GLOBAL = 0
PIDISM_WATCH = 1
PIDISM_DONTWATCH = 2
# Shell state settings flags (SSF_*, SHGetSetSettings)
SSF_SHOWALLOBJECTS = 1
SSF_SHOWEXTENSIONS = 2
SSF_SHOWCOMPCOLOR = 8
SSF_SHOWSYSFILES = 32
SSF_DOUBLECLICKINWEBVIEW = 128
SSF_SHOWATTRIBCOL = 256
SSF_DESKTOPHTML = 512
SSF_WIN95CLASSIC = 1024
SSF_DONTPRETTYPATH = 2048
SSF_SHOWINFOTIP = 8192
SSF_MAPNETDRVBUTTON = 4096
SSF_NOCONFIRMRECYCLE = 32768
SSF_HIDEICONS = 16384
# SHAppBarMessage message ids (ABM_*) and notifications (ABN_*)
ABM_NEW = 0
ABM_REMOVE = 1
ABM_QUERYPOS = 2
ABM_SETPOS = 3
ABM_GETSTATE = 4
ABM_GETTASKBARPOS = 5
ABM_ACTIVATE = 6
ABM_GETAUTOHIDEBAR = 7
ABM_SETAUTOHIDEBAR = 8
ABM_WINDOWPOSCHANGED = 9
ABN_STATECHANGE = 0
ABN_POSCHANGED = 1
ABN_FULLSCREENAPP = 2
ABN_WINDOWARRANGE = 3
# Appbar states (ABS_*) and screen edges (ABE_*)
ABS_AUTOHIDE = 1
ABS_ALWAYSONTOP = 2
ABE_LEFT = 0
ABE_TOP = 1
ABE_RIGHT = 2
ABE_BOTTOM = 3
def EIRESID(x):
    """Return the negated integer resource id for *x*.

    Mirrors the ``EIRESID`` macro from shlobj.h, which encodes an icon
    resource id as a negative value.  Accepts anything ``int()`` accepts.
    """
    # Original h2py output spelled this with a C-style cast: "(-1 * (int)(x))".
    # "-int(x)" is the idiomatic Python equivalent with identical behavior.
    return -int(x)
# Some manually added ones
CSIDL_COMMON_APPDATA = 35
CSIDL_LOCAL_APPDATA = 28
# IShellFolder.EnumObjects enumeration flags (SHCONTF_*)
SHCONTF_FOLDERS = 32 # for shell browser
SHCONTF_NONFOLDERS = 64 # for default view
SHCONTF_INCLUDEHIDDEN = 128 # for hidden/system objects
SHCONTF_INIT_ON_FIRST_NEXT = 256
SHCONTF_NETPRINTERSRCH = 512
SHCONTF_SHAREABLE = 1024
SHCONTF_STORAGE = 2048
# IShellFolder.GetDisplayNameOf / SetNameOf flags (SHGDN_*)
SHGDN_NORMAL = 0 # default (display purpose)
SHGDN_INFOLDER = 1 # displayed under a folder (relative)
SHGDN_FOREDITING = 4096 # for in-place editing
SHGDN_INCLUDE_NONFILESYS = 8192 # if not set, display names for shell name space items that are not in the file system will fail.
SHGDN_FORADDRESSBAR = 16384 # for displaying in the address (drives dropdown) bar
SHGDN_FORPARSING = 32768 # for ParseDisplayName or path
# NOTE: duplicate SHCONTF_* run; identical to the values defined above.
SHCONTF_FOLDERS = 32 # for shell browser
SHCONTF_NONFOLDERS = 64 # for default view
SHCONTF_INCLUDEHIDDEN = 128 # for hidden/system objects
# Browser folder option flags (BFO_*)
BFO_NONE = 0
BFO_BROWSER_PERSIST_SETTINGS = 1
BFO_RENAME_FOLDER_OPTIONS_TOINTERNET = 2
BFO_BOTH_OPTIONS = 4
BIF_PREFER_INTERNET_SHORTCUT = 8
BFO_BROWSE_NO_IN_NEW_PROCESS = 16
BFO_ENABLE_HYPERLINK_TRACKING = 32
BFO_USE_IE_OFFLINE_SUPPORT = 64
BFO_SUBSTITUE_INTERNET_START_PAGE = 128
BFO_USE_IE_LOGOBANDING = 256
BFO_ADD_IE_TOCAPTIONBAR = 512
BFO_USE_DIALUP_REF = 1024
BFO_USE_IE_TOOLBAR = 2048
BFO_NO_PARENT_FOLDER_SUPPORT = 4096
BFO_NO_REOPEN_NEXT_RESTART = 8192
BFO_GO_HOME_PAGE = 16384
BFO_PREFER_IEPROCESS = 32768
BFO_SHOW_NAVIGATION_CANCELLED = 65536
BFO_QUERY_ALL = -1
# From ShlGuid.h
# Property ids for shell property sets.  The meaning of each PID_* value
# depends on which property-set GUID (FMTID) it is used with, which is
# why the same numeric value repeats for different names below.
PID_FINDDATA = 0
PID_NETRESOURCE = 1
PID_DESCRIPTIONID = 2
PID_WHICHFOLDER = 3
PID_NETWORKLOCATION = 4
PID_COMPUTERNAME = 5
PID_DISPLACED_FROM = 2
PID_DISPLACED_DATE = 3
PID_SYNC_COPY_IN = 2
PID_MISC_STATUS = 2
PID_MISC_ACCESSCOUNT = 3
PID_MISC_OWNER = 4
PID_HTMLINFOTIPFILE = 5
PID_MISC_PICS = 6
PID_DISPLAY_PROPERTIES = 0
PID_INTROTEXT = 1
# Music summary-information property ids (PIDSI_* music variant)
PIDSI_ARTIST = 2
PIDSI_SONGTITLE = 3
PIDSI_ALBUM = 4
PIDSI_YEAR = 5
PIDSI_COMMENT = 6
PIDSI_TRACK = 7
PIDSI_GENRE = 11
PIDSI_LYRICS = 12
# DRM property ids
PIDDRSI_PROTECTED = 2
PIDDRSI_DESCRIPTION = 3
PIDDRSI_PLAYCOUNT = 4
PIDDRSI_PLAYSTARTS = 5
PIDDRSI_PLAYEXPIRES = 6
# Video stream property ids
PIDVSI_STREAM_NAME = 2
PIDVSI_FRAME_WIDTH = 3
PIDVSI_FRAME_HEIGHT = 4
PIDVSI_TIMELENGTH = 7
PIDVSI_FRAME_COUNT = 5
PIDVSI_FRAME_RATE = 6
PIDVSI_DATA_RATE = 8
PIDVSI_SAMPLE_SIZE = 9
PIDVSI_COMPRESSION = 10
PIDVSI_STREAM_NUMBER = 11
# Audio stream property ids
PIDASI_FORMAT = 2
PIDASI_TIMELENGTH = 3
PIDASI_AVG_DATA_RATE = 4
PIDASI_SAMPLE_RATE = 5
PIDASI_SAMPLE_SIZE = 6
PIDASI_CHANNEL_COUNT = 7
PIDASI_STREAM_NUMBER = 8
PIDASI_STREAM_NAME = 9
PIDASI_COMPRESSION = 10
PID_CONTROLPANEL_CATEGORY = 2
PID_VOLUME_FREE = 2
PID_VOLUME_CAPACITY = 3
PID_VOLUME_FILESYSTEM = 4
PID_SHARE_CSC_STATUS = 2
PID_LINK_TARGET = 2
PID_QUERY_RANK = 2
# From PropIdl.h
# IPropertySetStorage / IPropertyStorage flags and reserved property ids
PROPSETFLAG_DEFAULT = ( 0 )
PROPSETFLAG_NONSIMPLE = ( 1 )
PROPSETFLAG_ANSI = ( 2 )
PROPSETFLAG_UNBUFFERED = ( 4 )
PROPSETFLAG_CASE_SENSITIVE = ( 8 )
PROPSET_BEHAVIOR_CASE_SENSITIVE = ( 1 )
PID_DICTIONARY = ( 0 )
PID_CODEPAGE = ( 1 )
PID_FIRST_USABLE = ( 2 )
PID_FIRST_NAME_DEFAULT = ( 4095 )
PID_LOCALE = ( (-2147483648) )
PID_MODIFY_TIME = ( (-2147483647) )
PID_SECURITY = ( (-2147483646) )
PID_BEHAVIOR = ( (-2147483645) )
PID_ILLEGAL = ( (-1) )
PID_MIN_READONLY = ( (-2147483648) )
PID_MAX_READONLY = ( (-1073741825) )
PIDDI_THUMBNAIL = 2
# OLE SummaryInformation property ids (document variant of PIDSI_*)
PIDSI_TITLE = 2
PIDSI_SUBJECT = 3
PIDSI_AUTHOR = 4
PIDSI_KEYWORDS = 5
PIDSI_COMMENTS = 6
PIDSI_TEMPLATE = 7
PIDSI_LASTAUTHOR = 8
PIDSI_REVNUMBER = 9
PIDSI_EDITTIME = 10
PIDSI_LASTPRINTED = 11
PIDSI_CREATE_DTM = 12
PIDSI_LASTSAVE_DTM = 13
PIDSI_PAGECOUNT = 14
PIDSI_WORDCOUNT = 15
PIDSI_CHARCOUNT = 16
PIDSI_THUMBNAIL = 17
PIDSI_APPNAME = 18
PIDSI_DOC_SECURITY = 19
# DocumentSummaryInformation property ids
PIDDSI_CATEGORY = 2
PIDDSI_PRESFORMAT = 3
PIDDSI_BYTECOUNT = 4
PIDDSI_LINECOUNT = 5
PIDDSI_PARCOUNT = 6
PIDDSI_SLIDECOUNT = 7
PIDDSI_NOTECOUNT = 8
PIDDSI_HIDDENCOUNT = 9
PIDDSI_MMCLIPCOUNT = 10
PIDDSI_SCALE = 11
PIDDSI_HEADINGPAIR = 12
PIDDSI_DOCPARTS = 13
PIDDSI_MANAGER = 14
PIDDSI_COMPANY = 15
PIDDSI_LINKSDIRTY = 16
# Media summary-information property ids
PIDMSI_EDITOR = 2
PIDMSI_SUPPLIER = 3
PIDMSI_SOURCE = 4
PIDMSI_SEQUENCE_NO = 5
PIDMSI_PROJECT = 6
PIDMSI_STATUS = 7
PIDMSI_OWNER = 8
PIDMSI_RATING = 9
PIDMSI_PRODUCTION = 10
PIDMSI_COPYRIGHT = 11
# PROPSPEC.ulKind values
PRSPEC_INVALID = ( (-1) )
PRSPEC_LPWSTR = ( 0 )
PRSPEC_PROPID = ( 1 )
# From ShObjIdl.h
# IShellFolder.CompareIDs lParam flag bits
SHCIDS_ALLFIELDS = (-2147483648)
SHCIDS_CANONICALONLY = 268435456
SHCIDS_BITMASK = (-65536)
SHCIDS_COLUMNMASK = 65535
# Additional SFGAO_* attribute bits
SFGAO_CANMONIKER = 4194304
SFGAO_HASSTORAGE = 4194304
SFGAO_STREAM = 4194304
SFGAO_STORAGEANCESTOR = 8388608
SFGAO_STORAGECAPMASK = 1891958792
# Property-sheet constants (PSP_/PSH_/PSCB_/PSBTN_ etc., from prsht.h)
MAXPROPPAGES = 100
PSP_DEFAULT = 0
PSP_DLGINDIRECT = 1
PSP_USEHICON = 2
PSP_USEICONID = 4
PSP_USETITLE = 8
PSP_RTLREADING = 16
PSP_HASHELP = 32
PSP_USEREFPARENT = 64
PSP_USECALLBACK = 128
PSP_PREMATURE = 1024
PSP_HIDEHEADER = 2048
PSP_USEHEADERTITLE = 4096
PSP_USEHEADERSUBTITLE = 8192
PSP_USEFUSIONCONTEXT = 16384
PSPCB_ADDREF = 0
PSPCB_RELEASE = 1
PSPCB_CREATE = 2
PSH_DEFAULT = 0
PSH_PROPTITLE = 1
PSH_USEHICON = 2
PSH_USEICONID = 4
PSH_PROPSHEETPAGE = 8
PSH_WIZARDHASFINISH = 16
PSH_WIZARD = 32
PSH_USEPSTARTPAGE = 64
PSH_NOAPPLYNOW = 128
PSH_USECALLBACK = 256
PSH_HASHELP = 512
PSH_MODELESS = 1024
PSH_RTLREADING = 2048
PSH_WIZARDCONTEXTHELP = 4096
PSH_WIZARD97 = 8192
# NOTE: PSH_WIZARD97 is assigned twice (h2py kept both SDK-version
# definitions); this later assignment is the effective value.
PSH_WIZARD97 = 16777216
PSH_WATERMARK = 32768
PSH_USEHBMWATERMARK = 65536
PSH_USEHPLWATERMARK = 131072
PSH_STRETCHWATERMARK = 262144
PSH_HEADER = 524288
PSH_USEHBMHEADER = 1048576
PSH_USEPAGELANG = 2097152
PSH_WIZARD_LITE = 4194304
PSH_NOCONTEXTHELP = 33554432
PSCB_INITIALIZED = 1
PSCB_PRECREATE = 2
PSCB_BUTTONPRESSED = 3
PSNRET_NOERROR = 0
PSNRET_INVALID = 1
PSNRET_INVALID_NOCHANGEPAGE = 2
PSNRET_MESSAGEHANDLED = 3
PSWIZB_BACK = 1
PSWIZB_NEXT = 2
PSWIZB_FINISH = 4
PSWIZB_DISABLEDFINISH = 8
PSBTN_BACK = 0
PSBTN_NEXT = 1
PSBTN_FINISH = 2
PSBTN_OK = 3
PSBTN_APPLYNOW = 4
PSBTN_CANCEL = 5
PSBTN_HELP = 6
PSBTN_MAX = 6
ID_PSRESTARTWINDOWS = 2
ID_PSREBOOTSYSTEM = (ID_PSRESTARTWINDOWS | 1)
# Wizard / property-sheet dialog sizes (dialog units)
WIZ_CXDLG = 276
WIZ_CYDLG = 140
WIZ_CXBMP = 80
WIZ_BODYX = 92
WIZ_BODYCX = 184
PROP_SM_CXDLG = 212
PROP_SM_CYDLG = 188
PROP_MED_CXDLG = 227
PROP_MED_CYDLG = 215
PROP_LG_CXDLG = 252
PROP_LG_CYDLG = 218
ISOLATION_AWARE_USE_STATIC_LIBRARY = 0
ISOLATION_AWARE_BUILD_STATIC_LIBRARY = 0
# Shell column state flags (SHCOLSTATE_*)
SHCOLSTATE_TYPE_STR = 1
SHCOLSTATE_TYPE_INT = 2
SHCOLSTATE_TYPE_DATE = 3
SHCOLSTATE_TYPEMASK = 15
SHCOLSTATE_ONBYDEFAULT = 16
SHCOLSTATE_SLOW = 32
SHCOLSTATE_EXTENDED = 64
SHCOLSTATE_SECONDARYUI = 128
SHCOLSTATE_HIDDEN = 256
SHCOLSTATE_PREFER_VARCMP = 512
# Folder view window flags (FWF_*, FOLDERSETTINGS)
FWF_AUTOARRANGE = 1
FWF_ABBREVIATEDNAMES = 2
FWF_SNAPTOGRID = 4
FWF_OWNERDATA = 8
FWF_BESTFITWINDOW = 16
FWF_DESKTOP = 32
FWF_SINGLESEL = 64
FWF_NOSUBFOLDERS = 128
FWF_TRANSPARENT = 256
FWF_NOCLIENTEDGE = 512
FWF_NOSCROLL = 1024
FWF_ALIGNLEFT = 2048
FWF_NOICONS = 4096
FWF_SHOWSELALWAYS = 8192
FWF_NOVISIBLE = 16384
FWF_SINGLECLICKACTIVATE = 32768
FWF_NOWEBVIEW = 65536
FWF_HIDEFILENAMES = 131072
FWF_CHECKSELECT = 262144
# Folder view modes (FVM_*, FOLDERSETTINGS)
FVM_FIRST = 1
FVM_ICON = 1
FVM_SMALLICON = 2
FVM_LIST = 3
FVM_DETAILS = 4
FVM_THUMBNAIL = 5
FVM_TILE = 6
FVM_THUMBSTRIP = 7
# IShellView.UIActivate states
SVUIA_DEACTIVATE = 0
SVUIA_ACTIVATE_NOFOCUS = 1
SVUIA_ACTIVATE_FOCUS = 2
SVUIA_INPLACEACTIVATE = 3
# SHChangeNotifyRegister flags
SHCNRF_InterruptLevel = 1
SHCNRF_ShellLevel = 2
SHCNRF_RecursiveInterrupt = 4096
SHCNRF_NewDelivery = 32768
# FILEDESCRIPTOR.dwFlags field-valid bits (FD_*)
FD_CLSID = 0x0001
FD_SIZEPOINT = 0x0002
FD_ATTRIBUTES = 0x0004
FD_CREATETIME = 0x0008
FD_ACCESSTIME = 0x0010
FD_WRITESTIME = 0x0020
FD_FILESIZE = 0x0040
FD_PROGRESSUI = 0x4000
FD_LINKUI = 0x8000
# shlwapi stuff
# AssocCreate / AssocQueryString flags (ASSOCF_*)
ASSOCF_INIT_NOREMAPCLSID = 0x00000001 # do not remap clsids to progids
ASSOCF_INIT_BYEXENAME = 0x00000002 # executable is being passed in
ASSOCF_OPEN_BYEXENAME = 0x00000002 # executable is being passed in
ASSOCF_INIT_DEFAULTTOSTAR = 0x00000004 # treat "*" as the BaseClass
ASSOCF_INIT_DEFAULTTOFOLDER = 0x00000008 # treat "Folder" as the BaseClass
ASSOCF_NOUSERSETTINGS = 0x00000010 # dont use HKCU
ASSOCF_NOTRUNCATE = 0x00000020 # dont truncate the return string
ASSOCF_VERIFY = 0x00000040 # verify data is accurate (DISK HITS)
ASSOCF_REMAPRUNDLL = 0x00000080 # actually gets info about rundlls target if applicable
ASSOCF_NOFIXUPS = 0x00000100 # attempt to fix errors if found
ASSOCF_IGNOREBASECLASS = 0x00000200 # dont recurse into the baseclass
# AssocQueryString string kinds (ASSOCSTR_*)
ASSOCSTR_COMMAND = 1 # shell\verb\command string
ASSOCSTR_EXECUTABLE = 2 # the executable part of command string
ASSOCSTR_FRIENDLYDOCNAME = 3 # friendly name of the document type
ASSOCSTR_FRIENDLYAPPNAME = 4 # friendly name of executable
ASSOCSTR_NOOPEN = 5 # noopen value
ASSOCSTR_SHELLNEWVALUE = 6 # query values under the shellnew key
ASSOCSTR_DDECOMMAND = 7 # template for DDE commands
ASSOCSTR_DDEIFEXEC = 8 # DDECOMMAND to use if just create a process
ASSOCSTR_DDEAPPLICATION = 9 # Application name in DDE broadcast
ASSOCSTR_DDETOPIC = 10 # Topic Name in DDE broadcast
ASSOCSTR_INFOTIP = 11 # info tip for an item, or list of properties to create info tip from
ASSOCSTR_QUICKTIP = 12 # same as ASSOCSTR_INFOTIP, except, this list contains only quickly retrievable properties
ASSOCSTR_TILEINFO = 13 # similar to ASSOCSTR_INFOTIP - lists important properties for tileview
ASSOCSTR_CONTENTTYPE = 14 # MIME Content type
ASSOCSTR_DEFAULTICON = 15 # Default icon source
ASSOCSTR_SHELLEXTENSION = 16 # Guid string pointing to the Shellex\Shellextensionhandler value.
ASSOCKEY_SHELLEXECCLASS = 1 # the key that should be passed to ShellExec(hkeyClass)
ASSOCKEY_APP = 2 # the "Application" key for the association
ASSOCKEY_CLASS = 3 # the progid or class key
ASSOCKEY_BASECLASS = 4 # the BaseClass key
ASSOCDATA_MSIDESCRIPTOR = 1 # Component Descriptor to pass to MSI APIs
ASSOCDATA_NOACTIVATEHANDLER = 2 # restrict attempts to activate window
ASSOCDATA_QUERYCLASSSTORE = 3 # should check with the NT Class Store
ASSOCDATA_HASPERUSERASSOC = 4 # defaults to user specified association
ASSOCDATA_EDITFLAGS = 5 # Edit flags.
ASSOCDATA_VALUE = 6 # use pszExtra as the Value name
# flags used with SHGetViewStatePropertyBag
SHGVSPB_PERUSER = 1
SHGVSPB_ALLUSERS = 2
SHGVSPB_PERFOLDER = 4
SHGVSPB_ALLFOLDERS = 8
SHGVSPB_INHERIT = 16
SHGVSPB_ROAM = 32
SHGVSPB_NOAUTODEFAULTS = 2147483648 # 0x80000000
SHGVSPB_FOLDER = SHGVSPB_PERUSER | SHGVSPB_PERFOLDER
SHGVSPB_FOLDERNODEFAULTS = SHGVSPB_PERUSER | SHGVSPB_PERFOLDER | SHGVSPB_NOAUTODEFAULTS
SHGVSPB_USERDEFAULTS = SHGVSPB_PERUSER | SHGVSPB_ALLFOLDERS
SHGVSPB_GLOBALDEAFAULTS = SHGVSPB_ALLUSERS | SHGVSPB_ALLFOLDERS
# IDeskband and related
DBIM_MINSIZE = 0x0001
DBIM_MAXSIZE = 0x0002
DBIM_INTEGRAL = 0x0004
DBIM_ACTUAL = 0x0008
DBIM_TITLE = 0x0010
DBIM_MODEFLAGS = 0x0020
DBIM_BKCOLOR = 0x0040
DBIMF_NORMAL = 0x0000
DBIMF_VARIABLEHEIGHT = 0x0008
DBIMF_DEBOSSED = 0x0020
DBIMF_BKCOLOR = 0x0040
DBIF_VIEWMODE_NORMAL = 0x0000
DBIF_VIEWMODE_VERTICAL = 0x0001
DBIF_VIEWMODE_FLOATING = 0x0002
DBIF_VIEWMODE_TRANSPARENT = 0x0004
# Message types used with SHShellFolderView_Message
SFVM_REARRANGE = 1
SFVM_ADDOBJECT = 3
SFVM_REMOVEOBJECT = 6
SFVM_UPDATEOBJECT = 7
SFVM_GETSELECTEDOBJECTS = 9
SFVM_SETITEMPOS = 14
SFVM_SETCLIPBOARD = 16
SFVM_SETPOINTS = 23
# SHELL_LINK_DATA_FLAGS enum, used with IShellLinkDatalist
SLDF_HAS_ID_LIST = 1
SLDF_HAS_LINK_INFO = 2
SLDF_HAS_NAME = 4
SLDF_HAS_RELPATH = 8
SLDF_HAS_WORKINGDIR = 16
SLDF_HAS_ARGS = 32
SLDF_HAS_ICONLOCATION = 64
SLDF_UNICODE = 128
SLDF_FORCE_NO_LINKINFO = 256
SLDF_HAS_EXP_SZ = 512
SLDF_RUN_IN_SEPARATE = 1024
SLDF_HAS_LOGO3ID = 2048
SLDF_HAS_DARWINID = 4096
SLDF_RUNAS_USER = 8192
SLDF_NO_PIDL_ALIAS = 32768
SLDF_FORCE_UNCNAME = 65536
SLDF_HAS_EXP_ICON_SZ = 16384
SLDF_RUN_WITH_SHIMLAYER = 131072
SLDF_RESERVED = 2147483648
# IShellLinkDataList data block signatures
EXP_SPECIAL_FOLDER_SIG = 2684354565
NT_CONSOLE_PROPS_SIG = 2684354562
NT_FE_CONSOLE_PROPS_SIG = 2684354564
EXP_DARWIN_ID_SIG = 2684354566
EXP_LOGO3_ID_SIG = 2684354567
EXP_SZ_ICON_SIG = 2684354567
EXP_SZ_LINK_SIG = 2684354561
# IURL_SETURL_FLAGS enum, used with PyIUniformResourceLocator.SetURL
IURL_SETURL_FL_GUESS_PROTOCOL = 1
IURL_SETURL_FL_USE_DEFAULT_PROTOCOL = 2
# IURL_INVOKECOMMAND_FLAGS enum, used with PyIUniformResourceLocator.InvokeCommand
IURL_INVOKECOMMAND_FL_ALLOW_UI = 1
IURL_INVOKECOMMAND_FL_USE_DEFAULT_VERB = 2
IURL_INVOKECOMMAND_FL_DDEWAIT = 4
## constants used with IActiveDesktop
# COMPONENT.ComponentType
COMP_TYPE_HTMLDOC = 0
COMP_TYPE_PICTURE = 1
COMP_TYPE_WEBSITE = 2
COMP_TYPE_CONTROL = 3
COMP_TYPE_CFHTML = 4
COMP_TYPE_MAX = 4
# COMPONENT.CurItemState
IS_NORMAL = 1
IS_FULLSCREEN = 2
IS_SPLIT = 4
IS_VALIDSIZESTATEBITS = IS_NORMAL|IS_SPLIT|IS_FULLSCREEN
IS_VALIDSTATEBITS = IS_NORMAL|IS_SPLIT|IS_FULLSCREEN|2147483648|1073741824 ## 0x80000000|0x40000000
# IActiveDesktop.ApplyChanges Flags
AD_APPLY_SAVE = 1
AD_APPLY_HTMLGEN = 2
AD_APPLY_REFRESH = 4
AD_APPLY_ALL = AD_APPLY_SAVE|AD_APPLY_HTMLGEN|AD_APPLY_REFRESH
AD_APPLY_FORCE = 8
AD_APPLY_BUFFERED_REFRESH = 16
AD_APPLY_DYNAMICREFRESH = 32
# Wallpaper styles used with GetWallpaper and SetWallpaper
WPSTYLE_CENTER = 0
WPSTYLE_TILE = 1
WPSTYLE_STRETCH = 2
WPSTYLE_MAX = 3
# ModifyDesktopItem flags
COMP_ELEM_TYPE = 1 ## 0x00000001
COMP_ELEM_CHECKED = 2 ## 0x00000002
COMP_ELEM_DIRTY = 4 ## 0x00000004
COMP_ELEM_NOSCROLL = 8 ## 0x00000008
COMP_ELEM_POS_LEFT = 16 ## 0x00000010
COMP_ELEM_POS_TOP = 32 ## 0x00000020
COMP_ELEM_SIZE_WIDTH = 64 ## 0x00000040
COMP_ELEM_SIZE_HEIGHT = 128 ## 0x00000080
COMP_ELEM_POS_ZINDEX = 256 ## 0x00000100
COMP_ELEM_SOURCE = 512 ## 0x00000200
COMP_ELEM_FRIENDLYNAME = 1024 ## 0x00000400
COMP_ELEM_SUBSCRIBEDURL = 2048 ## 0x00000800
COMP_ELEM_ORIGINAL_CSI = 4096 ## 0x00001000
COMP_ELEM_RESTORED_CSI = 8192 ## 0x00002000
COMP_ELEM_CURITEMSTATE = 16384 ## 0x00004000
COMP_ELEM_ALL = COMP_ELEM_TYPE|COMP_ELEM_CHECKED|COMP_ELEM_DIRTY|COMP_ELEM_NOSCROLL|COMP_ELEM_POS_LEFT \
|COMP_ELEM_SIZE_WIDTH|COMP_ELEM_SIZE_HEIGHT|COMP_ELEM_POS_ZINDEX|COMP_ELEM_SOURCE \
|COMP_ELEM_FRIENDLYNAME|COMP_ELEM_POS_TOP|COMP_ELEM_SUBSCRIBEDURL|COMP_ELEM_ORIGINAL_CSI \
|COMP_ELEM_RESTORED_CSI|COMP_ELEM_CURITEMSTATE
DTI_ADDUI_DEFAULT = 0
DTI_ADDUI_DISPSUBWIZARD = 1
DTI_ADDUI_POSITIONITEM = 2
ADDURL_SILENT = 1
COMPONENT_TOP = 1073741823 ## 0x3fffffff
COMPONENT_DEFAULT_LEFT = 65535 ## 0xFFFF
COMPONENT_DEFAULT_TOP = 65535 ## 0xFFFF
SSM_CLEAR = 0
SSM_SET = 1
SSM_REFRESH = 2
SSM_UPDATE = 4
SCHEME_DISPLAY = 1 ##0x0001
SCHEME_EDIT = 2 ##0x0002
SCHEME_LOCAL = 4 ##0x0004
SCHEME_GLOBAL = 8 ##0x0008
SCHEME_REFRESH = 16 ##0x0010
SCHEME_UPDATE = 32 ##0x0020
SCHEME_DONOTUSE = 64 ##0x0040
SCHEME_CREATE = 128 ##0x0080
GADOF_DIRTY = 1
# From EmptyVC.h
EVCF_HASSETTINGS = 0x0001
EVCF_ENABLEBYDEFAULT = 0x0002
EVCF_REMOVEFROMLIST = 0x0004
EVCF_ENABLEBYDEFAULT_AUTO = 0x0008
EVCF_DONTSHOWIFZERO = 0x0010
EVCF_SETTINGSMODE = 0x0020
EVCF_OUTOFDISKSPACE = 0x0040
EVCCBF_LASTNOTIFICATION = 0x0001
# ShObjIdl.h IExplorer* related
EBO_NONE = 0
EBO_NAVIGATEONCE = 0x1
EBO_SHOWFRAMES = 0x2
EBO_ALWAYSNAVIGATE = 0x4
EBO_NOTRAVELLOG = 0x8
EBO_NOWRAPPERWINDOW = 0x10
EBF_NONE = 0
EBF_SELECTFROMDATAOBJECT = 0x100
EBF_NODROPTARGET = 0x200
ECS_ENABLED = 0
ECS_DISABLED = 0x1
ECS_HIDDEN = 0x2
ECS_CHECKBOX = 0x4
ECS_CHECKED = 0x8
ECF_HASSUBCOMMANDS = 0x1
ECF_HASSPLITBUTTON = 0x2
ECF_HIDELABEL = 0x4
ECF_ISSEPARATOR = 0x8
ECF_HASLUASHIELD = 0x10
SIATTRIBFLAGS_AND = 0x1
SIATTRIBFLAGS_OR = 0x2
SIATTRIBFLAGS_APPCOMPAT = 0x3
SIATTRIBFLAGS_MASK = 0x3
SIGDN_NORMALDISPLAY = 0
SIGDN_PARENTRELATIVEPARSING = -2147385343 ## 0x80018001
SIGDN_DESKTOPABSOLUTEPARSING = -2147319808 ## 0x80028000
SIGDN_PARENTRELATIVEEDITING = -2147282943 ## 0x80031001
SIGDN_DESKTOPABSOLUTEEDITING = -2147172352 ## 0x8004c000
SIGDN_FILESYSPATH = -2147123200 ## 0x80058000
SIGDN_URL = -2147057664 ## 0x80068000
SIGDN_PARENTRELATIVEFORADDRESSBAR = -2146975743 ## 0x8007c001,
SIGDN_PARENTRELATIVE = -2146959359 ## 0x80080001
SICHINT_DISPLAY = 0,
SICHINT_ALLFIELDS = -2147483648 ## 0x80000000
SICHINT_CANONICAL = 0x10000000
ASSOCCLASS_SHELL_KEY = 0
ASSOCCLASS_PROGID_KEY = 1 # hkeyClass
ASSOCCLASS_PROGID_STR = 2 # pszClass (HKCR\pszClass)
ASSOCCLASS_CLSID_KEY = 3 # hkeyClass
ASSOCCLASS_CLSID_STR = 4 # pszClass (HKCR\CLSID\pszClass)
ASSOCCLASS_APP_KEY = 5 # hkeyClass
ASSOCCLASS_APP_STR = 6 # pszClass (HKCR\Applications\PathFindFileName(pszClass))
ASSOCCLASS_SYSTEM_STR = 7 # pszClass
ASSOCCLASS_FOLDER = 8 # none
ASSOCCLASS_STAR = 9 # none
NSTCS_HASEXPANDOS = 0x1
NSTCS_HASLINES = 0x2
NSTCS_SINGLECLICKEXPAND = 0x4
NSTCS_FULLROWSELECT = 0x8
NSTCS_SPRINGEXPAND = 0x10
NSTCS_HORIZONTALSCROLL = 0x20
NSTCS_ROOTHASEXPANDO = 0x40
NSTCS_SHOWSELECTIONALWAYS = 0x80
NSTCS_NOINFOTIP = 0x200
NSTCS_EVENHEIGHT = 0x400
NSTCS_NOREPLACEOPEN = 0x800
NSTCS_DISABLEDRAGDROP = 0x1000
NSTCS_NOORDERSTREAM = 0x2000
NSTCS_RICHTOOLTIP = 0x4000
NSTCS_BORDER = 0x8000
NSTCS_NOEDITLABELS = 0x10000
NSTCS_TABSTOP = 0x20000
NSTCS_FAVORITESMODE = 0x80000
NSTCS_AUTOHSCROLL = 0x100000
NSTCS_FADEINOUTEXPANDOS = 0x200000
NSTCS_EMPTYTEXT = 0x400000
NSTCS_CHECKBOXES = 0x800000
NSTCS_PARTIALCHECKBOXES = 0x1000000
NSTCS_EXCLUSIONCHECKBOXES = 0x2000000
NSTCS_DIMMEDCHECKBOXES = 0x4000000
NSTCS_NOINDENTCHECKS = 0x8000000
NSTCS_ALLOWJUNCTIONS = 0x10000000
NSTCS_SHOWTABSBUTTON = 0x20000000
NSTCS_SHOWDELETEBUTTON = 0x40000000
NSTCS_SHOWREFRESHBUTTON = -2147483648 # 0x80000000
NSTCRS_VISIBLE = 0
NSTCRS_HIDDEN = 0x1
NSTCRS_EXPANDED = 0x2
NSTCIS_NONE = 0
NSTCIS_SELECTED = 0x1
NSTCIS_EXPANDED = 0x2
NSTCIS_BOLD = 0x4
NSTCIS_DISABLED = 0x8
NSTCGNI_NEXT = 0
NSTCGNI_NEXTVISIBLE = 0x1
NSTCGNI_PREV = 0x2
NSTCGNI_PREVVISIBLE = 0x3
NSTCGNI_PARENT = 0x4
NSTCGNI_CHILD = 0x5
NSTCGNI_FIRSTVISIBLE = 0x6
NSTCGNI_LASTVISIBLE = 0x7
CLSID_ExplorerBrowser = "{71f96385-ddd6-48d3-a0c1-ae06e8b055fb}"
# Names of the methods of many shell interfaces; used by implementation of
# the interfaces.
IBrowserFrame_Methods = ["GetFrameOptions"]
ICategorizer_Methods = ["GetDescription", "GetCategory",
"GetCategoryInfo", "CompareCategory"]
ICategoryProvider_Methods = ["CanCategorizeOnSCID", "GetDefaultCategory",
"GetCategoryForSCID", "EnumCategories",
"GetCategoryName", "CreateCategory"]
IContextMenu_Methods = ["QueryContextMenu", "InvokeCommand", "GetCommandString"]
IExplorerCommand_Methods = ["GetTitle", "GetIcon", "GetToolTip",
"GetCanonicalName", "GetState", "Invoke",
"GetFlags", "EnumSubCommands"]
IExplorerCommandProvider_Methods = ["GetCommand", "GetCommands"]
IOleWindow_Methods = ["GetWindow", "ContextSensitiveHelp"] # XXX - this should be somewhere in win32com
IPersist_Methods = ["GetClassID"]
IPersistFolder_Methods = IPersist_Methods + ["Initialize"]
IPersistFolder2_Methods = IPersistFolder_Methods + ["GetCurFolder"]
IShellExtInit_Methods = ["Initialize"]
IShellView_Methods = IOleWindow_Methods + \
["TranslateAccelerator", "EnableModeless", "UIActivate",
"Refresh", "CreateViewWindow", "DestroyViewWindow",
"GetCurrentInfo", "AddPropertySheetPages",
"SaveViewState", "SelectItem", "GetItemObject"]
IShellFolder_Methods = ["ParseDisplayName", "EnumObjects", "BindToObject",
"BindToStorage", "CompareIDs", "CreateViewObject",
"GetAttributesOf", "GetUIObjectOf",
"GetDisplayNameOf", "SetNameOf"]
IShellFolder2_Methods = IShellFolder_Methods + \
["GetDefaultSearchGUID", "EnumSearches",
"GetDefaultColumn", "GetDefaultColumnState",
"GetDetailsEx", "GetDetailsOf", "MapColumnToSCID"]
## enum GETPROPERTYSTOREFLAGS, used with IShellItem2 methods
GPS_DEFAULT = 0
GPS_HANDLERPROPERTIESONLY = 0x1
GPS_READWRITE = 0x2
GPS_TEMPORARY = 0x4
GPS_FASTPROPERTIESONLY = 0x8
GPS_OPENSLOWITEM = 0x10
GPS_DELAYCREATION = 0x20
GPS_BESTEFFORT = 0x40
GPS_MASK_VALID = 0x7f
## Bind context parameter names, used with IBindCtx::RegisterObjectParam
STR_AVOID_DRIVE_RESTRICTION_POLICY = "Avoid Drive Restriction Policy"
STR_BIND_DELEGATE_CREATE_OBJECT = "Delegate Object Creation"
STR_BIND_FOLDERS_READ_ONLY = "Folders As Read Only"
STR_BIND_FOLDER_ENUM_MODE = "Folder Enum Mode"
STR_BIND_FORCE_FOLDER_SHORTCUT_RESOLVE = "Force Folder Shortcut Resolve"
STR_DONT_PARSE_RELATIVE = "Don't Parse Relative"
STR_DONT_RESOLVE_LINK = "Don't Resolve Link"
## STR_ENUM_ITEMS_FLAGS
STR_FILE_SYS_BIND_DATA = "File System Bind Data"
STR_GET_ASYNC_HANDLER = "GetAsyncHandler"
STR_GPS_BESTEFFORT = "GPS_BESTEFFORT"
STR_GPS_DELAYCREATION = "GPS_DELAYCREATION"
STR_GPS_FASTPROPERTIESONLY = "GPS_FASTPROPERTIESONLY"
STR_GPS_HANDLERPROPERTIESONLY = "GPS_HANDLERPROPERTIESONLY"
STR_GPS_NO_OPLOCK = "GPS_NO_OPLOCK"
STR_GPS_OPENSLOWITEM = "GPS_OPENSLOWITEM"
STR_IFILTER_FORCE_TEXT_FILTER_FALLBACK = "Always bind persistent handlers"
STR_IFILTER_LOAD_DEFINED_FILTER = "Only bind registered persistent handlers"
STR_INTERNAL_NAVIGATE = "Internal Navigation"
STR_INTERNETFOLDER_PARSE_ONLY_URLMON_BINDABLE = "Validate URL"
STR_ITEM_CACHE_CONTEXT = "ItemCacheContext"
STR_NO_VALIDATE_FILENAME_CHARS = "NoValidateFilenameChars"
STR_PARSE_ALLOW_INTERNET_SHELL_FOLDERS = "Allow binding to Internet shell folder handlers and negate STR_PARSE_PREFER_WEB_BROWSING"
STR_PARSE_AND_CREATE_ITEM = "ParseAndCreateItem"
STR_PARSE_DONT_REQUIRE_VALIDATED_URLS = "Do not require validated URLs"
STR_PARSE_EXPLICIT_ASSOCIATION_SUCCESSFUL = "ExplicitAssociationSuccessful"
STR_PARSE_PARTIAL_IDLIST = "ParseOriginalItem"
STR_PARSE_PREFER_FOLDER_BROWSING = "Parse Prefer Folder Browsing"
STR_PARSE_PREFER_WEB_BROWSING = "Do not bind to Internet shell folder handlers"
STR_PARSE_PROPERTYSTORE = "DelegateNamedProperties"
STR_PARSE_SHELL_PROTOCOL_TO_FILE_OBJECTS = "Parse Shell Protocol To File Objects"
STR_PARSE_SHOW_NET_DIAGNOSTICS_UI = "Show network diagnostics UI"
STR_PARSE_SKIP_NET_CACHE = "Skip Net Resource Cache"
STR_PARSE_TRANSLATE_ALIASES = "Parse Translate Aliases"
STR_PARSE_WITH_EXPLICIT_ASSOCAPP = "ExplicitAssociationApp"
STR_PARSE_WITH_EXPLICIT_PROGID = "ExplicitProgid"
STR_PARSE_WITH_PROPERTIES = "ParseWithProperties"
## STR_PROPERTYBAG_PARAM
STR_SKIP_BINDING_CLSID = "Skip Binding CLSID"
STR_TRACK_CLSID = "Track the CLSID"
## KF_REDIRECTION_CAPABILITIES enum
KF_REDIRECTION_CAPABILITIES_ALLOW_ALL = 0x000000FF
KF_REDIRECTION_CAPABILITIES_REDIRECTABLE = 0x00000001
KF_REDIRECTION_CAPABILITIES_DENY_ALL = 0x000FFF00
KF_REDIRECTION_CAPABILITIES_DENY_POLICY_REDIRECTED = 0x00000100
KF_REDIRECTION_CAPABILITIES_DENY_POLICY = 0x00000200
KF_REDIRECTION_CAPABILITIES_DENY_PERMISSIONS = 0x00000400
## KF_REDIRECT_FLAGS enum
KF_REDIRECT_USER_EXCLUSIVE = 0x00000001
KF_REDIRECT_COPY_SOURCE_DACL = 0x00000002
KF_REDIRECT_OWNER_USER = 0x00000004
KF_REDIRECT_SET_OWNER_EXPLICIT = 0x00000008
KF_REDIRECT_CHECK_ONLY = 0x00000010
KF_REDIRECT_WITH_UI = 0x00000020
KF_REDIRECT_UNPIN = 0x00000040
KF_REDIRECT_PIN = 0x00000080
KF_REDIRECT_COPY_CONTENTS = 0x00000200
KF_REDIRECT_DEL_SOURCE_CONTENTS = 0x00000400
KF_REDIRECT_EXCLUDE_ALL_KNOWN_SUBFOLDERS = 0x00000800
## KF_CATEGORY enum
KF_CATEGORY_VIRTUAL = 0x00000001
KF_CATEGORY_FIXED = 0x00000002
KF_CATEGORY_COMMON = 0x00000003
KF_CATEGORY_PERUSER = 0x00000004
## FFFP_MODE enum
FFFP_EXACTMATCH = 0
FFFP_NEARESTPARENTMATCH = 1
KF_FLAG_CREATE = 0x00008000
KF_FLAG_DONT_VERIFY = 0x00004000
KF_FLAG_DONT_UNEXPAND = 0x00002000
KF_FLAG_NO_ALIAS = 0x00001000
KF_FLAG_INIT = 0x00000800
KF_FLAG_DEFAULT_PATH = 0x00000400
KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200
KF_FLAG_SIMPLE_IDLIST = 0x00000100
## APPDOCLISTTYPE, used with IApplicationDocumentLists.GetList
ADLT_RECENT = 0
ADLT_FREQUENT = 1
## KNOWNDESTCATEGORY used with ICustomDestinationList.AppendKnownCategory
KDC_FREQUENT = 1
KDC_RECENT = 2
## LIBRARYFOLDERFILTER used with IShellLibrary.GetFolders
LFF_FORCEFILESYSTEM = 1
LFF_STORAGEITEMS = 2
LFF_ALLITEMS = 3
## DEFAULTSAVEFOLDERTYPE used with IShellLibrary.Get/SetDefaultSaveFolder
DSFT_DETECT = 1
DSFT_PRIVATE = 2
DSFT_PUBLIC = 3
## LIBRARYOPTIONFLAGS used with IShellLibrary.Get/SetOptions
LOF_DEFAULT = 0
LOF_PINNEDTONAVPANE = 1
LOF_MASK_ALL = 1
## LIBRARYSAVEFLAGS Used with PyIShellLibrary.Save
LSF_FAILIFTHERE = 0
LSF_OVERRIDEEXISTING = 1
LSF_MAKEUNIQUENAME = 2
## TRANSFER_SOURCE_FLAGS, used with IFileOperationProgressSink
TSF_NORMAL = 0
TSF_FAIL_EXIST = 0
TSF_RENAME_EXIST = 0x1
TSF_OVERWRITE_EXIST = 0x2
TSF_ALLOW_DECRYPTION = 0x4
TSF_NO_SECURITY = 0x8
TSF_COPY_CREATION_TIME = 0x10
TSF_COPY_WRITE_TIME = 0x20
TSF_USE_FULL_ACCESS = 0x40
TSF_DELETE_RECYCLE_IF_POSSIBLE = 0x80
TSF_COPY_HARD_LINK = 0x100
TSF_COPY_LOCALIZED_NAME = 0x200
TSF_MOVE_AS_COPY_DELETE = 0x400
TSF_SUSPEND_SHELLEVENTS = 0x800
## TRANSFER_ADVISE_STATE, used with ITransferAdviseSink
TS_NONE = 0
TS_PERFORMING = 1
TS_PREPARING = 2
TS_INDETERMINATE = 4
## Success HRESULTs returned by ITransfer* interface operations
COPYENGINE_S_YES = 0x00270001
COPYENGINE_S_NOT_HANDLED = 0x00270003
COPYENGINE_S_USER_RETRY = 0x00270004
COPYENGINE_S_USER_IGNORED = 0x00270005
COPYENGINE_S_MERGE = 0x00270006
COPYENGINE_S_DONT_PROCESS_CHILDREN = 0x00270008
COPYENGINE_S_ALREADY_DONE = 0x0027000A
COPYENGINE_S_PENDING = 0x0027000B
COPYENGINE_S_KEEP_BOTH = 0x0027000C
COPYENGINE_S_CLOSE_PROGRAM = 0x0027000D
COPYENGINE_S_COLLISIONRESOLVED = 0x0027000E
## Error HRESULTS
COPYENGINE_E_USER_CANCELLED = 0x80270000
COPYENGINE_E_CANCELLED = 0x80270001
COPYENGINE_E_REQUIRES_ELEVATION = 0x80270002
COPYENGINE_E_SAME_FILE = 0x80270003
COPYENGINE_E_DIFF_DIR = 0x80270004
COPYENGINE_E_MANY_SRC_1_DEST = 0x80270005
COPYENGINE_E_DEST_SUBTREE = 0x80270009
COPYENGINE_E_DEST_SAME_TREE = 0x8027000A
COPYENGINE_E_FLD_IS_FILE_DEST = 0x8027000B
COPYENGINE_E_FILE_IS_FLD_DEST = 0x8027000C
COPYENGINE_E_FILE_TOO_LARGE = 0x8027000D
COPYENGINE_E_REMOVABLE_FULL = 0x8027000E
COPYENGINE_E_DEST_IS_RO_CD = 0x8027000F
COPYENGINE_E_DEST_IS_RW_CD = 0x80270010
COPYENGINE_E_DEST_IS_R_CD = 0x80270011
COPYENGINE_E_DEST_IS_RO_DVD = 0x80270012
COPYENGINE_E_DEST_IS_RW_DVD = 0x80270013
COPYENGINE_E_DEST_IS_R_DVD = 0x80270014
COPYENGINE_E_SRC_IS_RO_CD = 0x80270015
COPYENGINE_E_SRC_IS_RW_CD = 0x80270016
COPYENGINE_E_SRC_IS_R_CD = 0x80270017
COPYENGINE_E_SRC_IS_RO_DVD = 0x80270018
COPYENGINE_E_SRC_IS_RW_DVD = 0x80270019
COPYENGINE_E_SRC_IS_R_DVD = 0x8027001A
COPYENGINE_E_INVALID_FILES_SRC = 0x8027001B
COPYENGINE_E_INVALID_FILES_DEST = 0x8027001C
COPYENGINE_E_PATH_TOO_DEEP_SRC = 0x8027001D
COPYENGINE_E_PATH_TOO_DEEP_DEST = 0x8027001E
COPYENGINE_E_ROOT_DIR_SRC = 0x8027001F
COPYENGINE_E_ROOT_DIR_DEST = 0x80270020
COPYENGINE_E_ACCESS_DENIED_SRC = 0x80270021
COPYENGINE_E_ACCESS_DENIED_DEST = 0x80270022
COPYENGINE_E_PATH_NOT_FOUND_SRC = 0x80270023
COPYENGINE_E_PATH_NOT_FOUND_DEST = 0x80270024
COPYENGINE_E_NET_DISCONNECT_SRC = 0x80270025
COPYENGINE_E_NET_DISCONNECT_DEST = 0x80270026
COPYENGINE_E_SHARING_VIOLATION_SRC = 0x80270027
COPYENGINE_E_SHARING_VIOLATION_DEST = 0x80270028
COPYENGINE_E_ALREADY_EXISTS_NORMAL = 0x80270029
COPYENGINE_E_ALREADY_EXISTS_READONLY = 0x8027002A
COPYENGINE_E_ALREADY_EXISTS_SYSTEM = 0x8027002B
COPYENGINE_E_ALREADY_EXISTS_FOLDER = 0x8027002C
COPYENGINE_E_STREAM_LOSS = 0x8027002D
COPYENGINE_E_EA_LOSS = 0x8027002E
COPYENGINE_E_PROPERTY_LOSS = 0x8027002F
COPYENGINE_E_PROPERTIES_LOSS = 0x80270030
COPYENGINE_E_ENCRYPTION_LOSS = 0x80270031
COPYENGINE_E_DISK_FULL = 0x80270032
COPYENGINE_E_DISK_FULL_CLEAN = 0x80270033
COPYENGINE_E_EA_NOT_SUPPORTED = 0x80270034
COPYENGINE_E_CANT_REACH_SOURCE = 0x80270035
COPYENGINE_E_RECYCLE_UNKNOWN_ERROR = 0x80270035
COPYENGINE_E_RECYCLE_FORCE_NUKE = 0x80270036
COPYENGINE_E_RECYCLE_SIZE_TOO_BIG = 0x80270037
COPYENGINE_E_RECYCLE_PATH_TOO_LONG = 0x80270038
COPYENGINE_E_RECYCLE_BIN_NOT_FOUND = 0x8027003A
COPYENGINE_E_NEWFILE_NAME_TOO_LONG = 0x8027003B
COPYENGINE_E_NEWFOLDER_NAME_TOO_LONG = 0x8027003C
COPYENGINE_E_DIR_NOT_EMPTY = 0x8027003D
COPYENGINE_E_FAT_MAX_IN_ROOT = 0x8027003E
COPYENGINE_E_ACCESSDENIED_READONLY = 0x8027003F
COPYENGINE_E_REDIRECTED_TO_WEBPAGE = 0x80270040
COPYENGINE_E_SERVER_BAD_FILE_TYPE = 0x80270041 | unknown | codeparrot/codeparrot-clean | ||
"""
Mixins to facilitate testing OAuth connections to Django-OAuth-Toolkit or
Django-OAuth2-Provider.
"""
# pylint: disable=protected-access
from unittest import skip, expectedFailure
from django.test.client import RequestFactory
from openedx.core.djangoapps.oauth_dispatch import adapters
from openedx.core.djangoapps.oauth_dispatch.tests.constants import DUMMY_REDIRECT_URL
from ..views import DOTAccessTokenExchangeView
class DOPAdapterMixin(object):
    """
    Test mixin that points existing OAuth tests at the
    django-oauth2-provider (DOP) backend.

    Overrides ``client_id``, ``access_token`` and ``oauth2_adapter`` on the
    test class it is mixed into.
    """

    client_id = 'dop_test_client_id'
    access_token = 'dop_test_access_token'
    oauth2_adapter = adapters.DOPAdapter()

    def create_public_client(self, user, client_id=None):
        """Register a public OAuth client for *user* via the DOP adapter."""
        return self.oauth2_adapter.create_public_client(
            user=user,
            client_id=client_id,
            name='Test Public Client',
            redirect_uri=DUMMY_REDIRECT_URL,
        )

    def create_confidential_client(self, user, client_id=None):
        """Register a confidential OAuth client for *user* via the DOP adapter."""
        return self.oauth2_adapter.create_confidential_client(
            user=user,
            client_id=client_id,
            name='Test Confidential Client',
            redirect_uri=DUMMY_REDIRECT_URL,
        )

    def get_token_response_keys(self):
        """Keys expected in a DOP access-token response."""
        return {'access_token', 'token_type', 'expires_in', 'scope'}
class DOTAdapterMixin(object):
    """
    Test mixin that points existing OAuth tests at the
    django-oauth-toolkit (DOT) backend.

    Overrides ``client_id``, ``access_token`` and ``oauth2_adapter`` on the
    test class it is mixed into.
    """

    client_id = 'dot_test_client_id'
    access_token = 'dot_test_access_token'
    oauth2_adapter = adapters.DOTAdapter()

    def create_public_client(self, user, client_id=None):
        """Register a public OAuth application for *user* via the DOT adapter."""
        return self.oauth2_adapter.create_public_client(
            user=user,
            client_id=client_id,
            name='Test Public Application',
            redirect_uri=DUMMY_REDIRECT_URL,
        )

    def create_confidential_client(self, user, client_id=None):
        """Register a confidential OAuth application for *user* via the DOT adapter."""
        return self.oauth2_adapter.create_confidential_client(
            user=user,
            client_id=client_id,
            name='Test Confidential Application',
            redirect_uri=DUMMY_REDIRECT_URL,
        )

    def get_token_response_keys(self):
        """Keys expected in a DOT access-token response (DOT adds refresh_token)."""
        return {'access_token', 'refresh_token', 'token_type', 'expires_in', 'scope'}

    def test_get_method(self):
        # The dispatcher routes GET to DOP, so exercise the DOT view
        # directly; it must reject a GET request with a 400.
        request = RequestFactory().get('/oauth2/exchange_access_token/')
        request.session = {}
        view = DOTAccessTokenExchangeView.as_view()
        response = view(request, backend='facebook')
        self.assertEqual(response.status_code, 400)

    @expectedFailure
    def test_single_access_token(self):
        # TODO: Single access tokens not supported yet for DOT (See MA-2122)
        super(DOTAdapterMixin, self).test_single_access_token()

    @skip("Not supported yet (See MA-2123)")
    def test_scopes(self):
        super(DOTAdapterMixin, self).test_scopes()
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from headphones import helpers
from logutils.queue import QueueHandler, QueueListener
from logging import handlers
import multiprocessing
import contextlib
import headphones
import threading
import traceback
import logging
import errno
import sys
import os
# These settings are for file logging only
FILENAME = "headphones.log"
MAX_SIZE = 1000000 # 1 MB
MAX_FILES = 5
# Headphones logger
logger = logging.getLogger("headphones")
# Global queue for multiprocessing logging
queue = None
class LogListHandler(logging.Handler):
    """Handler that mirrors log records into the in-memory Web UI list."""

    def emit(self, record):
        # Newlines are converted so messages render as HTML line breaks;
        # newest entries go to the front of the shared list.
        text = self.format(record).replace("\n", "<br />")
        entry = (helpers.now(), text, record.levelname, record.threadName)
        headphones.LOG_LIST.insert(0, entry)
@contextlib.contextmanager
def listener():
    """
    Context manager that starts a ``QueueListener`` forwarding worker-process
    log records to this logger's handlers, and stops it automatically.

    To be used in a ``with`` statement in the main process, for
    multiprocessing.  If the logging queue cannot be created (e.g. no access
    to shared memory), multiprocess logging is disabled and this becomes a
    no-op.
    """
    global queue

    # Initialize queue if not already done.
    if queue is None:
        try:
            queue = multiprocessing.Queue()
        except OSError as e:
            # Mark multiprocess logging as permanently disabled.
            queue = False

            # Some machines don't have access to /dev/shm. See
            # http://stackoverflow.com/questions/2009278 for more information.
            if e.errno == errno.EACCES:
                # Fixed message: the original concatenated "any"+"logging"
                # into "anylogging" and said "processed" for "processes".
                logger.warning("Multiprocess logging disabled, because "
                               "current user cannot map shared memory. You won't see "
                               "any logging generated by the worker processes.")

    # Multiprocess logging may be disabled.
    if not queue:
        yield
    else:
        queue_listener = QueueListener(queue, *logger.handlers)

        try:
            queue_listener.start()
            yield
        finally:
            queue_listener.stop()
def initMultiprocessing():
    """
    Replace every handler with a single QueueHandler on top.

    Only meant to be called inside a multiprocessing worker process, since
    it reconfigures the logger completely.
    """
    # Nothing to do when multiprocess logging is disabled.
    if not queue:
        return

    # Strip all existing handlers; the queue handler becomes the only one.
    for existing in list(logger.handlers):
        logger.removeHandler(existing)

    forwarder = QueueHandler(queue)
    forwarder.setLevel(logging.DEBUG)
    logger.addHandler(forwarder)

    # Use the process name as the thread name so log records identify the worker.
    threading.current_thread().name = multiprocessing.current_process().name
def initLogger(console=False, log_dir=False, verbose=False):
    """
    Set up logging for Headphones on the logger named 'headphones'.

    Up to three handlers are attached:
      * LogListHandler: feeds the Web UI log view (always)
      * RotatingFileHandler: writes headphones.log (only if log_dir is given)
      * StreamHandler: writes to the console (only if console is True)

    May be invoked multiple times during different stages of Headphones;
    handlers from a previous call are closed and removed first.
    """
    # Tear down handlers from any previous call so the logger can be
    # reinitialized at runtime without duplicating output.
    for old_handler in logger.handlers[:]:
        if isinstance(old_handler, handlers.RotatingFileHandler):
            old_handler.close()
        elif isinstance(old_handler, logging.StreamHandler):
            old_handler.flush()
        logger.removeHandler(old_handler)

    # Do not bubble records up to the root logger; filter by verbosity here.
    logger.propagate = False
    logger.setLevel(logging.DEBUG if verbose else logging.INFO)

    # Web UI log list.
    loglist_handler = LogListHandler()
    loglist_handler.setLevel(logging.DEBUG)
    logger.addHandler(loglist_handler)

    # Rotating log file.
    if log_dir:
        file_handler = handlers.RotatingFileHandler(
            os.path.join(log_dir, FILENAME),
            maxBytes=MAX_SIZE, backupCount=MAX_FILES)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(levelname)-7s :: %(threadName)s : %(message)s',
            '%d-%b-%Y %H:%M:%S'))
        logger.addHandler(file_handler)

    # Console output.
    if console:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(levelname)s :: %(threadName)s : %(message)s',
            '%d-%b-%Y %H:%M:%S'))
        console_handler.setLevel(logging.DEBUG)
        logger.addHandler(console_handler)

    # Install exception hooks.
    initHooks()
def initHooks(global_exceptions=True, thread_exceptions=True, pass_original=True):
    """
    This method installs exception catching mechanisms. Any exception caught
    will pass through the exception hook, and will be logged to the logger as
    an error. Additionally, a traceback is provided.
    This is very useful for crashing threads and any other bugs, that may not
    be exposed when running as daemon.
    The default exception hook is still considered, if pass_original is True.

    Note: when thread_exceptions is True, ``threading.Thread.__init__`` is
    monkey-patched process-wide so every subsequently created thread's
    ``run`` is wrapped; calling this more than once re-wraps the (already
    wrapped) ``__init__``.
    """
    def excepthook(*exception_info):
        # We should always catch this to prevent loops!
        # (A failure while logging the failure must not recurse, hence the
        # deliberate bare except below.)
        try:
            message = "".join(traceback.format_exception(*exception_info))
            logger.error("Uncaught exception: %s", message)
        except:
            pass
        # Original excepthook
        if pass_original:
            sys.__excepthook__(*exception_info)
    # Global exception hook
    if global_exceptions:
        sys.excepthook = excepthook
    # Thread exception hook
    if thread_exceptions:
        old_init = threading.Thread.__init__
        # Wrap each thread's run() so uncaught exceptions in worker threads
        # reach excepthook; threading has no global hook of its own (before
        # Python 3.8's threading.excepthook).
        def new_init(self, *args, **kwargs):
            old_init(self, *args, **kwargs)
            # Capture the instance's run at construction time.
            old_run = self.run
            def new_run(*args, **kwargs):
                try:
                    old_run(*args, **kwargs)
                except (KeyboardInterrupt, SystemExit):
                    # Let interpreter-exit signals propagate untouched.
                    raise
                except:
                    excepthook(*sys.exc_info())
            self.run = new_run
        # Monkey patch the run() by monkey patching the __init__ method
        threading.Thread.__init__ = new_init
# Expose logger methods at module level so callers can simply do
# ``from headphones import logger; logger.info(...)``.
info = logger.info
# ``Logger.warn`` is a deprecated alias of ``Logger.warning``; keep the
# module-level ``warn`` name for backwards compatibility, but bind it to
# ``warning`` so callers do not hit the deprecated method.
warn = logger.warning
error = logger.error
debug = logger.debug
warning = logger.warning
exception = logger.exception
import re
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.utils.module_loading import import_by_path
from django.middleware.csrf import rotate_token
from .signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
    """Import the backend class at dotted *path* and return a new instance."""
    backend_cls = import_by_path(path)
    return backend_cls()
def get_backends():
    """Instantiate every backend listed in settings.AUTHENTICATION_BACKENDS."""
    backends = [load_backend(path) for path in settings.AUTHENTICATION_BACKENDS]
    if not backends:
        raise ImproperlyConfigured('No authentication backends have been defined. Does AUTHENTICATION_BACKENDS contain anything?')
    return backends
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def authenticate(**credentials):
    """
    If the given credentials are valid, return a User object.

    Each backend from get_backends() is offered the credentials in order:
    a TypeError means the backend does not accept these arguments (try the
    next one); PermissionDenied aborts authentication entirely; a None
    result moves on to the next backend.  When no backend accepts the
    credentials, the user_login_failed signal is fired and None falls out.
    """
    for candidate in get_backends():
        try:
            user = candidate.authenticate(**credentials)
        except TypeError:
            # Wrong credential signature for this backend -- keep looking.
            continue
        except PermissionDenied:
            # Hard veto: this user should not be allowed in at all.
            return None
        if user is not None:
            # Record which backend produced the user, for login().
            user.backend = "%s.%s" % (candidate.__module__,
                                      candidate.__class__.__name__)
            return user

    # The credentials were rejected by every backend: announce the failure
    # (with sensitive values masked).
    user_login_failed.send(sender=__name__,
                           credentials=_clean_credentials(credentials))
def login(request, user):
    """
    Persist a user id and a backend in the request. This way a user doesn't
    have to reauthenticate on every request. Note that data set during
    the anonymous session is retained when the user logs in.

    ``user.backend`` is expected to have been set by ``authenticate()``;
    pass ``user=None`` to log in ``request.user``.
    """
    if user is None:
        user = request.user
    # TODO: It would be nice to support different login methods, like signed cookies.
    if SESSION_KEY in request.session:
        if request.session[SESSION_KEY] != user.pk:
            # To avoid reusing another user's session, create a new, empty
            # session if the existing session corresponds to a different
            # authenticated user.
            request.session.flush()
    else:
        # Fresh login into an anonymous session: keep the data but change
        # the session key to prevent session fixation.
        request.session.cycle_key()
    request.session[SESSION_KEY] = user.pk
    request.session[BACKEND_SESSION_KEY] = user.backend
    if hasattr(request, 'user'):
        request.user = user
    # Issue a new CSRF token for the now-authenticated session.
    rotate_token(request)
    user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
    """
    Removes the authenticated user's ID from the request and flushes their
    session data.

    Safe to call when nobody is authenticated: in that case the
    ``user_logged_out`` signal is sent with ``user=None``.
    """
    # Dispatch the signal before the user is logged out so the receivers have a
    # chance to find out *who* logged out.
    user = getattr(request, 'user', None)
    if hasattr(user, 'is_authenticated') and not user.is_authenticated():
        user = None
    user_logged_out.send(sender=user.__class__, request=request, user=user)
    # remember language choice saved to session
    language = request.session.get('django_language')
    request.session.flush()
    if language is not None:
        # Restore the language so the visitor keeps their locale even
        # though every other piece of session data was just discarded.
        request.session['django_language'] = language
    if hasattr(request, 'user'):
        from django.contrib.auth.models import AnonymousUser
        request.user = AnonymousUser()
def get_user_model():
    """
    Returns the User model that is active in this project.

    Resolves ``settings.AUTH_USER_MODEL`` (expected to look like
    ``"app_label.model_name"``) through ``django.db.models.get_model`` and
    raises ``ImproperlyConfigured`` for a malformed or unknown reference.
    """
    from django.db.models import get_model

    parts = settings.AUTH_USER_MODEL.split('.')
    if len(parts) != 2:
        raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
    resolved = get_model(parts[0], parts[1])
    if resolved is None:
        raise ImproperlyConfigured("AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL)
    return resolved
def get_user(request):
    """
    Returns the user model instance associated with the given request session.
    If no user is retrieved an instance of `AnonymousUser` is returned.
    """
    from .models import AnonymousUser
    session = request.session
    try:
        uid = session[SESSION_KEY]
        backend_path = session[BACKEND_SESSION_KEY]
        # Refuse to trust a backend that is no longer configured.
        assert backend_path in settings.AUTHENTICATION_BACKENDS
        found = load_backend(backend_path).get_user(uid)
        return found or AnonymousUser()
    except (KeyError, AssertionError):
        # Missing session keys or an unconfigured backend both mean the
        # session carries no usable authentication.
        return AnonymousUser()
def get_permission_codename(action, opts):
    """
    Returns the codename of the permission for the specified action.

    The codename is the action joined to the model name with an
    underscore, e.g. ``add_user``.
    """
    return '{0}_{1}'.format(action, opts.model_name)
# Minimal instance of the custom resource kind "Example" (group test.com,
# version v1), named "test", with a single spec field; used as test data.
apiVersion: test.com/v1
kind: Example
metadata:
  name: test
spec:
  test: test
// LANGUAGE: +JvmIndyAllowLambdasWithAnnotations
// WITH_FIR_TEST_COMPILER_PLUGIN
// DUMP_IR

// Compiler-test fixture (the uppercase lines are test-runner directives).
// It checks that an @MyInlineable lambda which itself returns another
// @MyInlineable lambda can be passed to Scaffold, where Scaffold lives in a
// module compiled as a binary library (MODULE_KIND: LibraryBinary).

// MODULE: lib
// MODULE_KIND: LibraryBinary
// FILE: p3/foo.kt
package p3

import org.jetbrains.kotlin.plugin.sandbox.MyInlineable

@MyInlineable
fun Scaffold(x: @MyInlineable () -> (@MyInlineable () -> Unit)) {
}

// MODULE: main(lib)
// FILE: main.kt
import org.jetbrains.kotlin.plugin.sandbox.MyInlineable
import p3.Scaffold

@MyInlineable
private fun TopAppBar(title: String) {
}

@MyInlineable
private fun ArticleScreenContent(title: String) {
    Scaffold { { TopAppBar(title) } }
}
package libnetwork
import (
"fmt"
"net"
"net/netip"
"testing"
"github.com/Microsoft/hcsshim"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
// TestAddEpToResolver checks that addEpToResolverImpl registers an endpoint's
// external DNS servers (from its HNS endpoint's DNSServerList, minus the
// gateway) with the resolver listening on that network's gateway address.
// The table covers: the cap on stored servers (only the first three of five
// survive), EnableInternalDNS=false, no matching resolver, multiple
// resolvers/endpoints, and IPv6. It then checks deleteEpFromResolverImpl
// leaves every resolver's map empty again.
func TestAddEpToResolver(t *testing.T) {
	const (
		ep1v4      = "192.0.2.11"
		ep2v4      = "192.0.2.12"
		epFiveDNS  = "192.0.2.13"
		epNoIntDNS = "192.0.2.14"
		ep1v6      = "2001:db8:aaaa::2"
		gw1v4      = "192.0.2.1"
		gw2v4      = "192.0.2.2"
		gw1v6      = "2001:db8:aaaa::1"
		dns1v4     = "198.51.100.1"
		dns2v4     = "198.51.100.2"
		dns3v4     = "198.51.100.3"
	)
	hnsEndpoints := map[string]hcsshim.HNSEndpoint{
		ep1v4: {
			IPAddress:         net.ParseIP(ep1v4),
			GatewayAddress:    gw1v4,
			DNSServerList:     gw1v4 + "," + dns1v4,
			EnableInternalDNS: true,
		},
		ep2v4: {
			IPAddress:         net.ParseIP(ep2v4),
			GatewayAddress:    gw1v4,
			DNSServerList:     gw1v4 + "," + dns2v4,
			EnableInternalDNS: true,
		},
		epFiveDNS: {
			IPAddress:         net.ParseIP(epFiveDNS),
			GatewayAddress:    gw1v4,
			DNSServerList:     gw1v4 + "," + dns1v4 + "," + dns2v4 + "," + dns3v4 + ",198.51.100.4",
			EnableInternalDNS: true,
		},
		epNoIntDNS: {
			IPAddress:      net.ParseIP(epNoIntDNS),
			GatewayAddress: gw1v4,
			DNSServerList:  gw1v4 + "," + dns1v4,
			// EnableInternalDNS: false,
		},
		ep1v6: {
			IPv6Address:       net.ParseIP(ep1v6),
			GatewayAddressV6:  gw1v6,
			DNSServerList:     gw1v6 + "," + dns1v4,
			EnableInternalDNS: true,
		},
	}
	makeIPNet := func(addr, netmask string) *net.IPNet {
		t.Helper()
		ip, ipnet, err := net.ParseCIDR(addr + "/" + netmask)
		assert.NilError(t, err)
		return &net.IPNet{IP: ip, Mask: ipnet.Mask}
	}
	testcases := []struct {
		name           string
		epToAdd        *EndpointInterface
		hnsEndpoints   []hcsshim.HNSEndpoint
		resolverLAs    []string
		expIPToExtDNS  map[netip.Addr][maxExtDNS]extDNSEntry
		expResolverIdx int
	}{
		{
			name: "ipv4",
			epToAdd: &EndpointInterface{
				addr: makeIPNet(ep1v4, "32"),
			},
			hnsEndpoints: []hcsshim.HNSEndpoint{
				hnsEndpoints[ep1v4],
			},
			resolverLAs: []string{gw1v4},
			expIPToExtDNS: map[netip.Addr][maxExtDNS]extDNSEntry{
				netip.MustParseAddr(ep1v4): {{IPStr: dns1v4}},
			},
		},
		{
			name: "limit of three dns servers",
			epToAdd: &EndpointInterface{
				addr: makeIPNet(epFiveDNS, "32"),
			},
			hnsEndpoints: []hcsshim.HNSEndpoint{
				hnsEndpoints[epFiveDNS],
			},
			resolverLAs: []string{gw1v4},
			// Expect the internal resolver to keep the first three ext-servers.
			expIPToExtDNS: map[netip.Addr][maxExtDNS]extDNSEntry{
				netip.MustParseAddr(epFiveDNS): {
					{IPStr: dns1v4},
					{IPStr: dns2v4},
					{IPStr: dns3v4},
				},
			},
		},
		{
			name: "disabled internal resolver",
			epToAdd: &EndpointInterface{
				addr: makeIPNet(epNoIntDNS, "32"),
			},
			hnsEndpoints: []hcsshim.HNSEndpoint{
				hnsEndpoints[epNoIntDNS],
				hnsEndpoints[ep2v4],
			},
			resolverLAs: []string{gw1v4},
		},
		{
			name: "missing internal resolver",
			epToAdd: &EndpointInterface{
				addr: makeIPNet(ep1v4, "32"),
			},
			hnsEndpoints: []hcsshim.HNSEndpoint{
				hnsEndpoints[ep1v4],
			},
			// The only resolver is for the gateway on a different network.
			resolverLAs: []string{gw2v4},
		},
		{
			name: "multiple resolvers and endpoints",
			epToAdd: &EndpointInterface{
				addr: makeIPNet(ep2v4, "32"),
			},
			hnsEndpoints: []hcsshim.HNSEndpoint{
				hnsEndpoints[ep1v4],
				hnsEndpoints[ep2v4],
			},
			// Put the internal resolver for this network second in the list.
			expResolverIdx: 1,
			resolverLAs:    []string{gw2v4, gw1v4},
			expIPToExtDNS: map[netip.Addr][maxExtDNS]extDNSEntry{
				netip.MustParseAddr(ep2v4): {{IPStr: dns2v4}},
			},
		},
		{
			name: "ipv6",
			epToAdd: &EndpointInterface{
				addrv6: makeIPNet(ep1v6, "80"),
			},
			hnsEndpoints: []hcsshim.HNSEndpoint{
				hnsEndpoints[ep1v6],
			},
			resolverLAs: []string{gw1v6},
			expIPToExtDNS: map[netip.Addr][maxExtDNS]extDNSEntry{
				netip.MustParseAddr(ep1v6): {{IPStr: dns1v4}},
			},
		},
	}
	// Comparison options: treat nil/empty maps as equal, compare netip.Addr
	// by value, and ignore extDNSEntry's unexported fields.
	eMapCmpOpts := []cmp.Option{
		cmpopts.EquateEmpty(),
		cmpopts.EquateComparable(netip.Addr{}),
		cmpopts.IgnoreUnexported(extDNSEntry{}),
	}
	emptyEMap := map[netip.Addr][maxExtDNS]extDNSEntry{}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			// Set up resolvers with the required listen-addresses.
			var resolvers []*Resolver
			for _, la := range tc.resolverLAs {
				resolvers = append(resolvers, NewResolver(la, true, nil))
			}
			// Add the endpoint and check expected results.
			err := addEpToResolverImpl(t.Context(),
				"netname", "epname", tc.epToAdd, resolvers, tc.hnsEndpoints)
			assert.Check(t, err)
			// Only the resolver at expResolverIdx may have picked up entries.
			for i, resolver := range resolvers {
				if i == tc.expResolverIdx {
					assert.Check(t, is.DeepEqual(resolver.ipToExtDNS.eMap, tc.expIPToExtDNS,
						eMapCmpOpts...), fmt.Sprintf("resolveridx=%d", i))
				} else {
					assert.Check(t, is.DeepEqual(resolver.ipToExtDNS.eMap, emptyEMap,
						eMapCmpOpts...), fmt.Sprintf("resolveridx=%d", i))
				}
			}
			// Delete the endpoint, check nothing got left behind.
			err = deleteEpFromResolverImpl("epname", tc.epToAdd, resolvers, tc.hnsEndpoints)
			assert.Check(t, err)
			for i, resolver := range resolvers {
				assert.Check(t, is.DeepEqual(resolver.ipToExtDNS.eMap, emptyEMap,
					eMapCmpOpts...), fmt.Sprintf("resolveridx=%d", i))
			}
		})
	}
}
#!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
# Set up Internationalization using gettext
# searching for installed locales on /usr/share; uses relative folder if not found (windows)
from .utils import install_locale, get_home_pos
install_locale('pronterface')
import wx
import sys
import os
import time
import types
import re
import math
import logging
from printrun import gcoder
from printrun.objectplater import make_plater, PlaterPanel
from printrun.gl.libtatlin import actors
import printrun.gui.viz # NOQA
from printrun import gcview
def extrusion_only(gline):
    """Return True when *gline* only moves the extruder: E is set while
    none of the X, Y and Z coordinates are present."""
    if gline.e is None:
        return False
    return gline.x is None and gline.y is None and gline.z is None
# Custom method for gcoder.GCode to analyze & output gcode in a single call
def gcoder_write(self, f, line, store = False):
    """Write *line* to the file object *f* and feed it to the GCode
    analyzer in the same step.

    Bound onto a gcoder.GCode instance via types.MethodType (see
    export_combined) so exporting and re-analyzing happen in one pass.
    """
    f.write(line)
    self.append(line, store = store)
# Regex matching the X and Y words of a G-code move (axis letter followed by
# an optional signed decimal), used by rewrite_gline() to substitute rotated
# coordinates. Raw strings are required: "\." in a plain string literal is an
# invalid escape sequence (DeprecationWarning, SyntaxWarning on Python 3.12+).
rewrite_exp = re.compile("(%s)" % "|".join([r"X([-+]?[0-9]*\.?[0-9]*)",
                                            r"Y([-+]?[0-9]*\.?[0-9]*)"]))
def rewrite_gline(centeroffset, gline, cosr, sinr):
    """Return *gline*'s raw text with its X/Y words rotated by the angle
    whose cosine/sine are *cosr*/*sinr*, about the model center given by
    *centeroffset*.  Non-moves (and moves without X or Y) pass through
    unchanged."""
    if not (gline.is_move and (gline.x is not None or gline.y is not None)):
        return gline.raw
    if gline.relative:
        # Relative moves rotate about the current position; missing axes
        # contribute zero displacement and no center offset applies.
        px = gline.x if gline.x is not None else 0
        py = gline.y if gline.y is not None else 0
        ox = oy = 0
    else:
        ox = centeroffset[0]
        oy = centeroffset[1]
        px = gline.current_x + ox
        py = gline.current_y + oy
    rot_x = "X%.04f" % (px * cosr - py * sinr - ox)
    rot_y = "Y%.04f" % (px * sinr + py * cosr - oy)
    replacements = {"X": rot_x, "Y": rot_y}
    rewritten = rewrite_exp.sub(lambda m: replacements[m.group()[0]], gline.raw)
    # Drop any trailing comment, then append words for axes the original
    # line omitted (their rotated value may now be non-zero).
    rewritten = rewritten.split(";")[0]
    if gline.x is None:
        rewritten += " " + rot_x
    if gline.y is None:
        rewritten += " " + rot_y
    return rewritten
class GcodePlaterPanel(PlaterPanel):
    """Plater panel that arranges already-sliced G-code files.

    Loaded files are shown in a 3D G-code viewer; exporting merges the
    models' G-code layer-by-layer (export_combined) or object-by-object
    (export_sequential) into a single output file.
    """
    # File-dialog filters; the label is translated at runtime via gettext.
    load_wildcard = _("GCODE files (*.gcode;*.GCODE;*.g)") + "|*.gcode;*.gco;*.g"
    save_wildcard = _("GCODE files (*.gcode;*.GCODE;*.g)") + "|*.gcode;*.gco;*.g"
    def prepare_ui(self, filenames = [], callback = None,
                   parent = None, build_dimensions = None,
                   circular_platform = False,
                   antialias_samples = 0,
                   grid = (1, 10)):
        """Build the UI: base plater widgets plus a G-code 3D viewer and a
        platform actor matching the printer's build volume.

        NOTE(review): the mutable default ``filenames = []`` is shared
        across calls -- harmless only if callers never mutate it; confirm.
        """
        super(GcodePlaterPanel, self).prepare_ui(filenames, callback, parent, build_dimensions)
        viewer = gcview.GcodeViewPanel(self, build_dimensions = self.build_dimensions,
                                       antialias_samples = antialias_samples)
        self.set_viewer(viewer)
        self.platform = actors.Platform(self.build_dimensions,
                                        circular = circular_platform,
                                        grid = grid)
        self.platform_object = gcview.GCObject(self.platform)
    def get_objects(self):
        """Return the platform actor followed by all loaded models."""
        return [self.platform_object] + list(self.models.values())
    objects = property(get_objects)
    def load_file(self, filename):
        """Parse *filename* into a gcoder.GCode model and add it to the plate."""
        # NOTE(review): "rU" mode was removed in Python 3.11; plain "r" is
        # equivalent on Python 3 -- confirm the supported interpreter range.
        gcode = gcoder.GCode(open(filename, "rU"),
                             get_home_pos(self.build_dimensions))
        model = actors.GcodeModel()
        if gcode.filament_length > 0:
            model.display_travels = False
        # load_data is a generator; drain it so loading completes here
        # rather than incrementally during rendering.
        generator = model.load_data(gcode)
        generator_output = next(generator)
        while generator_output is not None:
            generator_output = next(generator)
        obj = gcview.GCObject(model)
        obj.offsets = [self.build_dimensions[3], self.build_dimensions[4], 0]
        obj.gcode = gcode
        obj.dims = [gcode.xmin, gcode.xmax,
                    gcode.ymin, gcode.ymax,
                    gcode.zmin, gcode.zmax]
        # Offset to the bounding-box center so rotations in rewrite_gline
        # happen about the model center rather than the G-code origin.
        obj.centeroffset = [-(obj.dims[1] + obj.dims[0]) / 2,
                            -(obj.dims[3] + obj.dims[2]) / 2,
                            0]
        self.add_model(filename, obj)
        wx.CallAfter(self.Refresh)
    def done(self, event, cb):
        """Export the plate to a file under tempgcode/ and pass its path
        to *cb*; optionally destroy the window afterwards."""
        if not os.path.exists("tempgcode"):
            os.mkdir("tempgcode")
        name = "tempgcode/" + str(int(time.time()) % 10000) + ".gcode"
        self.export_to(name)
        if cb is not None:
            cb(name)
        if self.destroy_on_done:
            self.Destroy()
    # What's hard in there ?
    # 1) [x] finding the order in which the objects are printed
    # 2) [x] handling layers correctly
    # 3) [x] handling E correctly
    # 4) [x] handling position shifts: should we either reset absolute 0 using
    #        G92 or should we rewrite all positions ? => we use G92s
    # 5) [ ] handling the start & end gcode properly ?
    # 6) [x] handling of current tool
    # 7) [x] handling of Z moves for sequential printing (don't lower Z before
    #        reaching the next object print area)
    # 8) [x] handling of absolute/relative status
    # Initial implementation should just print the objects sequentially,
    # but the end goal is to have a clean per-layer merge
    def export_to(self, name):
        """Export the plated models to *name*.

        NOTE(review): the second return is unreachable -- this looks like a
        deliberate toggle between the combined and sequential exporters;
        confirm intent before cleaning it up.
        """
        return self.export_combined(name)
        return self.export_sequential(name)
    def export_combined(self, name):
        """Merge all models' G-code into *name*, interleaved layer by layer
        (layers from every model sorted by absolute Z)."""
        models = list(self.models.values())
        last_real_position = None
        # Sort models by Z max to print smaller objects first
        models.sort(key = lambda x: x.dims[-1])
        # Build one global (z, model, layer) list across all models.
        alllayers = []
        for (model_i, model) in enumerate(models):
            def add_offset(layer):
                return layer.z + model.offsets[2] if layer.z is not None else layer.z
            alllayers += [(add_offset(layer), model_i, layer_i)
                          for (layer_i, layer) in enumerate(model.gcode.all_layers) if add_offset(layer) is not None]
        alllayers.sort()
        # Per-model state carried across layer switches: E value, active
        # tool and absolute/relative mode.
        laste = [0] * len(models)
        lasttool = [0] * len(models)
        lastrelative = [False] * len(models)
        with open(name, "w") as f:
            # The analyzer both writes to f and tracks printer state
            # (position, E, tool, mode) via the gcoder_write shim.
            analyzer = gcoder.GCode(None, get_home_pos(self.build_dimensions))
            analyzer.write = types.MethodType(lambda self, line: gcoder_write(self, f, line), analyzer)
            for (layer_z, model_i, layer_i) in alllayers:
                model = models[model_i]
                layer = model.gcode.all_layers[layer_i]
                r = math.radians(model.rot)
                o = model.offsets
                co = model.centeroffset
                offset_pos = last_real_position if last_real_position is not None else (0, 0, 0)
                analyzer.write("; %f %f %f\n" % offset_pos)
                trans = (- (o[0] + co[0]),
                         - (o[1] + co[1]),
                         - (o[2] + co[2]))
                trans_wpos = (offset_pos[0] + trans[0],
                              offset_pos[1] + trans[1],
                              offset_pos[2] + trans[2])
                analyzer.write("; GCodePlater: Model %d Layer %d at Z = %s\n" % (model_i, layer_i, layer_z))
                # Restore this model's mode/tool, then use G92 to shift the
                # coordinate system instead of rewriting every position.
                if lastrelative[model_i]:
                    analyzer.write("G91\n")
                else:
                    analyzer.write("G90\n")
                if analyzer.current_tool != lasttool[model_i]:
                    analyzer.write("T%d\n" % lasttool[model_i])
                analyzer.write("G92 X%.5f Y%.5f Z%.5f\n" % trans_wpos)
                analyzer.write("G92 E%.5f\n" % laste[model_i])
                # Strip homing and (non-extrusion) G92s which would defeat
                # the coordinate shift above.
                for l in layer:
                    if l.command != "G28" and (l.command != "G92" or extrusion_only(l)):
                        if r == 0:
                            analyzer.write(l.raw + "\n")
                        else:
                            analyzer.write(rewrite_gline(co, l, math.cos(r), math.sin(r)) + "\n")
                # Find the current real position & E
                last_real_position = analyzer.current_pos
                laste[model_i] = analyzer.current_e
                lastrelative[model_i] = analyzer.relative
                lasttool[model_i] = analyzer.current_tool
        logging.info(_("Exported merged G-Codes to %s") % name)
    def export_sequential(self, name):
        """Write each model's full G-code one after another into *name*,
        shifting coordinates between objects with G92."""
        models = list(self.models.values())
        last_real_position = None
        # Sort models by Z max to print smaller objects first
        models.sort(key = lambda x: x.dims[-1])
        with open(name, "w") as f:
            for model_i, model in enumerate(models):
                r = math.radians(model.rot)
                o = model.offsets
                co = model.centeroffset
                offset_pos = last_real_position if last_real_position is not None else (0, 0, 0)
                trans = (- (o[0] + co[0]),
                         - (o[1] + co[1]),
                         - (o[2] + co[2]))
                trans_wpos = (offset_pos[0] + trans[0],
                              offset_pos[1] + trans[1],
                              offset_pos[2] + trans[2])
                f.write("; GCodePlater: Model %d\n" % model_i)
                f.write("G90\n")
                f.write("G92 X%.5f Y%.5f Z%.5f E0\n" % trans_wpos)
                f.write("G1 X%.5f Y%.5f" % (-co[0], -co[1]))
                for l in model.gcode:
                    if l.command != "G28" and (l.command != "G92" or extrusion_only(l)):
                        if r == 0:
                            f.write(l.raw + "\n")
                        else:
                            f.write(rewrite_gline(co, l, math.cos(r), math.sin(r)) + "\n")
                # Find the current real position
                for i in range(len(model.gcode) - 1, -1, -1):
                    gline = model.gcode.lines[i]
                    if gline.is_move:
                        last_real_position = (- trans[0] + gline.current_x,
                                              - trans[1] + gline.current_y,
                                              - trans[2] + gline.current_z)
                        break
        logging.info(_("Exported merged G-Codes to %s") % name)
# Wrap the panel into a standalone plater application class.
GcodePlater = make_plater(GcodePlaterPanel)
if __name__ == '__main__':
    app = wx.App(False)
    main = GcodePlater(filenames = sys.argv[1:])
    for fn in main.filenames:
        main.load_file(fn)
    main.filenames = None
    main.autoplate()
    main.export_to("gcodeplate___test.gcode")
    # NOTE(review): the SystemExit below makes Show()/MainLoop() unreachable,
    # turning this entry point into a headless export test -- presumably a
    # debugging short-circuit left enabled; confirm before removing.
    raise SystemExit
    main.Show()
    app.MainLoop()
#ifndef PATH_H
#define PATH_H
struct repository;
struct strbuf;
struct string_list;
struct worktree;
/*
* The result to all functions which return statically allocated memory may be
* overwritten by another call to _any_ one of these functions. Consider using
* the safer variants which operate on strbufs or return allocated memory.
*/
/*
* Return a statically allocated path.
*/
const char *mkpath(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
/*
* Return a path.
*/
char *mkpathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
/*
* The `repo_common_path` family of functions will construct a path into a
* repository's common git directory, which is shared by all worktrees.
*/
char *repo_common_path(const struct repository *repo,
const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
const char *repo_common_path_append(const struct repository *repo,
struct strbuf *sb,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
const char *repo_common_path_replace(const struct repository *repo,
struct strbuf *sb,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
/*
* The `repo_git_path` family of functions will construct a path into a repository's
* git directory.
*
* These functions will perform adjustments to the resultant path to account
* for special paths which are either considered common among worktrees (e.g.
* paths into the object directory) or have been explicitly set via an
* environment variable or config (e.g. path to the index file).
*
* For an exhaustive list of the adjustments made look at `common_list` and
* `adjust_git_path` in path.c.
*/
char *repo_git_path(struct repository *repo,
const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
const char *repo_git_path_append(struct repository *repo,
struct strbuf *sb,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
const char *repo_git_path_replace(struct repository *repo,
struct strbuf *sb,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
/*
* Similar to repo_git_path() but can produce paths for a specified
* worktree instead of current one. When no worktree is given, then the path is
* computed relative to main worktree of the given repository.
*/
const char *worktree_git_path(struct repository *r,
const struct worktree *wt,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
/*
* The `repo_worktree_path` family of functions will construct a path into a
* repository's worktree.
*
* Returns a `NULL` pointer in case the repository has no worktree.
*/
char *repo_worktree_path(const struct repository *repo,
const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
const char *repo_worktree_path_append(const struct repository *repo,
struct strbuf *sb,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
const char *repo_worktree_path_replace(const struct repository *repo,
struct strbuf *sb,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
/*
* The `repo_submodule_path` family of functions will construct a path into a
* submodule's git directory located at `path`. `path` must be a submodule path
* as found in the index and must be part of the given repository.
*
* Returns a `NULL` pointer in case the submodule cannot be found.
*/
char *repo_submodule_path(struct repository *repo,
const char *path,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
const char *repo_submodule_path_append(struct repository *repo,
struct strbuf *sb,
const char *path,
const char *fmt, ...)
__attribute__((format (printf, 4, 5)));
const char *repo_submodule_path_replace(struct repository *repo,
struct strbuf *sb,
const char *path,
const char *fmt, ...)
__attribute__((format (printf, 4, 5)));
void report_linked_checkout_garbage(struct repository *r);
/*
* You can define a static memoized git path like:
*
* static REPO_GIT_PATH_FUNC(git_path_foo, "FOO")
*
* or use one of the global ones below.
*/
#define REPO_GIT_PATH_FUNC(var, filename) \
const char *git_path_##var(struct repository *r) \
{ \
if (!r->cached_paths.var) \
r->cached_paths.var = repo_git_path(r, filename); \
return r->cached_paths.var; \
}
/*
 * Memoized accessors for well-known files in the repository's git
 * directory (see REPO_GIT_PATH_FUNC above); the returned string is cached
 * on the repository and must not be freed by the caller.
 */
const char *git_path_squash_msg(struct repository *r);
const char *git_path_merge_msg(struct repository *r);
const char *git_path_merge_rr(struct repository *r);
const char *git_path_merge_mode(struct repository *r);
const char *git_path_merge_head(struct repository *r);
const char *git_path_fetch_head(struct repository *r);
const char *git_path_shallow(struct repository *r);

/* NOTE(review): presumably tests whether "path" ends with the given
 * sequence of path components (not a mere string suffix) -- confirm in
 * path.c. */
int ends_with_path_components(const char *path, const char *components);

/*
 * Compute / apply file permissions honoring the core.sharedRepository
 * setting. NOTE(review): exact return-value conventions live in path.c --
 * confirm before relying on them.
 */
int calc_shared_perm(struct repository *repo, int mode);
int adjust_shared_perm(struct repository *repo, const char *path);

/* NOTE(review): presumably expands a leading "~" in "path", returning
 * newly allocated memory -- confirm the real_home semantics in path.c. */
char *interpolate_path(const char *path, int real_home);

/* Path-manipulation helpers; see path.c for the exact contract of each. */
const char *remove_leading_path(const char *in, const char *prefix);
const char *relative_path(const char *in, const char *prefix, struct strbuf *sb);
int normalize_path_copy_len(char *dst, const char *src, int *prefix_len);
int normalize_path_copy(char *dst, const char *src);
/**
* Normalize in-place the path contained in the strbuf. If an error occurs,
* the contents of "sb" are left untouched, and -1 is returned.
*/
int strbuf_normalize_path(struct strbuf *src);
int longest_ancestor_length(const char *path, struct string_list *prefixes);
char *strip_path_suffix(const char *path, const char *suffix);
int daemon_avoid_alias(const char *path);
/*
* These functions match their is_hfs_dotgit() counterparts; see utf8.h for
* details.
*/
int is_ntfs_dotgit(const char *name);
int is_ntfs_dotgitmodules(const char *name);
int is_ntfs_dotgitignore(const char *name);
int is_ntfs_dotgitattributes(const char *name);
int is_ntfs_dotmailmap(const char *name);
/*
* Returns true iff "str" could be confused as a command-line option when
* passed to a sub-program like "ssh". Note that this has nothing to do with
* shell-quoting, which should be handled separately; we're assuming here that
* the string makes it verbatim to the sub-program.
*/
int looks_like_command_line_option(const char *str);
/**
* Return a newly allocated string with the evaluation of
* "$XDG_CONFIG_HOME/$subdir/$filename" if $XDG_CONFIG_HOME is non-empty, otherwise
* "$HOME/.config/$subdir/$filename". Return NULL upon error.
*/
char *xdg_config_home_for(const char *subdir, const char *filename);
/**
* Return a newly allocated string with the evaluation of
* "$XDG_CONFIG_HOME/git/$filename" if $XDG_CONFIG_HOME is non-empty, otherwise
* "$HOME/.config/git/$filename". Return NULL upon error.
*/
char *xdg_config_home(const char *filename);
/**
* Return a newly allocated string with the evaluation of
* "$XDG_CACHE_HOME/git/$filename" if $XDG_CACHE_HOME is non-empty, otherwise
* "$HOME/.cache/git/$filename". Return NULL upon error.
*/
char *xdg_cache_home(const char *filename);
/*
* Create a directory and (if share is nonzero) adjust its permissions
* according to the shared_repository setting. Only use this for
* directories under $GIT_DIR. Don't use it for working tree
* directories.
*/
void safe_create_dir(struct repository *repo, const char *dir, int share);
/*
* Similar to `safe_create_dir()`, but with two differences:
*
* - It knows to resolve gitlink files for symlinked worktrees.
*
* - It always adjusts shared permissions.
*
* Returns a negative erorr code on error, 0 on success.
*/
int safe_create_dir_in_gitdir(struct repository *repo, const char *path);
/*
* Create the directory containing the named path, using care to be
* somewhat safe against races. Return one of the scld_error values to
* indicate success/failure. On error, set errno to describe the
* problem.
*
* SCLD_VANISHED indicates that one of the ancestor directories of the
* path existed at one point during the function call and then
* suddenly vanished, probably because another process pruned the
* directory while we were working. To be robust against this kind of
* race, callers might want to try invoking the function again when it
* returns SCLD_VANISHED.
*
* safe_create_leading_directories() temporarily changes path while it
* is working but restores it before returning.
* safe_create_leading_directories_const() doesn't modify path, even
* temporarily. Both these variants adjust the permissions of the
* created directories to honor core.sharedRepository, so they are best
* suited for files inside the git dir. For working tree files, use
* safe_create_leading_directories_no_share() instead, as it ignores
* the core.sharedRepository setting.
*/
enum scld_error {
	SCLD_OK = 0,        /* success: leading directories exist or were created */
	SCLD_FAILED = -1,   /* generic failure; errno describes the problem (see above) */
	SCLD_PERMS = -2,    /* NOTE(review): presumably a permission-adjustment failure -- confirm in path.c */
	SCLD_EXISTS = -3,   /* NOTE(review): presumably a non-directory is in the way -- confirm in path.c */
	SCLD_VANISHED = -4  /* an ancestor directory vanished mid-call (see comment above) */
};
enum scld_error safe_create_leading_directories(struct repository *repo, char *path);
enum scld_error safe_create_leading_directories_const(struct repository *repo,
const char *path);
enum scld_error safe_create_leading_directories_no_share(char *path);
/*
* Create a file, potentially creating its leading directories in case they
* don't exist. Returns the return value of the open(3p) call.
*/
int safe_create_file_with_leading_directories(struct repository *repo,
const char *path);
# ifdef USE_THE_REPOSITORY_VARIABLE
# include "strbuf.h"
# include "repository.h"

/*
 * Define a memoized accessor for a path inside the_repository's git
 * directory: the path is computed on first call and cached in a function
 * scope static for the rest of the process (never freed).
 */
#define GIT_PATH_FUNC(func, filename) \
	const char *func(void) \
	{ \
		static char *ret; \
		if (!ret) \
			ret = repo_git_path(the_repository, filename); \
		return ret; \
	}
# endif /* USE_THE_REPOSITORY_VARIABLE */
#endif /* PATH_H */ | c | github | https://github.com/git/git | path.h |
"""Tests for distutils.spawn."""
import unittest
import sys
import os
from test.support import run_unittest, unix_shell
from distutils.spawn import _nt_quote_args
from distutils.spawn import spawn
from distutils.errors import DistutilsExecError
from distutils.tests import support
class SpawnTestCase(support.TempdirManager,
                    support.LoggingSilencer,
                    unittest.TestCase):
    """Tests for distutils.spawn: Windows argument quoting and process
    spawning success/failure handling."""
    def test_nt_quote_args(self):
        # Only arguments containing spaces should be wrapped in double
        # quotes for the Windows command line; others pass through as-is.
        for (args, wanted) in ((['with space', 'nospace'],
                                ['"with space"', 'nospace']),
                               (['nochange', 'nospace'],
                                ['nochange', 'nospace'])):
            res = _nt_quote_args(args)
            self.assertEqual(res, wanted)
    @unittest.skipUnless(os.name in ('nt', 'posix'),
                         'Runs only under posix or nt')
    def test_spawn(self):
        """spawn() must raise DistutilsExecError for a command exiting
        non-zero and return silently for a successful one."""
        tmpdir = self.mkdtemp()
        # creating something executable
        # through the shell that returns 1
        if sys.platform != 'win32':
            exe = os.path.join(tmpdir, 'foo.sh')
            self.write_file(exe, '#!%s\nexit 1' % unix_shell)
        else:
            exe = os.path.join(tmpdir, 'foo.bat')
            self.write_file(exe, 'exit 1')
        os.chmod(exe, 0o777)
        self.assertRaises(DistutilsExecError, spawn, [exe])
        # now something that works
        if sys.platform != 'win32':
            exe = os.path.join(tmpdir, 'foo.sh')
            self.write_file(exe, '#!%s\nexit 0' % unix_shell)
        else:
            exe = os.path.join(tmpdir, 'foo.bat')
            self.write_file(exe, 'exit 0')
        os.chmod(exe, 0o777)
        spawn([exe]) # should work without any error
def test_suite():
    """Return this module's test suite (used by distutils' test runner)."""
    # unittest.makeSuite is deprecated since Python 3.11 and removed in
    # 3.13; TestLoader.loadTestsFromTestCase is the supported equivalent.
    return unittest.TestLoader().loadTestsFromTestCase(SpawnTestCase)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    run_unittest(test_suite())
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as AddressProvider
class Provider(AddressProvider):
    """Address provider for Brazilian Portuguese (pt_BR) locales.

    Supplies locale-specific building blocks (city suffixes, street
    prefixes, neighborhoods, country names, states) plus the format
    strings the base ``AddressProvider`` expands into full addresses.
    """

    city_suffixes = (
        'do Sul', 'do Norte', 'de Minas', 'do Campo', 'Grande', 'da Serra', 'do Oeste', 'de Goiás', 'Paulista',
        'da Mata', 'Alegre', 'da Praia', 'das Flores', 'das Pedras', 'dos Dourados',
        'do Amparo', 'do Galho', 'da Prata', 'Verde'
    )
    street_prefixes = ('Aeroporto', 'Alameda', 'Área', 'Avenida', 'Campo', 'Chácara', 'Colônia', 'Condomínio',
                       'Conjunto', 'Distrito', 'Esplanada', 'Estação', 'Estrada', 'Favela', 'Fazenda', 'Feira',
                       'Jardim', 'Ladeira', 'Lago', 'Lagoa', 'Largo', 'Loteamento', 'Morro', 'Núcleo', 'Parque',
                       'Passarela', 'Pátio', 'Praça', 'Quadra', 'Recanto', 'Residencial', 'Rodovia', 'Rua', 'Setor',
                       'Sítio', 'Travessa', 'Trecho', 'Trevo', 'Vale', 'Vereda', 'Via', 'Viaduto', 'Viela', 'Vila')
    # Repeated entries weight the random choice toward the plain forms.
    city_formats = (
        '{{last_name}}',
        '{{last_name}}',
        '{{last_name}}',
        '{{last_name}}',
        '{{last_name}} {{city_suffix}}',
        '{{last_name}} {{city_suffix}}',
        '{{last_name}} {{city_suffix}}',
        '{{last_name}} de {{last_name}}',
    )
    street_name_formats = (
        '{{street_prefix}} {{last_name}}',
        '{{street_prefix}} {{first_name}} {{last_name}}',
        '{{street_prefix}} de {{last_name}}',
    )
    street_address_formats = (
        '{{street_name}}',
        '{{street_name}}, {{building_number}}',
        '{{street_name}}, {{building_number}}',
        '{{street_name}}, {{building_number}}',
        '{{street_name}}, {{building_number}}',
        '{{street_name}}, {{building_number}}',
        '{{street_name}}, {{building_number}}',
    )
    address_formats = (
        "{{street_address}}\n{{bairro}}\n{{postcode}} {{city}} / {{estado_sigla}}",
    )
    building_number_formats = ('%', '%#', '%#', '%#', '%##')
    postcode_formats = ('########', '#####-###')
    # Neighborhood names, taken from the city of Belo Horizonte - MG.
    bairros = (
        'Aarão Reis', 'Acaba Mundo', 'Acaiaca', 'Ademar Maldonado', 'Aeroporto', 'Aguas Claras', 'Alípio De Melo',
        'Alpes',
        'Alta Tensão 1ª Seção', 'Alta Tensão 2ª Seção', 'Alto Caiçaras', 'Alto Das Antenas', 'Alto Dos Pinheiros',
        'Alto Vera Cruz',
        'Álvaro Camargos', 'Ambrosina', 'Andiroba', 'Antonio Ribeiro De Abreu 1ª Seção', 'Aparecida 7ª Seção', 'Ápia',
        'Apolonia', 'Araguaia', 'Atila De Paiva', 'Bacurau', 'Bairro Das Indústrias Ii', 'Baleia',
        'Barão Homem De Melo 1ª Seção', 'Barão Homem De Melo 2ª Seção', 'Barão Homem De Melo 3ª Seção',
        'Barreiro', 'Beija Flor', 'Beira Linha', 'Bela Vitoria', 'Belmonte', 'Bernadete', 'Betânia', 'Biquinhas',
        'Boa Esperança', 'Boa União 1ª Seção', 'Boa União 2ª Seção', 'Boa Viagem', 'Boa Vista', 'Bom Jesus', 'Bonfim',
        'Bonsucesso', 'Brasil Industrial', 'Braúnas', 'Buraco Quente', 'Cabana Do Pai Tomás',
        'Cachoeirinha', 'Caetano Furquim', 'Caiçara - Adelaide', 'Calafate', 'Califórnia', 'Camargos', 'Campo Alegre',
        'Camponesa 1ª Seção', 'Camponesa 2ª Seção', 'Canaa', 'Canadá', 'Candelaria', 'Capitão Eduardo', 'Cardoso',
        'Casa Branca', 'Castanheira', 'Cdi Jatoba', 'Cenaculo', 'Céu Azul', 'Chácara Leonina',
        'Cidade Jardim Taquaril', 'Cinquentenário', 'Colégio Batista', 'Comiteco', 'Concórdia',
        'Cônego Pinheiro 1ª Seção',
        'Cônego Pinheiro 2ª Seção', 'Confisco', 'Conjunto Bonsucesso', 'Conjunto Califórnia I',
        'Conjunto Califórnia Ii',
        'Conjunto Capitão Eduardo', 'Conjunto Celso Machado', 'Conjunto Floramar',
        'Conjunto Jardim Filadélfia', 'Conjunto Jatoba', 'Conjunto Lagoa', 'Conjunto Minas Caixa',
        'Conjunto Novo Dom Bosco', 'Conjunto Paulo Vi', 'Conjunto Providencia', 'Conjunto Santa Maria',
        'Conjunto São Francisco De Assis', 'Conjunto Serra Verde', 'Conjunto Taquaril', 'Copacabana', 'Coqueiros',
        'Corumbiara',
        'Custodinha', 'Das Industrias I', 'Delta', 'Diamante', 'Distrito Industrial Do Jatoba', 'Dom Bosco',
        'Dom Cabral',
        'Dom Joaquim', 'Dom Silverio', 'Dona Clara', 'Embaúbas', 'Engenho Nogueira', 'Ermelinda', 'Ernesto Nascimento',
        'Esperança', 'Estrela', 'Estrela Do Oriente', 'Etelvina Carneiro', 'Europa',
        'Eymard', 'Fazendinha', 'Flamengo', 'Flavio De Oliveira', 'Flavio Marques Lisboa', 'Floramar', 'Frei Leopoldo',
        'Gameleira', 'Garças', 'Glória', 'Goiania', 'Graça', 'Granja De Freitas', 'Granja Werneck', 'Grota', 'Grotinha',
        'Guarani', 'Guaratã', 'Havaí', 'Heliopolis', 'Horto Florestal', 'Inconfidência',
        'Indaiá', 'Independência', 'Ipe', 'Itapoa', 'Itatiaia', 'Jaqueline', 'Jaraguá', 'Jardim Alvorada',
        'Jardim Atlântico', 'Jardim Do Vale', 'Jardim Dos Comerciarios', 'Jardim Felicidade', 'Jardim Guanabara',
        'Jardim Leblon', 'Jardim Montanhês', 'Jardim São José', 'Jardim Vitoria', 'Jardinópolis', 'Jatobá',
        'João Alfredo', 'João Paulo Ii', 'Jonas Veiga', 'Juliana', 'Lagoa', 'Lagoinha', 'Lagoinha Leblon', 'Lajedo',
        'Laranjeiras', 'Leonina', 'Leticia', 'Liberdade', 'Lindéia', 'Lorena', 'Madre Gertrudes', 'Madri',
        'Mala E Cuia',
        'Manacas', 'Mangueiras', 'Mantiqueira', 'Marajó', 'Maravilha', 'Marçola', 'Maria Goretti',
        'Maria Helena', 'Maria Tereza', 'Maria Virgínia', 'Mariano De Abreu', 'Marieta 1ª Seção', 'Marieta 2ª Seção',
        'Marieta 3ª Seção', 'Marilandia', 'Mariquinhas', 'Marmiteiros', 'Milionario', 'Minas Brasil', 'Minas Caixa',
        'Minaslandia', 'Mineirão', 'Miramar', 'Mirante', 'Mirtes', 'Monsenhor Messias', 'Monte Azul',
        'Monte São José', 'Morro Dos Macacos', 'Nazare', 'Nossa Senhora Aparecida', 'Nossa Senhora Da Aparecida',
        'Nossa Senhora Da Conceição', 'Nossa Senhora De Fátima', 'Nossa Senhora Do Rosário', 'Nova America',
        'Nova Cachoeirinha', 'Nova Cintra', 'Nova Esperança', 'Nova Floresta', 'Nova Gameleira', 'Nova Pampulha',
        'Novo Aarão Reis', 'Novo Das Industrias', 'Novo Glória', 'Novo Santa Cecilia', 'Novo Tupi', 'Oeste', 'Olaria',
        "Olhos D'água", 'Ouro Minas', 'Pantanal', 'Paquetá', 'Paraíso', 'Parque São José', 'Parque São Pedro',
        'Paulo Vi',
        'Pedreira Padro Lopes', 'Penha', 'Petropolis', 'Pilar', 'Pindorama', 'Pindura Saia',
        'Piraja', 'Piratininga', 'Pirineus', 'Pompéia', 'Pongelupe', 'Pousada Santo Antonio', 'Primeiro De Maio',
        'Providencia', 'Ribeiro De Abreu', 'Rio Branco', 'Salgado Filho', 'Santa Amelia', 'Santa Branca',
        'Santa Cecilia',
        'Santa Cruz', 'Santa Helena', 'Santa Inês', 'Santa Isabel', 'Santa Margarida', 'Santa Maria',
        'Santa Rita', 'Santa Rita De Cássia', 'Santa Sofia', 'Santa Terezinha', 'Santana Do Cafezal', 'Santo André',
        'São Benedito', 'São Bernardo', 'São Cristóvão', 'São Damião', 'São Francisco', 'São Francisco Das Chagas',
        'São Gabriel', 'São Geraldo', 'São Gonçalo', 'São João', 'São João Batista', 'São Jorge 1ª Seção',
        'São Jorge 2ª Seção', 'São Jorge 3ª Seção', 'São José', 'São Marcos', 'São Paulo', 'São Salvador',
        'São Sebastião',
        'São Tomaz', 'São Vicente', 'Satelite', 'Saudade', 'Senhor Dos Passos', 'Serra Do Curral', 'Serra Verde',
        'Serrano',
        'Solar Do Barreiro', 'Solimoes', 'Sport Club', 'Suzana', 'Taquaril',
        'Teixeira Dias', 'Tiradentes', 'Tirol', 'Tres Marias', 'Trevo', 'Túnel De Ibirité', 'Tupi A', 'Tupi B', 'União',
        'Unidas', 'Universitário', 'Universo', 'Urca', 'Vale Do Jatoba', 'Varzea Da Palma', 'Venda Nova', 'Ventosa',
        'Vera Cruz', 'Vila Aeroporto', 'Vila Aeroporto Jaraguá', 'Vila Antena', 'Vila Antena Montanhês',
        'Vila Atila De Paiva', 'Vila Bandeirantes', 'Vila Barragem Santa Lúcia', 'Vila Batik', 'Vila Betânia',
        'Vila Boa Vista', 'Vila Calafate', 'Vila Califórnia', 'Vila Canto Do Sabiá', 'Vila Cemig', 'Vila Cloris',
        'Vila Copacabana', 'Vila Copasa', 'Vila Coqueiral', 'Vila Da Amizade', 'Vila Da Ária', 'Vila Da Luz',
        'Vila Da Paz', 'Vila Das Oliveiras', 'Vila Do Pombal', 'Vila Dos Anjos', 'Vila Ecológica',
        'Vila Engenho Nogueira',
        'Vila Esplanada', 'Vila Formosa', 'Vila Fumec', 'Vila Havaí', 'Vila Independencia 1ª Seção',
        'Vila Independencia 2ª Seção', 'Vila Independencia 3ª Seção', 'Vila Inestan', 'Vila Ipiranga',
        'Vila Jardim Alvorada', 'Vila Jardim Leblon', 'Vila Jardim São José', 'Vila Madre Gertrudes 1ª Seção',
        'Vila Madre Gertrudes 2ª Seção', 'Vila Madre Gertrudes 3ª Seção', 'Vila Madre Gertrudes 4ª Seção',
        'Vila Maloca',
        'Vila Mangueiras', 'Vila Mantiqueira', 'Vila Maria', 'Vila Minaslandia', 'Vila Nossa Senhora Do Rosário',
        'Vila Nova', 'Vila Nova Cachoeirinha 1ª Seção', 'Vila Nova Cachoeirinha 2ª Seção',
        'Vila Nova Cachoeirinha 3ª Seção', 'Vila Nova Dos Milionarios', 'Vila Nova Gameleira 1ª Seção',
        'Vila Nova Gameleira 2ª Seção', 'Vila Nova Gameleira 3ª Seção', 'Vila Nova Paraíso', 'Vila Novo São Lucas',
        'Vila Oeste', "Vila Olhos D'água",
        'Vila Ouro Minas', 'Vila Paquetá', 'Vila Paraíso', 'Vila Petropolis', 'Vila Pilar', 'Vila Pinho',
        'Vila Piratininga', 'Vila Piratininga Venda Nova', 'Vila Primeiro De Maio', 'Vila Puc', 'Vila Real 1ª Seção',
        'Vila Real 2ª Seção', 'Vila Rica', 'Vila Santa Monica 1ª Seção', 'Vila Santa Monica 2ª Seção',
        'Vila Santa Rosa',
        'Vila Santo Antônio', 'Vila Santo Antônio Barroquinha', 'Vila São Dimas', 'Vila São Francisco',
        'Vila São Gabriel',
        'Vila São Gabriel Jacui', 'Vila São Geraldo', 'Vila São João Batista', 'Vila São Paulo', 'Vila São Rafael',
        'Vila Satélite', 'Vila Sesc', 'Vila Sumaré', 'Vila Suzana Primeira Seção', 'Vila Suzana Segunda Seção',
        'Vila Tirol', 'Vila Trinta E Um De Março', 'Vila União', 'Vila Vista Alegre', 'Virgínia', 'Vista Alegre',
        'Vista Do Sol', 'Vitoria', 'Vitoria Da Conquista', 'Xangri-Lá', 'Xodo-Marize', 'Zilah Sposito', 'Outro',
        'Novo São Lucas', 'Esplanada', 'Estoril', 'Novo Ouro Preto', 'Ouro Preto', 'Padre Eustáquio', 'Palmares',
        'Palmeiras', 'Vila De Sá', 'Floresta', 'Anchieta', 'Aparecida', 'Grajaú', 'Planalto', 'Bandeirantes',
        'Gutierrez',
        'Jardim América', 'Renascença', 'Barro Preto', 'Barroca', 'Sagrada Família', 'Ipiranga', 'Belvedere',
        'Santa Efigênia', 'Santa Lúcia', 'Santa Monica', 'Vila Jardim Montanhes', 'Santa Rosa', 'Santa Tereza',
        'Buritis', 'Vila Paris', 'Santo Agostinho', 'Santo Antônio', 'Caiçaras', 'São Bento', 'Prado', 'Lourdes',
        'Fernão Dias', 'Carlos Prates', 'Carmo', 'Luxemburgo', 'São Lucas', 'São Luiz', 'Mangabeiras', 'São Pedro',
        'Horto',
        'Cidade Jardim', 'Castelo', 'Cidade Nova', 'Savassi', 'Serra', 'Silveira', 'Sion', 'Centro',
        'Alto Barroca', 'Nova Vista', 'Coração De Jesus', 'Coração Eucarístico', 'Funcionários', 'Cruzeiro',
        'João Pinheiro', 'Nova Granada', 'Nova Suíça', 'Itaipu'
    )
    # Country names in Portuguese.  A duplicated line
    # ('Costa do Marfim' ... 'Egito') was removed here: it appeared twice in
    # the original tuple, which doubled those countries' selection weight.
    countries = ('Afeganistão', 'África do Sul', 'Akrotiri', 'Albânia', 'Alemanha', 'Andorra', 'Angola', 'Anguila',
                 'Antártica', 'Antígua e Barbuda', 'Antilhas Holandesas', 'Arábia Saudita', 'Argélia', 'Argentina',
                 'Armênia', 'Aruba', 'Ashmore and Cartier Islands', 'Austrália', 'Áustria', 'Azerbaijão', 'Bahamas',
                 'Bangladesh', 'Barbados', 'Barein', 'Bélgica', 'Belize', 'Benim', 'Bermudas', 'Bielorrússia',
                 'Birmânia', 'Bolívia', 'Bósnia e Herzegovina', 'Botsuana', 'Brasil', 'Brunei', 'Bulgária',
                 'Burquina Faso', 'Burundi', 'Butão', 'Cabo Verde', 'Camarões', 'Camboja', 'Canadá', 'Catar',
                 'Cazaquistão', 'Chade', 'Chile', 'China', 'Chipre', 'Clipperton Island', 'Colômbia', 'Comores',
                 'Congo-Brazzaville', 'Congo-Kinshasa', 'Coral Sea Islands', 'Coreia do Norte', 'Coreia do Sul',
                 'Costa do Marfim', 'Costa Rica', 'Croácia', 'Cuba', 'Dhekelia', 'Dinamarca', 'Domínica', 'Egito',
                 'Emirados Árabes Unidos', 'Equador', 'Eritreia', 'Eslováquia', 'Eslovênia', 'Espanha',
                 'Estados Unidos',
                 'Estônia', 'Etiópia', 'Faroé', 'Fiji', 'Filipinas', 'Finlândia', 'França', 'Gabão', 'Gâmbia', 'Gana',
                 'Geórgia', 'Geórgia do Sul e Sandwich do Sul', 'Gibraltar', 'Granada', 'Grécia', 'Gronelândia',
                 'Guam', 'Guatemala', 'Guernsey', 'Guiana', 'Guiné', 'Guiné Equatorial', 'Guiné-Bissau', 'Haiti',
                 'Honduras', 'Hong Kong', 'Hungria', 'Iêmen', 'Ilha Bouvet', 'Ilha do Natal', 'Ilha Norfolk',
                 'Ilhas Caiman', 'Ilhas Cook', 'Ilhas dos Cocos', 'Ilhas Falkland', 'Ilhas Heard e McDonald',
                 'Ilhas Marshall', 'Ilhas Salomão', 'Ilhas Turcas e Caicos', 'Ilhas Virgens Americanas',
                 'Ilhas Virgens Britânicas', 'Índia', 'Indonésia', 'Iran', 'Iraque', 'Irlanda', 'Islândia', 'Israel',
                 'Itália', 'Jamaica', 'Jan Mayen', 'Japão', 'Jersey', 'Jibuti', 'Jordânia', 'Kuwait', 'Laos', 'Lesoto',
                 'Letônia', 'Líbano', 'Libéria', 'Líbia', 'Liechtenstein', 'Lituânia', 'Luxemburgo', 'Macau',
                 'Macedônia',
                 'Madagáscar', 'Malásia', 'Malávi', 'Maldivas', 'Mali', 'Malta', 'Man, Isle of', 'Marianas do Norte',
                 'Marrocos', 'Maurícia', 'Mauritânia', 'Mayotte', 'México', 'Micronésia', 'Moçambique', 'Moldávia',
                 'Mônaco', 'Mongólia', 'Monserrate', 'Montenegro', 'Namíbia', 'Nauru', 'Navassa Island', 'Nepal',
                 'Nicarágua', 'Níger', 'Nigéria', 'Niue', 'Noruega', 'Nova Caledónia', 'Nova Zelândia', 'Omã',
                 'Países Baixos', 'Palau', 'Panamá', 'Papua-Nova Guiné', 'Paquistão', 'Paracel Islands', 'Paraguai',
                 'Peru', 'Pitcairn', 'Polinésia Francesa', 'Polônia', 'Porto Rico', 'Portugal', 'Quênia',
                 'Quirguizistão',
                 'Quiribáti', 'Reino Unido', 'República Centro-Africana', 'República Checa', 'República Dominicana',
                 'Roménia', 'Ruanda', 'Rússia', 'Salvador', 'Samoa', 'Samoa Americana', 'Santa Helena', 'Santa Lúcia',
                 'São Cristóvão e Neves', 'São Marinho', 'São Pedro e Miquelon', 'São Tomé e Príncipe',
                 'São Vicente e Granadinas', 'Sara Ocidental', 'Seicheles', 'Senegal', 'Serra Leoa', 'Sérvia',
                 'Singapura', 'Síria', 'Somália', 'Sri Lanka', 'Suazilândia', 'Sudão', 'Suécia', 'Suíça', 'Suriname',
                 'Svalbard e Jan Mayen', 'Tailândia', 'Taiwan', 'Tajiquistão', 'Tanzânia',
                 'Território Britânico do Oceano Índico',
                 'Territórios Austrais Franceses', 'Timor Leste', 'Togo', 'Tokelau', 'Tonga', 'Trindade e Tobago',
                 'Tunísia', 'Turquemenistão', 'Turquia', 'Tuvalu', 'Ucrânia', 'Uganda', 'União Europeia', 'Uruguai',
                 'Usbequistão', 'Vanuatu', 'Vaticano', 'Venezuela', 'Vietnam', 'Wake Island', 'Wallis e Futuna',
                 'Zâmbia', 'Zimbabué'
                 )
    # (abbreviation, full name) pairs for the 26 states plus the DF.
    estados = (
        ('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'), ('AM', 'Amazonas'), ('BA', 'Bahia'),
        ('CE', 'Ceará'), ('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'), ('GO', 'Goiás'), ('MA', 'Maranhão'),
        ('MT', 'Mato Grosso'), ('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'), ('PA', 'Pará'), ('PB', 'Paraíba'),
        ('PR', 'Paraná'), ('PE', 'Pernambuco'), ('PI', 'Piauí'), ('RJ', 'Rio de Janeiro'),
        ('RN', 'Rio Grande do Norte'),
        ('RS', 'Rio Grande do Sul'), ('RO', 'Rondônia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'),
        ('SP', 'São Paulo'),
        ('SE', 'Sergipe'), ('TO', 'Tocantins')
    )

    @classmethod
    def street_prefix(cls):
        """
        Randomly returns a street prefix.
        :example 'Rua'
        """
        return cls.random_element(cls.street_prefixes)

    @classmethod
    def estado(cls):
        """
        Randomly returns a Brazilian State ('sigla', 'nome').
        :example ('MG', 'Minas Gerais')
        """
        return cls.random_element(cls.estados)

    @classmethod
    def estado_nome(cls):
        """
        Randomly returns a Brazilian State Name
        :example 'Minas Gerais'
        """
        return cls.estado()[1]

    @classmethod
    def estado_sigla(cls):
        """
        Randomly returns the abbreviation of a Brazilian State
        :example 'MG'
        """
        return cls.estado()[0]

    @classmethod
    def bairro(cls):
        """
        Randomly returns a bairro (neighborhood) name.
        The names were taken from the city of Belo Horizonte - Minas Gerais.
        :example 'Serra'
        """
        return cls.random_element(cls.bairros)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration: creates the OpenIDNonce and OpenIDStore tables
    # (presumably backing a python-openid store — confirm against the app's
    # store implementation).
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='OpenIDNonce',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                ('server_url', models.CharField(max_length=255)),
                ('timestamp', models.IntegerField()),
                ('salt', models.CharField(max_length=255)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='OpenIDStore',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                ('server_url', models.CharField(max_length=255)),
                ('handle', models.CharField(max_length=255)),
                ('secret', models.TextField()),
                ('issued', models.IntegerField()),
                ('lifetime', models.IntegerField()),
                ('assoc_type', models.TextField()),
            ],
        ),
    ]
"""
API - Resource Manager
"""
import json
import re
import textwrap
from collections import OrderedDict, defaultdict
from os.path import basename, dirname, join
import simple_yaml as yaml
from flask import abort as flask_abort
from flask import (
Response, current_app, make_response, request, send_from_directory
)
from validr import Invalid, SchemaParser
from validr.schema import MarkKey
from werkzeug.wrappers import Response as ResponseBase
from .cli import generate_code, parse_meta
from .exporters import exporters
from .res import Res
# Matches resource method names: "<httpmethod>" or "<httpmethod>_<action>".
PATTERN_ACTION = re.compile(
    r'^(get|post|put|delete|head|options|trace|patch){1}(?:_(.*))?$')
# Matches endpoints of the form "[blueprint.]resource[@action]".
PATTERN_ENDPOINT = re.compile(r"^(?:(.*)\.)?(\w*)(?:@(.*))?$")
# Defaults merged under the "$auth" key of the API meta.
DEFAULT_AUTH = {
    "header": "Authorization",
    "algorithm": "HS256",
    "expiration": 3600,
    "cookie": None,
    "refresh": True
}
# Error symbols every API exposes, merged into the user-supplied "$error".
BUILTIN_ERROR = {
    "400.InvalidData": "request data invalid",
    "403.PermissionDeny": "permission deny",
    "500.ServerError": "internal server error"
}
# Locations of the bundled API-documentation assets.
DOCS_DIST = join(dirname(__file__), 'docs/dist')
DOCS_HTML = join(dirname(__file__), 'docs/docs.html')
def abort(code, error=None, message=None):
    """
    Abort request handling with a suitable error response

    Args:
        code (int): HTTP status code
        error: error symbol (str) or a prepared flask.Response
        message (str): human readable error message
    """
    # No error detail: plain status-code abort.
    if error is None:
        flask_abort(code)
    # A prepared response: stamp the status code on it and raise.
    if isinstance(error, Response):
        error.status_code = code
        flask_abort(code, response=error)
    # Otherwise build the standard error body and export it.
    payload = {
        "status": code,
        "error": error,
        "message": message
    }
    flask_abort(code, response=export(payload, code))
def unpack(rv):
    """
    Normalize an action's return value to a (data, status, headers) triple

    Args:
        rv: plain data, or a tuple optionally carrying status and headers
    Returns:
        tuple (rv, status, headers)
    """
    status = None
    headers = None
    if isinstance(rv, tuple):
        padded = rv + (None,) * (3 - len(rv))
        rv, status, headers = padded
        # A dict/list in the status slot is really a headers mapping.
        if isinstance(status, (dict, list)):
            status, headers = headers, status
    return (rv, status, headers)
def export(rv, code=None, headers=None):
    """
    Build a flask.Response from an action's return value

    Args:
        rv: return value of action (or an already-built response object)
        code: HTTP status code, defaults to 200
        headers: extra response headers
    Returns:
        flask.Response
    """
    # Already a response object: let flask finalize it as-is.
    if isinstance(rv, ResponseBase):
        return make_response(rv, code, headers)
    status = 200 if code is None else code
    # Pick the exporter matching the client's Accept header.
    mediatype = request.accept_mimetypes.best_match(
        exporters.keys(), default='application/json')
    exporter = exporters[mediatype]
    return exporter(rv, status, headers)
def parse_docs(docs, marks):
    """
    Parse YAML-flavoured metadata out of a docstring

    If docs is None, return {}.
    If docs contains none of the marks, return {"$desc": docs}.
    Otherwise everything from the line containing the first mark onwards
    is parsed as YAML and merged with {"$desc": <text before the marks>}.

    Args:
        docs (str): docstring to be parsed
        marks (list): markers (e.g. "$input") indicating where YAML starts
    Returns:
        A dict contains information of docs
    """
    if docs is None:
        return {}
    indexs = [i for i in (docs.find(mark) for mark in marks) if i >= 0]
    if not indexs:
        return {"$desc": textwrap.dedent(docs).strip()}
    start = min(indexs)
    # Rewind to the beginning of the line holding the first mark.  rfind
    # returns -1 when the mark sits on the very first line; the original
    # code then sliced docs[:-1] for the description, wrongly including
    # almost the entire string.  In that case the whole string is YAML and
    # the description is empty.
    newline = docs.rfind("\n", 0, start)
    yamltext = textwrap.dedent(docs[newline + 1:])
    meta = yaml.load(yamltext)
    meta["$desc"] = textwrap.dedent(docs[:newline]).strip() if newline >= 0 else ""
    return meta
def get_request_data():
    """
    Get request data based on request.method

    GET/DELETE read from the query string; POST/PUT/PATCH read a JSON body
    (when mimetype is application/json) or the form data.  Any other
    method yields None.
    """
    method = request.method.lower()
    if method in ["get", "delete"]:
        return request.args
    elif method in ["post", "put", "patch"]:
        if request.mimetype == 'application/json':
            try:
                return request.get_json()
            # Was a bare "except:", which would also swallow SystemExit and
            # KeyboardInterrupt; catch only genuine parsing failures.
            except Exception:
                abort(400, "InvalidData", "invalid json content")
        else:
            return request.form
    else:
        return None
def parse_request():
    """Parse request.endpoint into a (resource, action) pair"""
    matches = None
    if request.endpoint is not None:
        matches = PATTERN_ENDPOINT.findall(request.endpoint)
    if not matches:
        raise ValueError("invalid endpoint %s" % request.endpoint)
    # The blueprint prefix (first group) is irrelevant here.
    __, resource, action_name = matches[0]
    method = request.method.lower()
    action = method + "_" + action_name if action_name else method
    return resource, action
def get_title(desc, default=None):
    """Extract a title from the first line of *desc*.

    Leading markdown '#' markers and surrounding spaces are stripped.
    Falls back to *default* when desc is empty or when its first line
    contains no title text (the original returned '' in that case, and
    its ``if not lines`` check was dead code: str.split always returns
    at least one element).

    Args:
        desc (str): description text, possibly starting with "# Title"
        default: value returned when no title can be extracted
    """
    if not desc:
        return default
    first_line = desc.strip('\n').split('\n')[0]
    title = first_line.lstrip('# ').rstrip(' ')
    return title if title else default
class Api:
    """
    Manager of Resource

    Registers resource classes on a Flask app/blueprint, routing their
    methods by naming convention and validating input/output against the
    schemas declared in their docstrings.

    Args:
        app: Flask or Blueprint
        validators (dict): custom validators
        metafile (str): path of metafile
        docs (str): api docs

    Attributes:
        validators (dict): custom validators
        meta (dict): metadata of api
    """
    def __init__(self, app, validators=None, metafile=None, docs=""):
        # Hooks registered via the before_request/after_request/error_handler
        # decorators below.
        self.before_request_funcs = []
        self.after_request_funcs = []
        self.handle_error_func = None
        self.app = app
        if validators:
            self.validators = validators
        else:
            self.validators = {}
        # Seed self.meta from the metafile (JSON) when given.
        if metafile is None:
            self.meta = {}
        else:
            with open(metafile) as f:
                self.meta = json.load(f)
        # Merge the module-level docs ($desc/$shared/$error sections).
        meta_api = parse_docs(docs, ["$shared", "$error"])
        self.meta["$desc"] = meta_api.get("$desc", "")
        self.meta["$title"] = get_title(self.meta.get('$desc'), 'Document')
        self.meta["$shared"] = meta_api.get("$shared", OrderedDict())
        self.meta["$error"] = BUILTIN_ERROR.copy()
        self.meta["$error"].update(meta_api.get("$error", {}))
        # check shared is valid or not
        if self.meta["$shared"]:
            with MarkKey("$shared"):
                SchemaParser(shared=self.meta["$shared"])
        auth = DEFAULT_AUTH.copy()
        auth.update(self.meta.get("$auth", {}))
        self.meta["$auth"] = auth
        # TODO new feature: $requires
        self.requires = {}
        for k, v in self.meta.get("$requires", {}).items():
            self.requires[k] = Res(v)
        # Lazily-built cache used by meta_view for res.js generation.
        self._resjs_cache = None
    def meta_view(self):
        """
        Meta data / API document

        By default, this view func will return API document(HTML),
        you can set request header `Accept` to `application/json`
        or set query string `json` to get meta data(JSON).
        """
        # API_URL_PREFIX may be different in development and production,
        # so pick it from app.config rather than store it in metafile
        self.meta["$url_prefix"] = current_app.config.get("API_URL_PREFIX", "")
        mediatype = request.accept_mimetypes.best_match(
            ['text/html', 'application/json'], default='text/html')
        dumped = json.dumps(
            self.meta, indent=4, sort_keys=True, ensure_ascii=False)
        if mediatype == 'application/json' or 'json' in request.args:
            return make_response(dumped, {
                "Content-Type": "application/json; charset=utf-8"
            })
        # ?f=res.js / ?f=res.min.js serves the generated client library.
        filename = request.args.get('f')
        if filename in ["res.js", "res.min.js"]:
            # cache parsed meta
            if self._resjs_cache is None:
                self._resjs_cache = parse_meta(self.meta)
            minify = filename == "res.min.js"
            code = generate_code(self._resjs_cache,
                                 prefix=self.meta["$url_prefix"], min=minify)
            response = make_response(code, {
                "Content-Type": "application/javascript"
            })
            # handle etag
            response.add_etag()
            return response.make_conditional(request)
        # Any other ?f=... serves a static documentation asset.
        if filename:
            return send_from_directory(DOCS_DIST, basename(filename))
        # Default: the HTML docs page with title/meta placeholders filled in.
        with open(DOCS_HTML) as f:
            content = f.read()\
                .replace('$(title)', self.meta.get('$title', ''))\
                .replace('$(meta)', dumped)
        return make_response(content)
    def add_resource(self, resource, *class_args, **class_kwargs):
        """
        Add resource

        Parse resource and its actions, route actions by naming rule.

        Args:
            resource: resource class
            class_args: positional args passed to the resource constructor
            class_kwargs: keyword args passed to the resource constructor
        """
        name = resource.__name__.lower()
        meta_resource = parse_docs(resource.__doc__, ["$shared"])
        self.meta[name] = meta_resource
        # Resource-level $shared overrides/extends the API-level one.
        shared = self.meta["$shared"].copy()
        shared.update(meta_resource.get("$shared", {}))
        with MarkKey("%s.$shared" % resource.__name__):
            sp = SchemaParser(validators=self.validators, shared=shared)
        with MarkKey(resource.__name__):
            resource = resource(*class_args, **class_kwargs)
            # group actions by their name, and
            # make each action group a view function
            actions = defaultdict(lambda: {})
            for action in dir(resource):
                find = PATTERN_ACTION.findall(action)
                if not find:
                    continue
                httpmethod, action_name = find[0]
                action_group = actions[action_name]
                fn = getattr(resource, action)
                meta_action = parse_docs(
                    fn.__doc__, ["$input", "$output", "$error"])
                meta_resource[action] = meta_action
                with MarkKey(fn.__name__):
                    action_group[httpmethod] = \
                        self.make_action(fn, sp, meta_action)
            # Route "/resource" for the bare method names and
            # "/resource/action" (endpoint "resource@action") for the rest.
            for action_name in actions:
                if action_name == "":
                    url = "/" + name
                    endpoint = name
                else:
                    url = "/{0}/{1}".format(name, action_name)
                    endpoint = "{0}@{1}".format(name, action_name)
                action_group = actions[action_name]
                self.app.add_url_rule(
                    url, endpoint=endpoint,
                    view_func=self.make_view(action_group),
                    methods=set(action_group)
                )
    def make_action(self, fn, schema_parser, meta):
        """
        Make resource's method an action

        Validate input, output by schema in meta.
        If no input schema, call fn without params.
        If no output schema, will not validate return value.

        Args:
            fn: resource's method
            schema_parser: for parsing schema in meta
            meta: meta data of the action
        """
        validate_input = validate_output = None
        if "$input" in meta:
            with MarkKey("$input"):
                validate_input = schema_parser.parse(meta["$input"])
        if "$output" in meta:
            with MarkKey("$output"):
                validate_output = schema_parser.parse(meta["$output"])
        def action(data):
            if validate_input:
                try:
                    data = validate_input(data)
                except Invalid as ex:
                    # abort raises; the return only makes the intent explicit
                    return abort(400, "InvalidData", str(ex))
                # dict input is spread as keyword arguments
                if isinstance(data, dict):
                    rv = fn(**data)
                else:
                    rv = fn(data)
            else:
                rv = fn()
            rv, status, headers = unpack(rv)
            if validate_output:
                try:
                    rv = validate_output(rv)
                except Invalid as ex:
                    return abort(500, "ServerError", str(ex))
            return rv, status, headers
        return action
    def make_view(self, action_group):
        """
        Create a view function

        Check permission and dispatch request to action by request.method.
        """
        def view(*args, **kwargs):
            try:
                # (sic: local is spelled "httpmathod" in the original)
                httpmathod = request.method.lower()
                if httpmathod not in action_group:
                    abort(405)
                resp = self._before_request()
                # before_request hooks may short-circuit with a response
                if resp is None:
                    fn = action_group[httpmathod]
                    resp = fn(get_request_data())
            except Exception as ex:
                resp = self._handle_error(ex)
                # no handler produced a response: re-raise the original
                if resp is None:
                    raise
            resp = self._after_request(*unpack(resp))
            return export(*resp)
        return view
    def authorize(self, role):
        """Check permission of *role* for the current request"""
        resource, action = parse_request()
        roles = self.meta.get("$roles", {})
        message = "%s can't access %s.%s" % (role, resource, action)
        try:
            # KeyError from an unknown role/resource is denied as well
            if action not in roles[role][resource]:
                abort(403, "PermissionDeny", message)
        except KeyError:
            abort(403, "PermissionDeny", message)
    def _before_request(self):
        # First hook returning a non-None value short-circuits the request.
        for fn in self.before_request_funcs:
            rv = fn()
            if rv is not None:
                return rv
        return None
    def _after_request(self, rv, status, headers):
        # Each hook may transform the (rv, status, headers) triple.
        for fn in self.after_request_funcs:
            rv, status, headers = fn(rv, status, headers)
        return rv, status, headers
    def _handle_error(self, ex):
        if self.handle_error_func:
            return self.handle_error_func(ex)
        return None
    def after_request(self, f):
        """Decorator: register an after-request hook"""
        self.after_request_funcs.append(f)
        return f
    def before_request(self, f):
        """Decorator: register a before-request hook"""
        self.before_request_funcs.append(f)
        return f
    def error_handler(self, f):
        """Decorator: register the error handler"""
        self.handle_error_func = f
        return f
from sympy import (symbols, Symbol, sqrt, oo, re, nan, im, sign, I, E, log,
pi, arg, conjugate, expand, exp, sin, cos, Function, Abs, zoo, atan2,
S, DiracDelta, Rational, Heaviside)
from sympy.utilities.pytest import XFAIL
from sympy.utilities.randtest import comp
def N_equals(a, b):
    """Check whether two complex numbers are numerically close"""
    tolerance = 1.e-6
    return comp(a.n(), b.n(), tolerance)
def test_re():
    """re() extracts real parts of numbers, symbols and compositions."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    assert re(nan) == nan
    assert re(oo) == oo
    assert re(-oo) == -oo
    assert re(0) == 0
    assert re(1) == 1
    assert re(-1) == -1
    assert re(E) == E
    assert re(-E) == -E
    assert re(x) == re(x)
    assert re(x*I) == -im(x)
    assert re(r*I) == 0
    assert re(r) == r
    # for imaginary i, re(i*I) is the (real) number I*i
    assert re(i*I) == I * i
    assert re(i) == 0
    assert re(x + y) == re(x + y)
    assert re(x + r) == re(x) + r
    assert re(re(x)) == re(x)
    assert re(2 + I) == 2
    assert re(x + I) == re(x)
    assert re(x + y*I) == re(x) - im(y)
    assert re(x + r*I) == re(x)
    assert re(log(2*I)) == log(2)
    assert re((2+I)**2).expand(complex=True) == 3
    assert re(conjugate(x)) == re(x)
    assert conjugate(re(x)) == re(x)
    assert re(x).as_real_imag() == (re(x), 0)
    assert re(i*r*x).diff(r) == re(i*x)
    assert re(i*r*x).diff(i) == -I * im(r*x)
    # polar form of sqrt of a general complex number
    assert re(sqrt(a + b*I)) == (a**2 + b**2)**Rational(1,4)*cos(atan2(b, a)/2)
    assert re(a * (2 + b*I)) == 2*a
    assert re((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1,4)*cos(atan2(b, a)/2)/2 + Rational(1,2)
def test_im():
    """im() extracts imaginary parts of numbers, symbols and compositions."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    assert im(nan) == nan
    assert im(oo*I) == oo
    assert im(-oo*I) == -oo
    assert im(0) == 0
    assert im(1) == 0
    assert im(-1) == 0
    assert im(E*I) == E
    assert im(-E*I) == -E
    assert im(x) == im(x)
    assert im(x*I) == re(x)
    assert im(r*I) == r
    assert im(r) == 0
    assert im(i*I) == 0
    # for imaginary i, im(i) is the (real) number -I*i
    assert im(i) == -I * i
    assert im(x + y) == im(x + y)
    assert im(x + r) == im(x)
    assert im(x + r*I) == im(x) + r
    assert im(im(x)*I) == im(x)
    assert im(2 + I) == 1
    assert im(x + I) == im(x) + 1
    assert im(x + y*I) == im(x) + re(y)
    assert im(x + r*I) == im(x) + r
    assert im(log(2*I)) == pi/2
    assert im((2+I)**2).expand(complex=True) == 4
    assert im(conjugate(x)) == -im(x)
    assert conjugate(im(x)) == im(x)
    assert im(x).as_real_imag() == (im(x), 0)
    assert im(i*r*x).diff(r) == im(i*x)
    assert im(i*r*x).diff(i) == -I * re(r*x)
    # polar form of sqrt of a general complex number
    assert im(sqrt(a + b*I)) == (a**2 + b**2)**Rational(1,4)*sin(atan2(b, a)/2)
    assert im(a * (2 + b*I)) == a*b
    assert im((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1,4)*sin(atan2(b, a)/2)/2
def test_sign():
    """sign() over numbers and symbols with various assumptions."""
    assert sign(1.2) == 1
    assert sign(-1.2) == -1
    assert sign(3*I) == I
    assert sign(-3*I) == -I
    assert sign(0) == 0
    assert sign(nan) == nan
    # unassumed symbol: sign stays unevaluated, zero-ness unknown
    x = Symbol('x')
    assert sign(x).is_zero == None
    assert sign(x).doit() == sign(x)
    # positive numeric factors are absorbed
    assert sign(1.2*x) == sign(x)
    assert sign(2*x) == sign(x)
    assert sign(I*x) == I*sign(x)
    assert sign(-2*I*x) == -I*sign(x)
    assert sign(conjugate(x)) == conjugate(sign(x))
    p = Symbol('p', positive = True)
    n = Symbol('n', negative = True)
    m = Symbol('m', negative = True)
    assert sign(2*p*x) == sign(x)
    assert sign(n*x) == -sign(x)
    # two negative factors cancel
    assert sign(n*m*x) == sign(x)
    # imaginary symbol
    x = Symbol('x', imaginary=True)
    assert sign(x).is_zero == False
    assert sign(x).diff(x) == 2*DiracDelta(-I*x)
    assert sign(x).doit() == x / Abs(x)
    assert conjugate(sign(x)) == -sign(x)
    # real symbol
    x = Symbol('x', real=True)
    assert sign(x).is_zero == None
    assert sign(x).diff(x) == 2*DiracDelta(x)
    assert sign(x).doit() == sign(x)
    assert conjugate(sign(x)) == sign(x)
    # nonzero symbol
    x = Symbol('x', nonzero=True)
    assert sign(x).is_zero == False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    # positive symbol
    x = Symbol('x', positive=True)
    assert sign(x).is_zero == False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    # literal zero
    x = 0
    assert sign(x).is_zero == True
    assert sign(x).doit() == 0
    assert sign(Abs(x)) == 0
    assert Abs(sign(x)) == 0
    # even powers of a nonzero sign collapse; odd ones stay symbolic
    nz = Symbol('nz', nonzero=True, integer=True)
    assert sign(nz)**2 == 1
    assert (sign(nz)**3).args == (sign(nz), 3)
def test_as_real_imag():
    """as_real_imag() on powers and square roots of complex expressions."""
    n = pi**1000
    # the special code for working out the real
    # and complex parts of a power with Integer exponent
    # should not run if there is no imaginary part, hence
    # this should not hang
    assert n.as_real_imag() == (n, 0)
    # issue 3162
    x = Symbol('x')
    assert sqrt(x).as_real_imag() == \
        ((re(x)**2 + im(x)**2)**(S(1)/4)*cos(atan2(im(x), re(x))/2), \
        (re(x)**2 + im(x)**2)**(S(1)/4)*sin(atan2(im(x), re(x))/2))
    # issue 754
    a, b = symbols('a,b', real=True)
    assert ((1 + sqrt(a + b*I))/2).as_real_imag() == \
        ((a**2 + b**2)**Rational(1,4)*cos(atan2(b, a)/2)/2 + Rational(1,2), \
        (a**2 + b**2)**Rational(1,4)*sin(atan2(b, a)/2)/2)
@XFAIL
def test_sign_issue_3068():
    """Known-failing: sign/round of pi**1000 minus its integer part."""
    # NOTE(review): "x" is never defined in this function, so the last two
    # asserts fail with NameError rather than a wrong-sign assertion —
    # presumably it was meant to be Symbol('x'); confirm upstream.
    n = pi**1000
    i = int(n)
    assert (n - i).round() == 1 # doesn't hang
    assert sign(n - i) == 1
    # perhaps it's not possible to get the sign right when
    # only 1 digit is being requested for this situation;
    # 2 digits works
    assert (n - x).n(1, subs={x: i}) > 0
    assert (n - x).n(2, subs={x: i}) > 0
def test_Abs():
    """Abs() over numbers and symbols, plus its interaction with sign()."""
    x, y = symbols('x,y')
    assert sign(sign(x)) == sign(x)
    assert sign(x*y).func is sign
    assert Abs(0) == 0
    assert Abs(1) == 1
    assert Abs(-1) == 1
    assert Abs(I) == 1
    assert Abs(-I) == 1
    assert Abs(nan) == nan
    assert Abs(I * pi) == pi
    assert Abs(-I * pi) == pi
    assert Abs(I * x) == Abs(x)
    assert Abs(-I * x) == Abs(x)
    assert Abs(-2*x) == 2*Abs(x)
    assert Abs(-2.0*x) == 2.0*Abs(x)
    assert Abs(2*pi*x*y) == 2*pi*Abs(x*y)
    assert Abs(conjugate(x)) == Abs(x)
    assert conjugate(Abs(x)) == Abs(x)
    # positive factors are pulled out of Abs
    a = Symbol('a', positive=True)
    assert Abs(2*pi*x*a) == 2*pi*a*Abs(x)
    assert Abs(2*pi*I*x*a) == 2*pi*a*Abs(x)
    # real symbol: even powers and derivative simplify
    x = Symbol('x', real=True)
    n = Symbol('n', integer=True)
    assert x**(2*n) == Abs(x)**(2*n)
    assert Abs(x).diff(x) == sign(x)
    assert abs(x) == Abs(x) # Python built-in
    assert Abs(x)**3 == x**2*Abs(x)
    assert Abs(x)**4 == x**4
    assert (Abs(x)**(3*n)).args == (Abs(x), 3*n) # leave symbolic odd unchanged
    assert (1/Abs(x)).args == (Abs(x), -1)
    assert 1/Abs(x)**3 == 1/(x**2*Abs(x))
    # imaginary symbol: derivative flips sign
    x = Symbol('x', imaginary=True)
    assert Abs(x).diff(x) == -sign(x)
def test_Abs_rewrite():
    """Abs rewrites to Heaviside for real symbols; otherwise it is a no-op."""
    x = Symbol('x', real=True)
    a = Abs(x).rewrite(Heaviside).expand()
    assert a == x*Heaviside(x) - x*Heaviside(-x)
    # The rewrite must agree with the builtin abs at sample points.
    for i in [-2, -1, 0, 1, 2]:
        assert a.subs(x, i) == abs(i)
    y = Symbol('y')
    # Without a realness assumption the rewrite leaves Abs unchanged.
    assert Abs(y).rewrite(Heaviside) == Abs(y)
def test_Abs_real():
    """Identities of Abs that hold only for real arguments."""
    # test some properties of abs that only apply
    # to real numbers
    x = Symbol('x', complex=True)
    assert sqrt(x**2) != Abs(x)
    assert Abs(x**2) != x**2
    # Rebind x as real: the identities now hold.
    x = Symbol('x', real=True)
    assert sqrt(x**2) == Abs(x)
    assert Abs(x**2) == x**2
    # if the symbol is zero, the following will still apply
    nn = Symbol('nn', nonnegative=True, real=True)
    np = Symbol('np', nonpositive=True, real=True)
    assert Abs(nn) == nn
    assert Abs(np) == -np
def test_Abs_properties():
    """Assumption propagation through Abs (is_real/is_positive/is_zero)."""
    x = Symbol('x')
    assert Abs(x).is_real == True
    assert Abs(x).is_positive == None  # could be zero
    assert Abs(x).is_nonnegative == True
    # A nonzero complex number has a strictly positive absolute value.
    w = Symbol('w', complex=True, zero=False)
    assert Abs(w).is_real == True
    assert Abs(w).is_positive == True
    assert Abs(w).is_zero == False
    q = Symbol('q', positive=True)
    assert Abs(q).is_real == True
    assert Abs(q).is_positive == True
    assert Abs(q).is_zero == False
def test_abs():
    """The Python builtin abs() delegates to sympy's Abs."""
    # this tests that abs calls Abs; don't rename to
    # test_Abs since that test is already above
    a = Symbol('a', positive=True)
    assert abs(I*(1 + a)**2) == (1 + a)**2
def test_arg():
    """Complex argument arg() at notable points and under assumptions."""
    assert arg(0) == nan
    assert arg(1) == 0
    assert arg(-1) == pi
    assert arg(I) == pi/2
    assert arg(-I) == -pi/2
    assert arg(1+I) == pi/4
    assert arg(-1+I) == 3*pi/4
    assert arg(1-I) == -pi/4
    p = Symbol('p', positive=True)
    assert arg(p) == 0
    n = Symbol('n', negative=True)
    assert arg(n) == pi
    x = Symbol('x')
    # arg is real-valued, so it equals its own conjugate.
    assert conjugate(arg(x)) == arg(x)
def test_conjugate():
    """conjugate(): fixed points, involution, and distribution over
    +, -, *, / and negation."""
    a = Symbol('a', real=True)
    assert conjugate(a) == a
    assert conjugate(I*a) == -I*a
    x, y = symbols('x,y')
    assert conjugate(conjugate(x)) == x
    assert conjugate(x + y) == conjugate(x) + conjugate(y)
    assert conjugate(x - y) == conjugate(x) - conjugate(y)
    assert conjugate(x * y) == conjugate(x) * conjugate(y)
    assert conjugate(x / y) == conjugate(x) / conjugate(y)
    assert conjugate(-x) == -conjugate(x)
def test_issue936():
    """Regression: expand(trig=True) must be a no-op on Abs/sign/arg."""
    x = Symbol('x')
    assert Abs(x).expand(trig=True) == Abs(x)
    assert sign(x).expand(trig=True) == sign(x)
    assert arg(x).expand(trig=True) == arg(x)
def test_issue3206():
    """Regression: Abs is idempotent."""
    x = Symbol('x')
    assert Abs(Abs(x)) == Abs(x)
def test_issue1655_derivative_conjugate():
    """Regression: d/dx conjugate(f(x)) for real x vs imaginary y.

    For real x conjugation commutes with differentiation; for purely
    imaginary y an extra sign flip appears.
    """
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    assert (f(x).conjugate()).diff(x) == (f(x).diff(x)).conjugate()
    assert (f(y).conjugate()).diff(y) == -(f(y).diff(y)).conjugate()
def test_derivatives_issue1658():
    """Regression: derivatives of re, im, Abs and arg of f(x).

    Real variables commute with re/im; purely imaginary variables pick up
    a factor of -I and swap re <-> im.
    """
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    assert re(f(x)).diff(x) == re(f(x).diff(x))
    assert im(f(x)).diff(x) == im(f(x).diff(x))
    assert re(f(y)).diff(y) == -I*im(f(y).diff(y))
    assert im(f(y)).diff(y) == -I*re(f(y).diff(y))
    # Spot-check Abs'/arg' by substituting concrete expressions for f.
    assert Abs(f(x)).diff(x).subs(f(x), 1+I*x).doit() == x/sqrt(1 + x**2)
    assert arg(f(x)).diff(x).subs(f(x), 1+I*x**2).doit() == 2*x/(1+x**4)
    assert Abs(f(y)).diff(y).subs(f(y), 1+y).doit() == -y/sqrt(1 - y**2)
    assert arg(f(y)).diff(y).subs(f(y), I+y**2).doit() == 2*y/(1 + y**4)
def test_periodic_argument():
    """periodic_argument/unbranched_argument on numbers, symbols and
    polar-lifted values.

    NOTE(review): N_equals is presumably a numeric-comparison helper
    defined elsewhere in this test module.
    """
    from sympy import (periodic_argument, unbranched_argument, oo,
                       principal_branch, polar_lift, pi)
    x = Symbol('x')
    p = Symbol('p', positive = True)
    # unbranched_argument is the infinite-period case.
    assert unbranched_argument(2 + I) == periodic_argument(2 + I, oo)
    assert unbranched_argument(1 + x) == periodic_argument(1 + x, oo)
    assert N_equals(unbranched_argument((1+I)**2), pi/2)
    assert N_equals(unbranched_argument((1-I)**2), -pi/2)
    assert N_equals(periodic_argument((1+I)**2, 3*pi), pi/2)
    assert N_equals(periodic_argument((1-I)**2, 3*pi), -pi/2)
    assert unbranched_argument(principal_branch(x, pi)) \
           == periodic_argument(x, pi)
    # polar_lift is transparent to argument extraction (period > pi).
    assert unbranched_argument(polar_lift(2 + I)) == unbranched_argument(2 + I)
    assert periodic_argument(polar_lift(2 + I), 2*pi) \
           == periodic_argument(2 + I, 2*pi)
    assert periodic_argument(polar_lift(2 + I), 3*pi) \
           == periodic_argument(2 + I, 3*pi)
    assert periodic_argument(polar_lift(2 + I), pi) \
           == periodic_argument(polar_lift(2 + I), pi)
    assert unbranched_argument(polar_lift(1 + I)) == pi/4
    # Positive factors drop out of the periodic argument.
    assert periodic_argument(2*p, p) == periodic_argument(p, p)
    assert periodic_argument(pi*p, p) == periodic_argument(p, p)
@XFAIL
def test_principal_branch_fail():
    """Known failure: numeric evaluation of principal_branch.

    NOTE(review): principal_branch and N_equals are presumably available at
    module level in the full test file.
    """
    # TODO XXX why does abs(x)._eval_evalf() not fall back to global evalf?
    assert N_equals(principal_branch((1 + I)**2, pi/2), 0)
def test_principal_branch():
    """principal_branch: interaction with polar_lift/exp_polar, numeric
    spot checks, and rejection of invalid periods."""
    from sympy import principal_branch, polar_lift, exp_polar
    p = Symbol('p', positive=True)
    x = Symbol('x')
    neg = Symbol('x', negative=True)
    assert principal_branch(polar_lift(x), p) == principal_branch(x, p)
    assert principal_branch(polar_lift(2 + I), p) == principal_branch(2 + I, p)
    # Positive factors can be pulled out of the branch cut.
    assert principal_branch(2*x, p) == 2*principal_branch(x, p)
    assert principal_branch(1, pi) == exp_polar(0)
    assert principal_branch(-1, 2*pi) == exp_polar(I*pi)
    assert principal_branch(-1, pi) == exp_polar(0)
    assert principal_branch(exp_polar(3*pi*I)*x, 2*pi) == \
           principal_branch(exp_polar(I*pi)*x, 2*pi)
    assert principal_branch(neg*exp_polar(pi*I), 2*pi) == neg*exp_polar(-I*pi)
    assert N_equals(principal_branch((1 + I)**2, 2*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 3*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 1*pi), 2*I)
    # test argument sanitization: invalid periods stay unevaluated
    assert principal_branch(x, I).func is principal_branch
    assert principal_branch(x, -4).func is principal_branch
    assert principal_branch(x, -oo).func is principal_branch
    assert principal_branch(x, zoo).func is principal_branch
"""Base class for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD Style
import copy
import inspect
import numpy as np
from scipy import sparse
from .metrics import r2_score
###############################################################################
def clone(estimator, safe=True):
    """Constructs a new estimator with the same parameters.

    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.

    Parameters
    ----------
    estimator: estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned

    safe: boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.

    Returns
    -------
    A new, unfitted estimator (or the same kind of collection of them)
    carrying freshly-cloned parameters.

    Raises
    ------
    TypeError
        If ``estimator`` does not expose ``get_params`` and ``safe`` is true.
    RuntimeError
        If the clone's constructor did not faithfully store a parameter.
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, 'get_params'):
        if not safe:
            return copy.deepcopy(estimator)
        else:
            # Bug fix: the original message was garbled ("...estimator a
            # it does not implement a 'get_params' methods").
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator "
                            "as it does not implement a 'get_params' method."
                            % (repr(estimator), type(estimator)))
    klass = estimator.__class__
    new_object_params = estimator.get_params(deep=False)
    for name, param in new_object_params.iteritems():
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object.get_params(deep=False)
    # quick sanity check of the parameters of the clone
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For most ndarrays, we do not test for complete equality
            if not isinstance(param2, type(param1)):
                equality_test = False
            elif (param1.ndim > 0
                    and param1.shape[0] > 0
                    and isinstance(param2, np.ndarray)
                    and param2.ndim > 0
                    and param2.shape[0] > 0):
                # Cheap spot-check: shape, dtype and the two end elements.
                equality_test = (
                    param1.shape == param2.shape
                    and param1.dtype == param2.dtype
                    # We have to use '.flat' for 2D arrays
                    and param1.flat[0] == param2.flat[0]
                    and param1.flat[-1] == param2.flat[-1]
                )
            else:
                equality_test = np.all(param1 == param2)
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work
            if not sparse.issparse(param2):
                equality_test = False
            elif param1.size == 0 or param2.size == 0:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.size == 0
                    and param2.size == 0
                )
            else:
                # Spot-check class, first/last data elements, nnz, shape.
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.data[0] == param2.data[0]
                    and param1.data[-1] == param2.data[-1]
                    and param1.nnz == param2.nnz
                    and param1.shape == param2.shape
                )
        else:
            # param1/param2 are already bound to these entries above.
            equality_test = param1 == param2
        if not equality_test:
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'does not seem to set parameter %s' %
                               (estimator, name))
    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
    """Pretty print the dictionary 'params'

    Parameters
    ----------
    params: dict
        The dictionary to pretty print

    offset: int
        The offset in characters to add at the begin of each line.

    printer:
        The function to convert entries to strings, typically
        the builtin str or repr

    Returns
    -------
    lines: str
        A single (possibly multi-line) 'key=value, ...' string, wrapped
        at roughly 75 columns.
    """
    # Do a multi-line justified repr:
    options = np.get_printoptions()
    # Temporarily shorten numpy array reprs while building the string.
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
    params_list = list()
    this_line_length = offset
    line_sep = ',\n' + (1 + offset // 2) * ' '
    for i, (k, v) in enumerate(sorted(params.iteritems())):
        if type(v) is float:
            # use str for representing floating point numbers
            # this way we get consistent representation across
            # architectures and versions.
            this_repr = '%s=%s' % (k, str(v))
        else:
            # use repr of the rest
            this_repr = '%s=%s' % (k, printer(v))
        if len(this_repr) > 500:
            # Truncate very long representations, keeping head and tail.
            this_repr = this_repr[:300] + '...' + this_repr[-100:]
        if i > 0:
            # Wrap before entries that would overflow the line.
            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
                params_list.append(line_sep)
                this_line_length = len(line_sep)
            else:
                params_list.append(', ')
                this_line_length += 2
        params_list.append(this_repr)
        this_line_length += len(this_repr)
    # Restore the caller's numpy print options.
    np.set_printoptions(**options)
    lines = ''.join(params_list)
    # Strip trailing space to avoid nightmare in doctests
    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines
###############################################################################
class BaseEstimator(object):
    """Base class for all estimators in scikit-learn

    Notes
    -----
    All estimators should specify all the parameters that can be set
    at the class level in their __init__ as explicit keyword
    arguments (no *args, **kwargs).
    """

    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator.

        Introspects the constructor signature; returns the sorted list of
        argument names (minus 'self').
        """
        try:
            # fetch the constructor or the original constructor before
            # deprecation wrapping if any
            init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
            # introspect the constructor arguments to find the model parameters
            # to represent
            args, varargs, kw, default = inspect.getargspec(init)
            if varargs is not None:
                raise RuntimeError('scikit learn estimators should always '
                                   'specify their parameters in the signature'
                                   ' of their init (no varargs).')
            # Remove 'self'
            # XXX: This is going to fail if the init is a staticmethod, but
            # who would do this?
            args.pop(0)
        except TypeError:
            # No explicit __init__ (e.g. object.__init__ is a slot wrapper)
            args = []
        args.sort()
        return args

    def get_params(self, deep=True):
        """Get parameters for the estimator

        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params: dict
            Parameter names mapped to their values; nested estimator
            parameters use '<component>__<parameter>' keys.
        """
        out = dict()
        for key in self._get_param_names():
            value = getattr(self, key, None)
            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out

    def set_params(self, **params):
        """Set the parameters of the estimator.

        The method works on simple estimators as well as on nested objects
        (such as pipelines). The former have parameters of the form
        ``<component>__<parameter>`` so that it's possible to update each
        component of a nested object.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            # Bug fix: the bare 'return' here returned None, breaking the
            # documented "Returns self" contract and method chaining.
            return self
        valid_params = self.get_params(deep=True)
        for key, value in params.iteritems():
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case: delegate to the sub-estimator
                name, sub_name = split
                if name not in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s' %
                                     (name, self))
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if key not in valid_params:
                    raise ValueError('Invalid parameter %s ' 'for estimator %s'
                                     % (key, self.__class__.__name__))
                setattr(self, key, value)
        return self

    def __repr__(self):
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
                                               offset=len(class_name),),)

    def __str__(self):
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name,
                           _pprint(self.get_params(deep=True),
                                   offset=len(class_name), printer=str,),)
###############################################################################
class ClassifierMixin(object):
    """Mixin class for all classifiers in scikit-learn"""

    def score(self, X, y):
        """Returns the mean accuracy on the given test data and labels.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training set.
        y : array-like, shape = [n_samples]
            Labels for X.

        Returns
        -------
        z : float
            Fraction of samples for which predict(X) matches y.
        """
        predictions = self.predict(X)
        return np.mean(predictions == y)
###############################################################################
class RegressorMixin(object):
    """Mixin class for all regression estimators in scikit-learn"""

    def score(self, X, y):
        """Returns the coefficient of determination R^2 of the prediction.

        The coefficient R^2 is defined as (1 - u/v), where u is the
        regression sum of squares ((y - y_pred) ** 2).sum() and v is the
        residual sum of squares ((y_true - y_true.mean()) ** 2).sum().
        Best possible score is 1.0, lower values are worse.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training set.
        y : array-like, shape = [n_samples]
            True target values for X.

        Returns
        -------
        z : float
        """
        y_pred = self.predict(X)
        return r2_score(y, y_pred)
###############################################################################
class ClusterMixin(object):
    """Mixin class for all cluster estimators in scikit-learn"""
    def fit_predict(self, X, y=None):
        """Performs clustering on X and returns cluster labels.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.
        y : ignored
            Present for API consistency.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        # non-optimized default implementation; override when a better
        # method is possible for a given clustering algorithm
        # (fit() is expected to store the result in self.labels_; its
        # return value is deliberately not relied upon here)
        self.fit(X)
        return self.labels_
###############################################################################
class TransformerMixin(object):
    """Mixin class for all transformers in scikit-learn"""

    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.
        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        # Default implementation: fit (with or without targets depending
        # on arity), then transform. Override when the estimator can do
        # both steps more efficiently in one pass.
        if y is None:
            fitted = self.fit(X, **fit_params)
        else:
            fitted = self.fit(X, y, **fit_params)
        return fitted.transform(X)
###############################################################################
class MetaEstimatorMixin(object):
    """Mixin class for all meta estimators in scikit-learn"""
    # this is just a tag for the moment: it carries no behaviour and only
    # marks estimators that wrap other estimators (for isinstance checks)
###############################################################################
# XXX: Temporary solution to figure out if an estimator is a classifier
def _get_sub_estimator(estimator):
"""Returns the final estimator if there is any."""
if hasattr(estimator, 'estimator'):
# GridSearchCV and other CV-tuned estimators
return _get_sub_estimator(estimator.estimator)
if hasattr(estimator, 'steps'):
# Pipeline
return _get_sub_estimator(estimator.steps[-1][1])
return estimator
def is_classifier(estimator):
    """Returns True if the given estimator is (probably) a classifier.

    Unwraps meta-estimators first, then checks for ClassifierMixin.
    """
    return isinstance(_get_sub_estimator(estimator), ClassifierMixin)
#!/usr/bin/env python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import wsgiref.handlers
import xml.dom.minidom
import simplejson
import xml2json
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
# Matches an acceptable JSONp callback (identifier chars, dots, brackets).
# NOTE(review): this is used with re.match, which anchors only at the
# start of the string -- callers must verify the match covers the whole
# value before echoing it into a JavaScript response.
_CALLBACK_REGEX = re.compile(r'[\w\.\[\]]+')
def FetchUrlContent(url):
    """Returns the string fetched from the given URL.

    Uses the urlfetch interface to get the contents of a given URL. The
    memcache version will be returned if recent (entries are cached for
    one hour).

    Args:
        url: The url to fetch.

    Raises:
        LookupError: The URL was not able to be fetched (non-200 status).
    """
    content = memcache.get(url)
    if content:
        return content
    request = urlfetch.fetch(url)
    if request.status_code == 200:
        content = request.content
        # Cache successful fetches for an hour.
        memcache.add(url, content, 60 * 60)
        return content
    raise LookupError('Unable to fetch URL. Response code: ' +
                      str(request.status_code))
def FetchJsonFromUrl(url):
    """Returns a JSON string representing an XML document at the given URL.

    Fetches the document (possibly from cache), parses it as XML, and
    converts it with xml2json.

    Args:
        url: The url of an XML document to fetch.

    Raises:
        LookupError: The URL was not able to be fetched.
        SyntaxError: The XML document had bad syntax.
    """
    doc_str = FetchUrlContent(url)
    doc = xml.dom.minidom.parseString(doc_str)
    doc_json = xml2json.DocumentToJson(doc)
    return doc_json
class HomeHandler(webapp.RequestHandler):
    """Handles the root index page."""

    def get(self):
        """Writes out the root page.

        Bug fix: the original nested two response.out.write calls, so the
        inner call's None return value was written a second time.
        """
        self.response.out.write(template.render('index.tpl', {}))
class ProxyHandler(webapp.RequestHandler):
"""Handles the proxy form (/proxy)."""
def get(self):
"""Handle a proxy request."""
callback = None
try:
url = self.request.get('url')
tmp_callback = self.request.get('callback')
if (tmp_callback is not None and
_CALLBACK_REGEX.match(tmp_callback)):
callback = tmp_callback
json = FetchJsonFromUrl(url)
# We don't want to just 500 on an error, we still need to return valid
# JSON or our users will get JavaScript errors.
except Exception, e:
self.response.set_status(503)
json = simplejson.dumps({'$error': str(e)})
# Wrap as JSONp if requested
if callback is not None:
json = '%s(%s)' % (callback, json)
self.response.headers.add_header(
'Content-Type', 'application/javascript')
self.response.out.write(json)
class TemplateHandler(webapp.RequestHandler):
    """Handler that just serves a template."""
    def __init__(self, template_path):
        """Create a handler that serves the provided template.

        Args:
            template_path: The path to the template.
        """
        # NOTE(review): the webapp.RequestHandler base __init__ is not
        # invoked here; webapp sets up handlers via initialize(), so this
        # looks intentional -- confirm against the framework version used.
        self.template_path = template_path
    def get(self):
        """Serves the template with an empty context."""
        self.response.out.write(template.render(self.template_path, {}))
class AboutHandler(TemplateHandler):
    """Serves the about page."""
    def __init__(self):
        # Fixed template: about.tpl
        TemplateHandler.__init__(self, 'about.tpl')
class ExamplesHandler(TemplateHandler):
    """Serves the examples page."""
    def __init__(self):
        # Fixed template: examples.tpl
        TemplateHandler.__init__(self, 'examples.tpl')
class AmazonExampleHandler(TemplateHandler):
    """Serves the Amazon example page."""
    def __init__(self):
        # Fixed template: amazon.tpl
        TemplateHandler.__init__(self, 'amazon.tpl')
def main():
    """Builds the WSGI application with its URL routes and runs it as CGI."""
    application = webapp.WSGIApplication([
        ('/', HomeHandler),
        ('/about', AboutHandler),
        ('/proxy', ProxyHandler),
        ('/examples', ExamplesHandler),
        ('/examples/amazon', AmazonExampleHandler)
    ], debug=True)  # debug=True: render stack traces in error responses
    wsgiref.handlers.CGIHandler().run(application)
# Script entry point when executed directly as a CGI script.
if __name__ == '__main__':
    main()
#
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
# A project/version identifier: word chars, dots and dashes.
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
# Relational operators: <=, >=, ==, !=, ~=, <, >
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
                    RELOP + r')\s*(' + VERSPEC + '))*')
# Direct reference: 'from <url>' (captured as group 'diref').
DIRECT_REF = '(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
# The full requirement: name (group 'dn'), optional extras, constraints.
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
               CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
    """Parse a requirement string like 'foo >= 1.2, < 2.0 [bar, baz]'.

    Returns a Container with attributes: name, constraints (list of
    (op, version) pairs or None), extras (list or None), requirement (a
    normalised string form), source (the original string) and url (for
    'name (from <url>)' direct references, else None). Returns None when
    the string does not match REQUIREMENT_RE.
    """
    def get_constraint(m):
        # Split one 'op version' fragment into its two captured pieces.
        d = m.groupdict()
        return d['op'], d['vn']
    result = None
    m = REQUIREMENT_RE.match(s)
    if m:
        d = m.groupdict()
        name = d['dn']
        # Constraints come from either the parenthesised (c1) or bare (c2)
        # alternative of the regex.
        cons = d['c1'] or d['c2']
        if not d['diref']:
            url = None
        else:
            # direct reference: a URL replaces version constraints
            cons = None
            url = d['diref'].strip()
        if not cons:
            cons = None
            constr = ''  # NOTE(review): assigned but never used
            rs = d['dn']
        else:
            if cons[0] not in '<>!=':
                # The leading relational operator is optional; default '~='.
                cons = '~=' + cons
            iterator = RELOP_IDENT_RE.finditer(cons)
            cons = [get_constraint(m) for m in iterator]
            rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
        if not d['ex']:
            extras = None
        else:
            extras = COMMA_RE.split(d['ex'])
        result = Container(name=name, constraints=cons, extras=extras,
                           requirement=rs, source=s, url=url)
    return result
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files.

    Each rule is a (base, suffix, dest) triple where base and suffix are
    glob patterns relative to resources_root. A dest of None removes
    previously matched files from the result, so rule order matters.

    Returns a dict mapping '/'-separated source path (relative to
    resources_root) to its destination path.
    """
    def get_rel_path(base, path):
        # normalizes and returns a lstripped-/-separated path
        base = base.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(base)
        return path[len(base):].lstrip('/')
    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        # NOTE(review): 'iglob' here is the extended glob defined elsewhere
        # in this module (glob.iglob is imported above as std_iglob).
        for abs_base in iglob(prefix):
            abs_glob = os.path.join(abs_base, suffix)
            for abs_path in iglob(abs_glob):
                resource_file = get_rel_path(resources_root, abs_path)
                if dest is None:  # remove the entry if it was here
                    destinations.pop(resource_file, None)
                else:
                    rel_path = get_rel_path(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations
def in_venv():
    """Return True when running inside a virtual environment.

    virtualenv sets ``sys.real_prefix``; PEP 405 venvs make ``sys.prefix``
    differ from ``sys.base_prefix``.
    """
    if hasattr(sys, 'real_prefix'):
        # virtualenv venvs
        return True
    # PEP 405 venvs
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the path of the Python interpreter to use.

    On OS X, honour the __PYVENV_LAUNCHER__ environment variable set by
    the venv launcher; everywhere else, use sys.executable.
    """
    launcher = os.environ.get('__PYVENV_LAUNCHER__')
    if sys.platform == 'darwin' and launcher is not None:
        return launcher
    return sys.executable
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt the user until an allowed character is entered.

    Reads input via raw_input; the first character of the response,
    lowercased, is tested against allowed_chars. An empty response falls
    back to default (if given). When error_prompt is set, it is shown
    before re-prompting after invalid input.

    Returns the accepted lowercased character.
    """
    p = prompt
    while True:
        s = raw_input(p)
        p = prompt
        if not s and default:
            s = default
        if s:
            c = s[0].lower()
            if c in allowed_chars:
                break
        # NOTE(review): if the response is empty and no default is set,
        # 'c' is unbound when formatting error_prompt below.
        if error_prompt:
            p = '%c: %s\n%s' % (c, error_prompt, prompt)
    return c
def extract_by_key(d, keys):
    """Return a new dict with the entries of d whose key is in keys.

    keys may be an iterable of keys or a single whitespace-separated
    string of key names; missing keys are silently skipped.
    """
    if isinstance(keys, string_types):
        keys = keys.split()
    return dict((k, d[k]) for k in keys if k in d)
def read_exports(stream):
    """Read an exports mapping from a (byte) stream.

    The payload may be JSON of the form {"exports": {group: {name: spec}}}
    or a legacy INI layout with one section per group. Each spec string is
    parsed into an ExportEntry via get_export_entry.

    Returns a dict mapping group name -> {entry name: ExportEntry}.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        data = json.load(stream)
        result = data['exports']
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # Not JSON (or malformed): re-read the same text as an INI file.
        stream.seek(0, 0)
        cp = configparser.ConfigParser()
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            # Older Pythons only provide the deprecated readfp.
            cp.readfp(stream)
        result = {}
        for key in cp.sections():
            result[key] = entries = {}
            for name, value in cp.items(key):
                s = '%s = %s' % (name, value)
                entry = get_export_entry(s)
                assert entry is not None
                #entry.dist = self
                entries[name] = entry
        return result
def write_exports(exports, stream):
    """Write an exports mapping to a (byte) stream in INI format.

    exports is a dict mapping group name -> {entry name: ExportEntry};
    each entry is serialised as 'name = prefix[:suffix] [flags]'.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager: create a temporary directory, remove it on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        # Always clean up, even if the body raised.
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Context manager: switch the working directory to d, restoring the
    previous working directory on exit."""
    saved = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(saved)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Context manager: temporarily set the global socket default timeout,
    restoring the previous value on exit."""
    previous = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(previous)
class cached_property(object):
    """Descriptor computing a value once and caching it on the instance.

    The wrapped function runs on first attribute access; its result is
    then stored on the instance under the same name, shadowing this
    (non-data) descriptor for all later lookups.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        if obj is None:
            # Accessed on the class itself: expose the descriptor.
            return self
        result = self.func(obj)
        # Cache on the instance so future accesses bypass the descriptor.
        object.__setattr__(obj, self.func.__name__, result)
        return result
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator. Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem. Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/':
        # Native separator already matches Unix style; nothing to do.
        return pathname
    if not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # Drop any '.' components before re-joining with the native separator.
    parts = [piece for piece in pathname.split('/') if piece != os.curdir]
    if not parts:
        return os.curdir
    return os.path.join(*parts)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
    def rollback(self):
        """Undo recorded changes: delete written files, then remove created
        directories (which must be empty apart from __pycache__), and reset
        the record.
        """
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d) # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """
    Resolve a (module name, dotted attribute path) pair to an object.

    :param module_name: An importable module name (may itself be dotted).
    :param dotted_path: A dotted attribute path within the module, or None
                        to return the module itself.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        # __import__('a.b') returns the top-level package 'a', not 'a.b';
        # fetch the actual submodule from sys.modules after importing.
        __import__(module_name)
        mod = sys.modules[module_name]
    result = mod
    if dotted_path is not None:
        for part in dotted_path.split('.'):
            result = getattr(result, part)
    return result
class ExportEntry(object):
    """A single export entry of the form ``name = prefix:suffix [flags]``."""

    def __init__(self, name, prefix, suffix, flags):
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags

    @cached_property
    def value(self):
        """The object this entry resolves to (computed lazily, once)."""
        return resolve(self.prefix, self.suffix)

    def __repr__(self):
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        if not isinstance(other, ExportEntry):
            return False
        return (self.name == other.name and
                self.prefix == other.prefix and
                self.suffix == other.suffix and
                self.flags == other.flags)

    __hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
    """Parse ``name = prefix:suffix [flags]`` into an ExportEntry.

    Returns None for a non-matching specification without brackets;
    raises DistlibException for malformed bracketed/colon forms.
    """
    m = ENTRY_RE.search(specification)
    if not m:
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
        return None
    groups = m.groupdict()
    path = groups['callable']
    ncolons = path.count(':')
    if ncolons == 0:
        prefix, suffix = path, None
    elif ncolons == 1:
        prefix, suffix = path.split(':')
    else:
        raise DistlibException('Invalid specification '
                               '%r' % specification)
    flags = groups['flags']
    if flags is None:
        # brackets without a parseable flags group are malformed
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
        flags = []
    else:
        flags = [f.strip() for f in flags.split(',')]
    return ExportEntry(groups['name'], prefix, suffix, flags)
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.

    Falls back to a fresh temporary directory when the preferred parent
    is unusable (missing and uncreatable, or not writable).
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        # NOTE(review): '$localappdata' relies on Windows env-var lookups
        # being case-insensitive — confirm on the supported platforms.
        result = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        result = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
            usable = False
    if not usable:
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return os.path.join(result, suffix)
def path_to_cache_dir(path):
    """
    Map an absolute path to a flat directory name for use in a cache.

    Any ':' in a Windows drive becomes '---', every ``os.sep`` becomes
    '--', and '.cache' is appended.
    """
    drive, rest = os.path.splitdrive(os.path.abspath(path))
    return drive.replace(':', '---') + rest.replace(os.sep, '--') + '.cache'
def ensure_slash(s):
    """Return *s* with a trailing '/' appended if not already present."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """
    Split a URL network location into (username, password, host).

    Components that are absent are returned as None.
    """
    username = password = None
    if '@' in netloc:
        # rsplit: the host part cannot contain '@', but the userinfo part
        # (e.g. an email-style username) can.
        prefix, netloc = netloc.rsplit('@', 1)
        if ':' not in prefix:
            username = prefix
        else:
            username, password = prefix.split(':', 1)
    return username, password, netloc
def get_process_umask():
    """Return the current process umask without permanently changing it."""
    # The only way to read the umask is to set it; restore it immediately.
    current = os.umask(0o22)
    os.umask(current)
    return current
def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    NOTE(review): the trailing assert means an *empty* sequence raises
    AssertionError rather than returning a result — confirm callers never
    pass an empty sequence.
    """
    result = True
    i = None
    for i, s in enumerate(seq):
        if not isinstance(s, string_types):
            result = False
            break
    assert i is not None
    return result
# '<name>-<version>' pattern for dist file names (case-insensitive).
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
# Trailing '-pyX.Y' python-version marker.
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')


def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    pyver = None
    # Strip a trailing '-pyX.Y' marker first, remembering the version.
    pv = PYTHON_VERSION.search(filename)
    if pv:
        pyver = pv.group(1)
        filename = filename[:pv.start()]
    # Prefer an exact project-name prefix match when one is supplied.
    if project_name and len(filename) > len(project_name) + 1:
        pm = re.match(re.escape(project_name) + r'\b', filename)
        if pm:
            split_at = pm.end()
            return filename[:split_at], filename[split_at + 1:], pyver
    gm = PROJECT_NAME_AND_VERSION.match(filename)
    if gm:
        return gm.group(1), gm.group(3), pyver
    return None
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')


def parse_name_and_version(p):
    """
    Split a 'name (version)' string (e.g. a Provides-Dist value) into a
    (lower-cased name, version) tuple.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    :raises DistlibException: if *p* is not of that form.
    """
    m = NAME_VERSION_RE.match(p)
    if not m:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = m.groupdict()
    return groups['name'].strip().lower(), groups['ver']
def get_extras(requested, available):
    """
    Resolve a requested set of extras against the available ones.

    '*' selects every available extra, and '-name' deselects one.  An
    undeclared extra triggers a warning; an undeclared '-name' is ignored,
    while an undeclared plain name is still included in the result.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.remove('*')
        result |= available
    for entry in requested:
        if entry == '-':
            # a bare '-' is kept literally
            result.add(entry)
        elif entry.startswith('-'):
            unwanted = entry[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            result.discard(unwanted)
        else:
            if entry not in available:
                logger.warning('undeclared extra: %s' % entry)
            result.add(entry)
    return result
#
# Extended metadata functionality
#
def _get_external_data(url):
    """Fetch *url* and parse it as JSON, returning {} on any failure.

    All errors (network, unexpected content type, malformed JSON) are
    logged rather than raised, so callers always receive a dict.
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        if headers.get('Content-Type') != 'application/json':
            logger.debug('Unexpected response for JSON request')
        else:
            reader = codecs.getreader('utf-8')(resp)
            #data = reader.read().decode('utf-8')
            #result = json.loads(data)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
def get_project_data(name):
    """Fetch the external JSON metadata for project *name*."""
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/project.json' % (name[0].upper(), name))
    return _get_external_data(url)
def get_package_data(name, version):
    """Fetch the external JSON metadata for *name* at *version*."""
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/package-%s.json' % (name[0].upper(), name, version))
    result = _get_external_data(url)
    return result
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries. This class was moved from resources to here because it
    could be used by other modules, e.g. the wheel module.
    """

    def __init__(self, base):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located.
        """
        # we use 'isdir' instead of 'exists', because we want to
        # fail if there's a file with that name
        if not os.path.isdir(base):
            os.makedirs(base)
        # warn if any group/other permission bits are set: the cache
        # directory should be private to the owning user
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def clear(self):
        """
        Clear the cache.

        :return: The list of entries that could not be removed.
        """
        not_removed = []
        for fn in os.listdir(self.base):
            fn = os.path.join(self.base, fn)
            try:
                if os.path.islink(fn) or os.path.isfile(fn):
                    os.remove(fn)
                elif os.path.isdir(fn):
                    shutil.rmtree(fn)
            except Exception:
                # best-effort: report failures to the caller instead of raising
                not_removed.append(fn)
        return not_removed
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """

    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Register *subscriber* for *event*, appending it (the default) or
        prepending it to any existing subscriber list.
        """
        queue = self._subscribers.setdefault(event, deque())
        if append:
            queue.append(subscriber)
        else:
            queue.appendleft(subscriber)

    def remove(self, event, subscriber):
        """
        Remove *subscriber* from *event*'s list.

        :raises ValueError: if the event has no subscribers, or the
            subscriber is not registered for it.
        """
        try:
            queue = self._subscribers[event]
        except KeyError:
            raise ValueError('No subscribers: %r' % event)
        queue.remove(subscriber)

    def get_subscribers(self, event):
        """Return an iterator over the subscribers for *event*."""
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Call every subscriber of *event* with (event, *args, **kwargs) and
        return the list of their return values.  A subscriber that raises
        contributes None; the exception is logged, not propagated.
        """
        result = []
        for subscriber in self.get_subscribers(event):
            try:
                value = subscriber(event, *args, **kwargs)
            except Exception:
                logger.exception('Exception during event publication')
                value = None
            result.append(value)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, result)
        return result
#
# Simple sequencing
#
class Sequencer(object):
    """Records pred -> succ dependency edges between steps and derives
    orderings and strongly connected components from them."""

    def __init__(self):
        self._preds = {}    # step -> set of steps that must precede it
        self._succs = {}    # step -> set of steps that must follow it
        self._nodes = set() # nodes with no preds/succs

    def add_node(self, node):
        """Register *node* even though it participates in no edges."""
        self._nodes.add(node)

    def remove_node(self, node, edges=False):
        """Forget *node*; with ``edges=True`` also drop all its edges."""
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]

    def add(self, pred, succ):
        """Record that *pred* must come before *succ*."""
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)

    def remove(self, pred, succ):
        """Remove the *pred* -> *succ* edge.

        :raises ValueError: if the edge is not present.
        """
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:
            raise ValueError('%r not a successor of %r' % (succ, pred))

    def is_step(self, step):
        """Whether *step* is known (as an edge endpoint or a lone node)."""
        return (step in self._preds or step in self._succs or
                step in self._nodes)

    def get_steps(self, final):
        """Return the steps leading to (and including) *final*, ordered so
        that every step appears after its prerequisites.

        :raises ValueError: if *final* is unknown.
        """
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)

    @property
    def strong_connections(self):
        """The strongly connected components of the dependency graph, as a
        list of tuples, computed with Tarjan's algorithm."""
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []

        graph = self._succs

        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)

            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])

            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []

                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)

        for node in graph:
            if node not in lowlinks:
                strongconnect(node)

        return result

    @property
    def dot(self):
        """A Graphviz 'dot' source representation of the graph."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append('  %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append('  %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
# Archive suffixes understood by unarchive(); '.whl' is treated as zip.
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
                      '.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
    """Extract a zip/whl/tar/tgz/tbz archive into *dest_dir*.

    :param format: One of 'zip', 'tgz', 'tbz', 'tar'; inferred from the
                   file name when None.
    :param check: When True, reject member paths that would land outside
                  *dest_dir* (path traversal protection).
    :raises ValueError: for an unknown format or an unsafe member path.
    """
    def check_path(path):
        # Guard against path traversal: the resolved member path must sit
        # strictly inside dest_dir.
        # NOTE(review): a member resolving to dest_dir itself (e.g. '.')
        # makes p[plen] raise IndexError rather than ValueError — confirm.
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        if not p.startswith(dest_dir) or p[plen] != os.sep:
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    plen = len(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:
            raise ValueError('Unknown format for %r' % archive_filename)
    # Bug fix: derive the tar open mode from the format itself, so that an
    # explicitly passed format no longer leaves 'mode' unbound.
    if format == 'tgz':
        mode = 'r:gz'
    elif format == 'tbz':
        mode = 'r:bz2'
    else:
        mode = 'r'
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            archive = tarfile.open(archive_filename, mode)
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """zip a directory tree into a BytesIO object"""
    buf = io.BytesIO()
    prefix_len = len(directory)
    with ZipFile(buf, "w") as zf:
        for root, dirs, files in os.walk(directory):
            # archive names are the paths relative to *directory*
            rel = root[prefix_len:]
            for fname in files:
                zf.write(os.path.join(root, fname),
                         os.path.join(rel, fname))
    return buf
#
# Simple progress bar
#
# Unit prefixes used by Progress.speed (scaled by powers of 1000)
UNITS = ('', 'K', 'M', 'G','T','P')


class Progress(object):
    """Progress tracker holding current/min/max values and elapsed time,
    with formatted percentage / ETA / speed properties."""

    unknown = 'UNKNOWN'     # reported as 'maximum' when max is open-ended

    def __init__(self, minval=0, maxval=100):
        # maxval is None for an open-ended (unknown total) operation
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None     # set on the first update()
        self.elapsed = 0
        self.done = False

    def update(self, curval):
        """Set the current value and refresh the elapsed-time clock."""
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started

    def increment(self, incr):
        """Advance the current value by *incr* (must be non-negative)."""
        assert incr >= 0
        self.update(self.cur + incr)

    def start(self):
        """Reset to the minimum value; returns self for chaining."""
        self.update(self.min)
        return self

    def stop(self):
        """Mark the operation finished (jumping to max when it is known)."""
        if self.max is not None:
            self.update(self.max)
        self.done = True

    @property
    def maximum(self):
        """The maximum value, or 'UNKNOWN' when open-ended."""
        return self.unknown if self.max is None else self.max

    @property
    def percentage(self):
        """Completion as a ' NN %' string (' ?? %' when open-ended)."""
        if self.done:
            result = '100 %'
        elif self.max is None:
            result = ' ?? %'
        else:
            v = 100.0 * (self.cur - self.min) / (self.max - self.min)
            result = '%3d %%' % v
        return result

    def format_duration(self, duration):
        """Format *duration* seconds as HH:MM:SS, or '??:??:??' if unknown.

        NOTE(review): 'and' binds tighter than 'or', so the condition is
        ((duration <= 0 and max is None) or cur == min) — confirm that
        grouping is intended.
        """
        if (duration <= 0) and self.max is None or self.cur == self.min:
            result = '??:??:??'
        #elif duration < 1:
        #    result = '--:--:--'
        else:
            result = time.strftime('%H:%M:%S', time.gmtime(duration))
        return result

    @property
    def ETA(self):
        """'ETA : HH:MM:SS' while running, 'Done: HH:MM:SS' when done."""
        if self.done:
            prefix = 'Done'
            t = self.elapsed
            #import pdb; pdb.set_trace()
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                #import pdb; pdb.set_trace()
                # remaining = elapsed * (total/done - 1), i.e. a constant
                # average rate is assumed
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))

    @property
    def speed(self):
        """Average rate as a human-readable '<n> <unit>B/s' string."""
        if self.elapsed == 0:
            result = 0.0
        else:
            result = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if result < 1000:
                break
            result /= 1000.0
        return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
# '{a,b,c}' alternative sets in extended glob patterns
RICH_GLOB = re.compile(r'\{([^}]*)\}')
# '**' adjacent to anything other than a separator (must stand alone)
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
# an unmatched '{' or '}' in the pattern
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    if _CHECK_RECURSIVE_GLOB.search(path_glob):
        raise ValueError(
            """invalid glob %r: recursive glob "**" must be used alone"""
            % path_glob)
    if _CHECK_MISMATCH_SET.search(path_glob):
        raise ValueError(
            """invalid glob %r: mismatching set marker '{' or '}'"""
            % path_glob)
    return _iglob(path_glob)
def _iglob(path_glob):
    """Recursive worker for iglob(); yields matching paths.

    (Locals renamed: the original shadowed the builtins ``set`` and
    ``dir``.)
    """
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        # A '{a,b,c}' alternative set: expand each option and recurse.
        assert len(rich_path_glob) == 3, rich_path_glob
        prefix, alternatives, suffix = rich_path_glob
        for alternative in alternatives.split(','):
            for path in _iglob(''.join((prefix, alternative, suffix))):
                yield path
    elif '**' not in path_glob:
        # plain pattern: defer to the standard glob
        for item in std_iglob(path_glob):
            yield item
    else:
        prefix, radical = path_glob.split('**', 1)
        if prefix == '':
            prefix = '.'
        if radical == '':
            radical = '*'
        else:
            # we support both '/' and '\\' after the '**'
            radical = radical.lstrip('/')
            radical = radical.lstrip('\\')
        for walk_root, _dirs, _files in os.walk(prefix):
            walk_root = os.path.normpath(walk_root)
            for fn in _iglob(os.path.join(walk_root, radical)):
                yield fn
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
    """HTTPS connection that can verify the server certificate against a
    CA bundle and check that it matches the requested host."""

    ca_certs = None # set this to the path to the certs file (.pem)
    check_domain = True # only used if ca_certs is not None

    # noinspection PyPropertyAccess
    def connect(self):
        """Open the TCP connection, then wrap it in TLS; verify the peer
        certificate (and host name, when check_domain) if ca_certs is set.
        """
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if getattr(self, '_tunnel_host', False):
            self.sock = sock
            self._tunnel()
        if not hasattr(ssl, 'SSLContext'):
            # For 2.x
            if self.ca_certs:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            # NOTE(review): ssl.wrap_socket was removed in Python 3.12;
            # this branch is only reachable on old interpreters.
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        cert_reqs=cert_reqs,
                                        ssl_version=ssl.PROTOCOL_SSLv23,
                                        ca_certs=self.ca_certs)
        else:
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            context.options |= ssl.OP_NO_SSLv2
            if self.cert_file:
                context.load_cert_chain(self.cert_file, self.key_file)
            kwargs = {}
            if self.ca_certs:
                context.verify_mode = ssl.CERT_REQUIRED
                context.load_verify_locations(cafile=self.ca_certs)
            if getattr(ssl, 'HAS_SNI', False):
                # pass the server name through for SNI-capable stacks
                kwargs['server_hostname'] = self.host
            self.sock = context.wrap_socket(sock, **kwargs)
            if self.ca_certs and self.check_domain:
                try:
                    match_hostname(self.sock.getpeercert(), self.host)
                    logger.debug('Host verified: %s', self.host)
                except CertificateError:
                    # tear the connection down before propagating
                    self.sock.shutdown(socket.SHUT_RDWR)
                    self.sock.close()
                    raise
class HTTPSHandler(BaseHTTPSHandler):
    """urllib-style HTTPS handler that creates verifying HTTPSConnections
    configured with a CA bundle and optional host-name checking."""

    def __init__(self, ca_certs, check_domain=True):
        BaseHTTPSHandler.__init__(self)
        self.ca_certs = ca_certs
        self.check_domain = check_domain

    def _conn_maker(self, *args, **kwargs):
        """
        This is called to create a connection instance. Normally you'd
        pass a connection class to do_open, but it doesn't actually check for
        a class, and just expects a callable. As long as we behave just as a
        constructor would have, we should be OK. If it ever changes so that
        we *must* pass a class, we'll create an UnsafeHTTPSConnection class
        which just sets check_domain to False in the class definition, and
        choose which one to pass to do_open.
        """
        result = HTTPSConnection(*args, **kwargs)
        if self.ca_certs:
            result.ca_certs = self.ca_certs
            result.check_domain = self.check_domain
        return result

    def https_open(self, req):
        """Open *req*, converting certificate-verification failures into a
        more informative CertificateError."""
        try:
            return self.do_open(self._conn_maker, req)
        except URLError as e:
            if 'certificate verify failed' in str(e.reason):
                raise CertificateError('Unable to verify server certificate '
                                       'for %s' % req.host)
            else:
                raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
    """Handler that refuses plain HTTP, forcing all traffic over HTTPS."""

    def http_open(self, req):
        raise URLError('Unexpected HTTP request on what should be a secure '
                       'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]

if _ver_info == (2, 6):
    # Python 2.6 compatibility shims used by the XML-RPC transports below;
    # they feed a pre-configured connection (with timeout) into _setup.
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0: # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))

    class HTTPS(httplib.HTTPS):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0: # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
    """XML-RPC HTTP transport carrying a per-instance timeout."""

    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        """Create (or reuse a cached) HTTP connection to *host*.

        NOTE(review): on the non-2.6 branch the timeout is not passed to
        HTTPConnection — confirm whether that is intentional.
        """
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            # reuse one cached connection per host
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPConnection(h)
            result = self._connection[1]
        return result
class SafeTransport(xmlrpclib.SafeTransport):
    """XML-RPC HTTPS transport carrying a per-instance timeout."""

    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.SafeTransport.__init__(self, use_datetime)

    def make_connection(self, host):
        """Create (or reuse a cached) HTTPS connection to *host*."""
        h, eh, kwargs = self.get_host_info(host)
        if not kwargs:
            kwargs = {}
        kwargs['timeout'] = self.timeout
        if _ver_info == (2, 6):
            # NOTE(review): passes the raw 'host' here where Transport
            # passes the parsed 'h' — confirm this asymmetry is intended.
            result = HTTPS(host, None, **kwargs)
        else:
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPSConnection(h, None,
                                                                 **kwargs)
            result = self._connection[1]
        return result
class ServerProxy(xmlrpclib.ServerProxy):
    """ServerProxy accepting an extra ``timeout`` keyword, wired through
    the timeout-aware Transport/SafeTransport classes."""

    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout
        # is specified
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            if scheme == 'https':
                tcls = SafeTransport
            else:
                tcls = Transport
            kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
    """Shared csv dialect options plus context-manager support for the
    reader/writer classes."""

    # Native str values: the 2.x csv API won't take Unicode here.
    defaults = {
        'delimiter': str(','),
        'quotechar': str('"'),
        'lineterminator': str('\n')
    }

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.stream.close()
class CSVReader(CSVBase):
    """Iterator over csv rows from a byte stream or a file path, always
    yielding text cells."""

    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # the 3.x csv module needs a text stream
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        row = next(self.reader)
        if sys.version_info[0] < 3:
            # decode byte cells to unicode on 2.x
            row = [cell if isinstance(cell, text_type)
                   else cell.decode('utf-8') for cell in row]
        return row

    __next__ = next
class CSVWriter(CSVBase):
    """Writes csv rows to a file, encoding text cells to UTF-8 on 2.x."""

    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        if sys.version_info[0] < 3:
            # the 2.x csv module wants encoded bytes
            row = [cell.encode('utf-8') if isinstance(cell, text_type)
                   else cell for cell in row]
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """Dict-driven object configurator supporting '()' custom-instantiation
    markers and an 'inc://' include protocol for external JSON files."""

    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'

    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # base directory used to resolve relative inc:// paths
        self.base = base or os.getcwd()

    def configure_custom(self, config):
        """Instantiate the object described by *config*: '()' names the
        callable, '[]' lists positional args, '.' maps post-construction
        attributes; all remaining valid-identifier keys become kwargs.
        """
        def convert(o):
            # recursively convert nested containers and '()' specs
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result

        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result

    def __getitem__(self, key):
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            # configure lazily on first access, then cache the instance
            self.config[key] = result = self.configure_custom(result)
        return result

    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
    """Mixin that runs subprocesses while streaming their output to a
    progress callable or to sys.stderr."""

    def __init__(self, verbose=False, progress=None):
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        for line in iter(stream.readline, b''):
            if progress is not None:
                progress(line, context)
            else:
                # '.' per line unless verbose, in which case echo the line
                text = line.decode('utf-8') if verbose else '.'
                sys.stderr.write(text)
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """Run *cmd*, pumping stdout/stderr through reader threads, and
        return the finished Popen object."""
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, **kwargs)
        pumps = [
            threading.Thread(target=self.reader, args=(p.stdout, 'stdout')),
            threading.Thread(target=self.reader, args=(p.stderr, 'stderr')),
        ]
        for t in pumps:
            t.start()
        p.wait()
        for t in pumps:
            t.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return p
{
"DELETE": {
"summary": "Deletes a library and its functions.",
"complexity": "O(1)",
"group": "scripting",
"since": "7.0.0",
"arity": 3,
"container": "FUNCTION",
"function": "functionDeleteCommand",
"command_flags": [
"NOSCRIPT",
"WRITE"
],
"acl_categories": [
"SCRIPTING"
],
"command_tips": [
"REQUEST_POLICY:ALL_SHARDS",
"RESPONSE_POLICY:ALL_SUCCEEDED"
],
"arguments": [
{
"name": "library-name",
"type": "string"
}
],
"reply_schema": {
"const": "OK"
}
}
} | json | github | https://github.com/redis/redis | src/commands/function-delete.json |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ZonalStatistics.py
---------------------
Date : August 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'August 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import numpy

# SciPy is optional; narrowed from a bare 'except:' so that only a failed
# import (and not e.g. KeyboardInterrupt) disables the SciPy-based path.
try:
    from scipy.stats.mstats import mode
    hasSciPy = True
except ImportError:
    hasSciPy = False
from osgeo import gdal, ogr, osr
from qgis.core import QgsRectangle, QgsGeometry, QgsFeature
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputVector
from processing.tools.raster import mapToPixel
from processing.tools import dataobjects, vector
class ZonalStatistics(GeoAlgorithm):
INPUT_RASTER = 'INPUT_RASTER'
RASTER_BAND = 'RASTER_BAND'
INPUT_VECTOR = 'INPUT_VECTOR'
COLUMN_PREFIX = 'COLUMN_PREFIX'
GLOBAL_EXTENT = 'GLOBAL_EXTENT'
OUTPUT_LAYER = 'OUTPUT_LAYER'
    def defineCharacteristics(self):
        """Declare the algorithm's name/group and its inputs and output."""
        self.name, self.i18n_name = self.trAlgorithm('Zonal Statistics')
        self.group, self.i18n_group = self.trAlgorithm('Raster tools')

        self.addParameter(ParameterRaster(self.INPUT_RASTER,
                                          self.tr('Raster layer')))
        self.addParameter(ParameterNumber(self.RASTER_BAND,
                                          self.tr('Raster band'), 1, 999, 1))
        self.addParameter(ParameterVector(self.INPUT_VECTOR,
                                          self.tr('Vector layer containing zones'),
                                          [ParameterVector.VECTOR_TYPE_POLYGON]))
        self.addParameter(ParameterString(self.COLUMN_PREFIX,
                                          self.tr('Output column prefix'), '_'))
        self.addParameter(ParameterBoolean(self.GLOBAL_EXTENT,
                                           self.tr('Load whole raster in memory')))
        self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Zonal statistics')))
def processAlgorithm(self, progress):
""" Based on code by Matthew Perry
https://gist.github.com/perrygeo/5667173
"""
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT_VECTOR))
rasterPath = unicode(self.getParameterValue(self.INPUT_RASTER))
bandNumber = self.getParameterValue(self.RASTER_BAND)
columnPrefix = self.getParameterValue(self.COLUMN_PREFIX)
useGlobalExtent = self.getParameterValue(self.GLOBAL_EXTENT)
rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
geoTransform = rasterDS.GetGeoTransform()
rasterBand = rasterDS.GetRasterBand(bandNumber)
noData = rasterBand.GetNoDataValue()
cellXSize = abs(geoTransform[1])
cellYSize = abs(geoTransform[5])
rasterXSize = rasterDS.RasterXSize
rasterYSize = rasterDS.RasterYSize
rasterBBox = QgsRectangle(geoTransform[0], geoTransform[3] - cellYSize
* rasterYSize, geoTransform[0] + cellXSize
* rasterXSize, geoTransform[3])
rasterGeom = QgsGeometry.fromRect(rasterBBox)
crs = osr.SpatialReference()
crs.ImportFromProj4(str(layer.crs().toProj4()))
if useGlobalExtent:
xMin = rasterBBox.xMinimum()
xMax = rasterBBox.xMaximum()
yMin = rasterBBox.yMinimum()
yMax = rasterBBox.yMaximum()
(startColumn, startRow) = mapToPixel(xMin, yMax, geoTransform)
(endColumn, endRow) = mapToPixel(xMax, yMin, geoTransform)
width = endColumn - startColumn
height = endRow - startRow
srcOffset = (startColumn, startRow, width, height)
srcArray = rasterBand.ReadAsArray(*srcOffset)
srcArray = srcArray * rasterBand.GetScale() + rasterBand.GetOffset()
newGeoTransform = (
geoTransform[0] + srcOffset[0] * geoTransform[1],
geoTransform[1],
0.0,
geoTransform[3] + srcOffset[1] * geoTransform[5],
0.0,
geoTransform[5],
)
memVectorDriver = ogr.GetDriverByName('Memory')
memRasterDriver = gdal.GetDriverByName('MEM')
fields = layer.pendingFields()
(idxMin, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'min', 21, 6)
(idxMax, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'max', 21, 6)
(idxSum, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'sum', 21, 6)
(idxCount, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'count', 21, 6)
(idxMean, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'mean', 21, 6)
(idxStd, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'std', 21, 6)
(idxUnique, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'unique', 21, 6)
(idxRange, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'range', 21, 6)
(idxVar, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'var', 21, 6)
(idxMedian, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'median', 21, 6)
if hasSciPy:
(idxMode, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'mode', 21, 6)
writer = self.getOutputFromName(self.OUTPUT_LAYER).getVectorWriter(
fields.toList(), layer.dataProvider().geometryType(), layer.crs())
outFeat = QgsFeature()
outFeat.initAttributes(len(fields))
outFeat.setFields(fields)
features = vector.features(layer)
total = 100.0 / len(features)
for current, f in enumerate(features):
geom = f.geometry()
intersectedGeom = rasterGeom.intersection(geom)
ogrGeom = ogr.CreateGeometryFromWkt(intersectedGeom.exportToWkt())
if not useGlobalExtent:
bbox = intersectedGeom.boundingBox()
xMin = bbox.xMinimum()
xMax = bbox.xMaximum()
yMin = bbox.yMinimum()
yMax = bbox.yMaximum()
(startColumn, startRow) = mapToPixel(xMin, yMax, geoTransform)
(endColumn, endRow) = mapToPixel(xMax, yMin, geoTransform)
width = endColumn - startColumn
height = endRow - startRow
if width == 0 or height == 0:
continue
srcOffset = (startColumn, startRow, width, height)
srcArray = rasterBand.ReadAsArray(*srcOffset)
srcArray = srcArray * rasterBand.GetScale() + rasterBand.GetOffset()
newGeoTransform = (
geoTransform[0] + srcOffset[0] * geoTransform[1],
geoTransform[1],
0.0,
geoTransform[3] + srcOffset[1] * geoTransform[5],
0.0,
geoTransform[5],
)
# Create a temporary vector layer in memory
memVDS = memVectorDriver.CreateDataSource('out')
memLayer = memVDS.CreateLayer('poly', crs, ogr.wkbPolygon)
ft = ogr.Feature(memLayer.GetLayerDefn())
ft.SetGeometry(ogrGeom)
memLayer.CreateFeature(ft)
ft.Destroy()
# Rasterize it
rasterizedDS = memRasterDriver.Create('', srcOffset[2],
srcOffset[3], 1, gdal.GDT_Byte)
rasterizedDS.SetGeoTransform(newGeoTransform)
gdal.RasterizeLayer(rasterizedDS, [1], memLayer, burn_values=[1])
rasterizedArray = rasterizedDS.ReadAsArray()
srcArray = numpy.nan_to_num(srcArray)
masked = numpy.ma.MaskedArray(srcArray,
mask=numpy.logical_or(srcArray == noData,
numpy.logical_not(rasterizedArray)))
outFeat.setGeometry(geom)
attrs = f.attributes()
attrs.insert(idxMin, float(masked.min()))
attrs.insert(idxMax, float(masked.max()))
attrs.insert(idxSum, float(masked.sum()))
attrs.insert(idxCount, int(masked.count()))
attrs.insert(idxMean, float(masked.mean()))
attrs.insert(idxStd, float(masked.std()))
attrs.insert(idxUnique, numpy.unique(masked.compressed()).size)
attrs.insert(idxRange, float(masked.max()) - float(masked.min()))
attrs.insert(idxVar, float(masked.var()))
attrs.insert(idxMedian, float(numpy.ma.median(masked)))
if hasSciPy:
attrs.insert(idxMode, float(mode(masked, axis=None)[0][0]))
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
memVDS = None
rasterizedDS = None
progress.setPercentage(int(current * total))
rasterDS = None
del writer | unknown | codeparrot/codeparrot-clean | ||
// NOTE(review): auto-generated TypeScript compiler test baseline — the
// original .ts source followed by the compiler's emitted .js output.  Do
// not hand-edit; the test harness regenerates this file.
//// [tests/cases/conformance/types/typeRelationships/assignmentCompatibility/assignmentCompatWithStringIndexer.ts] ////
//// [assignmentCompatWithStringIndexer.ts]
// index signatures must be compatible in assignments
interface Base { foo: string; }
interface Derived extends Base { bar: string; }
interface Derived2 extends Derived { baz: string; }
class A {
    [x: string]: Base;
}
declare var a: A;
declare var b: { [x: string]: Derived; };
a = b; // ok
b = a; // error
declare var b2: { [x: string]: Derived2; };
a = b2; // ok
b2 = a; // error
namespace Generics {
    class A<T extends Base> {
        [x: string]: T;
    }
    class B extends A<Base> {
        [x: string]: Derived; // ok
    }
    declare var b1: { [x: string]: Derived; };
    declare var a1: A<Base>;
    a1 = b1; // ok
    b1 = a1; // error
    class B2 extends A<Base> {
        [x: string]: Derived2; // ok
    }
    declare var b2: { [x: string]: Derived2; };
    a1 = b2; // ok
    b2 = a1; // error
    function foo<T extends Base>() {
        var b3: { [x: string]: Derived; };
        var a3: A<T>;
        a3 = b3; // error
        b3 = a3; // error
        var b4: { [x: string]: Derived2; };
        a3 = b4; // error
        b4 = a3; // error
    }
}
//// [assignmentCompatWithStringIndexer.js]
"use strict";
// index signatures must be compatible in assignments
class A {
}
a = b; // ok
b = a; // error
a = b2; // ok
b2 = a; // error
var Generics;
(function (Generics) {
    class A {
    }
    class B extends A {
    }
    a1 = b1; // ok
    b1 = a1; // error
    class B2 extends A {
    }
    a1 = b2; // ok
    b2 = a1; // error
    function foo() {
        var b3;
        var a3;
        a3 = b3; // error
        b3 = a3; // error
        var b4;
        a3 = b4; // error
        b4 = a3; // error
    }
})(Generics || (Generics = {}));
# Copyright (C) 2016-2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import assert_that, contains, has_entry, has_entries, instance_of
from ycmd.tests.python import SharedYcmd
from ycmd.tests.test_utils import BuildRequest
@SharedYcmd
def DebugInfo_test( app ):
  request_data = BuildRequest( filetype = 'python' )

  # Each debug-info item is a key/value pair whose value is a free-form
  # string; only the keys and the value types are stable enough to assert on.
  expected_keys = [
    'Python interpreter',
    'Python path',
    'Python version',
    'Jedi version',
    'Parso version',
  ]
  item_matchers = [
    has_entries( { 'key': key, 'value': instance_of( str ) } )
    for key in expected_keys
  ]

  assert_that(
    app.post_json( '/debug_info', request_data ).json,
    has_entry( 'completer', has_entries( {
      'name': 'Python',
      'items': contains( *item_matchers )
    } ) )
  )
# -*- test-case-name: twisted.web.test.test_newclient -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An U{HTTP 1.1<http://www.w3.org/Protocols/rfc2616/rfc2616.html>} client.
The way to use the functionality provided by this module is to:
- Connect a L{HTTP11ClientProtocol} to an HTTP server
- Create a L{Request} with the appropriate data
- Pass the request to L{HTTP11ClientProtocol.request}
- The returned Deferred will fire with a L{Response} object
- Create a L{IProtocol} provider which can handle the response body
- Connect it to the response with L{Response.deliverBody}
- When the protocol's C{connectionLost} method is called, the response is
complete. See L{Response.deliverBody} for details.
Various other classes in this module support this usage:
- HTTPParser is the basic HTTP parser. It can handle the parts of HTTP which
are symmetric between requests and responses.
- HTTPClientParser extends HTTPParser to handle response-specific parts of
HTTP. One instance is created for each request to parse the corresponding
response.
"""
# Python 2: make classes below that are defined without an explicit base
# class (e.g. Request, Response) new-style.
__metaclass__ = type
from zope.interface import implements
from twisted.python import log
from twisted.python.reflect import fullyQualifiedName
from twisted.python.failure import Failure
from twisted.python.compat import set
from twisted.internet.interfaces import IConsumer, IPushProducer
from twisted.internet.error import ConnectionDone
from twisted.internet.defer import Deferred, succeed, fail, maybeDeferred
from twisted.internet.protocol import Protocol
from twisted.protocols.basic import LineReceiver
from twisted.web.iweb import UNKNOWN_LENGTH
from twisted.web.http_headers import Headers
from twisted.web.http import NO_CONTENT, NOT_MODIFIED
from twisted.web.http import _DataLoss, PotentialDataLoss
from twisted.web.http import _IdentityTransferDecoder, _ChunkedTransferDecoder
# States HTTPParser can be in
STATUS = 'STATUS'  # waiting for the first (status/request) line of a message
HEADER = 'HEADER'  # accumulating message headers
BODY = 'BODY'      # delivering raw bytes to the body decoder
DONE = 'DONE'      # the entire message has been parsed
class BadHeaders(Exception):
    """
    Headers passed to L{Request} were in some way invalid.

    For example, raised when anything other than exactly one I{Host} header
    is supplied.
    """
class ExcessWrite(Exception):
    """
    The body L{IBodyProducer} for a request tried to write data after
    indicating it had finished writing data.

    See L{LengthEnforcingConsumer.write}.
    """
class ParseError(Exception):
    """
    Some received data could not be parsed.

    @ivar data: The string which could not be parsed.
    """
    def __init__(self, reason, data):
        # Keep both pieces in args (for the standard Exception repr) and
        # also remember the unparsable string for callers that want it.
        super(ParseError, self).__init__(reason, data)
        self.data = data
class BadResponseVersion(ParseError):
    """
    The version string in a status line was unparsable.

    Inherits C{data} from L{ParseError}: here it is the offending version
    string (see L{HTTPClientParser.parseVersion}).
    """
class _WrapperException(Exception):
"""
L{_WrapperException} is the base exception type for exceptions which
include one or more other exceptions as the low-level causes.
@ivar reasons: A list of exceptions. See subclass documentation for more
details.
"""
def __init__(self, reasons):
Exception.__init__(self, reasons)
self.reasons = reasons
class RequestGenerationFailed(_WrapperException):
    """
    There was an error while creating the bytes which make up a request.

    Compare L{RequestTransmissionFailed}.

    @ivar reasons: A C{list} of one or more L{Failure} instances giving the
        reasons the request generation was considered to have failed.
    """
class RequestTransmissionFailed(_WrapperException):
    """
    There was an error while sending the bytes which make up a request.

    Compare L{RequestGenerationFailed}.

    @ivar reasons: A C{list} of one or more L{Failure} instances giving the
        reasons the request transmission was considered to have failed.
    """
class ConnectionAborted(Exception):
    """
    The connection was explicitly aborted by application code (as opposed to
    being lost because of a network or server failure).
    """
class WrongBodyLength(Exception):
    """
    An L{IBodyProducer} declared the number of bytes it was going to
    produce (via its C{length} attribute) and then produced a different number
    of bytes.

    Raised by L{LengthEnforcingConsumer} for both too few and too many bytes.
    """
class ResponseDone(Exception):
    """
    L{ResponseDone} may be passed to L{IProtocol.connectionLost} on the
    protocol passed to L{Response.deliverBody} and indicates that the entire
    response has been delivered.

    Despite being an exception, this signals successful completion, not an
    error.
    """
class ResponseFailed(_WrapperException):
    """
    L{ResponseFailed} indicates that all of the response to a request was not
    received for some reason.

    See L{HTTPClientParser.connectionLost}, which wraps the connection-loss
    reason in this type.

    @ivar reasons: A C{list} of one or more L{Failure} instances giving the
        reasons the response was considered to have failed.
    """
class RequestNotSent(Exception):
    """
    L{RequestNotSent} indicates that an attempt was made to issue a request but
    for reasons unrelated to the details of the request itself, the request
    could not be sent.  For example, this may indicate that an attempt was made
    to send a request using a protocol which is no longer connected to a
    server.
    """
def _callAppFunction(function):
"""
Call C{function}. If it raises an exception, log it with a minimal
description of the source.
@return: C{None}
"""
try:
function()
except:
log.err(None, "Unexpected exception from %s" % (
fullyQualifiedName(function),))
class HTTPParser(LineReceiver):
    """
    L{HTTPParser} handles the parsing side of HTTP processing. With a suitable
    subclass, it can parse either the client side or the server side of the
    connection.

    @ivar headers: All of the non-connection control message headers yet
        received.

    @ivar state: State indicator for the response parsing state machine.  One
        of C{STATUS}, C{HEADER}, C{BODY}, C{DONE}.

    @ivar _partialHeader: C{None} or a C{list} of the lines of a multiline
        header while that header is being received.
    """
    # NOTE: According to HTTP spec, we're supposed to eat the
    # 'Proxy-Authenticate' and 'Proxy-Authorization' headers also, but that
    # doesn't sound like a good idea to me, because it makes it impossible to
    # have a non-authenticating transparent proxy in front of an authenticating
    # proxy. An authenticating proxy can eat them itself. -jknight
    #
    # Further, quoting
    # http://homepages.tesco.net/J.deBoynePollard/FGA/web-proxy-connection-header.html
    # regarding the 'Proxy-Connection' header:
    #
    #     The Proxy-Connection: header is a mistake in how some web browsers
    #     use HTTP. Its name is the result of a false analogy. It is not a
    #     standard part of the protocol. There is a different standard
    #     protocol mechanism for doing what it does. And its existence
    #     imposes a requirement upon HTTP servers such that no proxy HTTP
    #     server can be standards-conforming in practice.
    #
    # -exarkun

    # Some servers (like http://news.ycombinator.com/) return status lines and
    # HTTP headers delimited by \n instead of \r\n.
    delimiter = '\n'

    # Lower-cased names of headers that control the connection rather than
    # describe the entity; they are routed to connHeaders, not headers.
    CONNECTION_CONTROL_HEADERS = set([
        'content-length', 'connection', 'keep-alive', 'te', 'trailers',
        'transfer-encoding', 'upgrade', 'proxy-connection'])

    def connectionMade(self):
        # Fresh per-connection parser state: empty header collections,
        # waiting for the first line of a new message.
        self.headers = Headers()
        self.connHeaders = Headers()
        self.state = STATUS
        self._partialHeader = None

    def switchToBodyMode(self, decoder):
        """
        Switch to body parsing mode - interpret any more bytes delivered as
        part of the message body and deliver them to the given decoder.
        """
        if self.state == BODY:
            raise RuntimeError("already in body mode")

        self.bodyDecoder = decoder
        self.state = BODY
        self.setRawMode()

    def lineReceived(self, line):
        """
        Handle one line from a response.
        """
        # Handle the normal CR LF case.
        if line[-1:] == '\r':
            line = line[:-1]

        if self.state == STATUS:
            self.statusReceived(line)
            self.state = HEADER
        elif self.state == HEADER:
            if not line or line[0] not in ' \t':
                if self._partialHeader is not None:
                    # A non-continuation line (or the blank line) terminates
                    # the header that was being accumulated; emit it now.
                    header = ''.join(self._partialHeader)
                    name, value = header.split(':', 1)
                    value = value.strip()
                    self.headerReceived(name, value)
                if not line:
                    # Empty line means the header section is over.
                    self.allHeadersReceived()
                else:
                    # Line not beginning with LWS is another header.
                    self._partialHeader = [line]
            else:
                # A line beginning with LWS is a continuation of a header
                # begun on a previous line.
                self._partialHeader.append(line)

    def rawDataReceived(self, data):
        """
        Pass data from the message body to the body decoder object.
        """
        self.bodyDecoder.dataReceived(data)

    def isConnectionControlHeader(self, name):
        """
        Return C{True} if the given lower-cased name is the name of a
        connection control header (rather than an entity header).

        According to RFC 2616, section 14.10, the tokens in the Connection
        header are probably relevant here.  However, I am not sure what the
        practical consequences of either implementing or ignoring that are.
        So I leave it unimplemented for the time being.
        """
        return name in self.CONNECTION_CONTROL_HEADERS

    def statusReceived(self, status):
        """
        Callback invoked whenever the first line of a new message is received.
        Override this.

        @param status: The first line of an HTTP request or response message
            without trailing I{CR LF}.
        @type status: C{str}
        """

    def headerReceived(self, name, value):
        """
        Store the given header in C{self.headers}, or in C{self.connHeaders}
        if it is a connection control header.
        """
        name = name.lower()
        if self.isConnectionControlHeader(name):
            headers = self.connHeaders
        else:
            headers = self.headers
        headers.addRawHeader(name, value)

    def allHeadersReceived(self):
        """
        Callback invoked after the last header is passed to C{headerReceived}.
        Override this to change to the C{BODY} or C{DONE} state.
        """
        self.switchToBodyMode(None)
class HTTPClientParser(HTTPParser):
    """
    An HTTP parser which only handles HTTP responses.

    @ivar request: The request with which the expected response is associated.
    @type request: L{Request}

    @ivar NO_BODY_CODES: A C{set} of response codes which B{MUST NOT} have a
        body.

    @ivar finisher: A callable to invoke when this response is fully parsed.

    @ivar _responseDeferred: A L{Deferred} which will be called back with the
        response when all headers in the response have been received.
        Thereafter, C{None}.
    """
    NO_BODY_CODES = set([NO_CONTENT, NOT_MODIFIED])

    # Maps lower-cased Transfer-Encoding token to the decoder class that
    # handles that framing.
    _transferDecoders = {
        'chunked': _ChunkedTransferDecoder,
        }

    bodyDecoder = None

    def __init__(self, request, finisher):
        self.request = request
        self.finisher = finisher
        self._responseDeferred = Deferred()

    def parseVersion(self, strversion):
        """
        Parse version strings of the form Protocol '/' Major '.' Minor. E.g.
        'HTTP/1.1'.  Returns (protocol, major, minor).  Will raise ValueError
        on bad syntax.
        """
        try:
            proto, strnumber = strversion.split('/')
            major, minor = strnumber.split('.')
            major, minor = int(major), int(minor)
        except ValueError, e:
            # Re-raise as the module-specific parse error, keeping the
            # original description and the offending version string.
            raise BadResponseVersion(str(e), strversion)
        if major < 0 or minor < 0:
            raise BadResponseVersion("version may not be negative", strversion)
        return (proto, major, minor)

    def statusReceived(self, status):
        """
        Parse the status line into its components and create a response object
        to keep track of this response's state.
        """
        # A status line is "HTTP-Version SP Status-Code SP Reason-Phrase";
        # the reason phrase may itself contain spaces, hence maxsplit=2.
        parts = status.split(' ', 2)
        if len(parts) != 3:
            raise ParseError("wrong number of parts", status)

        try:
            statusCode = int(parts[1])
        except ValueError:
            raise ParseError("non-integer status code", status)

        self.response = Response(
            self.parseVersion(parts[0]),
            statusCode,
            parts[2],
            self.headers,
            self.transport)

    def _finished(self, rest):
        """
        Called to indicate that an entire response has been received.  No more
        bytes will be interpreted by this L{HTTPClientParser}.  Extra bytes are
        passed up and the state of this L{HTTPClientParser} is set to I{DONE}.

        @param rest: A C{str} giving any extra bytes delivered to this
            L{HTTPClientParser} which are not part of the response being
            parsed.
        """
        self.state = DONE
        self.finisher(rest)

    def isConnectionControlHeader(self, name):
        """
        Content-Length in the response to a HEAD request is an entity header,
        not a connection control header.
        """
        if self.request.method == 'HEAD' and name == 'content-length':
            return False
        return HTTPParser.isConnectionControlHeader(self, name)

    def allHeadersReceived(self):
        """
        Figure out how long the response body is going to be by examining
        headers and stuff.

        Decides between no body, Transfer-Encoding framing, Content-Length
        framing, and read-until-close, then fires C{_responseDeferred}.
        """
        if (self.response.code in self.NO_BODY_CODES
            or self.request.method == 'HEAD'):
            self.response.length = 0
            self._finished(self.clearLineBuffer())
        else:
            transferEncodingHeaders = self.connHeaders.getRawHeaders(
                'transfer-encoding')
            if transferEncodingHeaders:
                # This could be a KeyError.  However, that would mean we do not
                # know how to decode the response body, so failing the request
                # is as good a behavior as any.  Perhaps someday we will want
                # to normalize/document/test this specifically, but failing
                # seems fine to me for now.
                transferDecoder = self._transferDecoders[transferEncodingHeaders[0].lower()]

                # If anyone ever invents a transfer encoding other than
                # chunked (yea right), and that transfer encoding can predict
                # the length of the response body, it might be sensible to
                # allow the transfer decoder to set the response object's
                # length attribute.
            else:
                contentLengthHeaders = self.connHeaders.getRawHeaders('content-length')
                if contentLengthHeaders is None:
                    contentLength = None
                elif len(contentLengthHeaders) == 1:
                    contentLength = int(contentLengthHeaders[0])
                    self.response.length = contentLength
                else:
                    # "HTTP Message Splitting" or "HTTP Response Smuggling"
                    # potentially happening.  Or it's just a buggy server.
                    raise ValueError(
                        "Too many Content-Length headers; response is invalid")

                if contentLength == 0:
                    self._finished(self.clearLineBuffer())
                    transferDecoder = None
                else:
                    # None here means "unknown length": read until the
                    # connection is closed.
                    transferDecoder = lambda x, y: _IdentityTransferDecoder(
                        contentLength, x, y)

            if transferDecoder is None:
                self.response._bodyDataFinished()
            else:
                # Make sure as little data as possible from the response body
                # gets delivered to the response object until the response
                # object actually indicates it is ready to handle bytes
                # (probably because an application gave it a way to interpret
                # them).
                self.transport.pauseProducing()
                self.switchToBodyMode(transferDecoder(
                    self.response._bodyDataReceived,
                    self._finished))

        # This must be last.  If it were first, then application code might
        # change some state (for example, registering a protocol to receive the
        # response body).  Then the pauseProducing above would be wrong since
        # the response is ready for bytes and nothing else would ever resume
        # the transport.
        self._responseDeferred.callback(self.response)
        del self._responseDeferred

    def connectionLost(self, reason):
        """
        Relay connection loss either to the body decoder (if a body was being
        delivered) or to the response Deferred (if the response never fully
        arrived).
        """
        if self.bodyDecoder is not None:
            try:
                try:
                    self.bodyDecoder.noMoreData()
                except PotentialDataLoss:
                    self.response._bodyDataFinished(Failure())
                except _DataLoss:
                    self.response._bodyDataFinished(
                        Failure(ResponseFailed([reason, Failure()])))
                else:
                    self.response._bodyDataFinished()
            except:
                # Handle exceptions from both the except suites and the else
                # suite.  Those functions really shouldn't raise exceptions,
                # but maybe there's some buggy application code somewhere
                # making things difficult.
                log.err()
        elif self.state != DONE:
            self._responseDeferred.errback(Failure(ResponseFailed([reason])))
            del self._responseDeferred
class Request:
    """
    A L{Request} instance describes an HTTP request to be sent to an HTTP
    server.

    @ivar method: The HTTP method to for this request, ex: 'GET', 'HEAD',
        'POST', etc.
    @type method: C{str}

    @ivar uri: The relative URI of the resource to request.  For example,
        C{'/foo/bar?baz=quux'}.
    @type uri: C{str}

    @ivar headers: Headers to be sent to the server.  It is important to
        note that this object does not create any implicit headers.  So it
        is up to the HTTP Client to add required headers such as 'Host'.
    @type headers: L{twisted.web.http_headers.Headers}

    @ivar bodyProducer: C{None} or an L{IBodyProducer} provider which
        produces the content body to send to the remote HTTP server.
    """
    def __init__(self, method, uri, headers, bodyProducer):
        # See the class docstring for the meaning of these attributes.
        self.method = method
        self.uri = uri
        self.headers = headers
        self.bodyProducer = bodyProducer

    def _writeHeaders(self, transport, TEorCL):
        """
        Write the request line and all headers to C{transport}.

        @param TEorCL: C{None}, or a complete I{Transfer-Encoding} or
            I{Content-Length} header line (including trailing CR LF) that
            frames the body.

        @raise BadHeaders: If there is not exactly one I{Host} header.
        """
        hosts = self.headers.getRawHeaders('host', ())
        if len(hosts) != 1:
            raise BadHeaders("Exactly one Host header required")

        # In the future, having the protocol version be a parameter to this
        # method would probably be good.  It would be nice if this method
        # weren't limited to issueing HTTP/1.1 requests.
        requestLines = []
        requestLines.append(
            '%s %s HTTP/1.1\r\n' % (self.method, self.uri))
        requestLines.append('Connection: close\r\n')
        if TEorCL is not None:
            requestLines.append(TEorCL)
        for name, values in self.headers.getAllRawHeaders():
            requestLines.extend(['%s: %s\r\n' % (name, v) for v in values])
        requestLines.append('\r\n')
        transport.writeSequence(requestLines)

    def _writeToChunked(self, transport):
        """
        Write this request to the given transport using chunked
        transfer-encoding to frame the body.

        @return: A L{Deferred} which fires when the body producer finishes.
        """
        self._writeHeaders(transport, 'Transfer-Encoding: chunked\r\n')
        encoder = ChunkedEncoder(transport)
        encoder.registerProducer(self.bodyProducer, True)
        d = self.bodyProducer.startProducing(encoder)

        def cbProduced(ignored):
            # Finishing cleanly: the encoder emits the terminating
            # zero-length chunk when unregistered.
            encoder.unregisterProducer()
        def ebProduced(err):
            encoder._allowNoMoreWrites()
            # Don't call the encoder's unregisterProducer because it will write
            # a zero-length chunk.  This would indicate to the server that the
            # request body is complete.  There was an error, though, so we
            # don't want to do that.
            transport.unregisterProducer()
            return err
        d.addCallbacks(cbProduced, ebProduced)
        return d

    def _writeToContentLength(self, transport):
        """
        Write this request to the given transport using content-length to frame
        the body.

        @return: A L{Deferred} which fires when the producer has written
            exactly the declared number of bytes, or fails if it wrote the
            wrong number or failed itself.
        """
        self._writeHeaders(
            transport,
            'Content-Length: %d\r\n' % (self.bodyProducer.length,))

        # This Deferred is used to signal an error in the data written to the
        # encoder below.  It can only errback and it will only do so before too
        # many bytes have been written to the encoder and before the producer
        # Deferred fires.
        finishedConsuming = Deferred()

        # This makes sure the producer writes the correct number of bytes for
        # the request body.
        encoder = LengthEnforcingConsumer(
            self.bodyProducer, transport, finishedConsuming)

        transport.registerProducer(self.bodyProducer, True)
        finishedProducing = self.bodyProducer.startProducing(encoder)

        def combine(consuming, producing):
            # This Deferred is returned and will be fired when the first of
            # consuming or producing fires.
            ultimate = Deferred()

            # Keep track of what has happened so far.  This initially
            # contains None, then an integer uniquely identifying what
            # sequence of events happened.  See the callbacks and errbacks
            # defined below for the meaning of each value.
            state = [None]

            def ebConsuming(err):
                if state == [None]:
                    # The consuming Deferred failed first.  This means the
                    # overall writeTo Deferred is going to errback now.  The
                    # producing Deferred should not fire later (because the
                    # consumer should have called stopProducing on the
                    # producer), but if it does, a callback will be ignored
                    # and an errback will be logged.
                    state[0] = 1
                    ultimate.errback(err)
                else:
                    # The consuming Deferred errbacked after the producing
                    # Deferred fired.  This really shouldn't ever happen.
                    # If it does, I goofed.  Log the error anyway, just so
                    # there's a chance someone might notice and complain.
                    log.err(
                        err,
                        "Buggy state machine in %r/[%d]: "
                        "ebConsuming called" % (self, state[0]))

            def cbProducing(result):
                if state == [None]:
                    # The producing Deferred succeeded first.  Nothing will
                    # ever happen to the consuming Deferred.  Tell the
                    # encoder we're done so it can check what the producer
                    # wrote and make sure it was right.
                    state[0] = 2
                    try:
                        encoder._noMoreWritesExpected()
                    except:
                        # Fail the overall writeTo Deferred - something the
                        # producer did was wrong.
                        ultimate.errback()
                    else:
                        # Success - succeed the overall writeTo Deferred.
                        ultimate.callback(None)
                # Otherwise, the consuming Deferred already errbacked.  The
                # producing Deferred wasn't supposed to fire, but it did
                # anyway.  It's buggy, but there's not really anything to be
                # done about it.  Just ignore this result.

            def ebProducing(err):
                if state == [None]:
                    # The producing Deferred failed first.  This means the
                    # overall writeTo Deferred is going to errback now.
                    # Tell the encoder that we're done so it knows to reject
                    # further writes from the producer (which should not
                    # happen, but the producer may be buggy).
                    state[0] = 3
                    encoder._allowNoMoreWrites()
                    ultimate.errback(err)
                else:
                    # The producing Deferred failed after the consuming
                    # Deferred failed.  It shouldn't have, so it's buggy.
                    # Log the exception in case anyone who can fix the code
                    # is watching.
                    log.err(err, "Producer is buggy")

            consuming.addErrback(ebConsuming)
            producing.addCallbacks(cbProducing, ebProducing)

            return ultimate

        d = combine(finishedConsuming, finishedProducing)
        def f(passthrough):
            # Regardless of what happens with the overall Deferred, once it
            # fires, the producer registered way up above the definition of
            # combine should be unregistered.
            transport.unregisterProducer()
            return passthrough
        d.addBoth(f)
        return d

    def writeTo(self, transport):
        """
        Format this L{Request} as an HTTP/1.1 request and write it to the given
        transport.  If bodyProducer is not None, it will be associated with an
        L{IConsumer}.

        @return: A L{Deferred} which fires with C{None} when the request has
            been completely written to the transport or with a L{Failure} if
            there is any problem generating the request bytes.
        """
        if self.bodyProducer is not None:
            if self.bodyProducer.length is UNKNOWN_LENGTH:
                return self._writeToChunked(transport)
            else:
                return self._writeToContentLength(transport)
        else:
            self._writeHeaders(transport, None)
            return succeed(None)

    def stopWriting(self):
        """
        Stop writing this request to the transport.  This can only be called
        after C{writeTo} and before the L{Deferred} returned by C{writeTo}
        fires.  It should cancel any asynchronous task started by C{writeTo}.
        The L{Deferred} returned by C{writeTo} need not be fired if this method
        is called.
        """
        # If bodyProducer is None, then the Deferred returned by writeTo has
        # fired already and this method cannot be called.
        _callAppFunction(self.bodyProducer.stopProducing)
class LengthEnforcingConsumer:
    """
    An L{IConsumer} proxy which enforces an exact length requirement on the
    total data written to it.

    @ivar _length: The number of bytes remaining to be written.

    @ivar _producer: The L{IBodyProducer} which is writing to this
        consumer.

    @ivar _consumer: The consumer to which at most C{_length} bytes will be
        forwarded.

    @ivar _finished: A L{Deferred} which will be fired with a L{Failure} if too
        many bytes are written to this consumer.  C{None} once no further
        writes are allowed.
    """
    def __init__(self, producer, consumer, finished):
        self._length = producer.length
        self._producer = producer
        self._consumer = consumer
        self._finished = finished

    def _allowNoMoreWrites(self):
        """
        Indicate that no additional writes are allowed.  Attempts to write
        after calling this method will be met with an exception.
        """
        self._finished = None

    def write(self, bytes):
        """
        Write C{bytes} to the underlying consumer unless
        C{_noMoreWritesExpected} has been called or there are/have been too
        many bytes.
        """
        # NOTE(review): the parameter shadows the builtin name 'bytes';
        # harmless here, but worth renaming if the signature ever changes.
        if self._finished is None:
            # No writes are supposed to happen any more.  Try to convince the
            # calling code to stop calling this method by calling its
            # stopProducing method and then throwing an exception at it.  This
            # exception isn't documented as part of the API because you're
            # never supposed to expect it: only buggy code will ever receive
            # it.
            self._producer.stopProducing()
            raise ExcessWrite()

        if len(bytes) <= self._length:
            self._length -= len(bytes)
            self._consumer.write(bytes)
        else:
            # No synchronous exception is raised in *this* error path because
            # we still have _finished which we can use to report the error to a
            # better place than the direct caller of this method (some
            # arbitrary application code).
            _callAppFunction(self._producer.stopProducing)
            self._finished.errback(WrongBodyLength("too many bytes written"))
            self._allowNoMoreWrites()

    def _noMoreWritesExpected(self):
        """
        Called to indicate no more bytes will be written to this consumer.
        Check to see that the correct number have been written.

        @raise WrongBodyLength: If not enough bytes have been written.
        """
        if self._finished is not None:
            self._allowNoMoreWrites()
            if self._length:
                raise WrongBodyLength("too few bytes written")
def makeStatefulDispatcher(name, template):
    """
    Build a method which dispatches to a state-specific implementation.

    The returned function, when used as a method, looks up and invokes the
    method named C{'_%s_%s' % (name, self._state)} on the instance and
    returns its result.

    @param name: A string used to construct the name of the subsidiary
        method to invoke.

    @param template: A function object whose docstring is copied onto the
        returned function.

    @return: The dispatcher function.
    """
    def dispatcher(self, *args, **kwargs):
        handler = getattr(self, '_' + name + '_' + self._state, None)
        if handler is None:
            raise RuntimeError(
                "%r has no %s method in state %s" % (self, name, self._state))
        return handler(*args, **kwargs)
    dispatcher.__doc__ = template.__doc__
    return dispatcher
class Response:
    """
    A L{Response} instance describes an HTTP response received from an HTTP
    server.

    L{Response} should not be subclassed or instantiated.

    @ivar version: A three-tuple describing the protocol and protocol version
        of the response.  The first element is of type C{str}, the second and
        third are of type C{int}.  For example, C{('HTTP', 1, 1)}.
    @type version: C{tuple}

    @ivar code: The HTTP status code of this response.
    @type code: C{int}

    @ivar phrase: The HTTP reason phrase of this response.
    @type phrase: C{str}

    @ivar headers: The HTTP response headers of this response.
    @type headers: L{Headers}

    @ivar length: The number of bytes expected to be in the body of this
        response or L{UNKNOWN_LENGTH} if the server did not indicate how many
        bytes to expect.  For I{HEAD} responses, this will be 0; if the
        response includes a I{Content-Length} header, it will be available in
        C{headers}.
    @type length: C{int} or something else

    @ivar _transport: The transport which is delivering this response.

    @ivar _bodyProtocol: The L{IProtocol} provider to which the body is
        delivered.  C{None} before one has been registered with
        C{deliverBody}.

    @ivar _bodyBuffer: A C{list} of the strings passed to C{bodyDataReceived}
        before C{deliverBody} is called.  C{None} afterwards.

    @ivar _state: Indicates what state this L{Response} instance is in,
        particularly with respect to delivering bytes from the response body
        to an application-supplied protocol object.  This may be one of
        C{'INITIAL'}, C{'CONNECTED'}, C{'DEFERRED_CLOSE'}, or C{'FINISHED'},
        with the following meanings:

          - INITIAL: This is the state L{Response} objects start in.  No
            protocol has yet been provided and the underlying transport may
            still have bytes to deliver to it.

          - DEFERRED_CLOSE: If the underlying transport indicates all bytes
            have been delivered but no application-provided protocol is yet
            available, the L{Response} moves to this state.  Data is
            buffered and waiting for a protocol to be delivered to.

          - CONNECTED: If a protocol is provided when the state is INITIAL,
            the L{Response} moves to this state.  Any buffered data is
            delivered and any data which arrives from the transport
            subsequently is given directly to the protocol.

          - FINISHED: If a protocol is provided in the DEFERRED_CLOSE state,
            the L{Response} moves to this state after delivering all
            buffered data to the protocol.  Otherwise, if the L{Response} is
            in the CONNECTED state, if the transport indicates there is no
            more data, the L{Response} moves to this state.  Nothing else
            can happen once the L{Response} is in this state.
    """
    length = UNKNOWN_LENGTH

    _bodyProtocol = None
    # NOTE(review): _bodyFinished is never read or written by any code
    # visible in this class -- possibly vestigial; confirm against external
    # callers before removing.
    _bodyFinished = False

    def __init__(self, version, code, phrase, headers, _transport):
        self.version = version
        self.code = code
        self.phrase = phrase
        self.headers = headers
        self._transport = _transport
        self._bodyBuffer = []
        self._state = 'INITIAL'

    def deliverBody(self, protocol):
        """
        Register an L{IProtocol} provider to receive the response body.

        The protocol will be connected to a transport which provides
        L{IPushProducer}.  The protocol's C{connectionLost} method will be
        called with:

            - ResponseDone, which indicates that all bytes from the response
              have been successfully delivered.

            - PotentialDataLoss, which indicates that it cannot be determined
              if the entire response body has been delivered.  This only occurs
              when making requests to HTTP servers which do not set
              I{Content-Length} or a I{Transfer-Encoding} in the response.

            - ResponseFailed, which indicates that some bytes from the response
              were lost.  The C{reasons} attribute of the exception may provide
              more specific indications as to why.
        """
    deliverBody = makeStatefulDispatcher('deliverBody', deliverBody)

    def _deliverBody_INITIAL(self, protocol):
        """
        Deliver any buffered data to C{protocol} and prepare to deliver any
        future data to it.  Move to the C{'CONNECTED'} state.
        """
        # Now that there's a protocol to consume the body, resume the
        # transport.  It was previously paused by HTTPClientParser to avoid
        # reading too much data before it could be handled.
        self._transport.resumeProducing()

        protocol.makeConnection(self._transport)
        self._bodyProtocol = protocol
        for data in self._bodyBuffer:
            self._bodyProtocol.dataReceived(data)
        self._bodyBuffer = None
        self._state = 'CONNECTED'

    def _deliverBody_CONNECTED(self, protocol):
        """
        It is invalid to attempt to deliver data to a protocol when it is
        already being delivered to another protocol.
        """
        raise RuntimeError(
            "Response already has protocol %r, cannot deliverBody "
            "again" % (self._bodyProtocol,))

    def _deliverBody_DEFERRED_CLOSE(self, protocol):
        """
        Deliver any buffered data to C{protocol} and then disconnect the
        protocol.  Move to the C{'FINISHED'} state.
        """
        # Unlike _deliverBody_INITIAL, there is no need to resume the
        # transport here because all of the response data has been received
        # already.  Some higher level code may want to resume the transport if
        # that code expects further data to be received over it.

        protocol.makeConnection(self._transport)

        for data in self._bodyBuffer:
            protocol.dataReceived(data)
        self._bodyBuffer = None
        protocol.connectionLost(self._reason)
        self._state = 'FINISHED'

    def _deliverBody_FINISHED(self, protocol):
        """
        It is invalid to attempt to deliver data to a protocol after the
        response body has been delivered to another protocol.
        """
        raise RuntimeError(
            "Response already finished, cannot deliverBody now.")

    def _bodyDataReceived(self, data):
        """
        Called by HTTPClientParser with chunks of data from the response body.
        They will be buffered or delivered to the protocol passed to
        deliverBody.
        """
    _bodyDataReceived = makeStatefulDispatcher('bodyDataReceived',
                                               _bodyDataReceived)

    def _bodyDataReceived_INITIAL(self, data):
        """
        Buffer any data received for later delivery to a protocol passed to
        C{deliverBody}.

        Little or no data should be buffered by this method, since the
        transport has been paused and will not be resumed until a protocol
        is supplied.
        """
        self._bodyBuffer.append(data)

    def _bodyDataReceived_CONNECTED(self, data):
        """
        Deliver any data received to the protocol to which this L{Response}
        is connected.
        """
        self._bodyProtocol.dataReceived(data)

    def _bodyDataReceived_DEFERRED_CLOSE(self, data):
        """
        It is invalid for data to be delivered after it has been indicated
        that the response body has been completely delivered.
        """
        raise RuntimeError("Cannot receive body data after _bodyDataFinished")

    def _bodyDataReceived_FINISHED(self, data):
        """
        It is invalid for data to be delivered after the response body has
        been delivered to a protocol.
        """
        raise RuntimeError("Cannot receive body data after protocol disconnected")

    def _bodyDataFinished(self, reason=None):
        """
        Called by HTTPClientParser when no more body data is available.  If the
        optional reason is supplied, this indicates a problem or potential
        problem receiving all of the response body.
        """
    _bodyDataFinished = makeStatefulDispatcher('bodyDataFinished',
                                               _bodyDataFinished)

    def _bodyDataFinished_INITIAL(self, reason=None):
        """
        Move to the C{'DEFERRED_CLOSE'} state to wait for a protocol to
        which to deliver the response body.
        """
        self._state = 'DEFERRED_CLOSE'
        if reason is None:
            reason = Failure(ResponseDone("Response body fully received"))
        self._reason = reason

    def _bodyDataFinished_CONNECTED(self, reason=None):
        """
        Disconnect the protocol and move to the C{'FINISHED'} state.
        """
        if reason is None:
            reason = Failure(ResponseDone("Response body fully received"))
        self._bodyProtocol.connectionLost(reason)
        self._bodyProtocol = None
        self._state = 'FINISHED'

    def _bodyDataFinished_DEFERRED_CLOSE(self, reason=None):
        """
        It is invalid to attempt to notify the L{Response} of the end of the
        response body data more than once.
        """
        # Accept the reason argument the dispatcher may forward so that the
        # intended RuntimeError is raised rather than a TypeError about an
        # unexpected argument.
        raise RuntimeError("Cannot finish body data more than once")

    def _bodyDataFinished_FINISHED(self, reason=None):
        """
        It is invalid to attempt to notify the L{Response} of the end of the
        response body data more than once.
        """
        # Accept the reason argument for the same reason as
        # _bodyDataFinished_DEFERRED_CLOSE above.
        raise RuntimeError("Cannot finish body data after protocol disconnected")
class ChunkedEncoder:
    """
    Helper object which exposes L{IConsumer} on top of L{HTTP11ClientProtocol}
    for streaming request bodies to the server using chunked transfer
    encoding.
    """
    implements(IConsumer)

    def __init__(self, transport):
        self.transport = transport

    def _allowNoMoreWrites(self):
        """
        Forbid further writes; subsequent attempts to write will be met with
        an exception.
        """
        self.transport = None

    def registerProducer(self, producer, streaming):
        """
        Register the given producer with C{self.transport}.
        """
        self.transport.registerProducer(producer, streaming)

    def write(self, data):
        """
        Send C{data} to the transport, framed as a single HTTP chunk
        (hex length, CRLF, payload, CRLF).

        @type data: C{str}
        """
        transport = self.transport
        if transport is None:
            raise ExcessWrite()
        transport.writeSequence(("%x\r\n" % len(data), data, "\r\n"))

    def unregisterProducer(self):
        """
        Emit the terminating zero-length chunk, finish the request, and
        forbid further writes.
        """
        self.write('')
        self.transport.unregisterProducer()
        self._allowNoMoreWrites()
class TransportProxyProducer:
    """
    An L{IPushProducer} wrapper which forwards calls to another such producer
    until it is told to stop proxying.

    @ivar _producer: The wrapped L{IPushProducer} provider or C{None} after
        this proxy has been stopped.
    """
    implements(IPushProducer)

    # LineReceiver uses this undocumented attribute of transports to decide
    # when to stop calling lineReceived or rawDataReceived (if it finds it to
    # be true, it doesn't bother to deliver any more data).  Set disconnecting
    # to False here and never change it to true so that all data is always
    # delivered to us and so that LineReceiver doesn't fail with an
    # AttributeError.
    disconnecting = False

    def __init__(self, producer):
        self._producer = producer

    def _stopProxying(self):
        """
        Stop forwarding L{IPushProducer} method calls to the wrapped
        producer.
        """
        self._producer = None

    def stopProducing(self):
        """
        Forward the stop request to the wrapped producer, unless this proxy
        has been stopped.
        """
        producer = self._producer
        if producer is not None:
            producer.stopProducing()

    def resumeProducing(self):
        """
        Forward the resume request to the wrapped producer, unless this proxy
        has been stopped.
        """
        producer = self._producer
        if producer is not None:
            producer.resumeProducing()

    def pauseProducing(self):
        """
        Forward the pause request to the wrapped producer, unless this proxy
        has been stopped.
        """
        producer = self._producer
        if producer is not None:
            producer.pauseProducing()
class HTTP11ClientProtocol(Protocol):
    """
    L{HTTP11ClientProtocol} is an implementation of the HTTP 1.1 client
    protocol.  It supports as few features as possible.

    @ivar _parser: After a request is issued, the L{HTTPClientParser} to
        which received data making up the response to that request is
        delivered.

    @ivar _finishedRequest: After a request is issued, the L{Deferred} which
        will fire when a L{Response} object corresponding to that request is
        available.  This allows L{HTTP11ClientProtocol} to fail the request
        if there is a connection or parsing problem.

    @ivar _currentRequest: After a request is issued, the L{Request}
        instance used to make that request.  This allows
        L{HTTP11ClientProtocol} to stop request generation if necessary (for
        example, if the connection is lost).

    @ivar _transportProxy: After a request is issued, the
        L{TransportProxyProducer} to which C{_parser} is connected.  This
        allows C{_parser} to pause and resume the transport in a way which
        L{HTTP11ClientProtocol} can exert some control over.

    @ivar _responseDeferred: After a request is issued, the L{Deferred} from
        C{_parser} which will fire with a L{Response} when one has been
        received.  This is eventually chained with C{_finishedRequest}, but
        only in certain cases to avoid double firing that Deferred.

    @ivar _state: Indicates what state this L{HTTP11ClientProtocol} instance
        is in with respect to transmission of a request and reception of a
        response.  This may be one of the following strings:

          - QUIESCENT: This is the state L{HTTP11ClientProtocol} instances
            start in.  Nothing is happening: no request is being sent and no
            response is being received or expected.

          - TRANSMITTING: When a request is made (via L{request}), the
            instance moves to this state.  L{Request.writeTo} has been used
            to start to send a request but it has not yet finished.

          - TRANSMITTING_AFTER_RECEIVING_RESPONSE: The server has returned a
            complete response but the request has not yet been fully sent
            yet.  The instance will remain in this state until the request
            is fully sent.

          - GENERATION_FAILED: There was an error while generating the
            request.  The request was not fully sent to the network.

          - WAITING: The request was fully sent to the network.  The
            instance is now waiting for the response to be fully received.

          - ABORTING: Application code has requested that the HTTP connection
            be aborted.

          - CONNECTION_LOST: The connection has been lost.
    """
    _state = 'QUIESCENT'
    _parser = None

    def request(self, request):
        """
        Issue C{request} over C{self.transport} and return a L{Deferred} which
        will fire with a L{Response} instance or an error.

        @param request: The object defining the parameters of the request to
           issue.
        @type request: L{Request}

        @rtype: L{Deferred}
        @return: The deferred may errback with L{RequestGenerationFailed} if
            the request was not fully written to the transport due to a local
            error.  It may errback with L{RequestTransmissionFailed} if it was
            not fully written to the transport due to a network error.  It may
            errback with L{ResponseFailed} if the request was sent (not
            necessarily received) but some or all of the response was lost.  It
            may errback with L{RequestNotSent} if it is not possible to send
            any more requests using this L{HTTP11ClientProtocol}.
        """
        # Only one request at a time: anything but QUIESCENT means a request
        # is already in flight (or the connection is unusable).
        if self._state != 'QUIESCENT':
            return fail(RequestNotSent())

        self._state = 'TRANSMITTING'
        _requestDeferred = maybeDeferred(request.writeTo, self.transport)
        self._finishedRequest = Deferred()

        # Keep track of the Request object in case we need to call stopWriting
        # on it.
        self._currentRequest = request

        # Wrap the transport in a proxy so the parser can be cleanly cut off
        # from it later (see _disconnectParser).
        self._transportProxy = TransportProxyProducer(self.transport)
        self._parser = HTTPClientParser(request, self._finishResponse)
        self._parser.makeConnection(self._transportProxy)
        self._responseDeferred = self._parser._responseDeferred

        def cbRequestWrotten(ignored):
            if self._state == 'TRANSMITTING':
                self._state = 'WAITING'
                # XXX We're stuck in WAITING until we lose the connection now.
                # This will be wrong when persistent connections are supported.
                # See #3420 for persistent connections.
                self._responseDeferred.chainDeferred(self._finishedRequest)

        def ebRequestWriting(err):
            if self._state == 'TRANSMITTING':
                self._state = 'GENERATION_FAILED'
                self.transport.loseConnection()
                self._finishedRequest.errback(
                    Failure(RequestGenerationFailed([err])))
            else:
                # Some other state transition (e.g. connection loss) already
                # settled _finishedRequest; just log the writing error.
                log.err(err, 'Error writing request, but not in valid state '
                             'to finalize request: %s' % self._state)

        _requestDeferred.addCallbacks(cbRequestWrotten, ebRequestWriting)
        return self._finishedRequest

    def _finishResponse(self, rest):
        """
        Called by an L{HTTPClientParser} to indicate that it has parsed a
        complete response.

        @param rest: A C{str} giving any trailing bytes which were given to
            the L{HTTPClientParser} which were not part of the response it
            was parsing.
        """
        # XXX this is because Connection: close is hard-coded above, probably
        # will want to change that at some point.  Either the client or the
        # server can control this.

        # XXX If the connection isn't being closed at this point, it's
        # important to make sure the transport isn't paused (after _giveUp,
        # or inside it, or something - after the parser can no longer touch
        # the transport)

        # For both of the above, see #3420 for persistent connections.

        if self._state == 'TRANSMITTING':
            # The server sent the entire response before we could send the
            # whole request.  That sucks.  Oh well.  Fire the request()
            # Deferred with the response.  But first, make sure that if the
            # request does ever finish being written that it won't try to fire
            # that Deferred.
            self._state = 'TRANSMITTING_AFTER_RECEIVING_RESPONSE'
            self._responseDeferred.chainDeferred(self._finishedRequest)

        # Tear down the connection; the synthetic ConnectionDone lets the
        # parser finish delivering the body normally.
        self._giveUp(Failure(ConnectionDone("synthetic!")))

    def _disconnectParser(self, reason):
        """
        If there is still a parser, call its C{connectionLost} method with the
        given reason.  If there is not, do nothing.

        @type reason: L{Failure}
        """
        if self._parser is not None:
            parser = self._parser
            self._parser = None

            # The parser is no longer allowed to do anything to the real
            # transport.  Stop proxying from the parser's transport to the real
            # transport before telling the parser it's done so that it can't do
            # anything.
            self._transportProxy._stopProxying()
            parser.connectionLost(reason)

    def _giveUp(self, reason):
        """
        Lose the underlying connection and disconnect the parser with the given
        L{Failure}.

        Use this method instead of calling the transport's loseConnection
        method directly otherwise random things will break.
        """
        self.transport.loseConnection()
        self._disconnectParser(reason)

    def dataReceived(self, bytes):
        """
        Deliver newly received bytes to the response parser, failing the
        in-flight request if parsing raises.
        """
        try:
            self._parser.dataReceived(bytes)
        except:
            # Bare except: capture whatever the parser raised into a Failure
            # and route it through the normal give-up path.
            self._giveUp(Failure())

    def connectionLost(self, reason):
        """
        The underlying transport went away.  If appropriate, notify the parser
        object.
        """
    connectionLost = makeStatefulDispatcher('connectionLost', connectionLost)

    def _connectionLost_QUIESCENT(self, reason):
        """
        Nothing is currently happening.  Move to the C{'CONNECTION_LOST'}
        state but otherwise do nothing.
        """
        self._state = 'CONNECTION_LOST'

    def _connectionLost_GENERATION_FAILED(self, reason):
        """
        The connection was in an inconsistent state.  Move to the
        C{'CONNECTION_LOST'} state but otherwise do nothing.
        """
        self._state = 'CONNECTION_LOST'

    def _connectionLost_TRANSMITTING(self, reason):
        """
        Fail the L{Deferred} for the current request, notify the request
        object that it does not need to continue transmitting itself, and
        move to the C{'CONNECTION_LOST'} state.
        """
        self._state = 'CONNECTION_LOST'
        self._finishedRequest.errback(
            Failure(RequestTransmissionFailed([reason])))
        del self._finishedRequest

        # Tell the request that it should stop bothering now.
        self._currentRequest.stopWriting()

    def _connectionLost_WAITING(self, reason):
        """
        Disconnect the response parser so that it can propagate the event as
        necessary (for example, to call an application protocol's
        C{connectionLost} method, or to fail a request L{Deferred}) and move
        to the C{'CONNECTION_LOST'} state.
        """
        self._disconnectParser(reason)
        self._state = 'CONNECTION_LOST'

    def _connectionLost_ABORTING(self, reason):
        """
        Disconnect the response parser with a L{ConnectionAborted} failure, and
        move to the C{'CONNECTION_LOST'} state.
        """
        # The original reason is deliberately replaced: the application asked
        # for the abort, so ConnectionAborted is the meaningful failure.
        self._disconnectParser(Failure(ConnectionAborted()))
        self._state = 'CONNECTION_LOST'

    def abort(self):
        """
        Close the connection and cause all outstanding L{request} L{Deferred}s
        to fire with an error.
        """
        self.transport.loseConnection()
        self._state = 'ABORTING'
/*
* Copyright (c) 2018 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.internal.session;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.when;
import static org.mockito.quality.Strictness.WARN;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.junit.After;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoSession;
import org.mockito.StateMaster;
import org.mockito.exceptions.misusing.UnfinishedMockingSessionException;
import org.mockito.quality.Strictness;
import org.mockito.session.MockitoSessionLogger;
import org.mockitoutil.ThrowableAssert;
public class DefaultMockitoSessionBuilderTest {

    @After
    public void after() {
        // Reset global Mockito listener state so one test cannot leak an
        // unfinished session's listeners into the next test.
        new StateMaster().clearMockitoListeners();
    }

    @Test
    public void creates_sessions() {
        // no configuration is legal
        new DefaultMockitoSessionBuilder().startMocking().finishMocking();
        // passing null to configuration is legal, default value will be used
        new DefaultMockitoSessionBuilder().initMocks((Object) null).startMocking().finishMocking();
        new DefaultMockitoSessionBuilder()
                .initMocks((Object[]) null)
                .startMocking()
                .finishMocking();
        new DefaultMockitoSessionBuilder()
                .initMocks(null, null)
                .strictness(null)
                .startMocking()
                .finishMocking();
        new DefaultMockitoSessionBuilder().strictness(null).startMocking().finishMocking();
        // happy path
        new DefaultMockitoSessionBuilder().initMocks(this).startMocking().finishMocking();
        new DefaultMockitoSessionBuilder().initMocks(new Object()).startMocking().finishMocking();
        new DefaultMockitoSessionBuilder()
                .strictness(Strictness.LENIENT)
                .startMocking()
                .finishMocking();
    }

    @Test
    public void creates_sessions_for_multiple_test_class_instances_for_repeated_calls() {
        // Chained initMocks calls should accumulate test instances rather
        // than replace the earlier one.
        TestClass testClass = new TestClass();
        TestClass.NestedTestClass nestedTestClass = testClass.new NestedTestClass();

        new DefaultMockitoSessionBuilder()
                .initMocks(testClass)
                .initMocks(nestedTestClass)
                .startMocking()
                .finishMocking();

        // Both instances must have had their @Mock fields populated.
        assertNotNull(testClass.set);
        assertNotNull(nestedTestClass.list);
    }

    @Test
    public void creates_sessions_for_multiple_test_class_instances_for_varargs_call() {
        // Same expectation as above, but via the varargs overload.
        TestClass testClass = new TestClass();
        TestClass.NestedTestClass nestedTestClass = testClass.new NestedTestClass();

        new DefaultMockitoSessionBuilder()
                .initMocks(testClass, nestedTestClass)
                .startMocking()
                .finishMocking();

        assertNotNull(testClass.set);
        assertNotNull(nestedTestClass.list);
    }

    @Test
    public void uses_logger_and_strictness() {
        TestClass testClass = new TestClass();

        final List<String> hints = new ArrayList<String>();
        MockitoSession session =
                new DefaultMockitoSessionBuilder()
                        .initMocks(testClass)
                        .strictness(WARN)
                        .logger(
                                new MockitoSessionLogger() {
                                    @Override
                                    public void log(String hint) {
                                        hints.add(hint);
                                    }
                                })
                        .startMocking();

        // An unused stubbing under WARN strictness should produce at least
        // one hint through the configured logger when the session finishes.
        when(testClass.set.add(1)).thenReturn(true);
        session.finishMocking();

        assertFalse(hints.isEmpty());
    }

    @Test
    public void requires_finish_mocking() {
        // Leave a session unfinished; starting another must fail.
        new DefaultMockitoSessionBuilder().startMocking();

        ThrowableAssert.assertThat(
                        new Runnable() {
                            public void run() {
                                new DefaultMockitoSessionBuilder().startMocking();
                            }
                        })
                .throwsException(UnfinishedMockingSessionException.class);
    }

    @Test
    public void auto_cleans_dirty_listeners() {
        // NOTE(review): this body is identical to requires_finish_mocking
        // above; presumably it is meant to exercise the automatic cleanup of
        // dirty listeners left by the first startMocking() -- confirm the
        // intended distinction against the production code.
        new DefaultMockitoSessionBuilder().startMocking();

        ThrowableAssert.assertThat(
                        new Runnable() {
                            public void run() {
                                new DefaultMockitoSessionBuilder().startMocking();
                            }
                        })
                .throwsException(UnfinishedMockingSessionException.class);
    }

    // Fixture class whose @Mock fields are populated by initMocks.
    class TestClass {
        @Mock public Set<Object> set;

        class NestedTestClass {
            @Mock public List<Object> list;
        }
    }
}
package kotlinx.coroutines.reactive
import kotlinx.atomicfu.*
import kotlinx.coroutines.*
import kotlinx.coroutines.channels.*
import kotlinx.coroutines.flow.*
import kotlinx.coroutines.flow.internal.*
import kotlinx.coroutines.intrinsics.*
import org.reactivestreams.*
import java.util.*
import kotlin.coroutines.*
import kotlinx.coroutines.internal.*
/**
 * Transforms the given reactive [Publisher] into [Flow].
 * Use the [buffer] operator on the resulting flow to specify the size of the back-pressure.
 * In effect, it specifies the value of the subscription's [request][Subscription.request].
 * The [default buffer capacity][Channel.BUFFERED] for a suspending channel is used by default.
 *
 * If any of the resulting flow transformations fails, the subscription is immediately cancelled and all the in-flight
 * elements are discarded.
 *
 * This function is integrated with `ReactorContext` from `kotlinx-coroutines-reactor` module,
 * see its documentation for additional details.
 */
public fun <T : Any> Publisher<T>.asFlow(): Flow<T> =
    // Capacity/overflow are left at their defaults; `buffer` replaces them via ChannelFlow.create.
    PublisherAsFlow(this)
/**
 * Transforms the given flow into a reactive specification compliant [Publisher].
 *
 * This function is integrated with `ReactorContext` from `kotlinx-coroutines-reactor` module,
 * see its documentation for additional details.
 *
 * An optional [context] can be specified to control the execution context of calls to the [Subscriber] methods.
 * A [CoroutineDispatcher] can be set to confine them to a specific thread; various [ThreadContextElement] can be set to
 * inject additional context into the caller thread. By default, the [Unconfined][Dispatchers.Unconfined] dispatcher
 * is used, so calls are performed from an arbitrary thread.
 */
@JvmOverloads // binary compatibility
public fun <T : Any> Flow<T>.asPublisher(context: CoroutineContext = EmptyCoroutineContext): Publisher<T> =
    // Unconfined comes first so any dispatcher in `context` overrides it.
    FlowAsPublisher(this, Dispatchers.Unconfined + context)
// Adapts a reactive Publisher to a ChannelFlow, mapping the flow's buffer
// configuration onto reactive-streams `request(n)` back-pressure.
private class PublisherAsFlow<T : Any>(
    private val publisher: Publisher<T>,
    context: CoroutineContext = EmptyCoroutineContext,
    capacity: Int = Channel.BUFFERED,
    onBufferOverflow: BufferOverflow = BufferOverflow.SUSPEND
) : ChannelFlow<T>(context, capacity, onBufferOverflow) {

    override fun create(context: CoroutineContext, capacity: Int, onBufferOverflow: BufferOverflow): ChannelFlow<T> =
        PublisherAsFlow(publisher, context, capacity, onBufferOverflow)

    /*
     * The @Suppress is for Channel.CHANNEL_DEFAULT_CAPACITY.
     * It's too counter-intuitive to be public, and moving it to Flow companion
     * will also create undesired effect.
     */
    // How many elements to request from the upstream subscription per batch,
    // derived from the configured buffer capacity and overflow strategy.
    @Suppress("INVISIBLE_MEMBER", "INVISIBLE_REFERENCE") // do not remove the INVISIBLE_REFERENCE suppression: required in K2
    private val requestSize: Long
        get() =
            if (onBufferOverflow != BufferOverflow.SUSPEND) {
                Long.MAX_VALUE // request all, since buffering strategy is to never suspend
            } else when (capacity) {
                Channel.RENDEZVOUS -> 1L // need to request at least one anyway
                Channel.UNLIMITED -> Long.MAX_VALUE // reactive streams way to say "give all", must be Long.MAX_VALUE
                Channel.BUFFERED -> Channel.CHANNEL_DEFAULT_CAPACITY.toLong()
                else -> capacity.toLong().also { check(it >= 1) }
            }

    override suspend fun collect(collector: FlowCollector<T>) {
        val collectContext = coroutineContext
        val newDispatcher = context[ContinuationInterceptor]
        if (newDispatcher == null || newDispatcher == collectContext[ContinuationInterceptor]) {
            // fast path -- subscribe directly in this dispatcher
            return collectImpl(collectContext + context, collector)
        }
        // slow path -- produce in a separate dispatcher
        collectSlowPath(collector)
    }

    private suspend fun collectSlowPath(collector: FlowCollector<T>) {
        coroutineScope {
            // Run the producer under the requested context and funnel its
            // channel back into the collector on the caller's dispatcher.
            collector.emitAll(produceImpl(this + context))
        }
    }

    private suspend fun collectImpl(injectContext: CoroutineContext, collector: FlowCollector<T>) {
        val subscriber = ReactiveSubscriber<T>(capacity, onBufferOverflow, requestSize)
        // inject subscribe context into publisher
        publisher.injectCoroutineContext(injectContext).subscribe(subscriber)
        try {
            var consumed = 0L
            while (true) {
                val value = subscriber.takeNextOrNull() ?: break
                // Bail out promptly if the collecting coroutine was cancelled.
                coroutineContext.ensureActive()
                collector.emit(value)
                // Request the next batch only after the previous one is fully consumed.
                if (++consumed == requestSize) {
                    consumed = 0L
                    subscriber.makeRequest()
                }
            }
        } finally {
            // Cancel the upstream subscription on any exit, normal or exceptional.
            subscriber.cancel()
        }
    }

    // The second channel here is used for produceIn/broadcastIn and slow-path (dispatcher change)
    override suspend fun collectTo(scope: ProducerScope<T>) =
        collectImpl(scope.coroutineContext, SendingCollector(scope.channel))
}
@Suppress("ReactiveStreamsSubscriberImplementation")
private class ReactiveSubscriber<T : Any>(
capacity: Int,
onBufferOverflow: BufferOverflow,
private val requestSize: Long
) : Subscriber<T> {
private lateinit var subscription: Subscription
// This implementation of ReactiveSubscriber always uses "offer" in its onNext implementation and it cannot
// be reliable with rendezvous channel, so a rendezvous channel is replaced with buffer=1 channel
private val channel = Channel<T>(if (capacity == Channel.RENDEZVOUS) 1 else capacity, onBufferOverflow)
suspend fun takeNextOrNull(): T? {
val result = channel.receiveCatching()
result.exceptionOrNull()?.let { throw it }
return result.getOrElse { null } // Closed channel
}
override fun onNext(value: T) {
// Controlled by requestSize
require(channel.trySend(value).isSuccess) { "Element $value was not added to channel because it was full, $channel" }
}
override fun onComplete() {
channel.close()
}
override fun onError(t: Throwable?) {
channel.close(t)
}
override fun onSubscribe(s: Subscription) {
subscription = s
makeRequest()
}
fun makeRequest() {
subscription.request(requestSize)
}
fun cancel() {
subscription.cancel()
}
}
// ContextInjector service is implemented in `kotlinx-coroutines-reactor` module only.
// If `kotlinx-coroutines-reactor` module is not included, the list is empty.
// The explicit iterator/asSequence chain is deliberate (see "R8 opto" below);
// keep it so R8 can optimize the ServiceLoader call.
private val contextInjectors: Array<ContextInjector> =
    ServiceLoader.load(ContextInjector::class.java, ContextInjector::class.java.classLoader)
        .iterator().asSequence()
        .toList().toTypedArray() // R8 opto
// Gives every registered ContextInjector a chance to wrap the publisher in turn.
internal fun <T> Publisher<T>.injectCoroutineContext(coroutineContext: CoroutineContext): Publisher<T> {
    var wrapped: Publisher<T> = this
    for (injector in contextInjectors) {
        wrapped = injector.injectCoroutineContext(wrapped, coroutineContext)
    }
    return wrapped
}
/**
* Adapter that transforms [Flow] into TCK-complaint [Publisher].
* [cancel] invocation cancels the original flow.
*/
@Suppress("ReactiveStreamsPublisherImplementation")
private class FlowAsPublisher<T : Any>(
private val flow: Flow<T>,
private val context: CoroutineContext
) : Publisher<T> {
override fun subscribe(subscriber: Subscriber<in T>?) {
if (subscriber == null) throw NullPointerException()
subscriber.onSubscribe(FlowSubscription(flow, subscriber, context))
}
}
/** @suppress */
@InternalCoroutinesApi
public class FlowSubscription<T>(
    @JvmField public val flow: Flow<T>,
    @JvmField public val subscriber: Subscriber<in T>,
    context: CoroutineContext
) : Subscription, AbstractCoroutine<Unit>(context, initParentJob = false, true) {
    /*
     * We deliberately set initParentJob to false and do not establish parent-child
     * relationship because FlowSubscription doesn't support it
     */
    // Outstanding demand from the subscriber; decremented per emitted element.
    private val requested = atomic(0L)
    // Continuation of the suspended producer, or null while it is running.
    // `request` takes it (getAndSet(null)) to resume the producer.
    private val producer = atomic<Continuation<Unit>?>(createInitialContinuation())
    @Volatile
    private var cancellationRequested = false

    // This code wraps startCoroutineCancellable into continuation
    private fun createInitialContinuation(): Continuation<Unit> = Continuation(coroutineContext) {
        ::flowProcessing.startCoroutineCancellable(this)
    }

    private suspend fun flowProcessing() {
        try {
            consumeFlow()
        } catch (cause: Throwable) {
            @Suppress("INVISIBLE_MEMBER", "INVISIBLE_REFERENCE") // do not remove the INVISIBLE_REFERENCE suppression: required in K2
            val unwrappedCause = unwrap(cause)
            // Suppress onError only for our own cancellation after cancel()
            // was requested; any other failure is reported to the subscriber.
            if (!cancellationRequested || isActive || unwrappedCause !== getCancellationException()) {
                try {
                    subscriber.onError(cause)
                } catch (e: Throwable) {
                    // Last ditch report
                    cause.addSuppressed(e)
                    handleCoroutineException(coroutineContext, cause)
                }
            }
            return
        }
        // We only call this if `consumeFlow()` finished successfully
        try {
            subscriber.onComplete()
        } catch (e: Throwable) {
            handleCoroutineException(coroutineContext, e)
        }
    }

    /*
     * This method has at most one caller at any time (triggered from the `request` method)
     */
    private suspend fun consumeFlow() {
        flow.collect { value ->
            // Emit the value
            subscriber.onNext(value)
            // Suspend if needed before requesting the next value
            if (requested.decrementAndGet() <= 0) {
                // Park until `request` resumes us with fresh demand.
                suspendCancellableCoroutine<Unit> {
                    producer.value = it
                }
            } else {
                // check for cancellation if we don't suspend
                coroutineContext.ensureActive()
            }
        }
    }

    @Deprecated("Since 1.2.0, binary compatibility with versions <= 1.1.x", level = DeprecationLevel.HIDDEN)
    override fun cancel() {
        cancellationRequested = true
        cancel(null)
    }

    override fun request(n: Long) {
        if (n <= 0) return
        // Add demand, saturating at Long.MAX_VALUE on overflow per the
        // reactive streams specification (rule 3.17).
        val old = requested.getAndUpdate { value ->
            val newValue = value + n
            if (newValue <= 0L) Long.MAX_VALUE else newValue
        }
        if (old <= 0L) {
            assert(old == 0L)
            // Emitter is not started yet or has suspended -- spin on race with suspendCancellableCoroutine
            while (true) {
                val producer = producer.getAndSet(null) ?: continue // spin if not set yet
                producer.resume(Unit)
                break
            }
        }
    }
}
import functools, logging
import bson.json_util as json_util
from fbvoting.conf import REDIS_TIMEOUT, ACTIVATE_REDIS
from fbvoting.mylogging import report
# Redis support is optional: only import flask_redis and instantiate the
# client when the feature flag is enabled in the project configuration.
if ACTIVATE_REDIS:
    from flask_redis import Redis
    redis_store = Redis()
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def _serialize(obj):
    """Serialize obj to a JSON string (BSON-aware via json_util).

    sort_keys=True makes the output deterministic, so the result can be
    used as a stable cache key.
    """
    return json_util.dumps(obj, sort_keys=True)
def _unserialize(item):
    """Inverse of _serialize: parse a JSON string back into Python objects."""
    return json_util.loads(item)
# Import-time round-trip sanity check for the serializer pair.
# NOTE: assert statements are stripped when Python runs with -O.
_test = [ {"a": 123, "b": 345, "C": [1,2,3]}, "ciao", -9, u"Hello"]
assert _unserialize(_serialize(_test)) == _test
def redis_cached(func):
    """Decorator: cache func's return value in Redis.

    The cache key is the serialized (function name, args, kwargs) triple;
    entries expire after REDIS_TIMEOUT seconds.  When Redis is disabled via
    configuration, func is returned unwrapped (no caching, just a warning).
    """
    if not ACTIVATE_REDIS:
        # logger.warn is a deprecated alias; warning() is the supported API.
        logger.warning("Redis is not active: function %s not cached.",
                       func.__name__)
        return func

    @functools.wraps(func)
    def search_cache_then_do_function(*args, **kwargs):
        # pylint: disable=E1101
        # redis_store methods will be there only after initialization
        func_name = func.__name__  # speed
        redis_key = _serialize((func_name, args, kwargs))
        cache_result = redis_store.get(redis_key)
        if cache_result is None:
            report.mark(func_name + '-cacheless')
            func_result = func(*args, **kwargs)
            # NOTE(review): set+expire is two round-trips and not atomic; if
            # the process dies in between, the key never expires.  Consider
            # a single SET with an expiry if the client supports it.
            redis_store.set(redis_key, _serialize(func_result))
            redis_store.expire(redis_key, REDIS_TIMEOUT)
            return func_result
        else:
            report.mark(func_name + '-cached')
            return _unserialize(cache_result)
    return search_cache_then_do_function
def void_cache(func_to_void, *args, **kwargs):
    """Invalidate the cached entry of func_to_void for the given arguments."""
    # pylint: disable=E1101
    # ibid.
    key = _serialize((func_to_void.__name__, args, kwargs))
    redis_store.delete(key)
    report.mark(func_to_void.__name__ + '-cache-voided')
#! /usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""demonstrates using configman to make a Socorro app"""
# This app can be invoked like this:
# .../socorro/app/example_app.py --help
# set your path to make that simpler
# set both socorro and configman in your PYTHONPATH
import datetime
from socorro.app.generic_app import App, main
from configman import Namespace
#==============================================================================
class ExampleApp(App):
    """Minimal demonstration of a configman-driven Socorro application.

    Python 2 source: main() uses the print statement.
    """
    app_name = 'example'
    app_version = '0.1'
    app_description = __doc__
    #--------------------------------------------------------------------------
    # in this section, define any configuration requirements
    required_config = Namespace()
    required_config.add_option('name',
                               default='Wilma',
                               doc='a name to echo')
    # NOTE: datetime.now() is evaluated once at import time, so the default
    # is the moment the module was loaded, not the moment the app runs.
    required_config.add_option('time',
                               default=datetime.datetime.now(),
                               doc='the time of day')
    #--------------------------------------------------------------------------
    # implementing this constructor is only necessary when there is more
    # initialization to be done before main can be called
    #def __init__(self, config):
        #super(ExampleApp,self).__init__(config)
    #--------------------------------------------------------------------------
    def main(self):
        # this is where we'd implement the app
        # the configuration is already setup as self.config
        print 'hello, %s. The time is: %s' % (self.config.name,
                                              self.config.time)
# generic_app.main() parses the config sources, then calls ExampleApp.main().
if __name__ == '__main__':
    main(ExampleApp)
{
"spring":[
"boot",
"framework"
]
} | json | github | https://github.com/spring-projects/spring-boot | core/spring-boot-test/src/test/resources/org/springframework/boot/test/json/source.json |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
class Redundans(Package):
    """Redundans pipeline assists an assembly of heterozygous genomes."""

    homepage = "https://github.com/Gabaldonlab/redundans"
    url = "https://github.com/Gabaldonlab/redundans/archive/v0.13c.tar.gz"
    git = "https://github.com/Gabaldonlab/redundans.git"
    version('0.14a', commit='a20215a862aed161cbfc79df9133206156a1e9f0')
    version('0.13c', sha256='26d48f27a32678d94c1d00cb3b8991d74891d6cad64a94569901ff9607a7a736')
    depends_on('python', type=('build', 'run'))
    depends_on('py-pyscaf', type=('build', 'run'))
    depends_on('py-fastaindex', type=('build', 'run'))
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('perl', type=('build', 'run'))
    depends_on('sspace-standard')
    depends_on('bwa')
    depends_on('last')
    depends_on('gapcloser')
    depends_on('parallel')
    depends_on('snap-berkeley@1.0beta.18:', type=('build', 'run'))

    def install(self, spec, prefix):
        """Patch the SSPACE path into the driver script and install scripts."""
        # Hard-wire the absolute location of the SSPACE perl script into
        # redundans.py, which otherwise assumes a bundled copy.
        sspace_location = join_path(spec['sspace-standard'].prefix,
                                    'SSPACE_Standard_v3.0.pl')
        filter_file(r'sspacebin = os.path.join(.*)$',
                    'sspacebin = \'' + sspace_location + '\'',
                    'redundans.py')
        # Expose the driver plus helper scripts (fasta2*/fastq2* via glob).
        binfiles = ['redundans.py', 'bin/filterReads.py']
        binfiles.extend(glob.glob('bin/fast?2*.py'))
        # new internal dep with 0.14a
        if spec.satisfies('@0.14a:'):
            binfiles.append('bin/denovo.py')
        mkdirp(prefix.bin)
        for f in binfiles:
            install(f, prefix.bin)
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
# Copyright: (c) 2016, Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Shared documentation fragment for Dell EMC Networking modules.

    DOCUMENTATION below is data consumed by Ansible's documentation
    tooling, which merges it into each module's rendered docs.
    """

    # Standard files documentation fragment
    DOCUMENTATION = r'''
options:
  provider:
    description:
      - A dict object containing connection details.
    type: dict
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport.  The value of host is used as
            the destination address for the transport.
        type: str
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device.
        type: int
        default: 22
      username:
        description:
          - User to authenticate the SSH session to the remote device. If the
            value is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_USERNAME) will be used instead.
        type: str
      password:
        description:
          - Password to authenticate the SSH session to the remote device. If the
            value is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_PASSWORD) will be used instead.
        type: str
      ssh_keyfile:
        description:
          - Path to an ssh key used to authenticate the SSH session to the remote
            device.  If the value is not specified in the task, the value of
            environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
        type: path
      timeout:
        description:
          - Specifies idle timeout (in seconds) for the connection. Useful if the
            console freezes before continuing. For example when saving
            configurations.
        type: int
        default: 10
notes:
  - For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking).
'''
import zlib
from scrapy.utils.gz import gunzip, is_gzipped
from scrapy.http import Response, TextResponse
from scrapy.responsetypes import responsetypes
from scrapy.exceptions import NotConfigured
class HttpCompressionMiddleware(object):
    """Allow compressed (gzip, deflate) traffic to be sent to and
    received from web sites."""

    @classmethod
    def from_crawler(cls, crawler):
        # Honour the COMPRESSION_ENABLED setting: disable the middleware
        # entirely when it is switched off.
        if not crawler.settings.getbool('COMPRESSION_ENABLED'):
            raise NotConfigured
        return cls()

    def process_request(self, request, spider):
        # Advertise the content-codings we can transparently decode.
        request.headers.setdefault('Accept-Encoding', 'gzip,deflate')

    def process_response(self, request, response, spider):
        if not isinstance(response, Response):
            return response
        content_encoding = response.headers.getlist('Content-Encoding')
        if content_encoding and not is_gzipped(response):
            # Decode the outermost encoding layer only.
            encoding = content_encoding.pop()
            decoded_body = self._decode(response.body, encoding.lower())
            respcls = responsetypes.from_args(headers=response.headers,
                                              url=response.url)
            kwargs = dict(cls=respcls, body=decoded_body)
            if issubclass(respcls, TextResponse):
                # force recalculating the encoding until we make sure the
                # responsetypes guessing is reliable
                kwargs['encoding'] = None
            response = response.replace(**kwargs)
            if not content_encoding:
                del response.headers['Content-Encoding']
        return response

    def _decode(self, body, encoding):
        """Decode one layer of content-coding from body."""
        if encoding in ('gzip', 'x-gzip'):
            body = gunzip(body)
        if encoding == 'deflate':
            try:
                body = zlib.decompress(body)
            except zlib.error:
                # ugly hack to work with raw deflate content that may
                # be sent by microsoft servers. For more information, see:
                # http://carsten.codimi.de/gzip.yaws/
                # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx
                # http://www.gzip.org/zlib/zlib_faq.html#faq38
                body = zlib.decompress(body, -15)
        return body
# Symfony FrameworkBundle test fixture: enable converting PHP errors
# into thrown exceptions.
framework:
    php_errors:
        throw: true
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django_pgjson.fields import JsonField
class StorageEntry(models.Model):
    """Arbitrary per-user key/value storage; the value is a JSON document."""

    owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=False, null=False,
                              related_name="storage_entries", verbose_name=_("owner"))
    # Timestamps maintained automatically by Django on insert/update.
    created_date = models.DateTimeField(auto_now_add=True, null=False, blank=False,
                                        verbose_name=_("created date"))
    modified_date = models.DateTimeField(auto_now=True, null=False, blank=False,
                                         verbose_name=_("modified date"))
    key = models.CharField(max_length=255, null=False, blank=False, verbose_name=_("key"))
    value = JsonField(blank=True, default=None, null=True, verbose_name=_("value"))

    class Meta:
        verbose_name = "storage entry"
        verbose_name_plural = "storages entries"
        # Each user holds at most one value per key.
        unique_together = ("owner", "key")
        ordering = ["owner", "key"]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 00:09:01 2013
@author: Sol
"""
from __future__ import absolute_import, print_function
from builtins import range
from builtins import object
from textwrap import TextWrapper
import io
import os
from collections import deque
from weakref import proxy
class ParsedTextDocument(object):
    """Text document wrapped to a fixed-width grid of character cells.

    Each wrapped visual line is represented by a ParsedTextLine child.
    The document length is capped at rows*columns of the target grid.
    """
    def __init__(self, text_data, text_grid):
        # text_data may be a path to a UTF-8 file or the text itself.
        if os.path.isfile(text_data):
            with io.open(text_data, 'r', encoding='utf-8-sig') as f:
                text_data = f.read()
        # weakref proxy: the grid owns us, avoid a reference cycle.
        self._text_grid = proxy(text_grid)
        self._num_columns, self._max_visible_rows = text_grid._shape
        # Normalize Windows line endings before wrapping.
        text_data = text_data.replace('\r\n', '\n')
        # if len(text_data) and text_data[-1] != u'\n':
        #     text_data=text_data+u'\n'
        self._text = text_data
        self._children = []
        # Hard cap: the grid can show at most rows*columns characters.
        self._limit_text_length = self._max_visible_rows * self._num_columns
        if 0 < self._limit_text_length < len(self._text):
            self._text = self._text[:self._limit_text_length]
        self._default_parse_chunk_size = (self._num_columns *
                                          (self._max_visible_rows + 1))
        self._text_wrapper = TextWrapper(width=self._num_columns,
                                         drop_whitespace=False,
                                         replace_whitespace=False,
                                         expand_tabs=False)
        self._text_parsed_to_index = 0
        self._parse(0, len(self._text))
    def getDisplayedText(self):
        # Text covered by the lines that fit into the visible grid rows.
        lli = min(self._max_visible_rows, self.getChildCount()) - 1
        lline = self.getParsedLine(lli)
        return self._text[:lline._index_range[1]]
    def addChild(self, c):
        self._children.append(c)
    def getChildren(self):
        return self._children
    def getChildCount(self):
        return len(self._children)
    def getText(self):
        return self._text
    def getCharAtIndex(self, text_index):
        # Returns None (with a console warning) for out-of-bounds indices.
        try:
            return self._text[text_index]
        except Exception:
            print("WARNING: ParsedTextDocument.getCharAtIndex received "
                  "out of bounds index: ",
                  text_index, self.getTextLength())
            return
    def getTextLength(self):
        return len(self._text)
    def deleteText(self, start_index, end_index, insertText=None):
        """Delete [start_index, end_index); optionally insert replacement text.

        Returns the deleted substring and re-parses the document.
        """
        start_index = int(start_index)
        end_index = int(end_index)
        deleted_text = self._text[start_index:end_index]
        if insertText is None:
            self._text = ''.join([self._text[:start_index],
                                  self._text[end_index:]])
        else:
            self._text = ''.join([self._text[:start_index],
                                  insertText,
                                  self._text[end_index:]])
        self._parse(start_index)
        return deleted_text
    def insertText(self, text, start_index, end_index=None):
        """Insert text at start_index (replacing up to end_index if given)."""
        start_index = int(start_index)
        if end_index is None:
            end_index = start_index
        else:
            end_index = int(end_index)
        self._text = ''.join([self._text[:int(start_index)],
                              text,
                              self._text[int(end_index):]])
        return self._parse(start_index)
    def parseTextTo(self, requested_line_index):
        """Ensure lines up to requested_line_index exist; return the index
        actually reached (may be smaller when the text runs out)."""
        requested_line_index = int(requested_line_index)
        if self.getParsedLineCount() > requested_line_index:
            return requested_line_index
        add_line_count = requested_line_index - self.getParsedLineCount() + 1
        max_chars_to_add = add_line_count * self._num_columns
        start_index = self._children[-1]._index_range[0]
        self._parse(start_index, start_index + max_chars_to_add)
        if self.getParsedLineCount() >= requested_line_index:
            return requested_line_index
        return self.getParsedLineCount() - 1
    def _parse(self, from_text_index, to_text_index=None):
        # NOTE(review): both parameters are immediately overwritten below, so
        # every call re-parses the whole document regardless of the range the
        # caller asked for — confirm whether incremental parsing was intended.
        from_text_index = 0
        to_text_index = self.getTextLength()
        line_index = None
        if self._children:
            line_index = 0
        update_lines = []
        # Reuse existing line objects in place instead of reallocating them.
        if line_index is not None:
            update_lines = deque(self._children[:])
        para_split_text = self._text[from_text_index:to_text_index].splitlines(True)
        if len(para_split_text) == 0:
            return
        current_index = 0
        for para_text in para_split_text:
            current_index = self._wrapText(para_text, current_index,
                                           update_lines)
        # Lines left over from a previously longer document are dropped.
        if len(update_lines) > 0:
            self._children = self._children[:-len(update_lines)]
        self._text_parsed_to_index = current_index
    def _wrapText(self, para_text, current_index, update_lines):
        """Wrap one paragraph into grid-width lines, reusing update_lines.

        Returns the document index just past the wrapped paragraph.
        """
        rewrap = False
        para_text_index = 0
        for linestr in self._text_wrapper.wrap(para_text):
            # If the wrap point split mid-word (next document char is a
            # space), back up to the last space and re-wrap the remainder.
            if (linestr[-1] != u' ' and
                    len(self._text) > current_index + len(linestr) and
                    self._text[current_index + len(linestr)] == u' '):
                last_space = linestr.rfind(u' ')
                if last_space > 0:
                    linestr = linestr[:last_space + 1]
                    rewrap = True
            if len(update_lines) > 0:
                # Recycle an existing ParsedTextLine object.
                line = update_lines.popleft()
                line._text = linestr
                line._index_range = [current_index,
                                     current_index + len(linestr)]
                line.updateOrds(linestr)
                line._gl_display_list[0] = 0
            else:
                # ParsedTextLine registers itself with this document.
                ParsedTextLine(self, linestr,
                               [current_index, current_index + len(linestr)])
                line = self._children[-1]
            current_index += len(linestr)
            para_text_index += len(linestr)
            if rewrap is True:
                return self._wrapText(para_text[para_text_index:],
                                      current_index, update_lines)
        return current_index
    def clearCachedLineDisplayLists(self, from_char_index, to_char_index):
        # Invalidate the cached GL display lists for all lines covering the
        # given character range (indices are clamped to the text bounds).
        if from_char_index < 0:
            from_char_index = 0
        elif from_char_index >= len(self._text):
            from_char_index = len(self._text) - 1
        if to_char_index < 0:
            to_char_index = 0
        elif to_char_index >= len(self._text):
            to_char_index = len(self._text) - 1
        start_line = self.getLineIndex(from_char_index)
        to_line = self.getLineIndex(to_char_index)
        for l in range(start_line, to_line + 1):
            self._children[l]._gl_display_list[0] = 0
    def getLineInfoByIndex(self, i):
        c = self._children[i]
        return c, c._length, c._gl_display_list, c._ords
    def getParsedLine(self, i):
        if i < len(self._children):
            return self._children[i]
        return None
    def getParsedLines(self):
        return self._children
    def getParsedLineCount(self):
        return self.getChildCount()
    def getTextGridCellForCharIndex(self, char_index):
        # Map a document character index to a (column, row) grid cell,
        # or None when the index is not covered by any parsed line.
        for line in self._children:
            rsi, rei = line._index_range
            if rsi <= char_index < rei:
                r = line._line_index
                c = char_index - rsi
                return c, r
        return None
    def getLineIndex(self, char_index):
        for line in self._children:
            rsi, rei = line._index_range
            if rsi <= char_index < rei:
                return line.getIndex()
        return None
    def getLineFromCharIndex(self, char_index):
        if char_index < 0:
            return None
        for line in self._children:
            rsi, rei = line._index_range
            if rsi <= char_index < rei:
                return line
        return None
    def _free(self):
        # Break references so GL resources / children can be collected.
        self._text = None
        self._text_wrapper = None
        del self._text_wrapper
        for c in self._children:
            c._free()
        del self._children[:]
    def __del__(self):
        # _text is None once _free() has already run; avoid double free.
        if self._text is not None:
            self._free()
import numpy
class ParsedTextLine(object):
    """One wrapped visual line of a ParsedTextDocument.

    Holds the line's text, its [start, end) index range in the document,
    the glyph charcodes to render, and a cached GL display list slot.
    """
    # Class-level caches shared by all lines (filled lazily on first use).
    charcodes_with_glyphs = None
    replacement_charcode = None
    def __init__(self, parent, source_text, index_range):
        if parent:
            # weakref proxy: the document owns the line, avoid a cycle.
            self._parent = proxy(parent)
            self._parent.addChild(self)
        else:
            self._parent = None
        self._text = source_text
        self._index_range = index_range
        self._line_index = parent.getChildCount() - 1
        self._trans_left = 0
        self._trans_top = 0
        self.updateOrds(self._text)
        # self.text_region_flags=numpy.ones((2,parent._num_columns),
        #    numpy.uint32)#*parent._text_grid.default_region_type_key
        self._gl_display_list = numpy.zeros(parent._num_columns, numpy.uint)
    def updateOrds(self, text):
        """Recompute the per-character glyph charcodes for this line."""
        if ParsedTextLine.charcodes_with_glyphs is None:
            active_text_style = self._parent._text_grid._text_box._current_glfont
            if active_text_style:
                ParsedTextLine.charcodes_with_glyphs = list(active_text_style.charcode2unichr.keys())
        ok_charcodes = ParsedTextLine.charcodes_with_glyphs
        if ParsedTextLine.replacement_charcode is None:
            # NOTE(review): ok_charcodes holds int charcodes but cc is a
            # 1-char string, so `cc in ok_charcodes` looks like it can never
            # match (should probably be `ord(cc) in ok_charcodes`) — confirm.
            replacement_charcodes = [ord(cc)
                                     for cc in [u'?', u' ', u'_', u'-', u'0', u'=']
                                     if cc in ok_charcodes]
            if not replacement_charcodes:
                ParsedTextLine.replacement_charcode = ok_charcodes[0]
            else:
                ParsedTextLine.replacement_charcode = replacement_charcodes[0]
        self._ords = []
        # Newlines/tabs render as plain spaces on the grid.
        text = text.replace(u'\n', ' ').replace(u'\t', ' ')
        for c in text:
            ccode = ord(c)
            if ccode in ok_charcodes:
                self._ords.append(ccode)
            else:
                # Character has no glyph in the active font: substitute.
                self._ords.append(self.replacement_charcode)
        self._length = len(self._ords)
    def getIndex(self):
        return self._line_index
    def getParent(self):
        return self._parent
    def getIndexRange(self):
        return self._index_range
    def getText(self):
        return self._text
    def getOrds(self):
        return self._ords
    def getLength(self):
        return self._length
    def getDisplayList(self):
        return self._gl_display_list
    def _free(self):
        self._text = None
        del self._index_range
        del self._ords
        del self._gl_display_list
    def __del__(self):
        # _text is None once _free() has already run; avoid double free.
        if self._text is not None:
            self._free()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Optional response hook: receives (raw pipeline response, deserialized model,
# response headers) and may transform the returned value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineRunCommandsOperations:
    """VirtualMachineRunCommandsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2018_10_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.

    .. note:: AutoRest-generated code — manual edits will be lost when the
       client is regenerated.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.RunCommandListResult"]:
        """Lists all available run commands for a subscription in a location.

        :param location: The location upon which run commands is queried.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RunCommandListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2018_10_01.models.RunCommandListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RunCommandListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # First page: build the templated URL; later pages: use the
            # service-provided next_link verbatim (it already embeds params).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation token, items).
            deserialized = self._deserialize('RunCommandListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands'}  # type: ignore

    async def get(
        self,
        location: str,
        command_id: str,
        **kwargs: Any
    ) -> "_models.RunCommandDocument":
        """Gets specific run command for a subscription in a location.

        :param location: The location upon which run commands is queried.
        :type location: str
        :param command_id: The command id.
        :type command_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RunCommandDocument, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2018_10_01.models.RunCommandDocument
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RunCommandDocument"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
            'commandId': self._serialize.url("command_id", command_id, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RunCommandDocument', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands/{commandId}'}  # type: ignore
//// [tests/cases/conformance/es6/spread/arrayLiteralSpreadES5iterable.ts] ////
//// [arrayLiteralSpreadES5iterable.ts]
function f0() {
    // Exercise array-literal spread in every position: alone, leading,
    // trailing, and interleaved with scalar elements.
    var a = [1, 2, 3];
    var a1 = [...a];
    var a2 = [1, ...a];
    var a3 = [1, 2, ...a];
    var a4 = [...a, 1];
    var a5 = [...a, 1, 2];
    var a6 = [1, 2, ...a, 1, 2];
    var a7 = [1, ...a, 2, ...a];
    var a8 = [...a, ...a, ...a];
}
function f1() {
    // Spreading number[] between a string and a boolean widens the
    // inferred element type to (string | number | boolean)[].
    var a = [1, 2, 3];
    var b = ["hello", ...a, true];
    var b: (string | number | boolean)[];
}
function f2() {
    // Deeply nested spreads of empty and singleton array literals.
    var a = [...[...[...[...[...[]]]]]];
    var b = [...[...[...[...[...[5]]]]]];
}
//// [arrayLiteralSpreadES5iterable.js]
"use strict";
function f0() {
    // Emitted JS: the ES2015 target keeps native spread syntax unchanged.
    var a = [1, 2, 3];
    var a1 = [...a];
    var a2 = [1, ...a];
    var a3 = [1, 2, ...a];
    var a4 = [...a, 1];
    var a5 = [...a, 1, 2];
    var a6 = [1, 2, ...a, 1, 2];
    var a7 = [1, ...a, 2, ...a];
    var a8 = [...a, ...a, ...a];
}
function f1() {
    // Emitted JS: the type annotation from the source is erased.
    var a = [1, 2, 3];
    var b = ["hello", ...a, true];
    var b;
}
function f2() {
    // Emitted JS: nested spreads pass through unchanged for ES2015.
    var a = [...[...[...[...[...[]]]]]];
    var b = [...[...[...[...[...[5]]]]]];
}
import unittest
from testlib import testutil, PygrTestProgram
import ConfigParser
import os
import string
import sys

from pygr.mapping import Collection
import pygr.Data

# hashlib appeared in Python 2.5; fall back to the old md5 module on 2.4.
try:
    import hashlib
except ImportError:
    import md5 as hashlib
# Read megatest settings from the user's pygr config files; the dict gives
# defaults for keys that may be absent.
config = ConfigParser.ConfigParser({'testOutputBaseDir': '.',
                                    'smallSampleKey': ''})
config.read([os.path.join(os.path.expanduser('~'), '.pygrrc'),
             os.path.join(os.path.expanduser('~'), 'pygr.cfg'),
             '.pygrrc', 'pygr.cfg'])
msaDir = config.get('megatests_hg18', 'msaDir')
seqDir = config.get('megatests_hg18', 'seqDir')
smallSampleKey = config.get('megatests_hg18', 'smallSampleKey')
testInputDB = config.get('megatests', 'testInputDB')
testInputDir = config.get('megatests', 'testInputDir')
testOutputBaseDir = config.get('megatests', 'testOutputBaseDir')

# Optional suffix selecting the reduced-size sample input files.
if smallSampleKey:
    smallSamplePostfix = '_' + smallSampleKey
else:
    smallSamplePostfix = ''

## msaDir CONTAINS PRE-BUILT NLMSA
## seqDir CONTAINS GENOME ASSEMBLIES AND THEIR SEQDB FILES
## TEST INPUT/OUTPUT FOR COMPARISON, THESE FILES SHOULD BE IN THIS DIRECTORY
## exonAnnotFileName = 'Annotation_ConservedElement_Exons_hg18.txt'
## intronAnnotFileName = 'Annotation_ConservedElement_Introns_hg18.txt'
## stopAnnotFileName = 'Annotation_ConservedElement_Stop_hg18.txt'
## testDir = os.path.join(testOutputBaseDir, 'TEST_' + ''.join(tmpList)) SHOULD
## BE DELETED IF YOU WANT TO RUN IN '.'

# DICTIONARY MAPPING ASSEMBLY NAME -> DOC STRING FOR ITS SEQDB RESOURCE
docStringDict = {
    'anoCar1': 'Lizard Genome (January 2007)',
    'bosTau3': 'Cow Genome (August 2006)',
    'canFam2': 'Dog Genome (May 2005)',
    'cavPor2': 'Guinea Pig (October 2005)',
    'danRer4': 'Zebrafish Genome (March 2006)',
    'dasNov1': 'Armadillo Genome (May 2005)',
    'echTel1': 'Tenrec Genome (July 2005)',
    'eriEur1': 'European Hedgehog (Junuary 2006)',
    'equCab1': 'Horse Genome (January 2007)',
    'felCat3': 'Cat Genome (March 2006)',
    'fr2': 'Fugu Genome (October 2004)',
    'galGal3': 'Chicken Genome (May 2006)',
    'gasAcu1': 'Stickleback Genome (February 2006)',
    'hg18': 'Human Genome (May 2006)',
    'loxAfr1': 'Elephant Genome (May 2005)',
    'mm8': 'Mouse Genome (March 2006)',
    'monDom4': 'Opossum Genome (January 2006)',
    'ornAna1': 'Platypus Genome (March 2007)',
    'oryCun1': 'Rabbit Genome (May 2005)',
    'oryLat1': 'Medaka Genome (April 2006)',
    'otoGar1': 'Bushbaby Genome (December 2006)',
    'panTro2': 'Chimpanzee Genome (March 2006)',
    'rheMac2': 'Rhesus Genome (January 2006)',
    'rn4': 'Rat Genome (November 2004)',
    'sorAra1': 'Shrew (Junuary 2006)',
    'tetNig1': 'Tetraodon Genome (February 2004)',
    'tupBel1': 'Tree Shrew (December 2006)',
    'xenTro2': 'X. tropicalis Genome (August 2005)',
}

# GENOME ASSEMBLY LIST FOR THE HG18 MULTIZ ALIGNMENT (28 species; the original
# 'DM2 MULTIZ15WAY' label looked copy-pasted from another megatest).
msaSpeciesList = ['anoCar1', 'bosTau3', 'canFam2', 'cavPor2', 'danRer4',
                  'dasNov1', 'echTel1', 'equCab1', 'eriEur1', 'felCat3', 'fr2',
                  'galGal3', 'gasAcu1', 'hg18', 'loxAfr1', 'mm8', 'monDom4',
                  'ornAna1', 'oryCun1', 'oryLat1', 'otoGar1', 'panTro2',
                  'rheMac2', 'rn4', 'sorAra1', 'tetNig1', 'tupBel1', 'xenTro2']
class PygrBuildNLMSAMegabase(unittest.TestCase):
    """Fixture that registers genome seqdb resources in a fresh scratch dir."""

    def setUp(self, testDir=None):
        '''restrict megatest to an initially empty directory, need
        large space to perform'''
        import random
        if testDir is None:
            # Randomized directory name so concurrent runs cannot collide.
            # (The original unconditionally clobbered the testDir argument,
            # making the parameter dead; honoring it is backward-compatible.)
            tmpList = [c for c in 'PygrBuildNLMSAMegabase']
            random.shuffle(tmpList)
            # Set testOutputBaseDir to '.' in the config to run in the
            # current directory.
            testDir = os.path.join(testOutputBaseDir,
                                   'TEST_' + ''.join(tmpList))
        try:
            os.mkdir(testDir)
            testDir = os.path.realpath(testDir)
        except OSError:
            # Narrowed from a bare except; include the path in the message.
            raise IOError('cannot create test directory: %s' % testDir)
        self.path = testDir
        # Write a 1 MB scratch file to verify the directory is writable and
        # has some free space; close the handle instead of leaking it.
        tmpFileName = os.path.join(testDir, 'DELETE_THIS_TEMP_FILE')
        try:
            tmpFile = open(tmpFileName, 'w')
            try:
                tmpFile.write('A' * 1024 * 1024)
            finally:
                tmpFile.close()
        except (IOError, OSError):
            raise IOError('cannot write scratch file in: %s' % testDir)
        pygr.Data.update(self.path)
        from pygr import seqdb
        # Register every genome assembly as a pygr.Data resource.
        for orgstr in msaSpeciesList:
            genome = seqdb.BlastDB(os.path.join(seqDir, orgstr))
            genome.__doc__ = docStringDict[orgstr]
            pygr.Data.addResource('TEST.Seq.Genome.' + orgstr, genome)
        pygr.Data.save()

    def copyFile(self, filename):  # COPY A FILE INTO TEST DIRECTORY
        """Copy filename into the scratch directory; return the new path."""
        newname = os.path.join(self.path, os.path.basename(filename))
        # Explicitly close both handles (the original leaked them).
        src = open(filename, 'r')
        try:
            dst = open(newname, 'w')
            try:
                dst.write(src.read())
            finally:
                dst.close()
        finally:
            src.close()
        return newname

    def tearDown(self):
        'delete the temporary directory and files, restore pygr.Data path'
        # Delete them bottom-up for obvious reasons.
        for dirpath, subdirs, files in os.walk(self.path, topdown=False):
            # Note: this part may not work in directories on NFS due to
            # creation of lock files (.nfsXXXXXXXXX), which will only allow
            # deletion after pygr.Data has been closed.
            for filename in files:
                os.remove(os.path.join(dirpath, filename))
            os.rmdir(dirpath)
        # Restore original pygr.Data path to remedy lack of isolation
        # between tests from the same run
        pygr.Data.update(None)
class Build_Test(PygrBuildNLMSAMegabase):
def test_seqdb(self):
'Check pygr.Data contents'
l = pygr.Data.dir('TEST')
preList = ['TEST.Seq.Genome.' + orgstr for orgstr in msaSpeciesList]
assert l == preList
def test_collectionannot(self):
'Test building an AnnotationDB from file'
from pygr import seqdb, cnestedlist, sqlgraph
hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18')
# BUILD ANNOTATION DATABASE FOR REFSEQ EXONS
exon_slices = Collection(
filename=os.path.join(self.path, 'refGene_exonAnnot_hg18.cdb'),
intKeys=True, mode='cr', writeback=False)
exon_db = seqdb.AnnotationDB(exon_slices, hg18,
sliceAttrDict=dict(id=0, exon_id=1,
orientation=2,
gene_id=3, start=4,
stop=5))
msa = cnestedlist.NLMSA(os.path.join(self.path,
'refGene_exonAnnot_hg18'), 'w',
pairwiseMode=True, bidirectional=False)
for lines in open(os.path.join(testInputDir,
'refGene_exonAnnot%s_hg18.txt'
% smallSamplePostfix),
'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
exon_slices[row[1]] = row
exon = exon_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON
msa.addAnnotation(exon) # SAVE IT TO GENOME MAPPING
exon_db.clear_cache() # not really necessary; cache should autoGC
# SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
exon_slices.close()
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
exon_db.__doc__ = 'Exon Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.hg18.exons', exon_db)
msa.__doc__ = 'NLMSA Exon for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.hg18.exons', msa)
exon_schema = pygr.Data.ManyToManyRelation(hg18, exon_db,
bindAttrs=('exon1', ))
exon_schema.__doc__ = 'Exon Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.hg18.exons', exon_schema)
# BUILD ANNOTATION DATABASE FOR REFSEQ SPLICES
splice_slices = Collection(
filename=os.path.join(self.path, 'refGene_spliceAnnot_hg18.cdb'),
intKeys=True, mode='cr', writeback=False)
splice_db = seqdb.AnnotationDB(splice_slices, hg18,
sliceAttrDict=dict(id=0, splice_id=1,
orientation=2,
gene_id=3, start=4,
stop=5))
msa = cnestedlist.NLMSA(os.path.join(self.path,
'refGene_spliceAnnot_hg18'),
'w', pairwiseMode=True, bidirectional=False)
for lines in open(os.path.join(testInputDir,
'refGene_spliceAnnot%s_hg18.txt'
% smallSamplePostfix),
'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
splice_slices[row[1]] = row
# GET THE ANNOTATION OBJECT FOR THIS EXON
splice = splice_db[row[1]]
msa.addAnnotation(splice) # SAVE IT TO GENOME MAPPING
splice_db.clear_cache() # not really necessary; cache should autoGC
# SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
splice_slices.close()
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
splice_db.__doc__ = 'Splice Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.hg18.splices', splice_db)
msa.__doc__ = 'NLMSA Splice for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.hg18.splices', msa)
splice_schema = pygr.Data.ManyToManyRelation(hg18, splice_db,
bindAttrs=('splice1', ))
splice_schema.__doc__ = 'Splice Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.hg18.splices',
splice_schema)
# BUILD ANNOTATION DATABASE FOR REFSEQ EXONS
cds_slices = Collection(
filename=os.path.join(self.path, 'refGene_cdsAnnot_hg18.cdb'),
intKeys=True, mode='cr', writeback=False)
cds_db = seqdb.AnnotationDB(cds_slices, hg18,
sliceAttrDict=dict(id=0, cds_id=1,
orientation=2,
gene_id=3, start=4,
stop=5))
msa = cnestedlist.NLMSA(os.path.join(self.path,
'refGene_cdsAnnot_hg18'), 'w',
pairwiseMode=True, bidirectional=False)
for lines in open(os.path.join(testInputDir,
'refGene_cdsAnnot%s_hg18.txt'
% smallSamplePostfix),
'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
cds_slices[row[1]] = row
cds = cds_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON
msa.addAnnotation(cds) # SAVE IT TO GENOME MAPPING
cds_db.clear_cache() # not really necessary; cache should autoGC
# SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
cds_slices.close()
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
cds_db.__doc__ = 'CDS Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.hg18.cdss', cds_db)
msa.__doc__ = 'NLMSA CDS for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.hg18.cdss', msa)
cds_schema = pygr.Data.ManyToManyRelation(hg18, cds_db,
bindAttrs=('cds1', ))
cds_schema.__doc__ = 'CDS Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.hg18.cdss', cds_schema)
# BUILD ANNOTATION DATABASE FOR MOST CONSERVED ELEMENTS FROM UCSC
ucsc_slices = Collection(
filename=os.path.join(self.path, 'phastConsElements28way_hg18.cdb'),
intKeys=True, mode='cr', writeback=False)
ucsc_db = seqdb.AnnotationDB(ucsc_slices, hg18,
sliceAttrDict=dict(id=0, ucsc_id=1,
orientation=2,
gene_id=3, start=4,
stop=5))
msa = cnestedlist.NLMSA(os.path.join(self.path,
'phastConsElements28way_hg18'),
'w', pairwiseMode=True, bidirectional=False)
for lines in open(os.path.join(testInputDir,
'phastConsElements28way%s_hg18.txt'
% smallSamplePostfix),
'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
ucsc_slices[row[1]] = row
ucsc = ucsc_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON
msa.addAnnotation(ucsc) # SAVE IT TO GENOME MAPPING
ucsc_db.clear_cache() # not really necessary; cache should autoGC
# SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
ucsc_slices.close()
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
ucsc_db.__doc__ = 'Most Conserved Elements for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.hg18.mostconserved',
ucsc_db)
msa.__doc__ = 'NLMSA for Most Conserved Elements for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.hg18.mostconserved',
msa)
ucsc_schema = pygr.Data.ManyToManyRelation(hg18, ucsc_db,
bindAttrs=('element1', ))
ucsc_schema.__doc__ = \
'Schema for UCSC Most Conserved Elements for hg18'
pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.hg18.mostconserved',
ucsc_schema)
# BUILD ANNOTATION DATABASE FOR SNP126 FROM UCSC
snp_slices = Collection(filename=os.path.join(self.path,
'snp126_hg18.cdb'),
intKeys=True, protocol=2, mode='cr',
writeback=False)
snp_db = seqdb.AnnotationDB(snp_slices, hg18,
sliceAttrDict=dict(id=0, snp_id=1,
orientation=2,
gene_id=3, start=4,
stop=5, score=6,
ref_NCBI=7, ref_UCSC=8,
observed=9, molType=10,
myClass=11, myValid=12,
avHet=13, avHetSE=14,
myFunc=15, locType=16,
myWeight=17))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'snp126_hg18'), 'w',
pairwiseMode=True, bidirectional=False)
for lines in open(os.path.join(testInputDir, 'snp126%s_hg18.txt'
% smallSamplePostfix),
'r').xreadlines():
row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE
row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER
snp_slices[row[1]] = row
snp = snp_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON
msa.addAnnotation(snp) # SAVE IT TO GENOME MAPPING
snp_db.clear_cache() # not really necessary; cache should autoGC
# SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS
snp_slices.close()
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES
snp_db.__doc__ = 'SNP126 for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.hg18.snp126', snp_db)
msa.__doc__ = 'NLMSA for SNP126 for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.hg18.snp126', msa)
snp_schema = pygr.Data.ManyToManyRelation(hg18, snp_db,
bindAttrs=('snp1', ))
snp_schema.__doc__ = 'Schema for UCSC SNP126 for hg18'
pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.hg18.snp126',
snp_schema)
pygr.Data.save()
pygr.Data.clear_cache()
# QUERY TO EXON AND SPLICES ANNOTATION DATABASE
hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18')
exonmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.hg18.exons')
splicemsa = pygr.Data.getResource('TEST.Annotation.NLMSA.hg18.splices')
conservedmsa = \
pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.hg18.mostconserved')
snpmsa = \
pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.hg18.snp126')
cdsmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.hg18.cdss')
exons = pygr.Data.getResource('TEST.Annotation.hg18.exons')
splices = pygr.Data.getResource('TEST.Annotation.hg18.splices')
mostconserved = \
pygr.Data.getResource('TEST.Annotation.UCSC.hg18.mostconserved')
snp126 = pygr.Data.getResource('TEST.Annotation.UCSC.hg18.snp126')
cdss = pygr.Data.getResource('TEST.Annotation.hg18.cdss')
# OPEN hg18_MULTIZ28WAY NLMSA
msa = cnestedlist.NLMSA(os.path.join(msaDir, 'hg18_multiz28way'), 'r',
trypath=[seqDir])
exonAnnotFileName = os.path.join(testInputDir,
'Annotation_ConservedElement_Exons%s_hg18.txt'
% smallSamplePostfix)
intronAnnotFileName = os.path.join(testInputDir,
'Annotation_ConservedElement_Introns%s_hg18.txt'
% smallSamplePostfix)
stopAnnotFileName = os.path.join(testInputDir,
'Annotation_ConservedElement_Stop%s_hg18.txt'
% smallSamplePostfix)
newexonAnnotFileName = os.path.join(self.path, 'new_Exons_hg18.txt')
newintronAnnotFileName = os.path.join(self.path,
'new_Introns_hg18.txt')
newstopAnnotFileName = os.path.join(self.path, 'new_stop_hg18.txt')
tmpexonAnnotFileName = self.copyFile(exonAnnotFileName)
tmpintronAnnotFileName = self.copyFile(intronAnnotFileName)
tmpstopAnnotFileName = self.copyFile(stopAnnotFileName)
if smallSampleKey:
chrList = [smallSampleKey]
else:
chrList = hg18.seqLenDict.keys()
chrList.sort()
outfile = open(newexonAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# EXON ANNOTATION DATABASE
try:
ex1 = exonmsa[slice]
except:
continue
else:
exlist1 = [(ix.exon_id, ix) for ix in ex1.keys()]
exlist1.sort()
for ixx, exon in exlist1:
saveList = []
tmp = exon.sequence
tmpexon = exons[exon.exon_id]
tmpslice = tmpexon.sequence # FOR REAL EXON COORDINATE
wlist1 = 'EXON', chrid, tmpexon.exon_id, tmpexon.gene_id, \
tmpslice.start, tmpslice.stop
try:
out1 = conservedmsa[tmp]
except KeyError:
pass
else:
elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()]
elementlist.sort()
for iyy, element in elementlist:
if element.stop - element.start < 100:
continue
score = int(string.split(element.gene_id, '=')[1])
if score < 100:
continue
tmp2 = element.sequence
tmpelement = mostconserved[element.ucsc_id]
# FOR REAL ELEMENT COORDINATE
tmpslice2 = tmpelement.sequence
wlist2 = wlist1 + (tmpelement.ucsc_id,
tmpelement.gene_id,
tmpslice2.start,
tmpslice2.stop)
slicestart, sliceend = max(tmp.start, tmp2.start),\
min(tmp.stop, tmp2.stop)
if slicestart < 0 or sliceend < 0:
sys.exit('wrong query')
tmp1 = msa.seqDict['hg18.' + chrid][slicestart:
sliceend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start < 100:
continue
palign, pident = e.pAligned(), e.pIdentity()
if palign < 0.8 or pident < 0.8:
continue
palign, pident = '%.2f' % palign, \
'%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpexonAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newexonAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest()
outfile = open(newintronAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# SPLICE ANNOTATION DATABASE
try:
sp1 = splicemsa[slice]
except:
continue
else:
splist1 = [(ix.splice_id, ix) for ix in sp1.keys()]
splist1.sort()
for ixx, splice in splist1:
saveList = []
tmp = splice.sequence
tmpsplice = splices[splice.splice_id]
tmpslice = tmpsplice.sequence # FOR REAL EXON COORDINATE
wlist1 = 'INTRON', chrid, tmpsplice.splice_id, \
tmpsplice.gene_id, tmpslice.start, tmpslice.stop
try:
out1 = conservedmsa[tmp]
except KeyError:
pass
else:
elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()]
elementlist.sort()
for iyy, element in elementlist:
if element.stop - element.start < 100:
continue
score = int(string.split(element.gene_id, '=')[1])
if score < 100:
continue
tmp2 = element.sequence
tmpelement = mostconserved[element.ucsc_id]
# FOR REAL ELEMENT COORDINATE
tmpslice2 = tmpelement.sequence
wlist2 = wlist1 + (tmpelement.ucsc_id,
tmpelement.gene_id,
tmpslice2.start,
tmpslice2.stop)
slicestart, sliceend = max(tmp.start, tmp2.start),\
min(tmp.stop, tmp2.stop)
if slicestart < 0 or sliceend < 0:
sys.exit('wrong query')
tmp1 = msa.seqDict['hg18.' + chrid][slicestart:
sliceend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start < 100:
continue
palign, pident = e.pAligned(), e.pIdentity()
if palign < 0.8 or pident < 0.8:
continue
palign, pident = '%.2f' % palign, \
'%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
# SNP IN SPLICE SITES
saveList = []
gt = tmpslice[:2]
ag = tmpslice[-2:]
try:
gtout = snpmsa[gt]
agout = snpmsa[ag]
except KeyError:
pass
else:
gtlist = gtout.keys()
aglist = agout.keys()
for snp in gtlist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = ('SNP5', chrid, tmpsplice.gene_id,
gt.start, gt.stop, str(gt)) + \
(annsnp.snp_id, tmpsnp.start, tmpsnp.stop,
str(tmpsnp), annsnp.gene_id,
annsnp.ref_NCBI, annsnp.ref_UCSC,
annsnp.observed, annsnp.molType,
annsnp.myClass, annsnp.myValid)
tmp1 = msa.seqDict['hg18.' + chrid][abs(gt.start):\
abs(gt.stop)]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 2 or \
dest.stop - dest.start != 2:
continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, \
'%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
for snp in aglist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = ('SNP3', chrid, tmpsplice.gene_id,
ag.start, ag.stop, str(ag)) + \
(annsnp.snp_id, tmpsnp.start, tmpsnp.stop,
str(tmpsnp), annsnp.gene_id,
annsnp.ref_NCBI, annsnp.ref_UCSC,
annsnp.observed, annsnp.molType,
annsnp.myClass, annsnp.myValid)
tmp1 = msa.seqDict['hg18.' + chrid][abs(ag.start):\
abs(ag.stop)]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 2 or \
dest.stop - dest.start != 2:
continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, \
'%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpintronAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newintronAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest()
outfile = open(newstopAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# STOP ANNOTATION DATABASE
try:
cds1 = cdsmsa[slice]
except:
continue
else:
cdslist1 = [(ix.cds_id, ix) for ix in cds1.keys()]
cdslist1.sort()
for ixx, cds in cdslist1:
saveList = []
tmp = cds.sequence
tmpcds = cdss[cds.cds_id]
tmpslice = tmpcds.sequence # FOR REAL EXON COORDINATE
wlist1 = 'STOP', chrid, tmpcds.cds_id, tmpcds.gene_id, \
tmpslice.start, tmpslice.stop
if tmpslice.start < 0:
stopstart, stopend = -tmpslice.stop, -tmpslice.start
stop = -hg18[chrid][stopstart:stopstart+3]
else:
stopstart, stopend = tmpslice.start, tmpslice.stop
stop = hg18[chrid][stopend-3:stopend]
if str(stop).upper() not in ('TAA', 'TAG', 'TGA'):
continue
try:
snp1 = snpmsa[stop]
except KeyError:
pass
else:
snplist = [(ix.snp_id, ix) for ix in snp1.keys()]
snplist.sort()
for iyy, snp in snplist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = wlist1 + (str(stop), stop.start,
stop.stop) + \
(annsnp.snp_id, tmpsnp.start, tmpsnp.stop,
str(tmpsnp), annsnp.gene_id,
annsnp.ref_NCBI, annsnp.ref_UCSC,
annsnp.observed, annsnp.molType,
annsnp.myClass, annsnp.myValid)
if tmpslice.start < 0:
tmp1 = -msa.seqDict['hg18.' + chrid]\
[stopstart:stopstart + 3]
else:
tmp1 = msa.seqDict['hg18.' + chrid]\
[stopend - 3:stopend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 3 or \
dest.stop - dest.start != 3:
continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, \
'%.2f' % pident
if str(dest).upper() not in ('TAA', 'TAG',
'TGA'):
nonstr = 'NONSENSE'
else:
nonstr = 'STOP'
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident,
nonstr)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpstopAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newstopAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest()
def test_mysqlannot(self):
'Test building an AnnotationDB from MySQL'
from pygr import seqdb, cnestedlist, sqlgraph
hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18')
# BUILD ANNOTATION DATABASE FOR REFSEQ EXONS: MYSQL VERSION
exon_slices = sqlgraph.SQLTableClustered(
'%s.pygr_refGene_exonAnnot%s_hg18' % (testInputDB,
smallSamplePostfix),
clusterKey='chromosome', maxCache=0)
exon_db = seqdb.AnnotationDB(exon_slices, hg18,
sliceAttrDict=dict(id='chromosome',
gene_id='name',
exon_id='exon_id'))
msa = cnestedlist.NLMSA(os.path.join(self.path,
'refGene_exonAnnot_SQL_hg18'),
'w', pairwiseMode=True, bidirectional=False)
for id in exon_db:
msa.addAnnotation(exon_db[id])
exon_db.clear_cache() # not really necessary; cache should autoGC
exon_slices.clear_cache()
msa.build()
exon_db.__doc__ = 'SQL Exon Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.SQL.hg18.exons', exon_db)
msa.__doc__ = 'SQL NLMSA Exon for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.SQL.hg18.exons', msa)
exon_schema = pygr.Data.ManyToManyRelation(hg18, exon_db,
bindAttrs=('exon2', ))
exon_schema.__doc__ = 'SQL Exon Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.SQL.hg18.exons',
exon_schema)
# BUILD ANNOTATION DATABASE FOR REFSEQ SPLICES: MYSQL VERSION
splice_slices = sqlgraph.SQLTableClustered(
'%s.pygr_refGene_spliceAnnot%s_hg18' % (testInputDB,
smallSamplePostfix),
clusterKey='chromosome', maxCache=0)
splice_db = seqdb.AnnotationDB(splice_slices, hg18,
sliceAttrDict=dict(id='chromosome',
gene_id='name',
splice_id='splice_id'))
msa = cnestedlist.NLMSA(os.path.join(self.path,
'refGene_spliceAnnot_SQL_hg18'),
'w', pairwiseMode=True, bidirectional=False)
for id in splice_db:
msa.addAnnotation(splice_db[id])
splice_db.clear_cache() # not really necessary; cache should autoGC
splice_slices.clear_cache()
msa.build()
splice_db.__doc__ = 'SQL Splice Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.SQL.hg18.splices', splice_db)
msa.__doc__ = 'SQL NLMSA Splice for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.SQL.hg18.splices', msa)
splice_schema = pygr.Data.ManyToManyRelation(hg18, splice_db,
bindAttrs=('splice2', ))
splice_schema.__doc__ = 'SQL Splice Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.SQL.hg18.splices',
splice_schema)
# BUILD ANNOTATION DATABASE FOR REFSEQ EXONS: MYSQL VERSION
cds_slices = sqlgraph.SQLTableClustered(
'%s.pygr_refGene_cdsAnnot%s_hg18' % (testInputDB,
smallSamplePostfix),
clusterKey='chromosome', maxCache=0)
cds_db = seqdb.AnnotationDB(cds_slices, hg18,
sliceAttrDict=dict(id='chromosome',
gene_id='name',
cds_id='cds_id'))
msa = cnestedlist.NLMSA(os.path.join(self.path,
'refGene_cdsAnnot_SQL_hg18'), 'w',
pairwiseMode=True, bidirectional=False)
for id in cds_db:
msa.addAnnotation(cds_db[id])
cds_db.clear_cache() # not really necessary; cache should autoGC
cds_slices.clear_cache()
msa.build()
cds_db.__doc__ = 'SQL CDS Annotation Database for hg18'
pygr.Data.addResource('TEST.Annotation.SQL.hg18.cdss', cds_db)
msa.__doc__ = 'SQL NLMSA CDS for hg18'
pygr.Data.addResource('TEST.Annotation.NLMSA.SQL.hg18.cdss', msa)
cds_schema = pygr.Data.ManyToManyRelation(hg18, cds_db,
bindAttrs=('cds2', ))
cds_schema.__doc__ = 'SQL CDS Schema for hg18'
pygr.Data.addSchema('TEST.Annotation.NLMSA.SQL.hg18.cdss', cds_schema)
# BUILD ANNOTATION DATABASE FOR MOST CONSERVED ELEMENTS FROM UCSC:
# MYSQL VERSION
ucsc_slices = \
sqlgraph.SQLTableClustered('%s.pygr_phastConsElements28way%s_hg18'
% (testInputDB, smallSamplePostfix),
clusterKey='chromosome', maxCache=0)
ucsc_db = seqdb.AnnotationDB(ucsc_slices, hg18,
sliceAttrDict=dict(id='chromosome',
gene_id='name',
ucsc_id='ucsc_id'))
msa = cnestedlist.NLMSA(os.path.join(self.path,
'phastConsElements28way_SQL_hg18'),
'w', pairwiseMode=True, bidirectional=False)
for id in ucsc_db:
msa.addAnnotation(ucsc_db[id])
ucsc_db.clear_cache() # not really necessary; cache should autoGC
ucsc_slices.clear_cache()
msa.build()
ucsc_db.__doc__ = 'SQL Most Conserved Elements for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.SQL.hg18.mostconserved',
ucsc_db)
msa.__doc__ = 'SQL NLMSA for Most Conserved Elements for hg18'
pygr.Data.addResource(
'TEST.Annotation.UCSC.NLMSA.SQL.hg18.mostconserved', msa)
ucsc_schema = pygr.Data.ManyToManyRelation(hg18, ucsc_db,
bindAttrs=('element2', ))
ucsc_schema.__doc__ = \
'SQL Schema for UCSC Most Conserved Elements for hg18'
pygr.Data.addSchema(
'TEST.Annotation.UCSC.NLMSA.SQL.hg18.mostconserved', ucsc_schema)
# BUILD ANNOTATION DATABASE FOR SNP126 FROM UCSC: MYSQL VERSION
snp_slices = sqlgraph.SQLTableClustered('%s.pygr_snp126%s_hg18'
% (testInputDB,
smallSamplePostfix),
clusterKey='clusterKey',
maxCache=0)
snp_db = seqdb.AnnotationDB(snp_slices, hg18,
sliceAttrDict=dict(id='chromosome',
gene_id='name',
snp_id='snp_id',
score='score',
ref_NCBI='ref_NCBI',
ref_UCSC='ref_UCSC',
observed='observed',
molType='molType',
myClass='myClass',
myValid='myValid',
avHet='avHet',
avHetSE='avHetSE',
myFunc='myFunc',
locType='locType',
myWeight='myWeight'))
msa = cnestedlist.NLMSA(os.path.join(self.path, 'snp126_SQL_hg18'),
'w', pairwiseMode=True, bidirectional=False)
for id in snp_db:
msa.addAnnotation(snp_db[id])
snp_db.clear_cache() # not really necessary; cache should autoGC
snp_slices.clear_cache()
msa.build()
snp_db.__doc__ = 'SQL SNP126 for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.SQL.hg18.snp126', snp_db)
msa.__doc__ = 'SQL NLMSA for SNP126 for hg18'
pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.snp126',
msa)
snp_schema = pygr.Data.ManyToManyRelation(hg18, snp_db,
bindAttrs=('snp2', ))
snp_schema.__doc__ = 'SQL Schema for UCSC SNP126 for hg18'
pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.SQL.hg18.snp126',
snp_schema)
pygr.Data.save()
pygr.Data.clear_cache()
# QUERY TO EXON AND SPLICES ANNOTATION DATABASE
hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18')
exonmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.SQL.hg18.exons')
splicemsa = \
pygr.Data.getResource('TEST.Annotation.NLMSA.SQL.hg18.splices')
conservedmsa = \
pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.mostconserved')
snpmsa = \
pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.snp126')
cdsmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.SQL.hg18.cdss')
exons = pygr.Data.getResource('TEST.Annotation.SQL.hg18.exons')
splices = pygr.Data.getResource('TEST.Annotation.SQL.hg18.splices')
mostconserved = \
pygr.Data.getResource('TEST.Annotation.UCSC.SQL.hg18.mostconserved')
snp126 = pygr.Data.getResource('TEST.Annotation.UCSC.SQL.hg18.snp126')
cdss = pygr.Data.getResource('TEST.Annotation.SQL.hg18.cdss')
# OPEN hg18_MULTIZ28WAY NLMSA
msa = cnestedlist.NLMSA(os.path.join(msaDir, 'hg18_multiz28way'), 'r',
trypath=[seqDir])
exonAnnotFileName = os.path.join(testInputDir,
'Annotation_ConservedElement_Exons%s_hg18.txt'
% smallSamplePostfix)
intronAnnotFileName = os.path.join(testInputDir,
'Annotation_ConservedElement_Introns%s_hg18.txt'
% smallSamplePostfix)
stopAnnotFileName = os.path.join(testInputDir,
'Annotation_ConservedElement_Stop%s_hg18.txt'
% smallSamplePostfix)
newexonAnnotFileName = os.path.join(self.path, 'new_Exons_hg18.txt')
newintronAnnotFileName = os.path.join(self.path,
'new_Introns_hg18.txt')
newstopAnnotFileName = os.path.join(self.path, 'new_stop_hg18.txt')
tmpexonAnnotFileName = self.copyFile(exonAnnotFileName)
tmpintronAnnotFileName = self.copyFile(intronAnnotFileName)
tmpstopAnnotFileName = self.copyFile(stopAnnotFileName)
if smallSampleKey:
chrList = [smallSampleKey]
else:
chrList = hg18.seqLenDict.keys()
chrList.sort()
outfile = open(newexonAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# EXON ANNOTATION DATABASE
try:
ex1 = exonmsa[slice]
except:
continue
else:
exlist1 = [(ix.exon_id, ix) for ix in ex1.keys()]
exlist1.sort()
for ixx, exon in exlist1:
saveList = []
tmp = exon.sequence
tmpexon = exons[exon.exon_id]
tmpslice = tmpexon.sequence # FOR REAL EXON COORDINATE
wlist1 = 'EXON', chrid, tmpexon.exon_id, tmpexon.gene_id, \
tmpslice.start, tmpslice.stop
try:
out1 = conservedmsa[tmp]
except KeyError:
pass
else:
elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()]
elementlist.sort()
for iyy, element in elementlist:
if element.stop - element.start < 100:
continue
score = int(string.split(element.gene_id, '=')[1])
if score < 100:
continue
tmp2 = element.sequence
tmpelement = mostconserved[element.ucsc_id]
# FOR REAL ELEMENT COORDINATE
tmpslice2 = tmpelement.sequence
wlist2 = wlist1 + (tmpelement.ucsc_id,
tmpelement.gene_id,
tmpslice2.start,
tmpslice2.stop)
slicestart, sliceend = max(tmp.start, tmp2.start),\
min(tmp.stop, tmp2.stop)
if slicestart < 0 or sliceend < 0:
sys.exit('wrong query')
tmp1 = msa.seqDict['hg18.' + chrid][slicestart:
sliceend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start < 100:
continue
palign, pident = e.pAligned(), e.pIdentity()
if palign < 0.8 or pident < 0.8:
continue
palign, pident = '%.2f' % palign, \
'%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpexonAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newexonAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest()
outfile = open(newintronAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# SPLICE ANNOTATION DATABASE
try:
sp1 = splicemsa[slice]
except:
continue
else:
splist1 = [(ix.splice_id, ix) for ix in sp1.keys()]
splist1.sort()
for ixx, splice in splist1:
saveList = []
tmp = splice.sequence
tmpsplice = splices[splice.splice_id]
tmpslice = tmpsplice.sequence # FOR REAL EXON COORDINATE
wlist1 = 'INTRON', chrid, tmpsplice.splice_id, \
tmpsplice.gene_id, tmpslice.start, tmpslice.stop
try:
out1 = conservedmsa[tmp]
except KeyError:
pass
else:
elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()]
elementlist.sort()
for iyy, element in elementlist:
if element.stop - element.start < 100:
continue
score = int(string.split(element.gene_id, '=')[1])
if score < 100:
continue
tmp2 = element.sequence
tmpelement = mostconserved[element.ucsc_id]
# FOR REAL ELEMENT COORDINATE
tmpslice2 = tmpelement.sequence
wlist2 = wlist1 + (tmpelement.ucsc_id,
tmpelement.gene_id,
tmpslice2.start,
tmpslice2.stop)
slicestart, sliceend = max(tmp.start, tmp2.start),\
min(tmp.stop, tmp2.stop)
if slicestart < 0 or sliceend < 0:
sys.exit('wrong query')
tmp1 = msa.seqDict['hg18.' + chrid][slicestart:
sliceend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start < 100:
continue
palign, pident = e.pAligned(), e.pIdentity()
if palign < 0.8 or pident < 0.8:
continue
palign, pident = '%.2f' % palign, \
'%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
# SNP IN SPLICE SITES
saveList = []
gt = tmpslice[:2]
ag = tmpslice[-2:]
try:
gtout = snpmsa[gt]
agout = snpmsa[ag]
except KeyError:
pass
else:
gtlist = gtout.keys()
aglist = agout.keys()
for snp in gtlist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = ('SNP5', chrid, tmpsplice.gene_id,
gt.start, gt.stop, str(gt)) + \
(annsnp.snp_id, tmpsnp.start, tmpsnp.stop,
str(tmpsnp), annsnp.gene_id,
annsnp.ref_NCBI, annsnp.ref_UCSC,
annsnp.observed, annsnp.molType,
annsnp.myClass, annsnp.myValid)
tmp1 = msa.seqDict['hg18.' + chrid][abs(gt.start):
abs(gt.stop)]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 2 or \
dest.stop - dest.start != 2:
continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, \
'%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
for snp in aglist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = ('SNP3', chrid, tmpsplice.gene_id,
ag.start, ag.stop, str(ag)) + \
(annsnp.snp_id, tmpsnp.start, tmpsnp.stop,
str(tmpsnp), annsnp.gene_id,
annsnp.ref_NCBI, annsnp.ref_UCSC,
annsnp.observed, annsnp.molType,
annsnp.myClass, annsnp.myValid)
tmp1 = msa.seqDict['hg18.' + chrid][abs(ag.start):
abs(ag.stop)]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 2 or \
dest.stop - dest.start != 2:
continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, \
'%.2f' % pident
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpintronAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newintronAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest()
outfile = open(newstopAnnotFileName, 'w')
for chrid in chrList:
slice = hg18[chrid]
# STOP ANNOTATION DATABASE
try:
cds1 = cdsmsa[slice]
except:
continue
else:
cdslist1 = [(ix.cds_id, ix) for ix in cds1.keys()]
cdslist1.sort()
for ixx, cds in cdslist1:
saveList = []
tmp = cds.sequence
tmpcds = cdss[cds.cds_id]
tmpslice = tmpcds.sequence # FOR REAL EXON COORDINATE
wlist1 = 'STOP', chrid, tmpcds.cds_id, tmpcds.gene_id, \
tmpslice.start, tmpslice.stop
if tmpslice.start < 0:
stopstart, stopend = -tmpslice.stop, -tmpslice.start
stop = -hg18[chrid][stopstart:stopstart+3]
else:
stopstart, stopend = tmpslice.start, tmpslice.stop
stop = hg18[chrid][stopend-3:stopend]
if str(stop).upper() not in ('TAA', 'TAG', 'TGA'):
continue
try:
snp1 = snpmsa[stop]
except KeyError:
pass
else:
snplist = [(ix.snp_id, ix) for ix in snp1.keys()]
snplist.sort()
for iyy, snp in snplist:
tmpsnp = snp.sequence
annsnp = snp126[snp.snp_id]
wlist2 = wlist1 + (str(stop), stop.start,
stop.stop) + (annsnp.snp_id,
tmpsnp.start,
tmpsnp.stop,
str(tmpsnp),
annsnp.gene_id,
annsnp.ref_NCBI,
annsnp.ref_UCSC,
annsnp.observed,
annsnp.molType,
annsnp.myClass,
annsnp.myValid)
if tmpslice.start < 0:
tmp1 = -msa.seqDict['hg18.' + chrid]\
[stopstart:stopstart + 3]
else:
tmp1 = msa.seqDict['hg18.' + chrid]\
[stopend - 3:stopend]
edges = msa[tmp1].edges()
for src, dest, e in edges:
if src.stop - src.start != 3 or \
dest.stop - dest.start != 3:
continue
palign, pident = e.pAligned(), e.pIdentity()
palign, pident = '%.2f' % palign, '%.2f' \
% pident
if str(dest).upper() not in ('TAA', 'TAG',
'TGA'):
nonstr = 'NONSENSE'
else:
nonstr = 'STOP'
wlist3 = wlist2 + ((~msa.seqDict)[src],
str(src), src.start,
src.stop,
(~msa.seqDict)[dest],
str(dest), dest.start,
dest.stop, palign, pident,
nonstr)
saveList.append('\t'.join(map(str, wlist3))
+ '\n')
saveList.sort()
for saveline in saveList:
outfile.write(saveline)
outfile.close()
md5old = hashlib.md5()
md5old.update(open(tmpstopAnnotFileName, 'r').read())
md5new = hashlib.md5()
md5new.update(open(newstopAnnotFileName, 'r').read())
assert md5old.digest() == md5new.digest()
if __name__ == '__main__':
PygrTestProgram(verbosity=2) | unknown | codeparrot/codeparrot-clean | ||
"""Glance unit tests."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.test import SimpleTestCase
class DataViewTests(SimpleTestCase):
    """Test the data view."""

    def _evaluate(self, response):
        """Check the response."""
        import json
        from django.http import HttpResponse

        # The view must hand back a well-formed HTTP response whose body
        # parses as a JSON list.
        self.assertIsInstance(response, HttpResponse)
        self.assertIsNotNone(response.content)
        parsed = json.loads(response.content)
        self.assertIsInstance(parsed, list)

    def test_get_images(self):
        """GET to /images."""
        from django.contrib.auth import get_user_model
        from goldstone.test_utils import create_and_login, \
            AUTHORIZATION_PAYLOAD

        # Start from a clean slate of users, then authenticate.
        get_user_model().objects.all().delete()
        token = create_and_login()

        auth_header = AUTHORIZATION_PAYLOAD % token
        response = self.client.get("/glance/images/",
                                   HTTP_AUTHORIZATION=auth_header)
        self._evaluate(response)
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zlib;
import java.io.IOException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import org.apache.hadoop.io.compress.Decompressor;
/**
* A wrapper around java.util.zip.Inflater to make it conform
* to org.apache.hadoop.io.compress.Decompressor interface.
*
*/
public class BuiltInZlibInflater extends Inflater implements Decompressor {
public BuiltInZlibInflater(boolean nowrap) {
super(nowrap);
}
public BuiltInZlibInflater() {
super();
}
@Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
try {
return super.inflate(b, off, len);
} catch (DataFormatException dfe) {
throw new IOException(dfe.getMessage());
}
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java |
//===--- IRGenDebugInfo.cpp - Debug Info Support --------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file implements IR debug info generation for Swift.
//
//===----------------------------------------------------------------------===//
#include "IRGenDebugInfo.h"
#include "GenEnum.h"
#include "GenOpaque.h"
#include "GenStruct.h"
#include "GenTuple.h"
#include "GenType.h"
#include "IRBuilder.h"
#include "swift/AST/ASTDemangler.h"
#include "swift/AST/ASTMangler.h"
#include "swift/AST/Attr.h"
#include "swift/AST/Decl.h"
#include "swift/AST/DeclContext.h"
#include "swift/AST/Expr.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/AST/IRGenOptions.h"
#include "swift/AST/Module.h"
#include "swift/AST/ModuleLoader.h"
#include "swift/AST/ParameterList.h"
#include "swift/AST/Pattern.h"
#include "swift/AST/TypeDifferenceVisitor.h"
#include "swift/AST/TypeWalker.h"
#include "swift/AST/Types.h"
#include "swift/Basic/Assertions.h"
#include "swift/Basic/Compiler.h"
#include "swift/Basic/SourceManager.h"
#include "swift/Basic/Version.h"
#include "swift/ClangImporter/ClangImporter.h"
#include "swift/ClangImporter/ClangModule.h"
#include "swift/Demangling/ManglingMacros.h"
#include "swift/SIL/SILArgument.h"
#include "swift/SIL/SILBasicBlock.h"
#include "swift/SIL/SILDebugScope.h"
#include "swift/SIL/SILModule.h"
#include "swift/Serialization/SerializedModuleLoader.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#define DEBUG_TYPE "debug-info"
using namespace swift;
using namespace irgen;
/// Command-line knob (-verify-linetable), off by default: enables an extra
/// consistency check on emitted debug locations (see lineEntryIsSane).
llvm::cl::opt<bool> VerifyLineTable(
    "verify-linetable", llvm::cl::init(false),
    llvm::cl::desc(
        "Verify that the debug locations within one scope are contiguous."));
namespace {
using TrackingDIRefMap =
llvm::DenseMap<const llvm::MDString *, llvm::TrackingMDNodeRef>;
/// Structural type comparison that treats two (SIL)function types as equal
/// when their ExtInfo differs only in the attached Clang type.
class EqualUpToClangTypes
    : public CanTypeDifferenceVisitor<EqualUpToClangTypes> {
public:
  bool visitDifferentTypeStructure(CanType t1, CanType t2) {
    // For function types, re-compare the ExtInfo with useClangTypes
    // disabled; returning false means "no significant difference".
#define COMPARE_UPTO_CLANG_TYPE(CLASS)                                         \
  if (auto f1 = dyn_cast<CLASS>(t1)) {                                         \
    auto f2 = cast<CLASS>(t2);                                                 \
    return !f1->getExtInfo().isEqualTo(f2->getExtInfo(),                       \
                                       /*useClangTypes*/ false);               \
  }
    COMPARE_UPTO_CLANG_TYPE(FunctionType);
    COMPARE_UPTO_CLANG_TYPE(SILFunctionType);
#undef COMPARE_UPTO_CLANG_TYPE
    // Any other structural difference is a real difference.
    return true;
  }
  /// True iff no significant difference was found between t1 and t2.
  bool check(Type t1, Type t2) {
    return !visit(t1->getCanonicalType(), t2->getCanonicalType());
  };
};
/// FIXME: This should be removed in favor of fixing ASTDemangler to wrap types in
/// ExistentialType where appropriate.
/// Compare two types for equality after recursively replacing every
/// ExistentialType with its constraint type.
static bool equalWithoutExistentialTypes(Type t1, Type t2) {
  // A static function pointer (rather than a plain lambda) so the lambda
  // body can recurse through its own name.
  static Type (*withoutExistentialTypes)(Type) = [](Type type) -> Type {
    return type.transformRec([](TypeBase *type) -> std::optional<Type> {
      if (auto existential = dyn_cast<ExistentialType>(type)) {
        return withoutExistentialTypes(existential->getConstraintType());
      }
      return std::nullopt;
    });
  };
  return withoutExistentialTypes(t1)
      ->isEqual(withoutExistentialTypes(t2));
}
class IRGenDebugInfoImpl : public IRGenDebugInfo {
const IRGenOptions &Opts;
ClangImporter &CI;
SourceManager &SM;
llvm::Module &M;
llvm::DIBuilder DBuilder;
IRGenModule &IGM;
const PathRemapper &DebugPrefixMap;
/// A line/column pair together with the llvm::DIFile it belongs to.
struct FileAndLocation {
  unsigned Line = 0;   // 0 when no location is known.
  uint16_t Column = 0; // 0 for artificial/suppressed columns (e.g. CodeView).
  llvm::DIFile *File = nullptr;
  /// Filename of the attached DIFile, or "" when no file is set.
  StringRef getFilename() const { return File ? File->getFilename() : ""; }
  bool operator==(const FileAndLocation &other) const {
    return Line == other.Line && Column == other.Column && File == other.File;
  }
};
/// Various caches.
/// \{
llvm::StringSet<> VarNames;
using VarID = std::tuple<llvm::MDNode *, llvm::StringRef, unsigned, uint16_t>;
llvm::DenseMap<VarID, llvm::TrackingMDNodeRef> LocalVarCache;
llvm::DenseMap<const SILDebugScope *, llvm::TrackingMDNodeRef> ScopeCache;
llvm::DenseMap<const SILDebugScope *, llvm::TrackingMDNodeRef> InlinedAtCache;
llvm::DenseMap<const void *, FileAndLocation> FileAndLocationCache;
llvm::DenseMap<TypeBase *, llvm::TrackingMDNodeRef> DITypeCache;
llvm::DenseMap<const void *, llvm::TrackingMDNodeRef> DIModuleCache;
llvm::StringMap<llvm::TrackingMDNodeRef> DIFileCache;
llvm::StringMap<llvm::TrackingMDNodeRef> RuntimeErrorFnCache;
llvm::StringSet<> OriginallyDefinedInTypes;
TrackingDIRefMap DIRefMap;
TrackingDIRefMap InnerTypeCache;
TrackingDIRefMap ExistentialTypeAliasMap;
/// \}
/// A list of replaceable fwddecls that need to be RAUWed at the end.
std::vector<std::pair<StringRef, llvm::TrackingMDRef>> FwdDeclTypes;
/// The set of imported modules.
llvm::DenseSet<ModuleDecl *> ImportedModules;
llvm::BumpPtrAllocator DebugInfoNames;
/// The current working directory.
StringRef CWDName;
/// User-provided -D macro definitions.
SmallString<0> ConfigMacros;
/// The current compilation unit.
llvm::DICompileUnit *TheCU = nullptr;
/// The main file.
llvm::DIFile *MainFile = nullptr;
/// The default file for compiler-generated code.
llvm::DIFile *CompilerGeneratedFile = nullptr;
/// The current module.
llvm::DIModule *MainModule = nullptr;
/// Scope of entry point function (main by default).
llvm::DIScope *EntryPointFn = nullptr;
/// The artificial type decls for named archetypes.
llvm::StringMap<TypeAliasDecl *> MetadataTypeDeclCache;
/// Catch-all type for opaque internal types.
llvm::DIType *InternalType = nullptr;
/// The last location that was emitted.
FileAndLocation LastFileAndLocation;
/// The scope of that last location.
const SILDebugScope *LastScope = nullptr;
/// Used by pushLoc.
SmallVector<std::pair<FileAndLocation, const SILDebugScope *>, 8>
LocationStack;
#ifndef NDEBUG
using UUFTuple = std::pair<std::pair<unsigned, unsigned>, llvm::DIFile *>;
struct FileAndLocationKey : public UUFTuple {
FileAndLocationKey(FileAndLocation DL)
: UUFTuple({{DL.Line, DL.Column}, DL.File}) {}
inline bool operator==(const FileAndLocation &DL) const {
return first.first == DL.Line && first.second == DL.Column &&
second == DL.File;
}
};
llvm::DenseSet<UUFTuple> PreviousLineEntries;
FileAndLocation PreviousFileAndLocation;
#endif
public:
IRGenDebugInfoImpl(const IRGenOptions &Opts, ClangImporter &CI,
IRGenModule &IGM, llvm::Module &M,
StringRef MainOutputFilenameForDebugInfo,
StringRef PrivateDiscriminator);
/// Resolve pending forward declarations even when the owner forgot to
/// call finalize() explicitly.
~IRGenDebugInfoImpl() {
  // FIXME: SILPassManager sometimes creates an IGM and doesn't finalize it.
  if (!FwdDeclTypes.empty())
    finalize();
  assert(FwdDeclTypes.empty() && "finalize() was not called");
}
void finalize();
void setCurrentLoc(IRBuilder &Builder, const SILDebugScope *DS,
SILLocation Loc);
void addFailureMessageToCurrentLoc(IRBuilder &Builder, StringRef failureMsg);
void clearLoc(IRBuilder &Builder);
void pushLoc();
void popLoc();
void setInlinedTrapLocation(IRBuilder &Builder, const SILDebugScope *Scope);
void setEntryPointLoc(IRBuilder &Builder);
llvm::DIScope *getEntryPointFn();
llvm::DIScope *getOrCreateScope(const SILDebugScope *DS);
void emitImport(ImportDecl *D);
llvm::DISubprogram *emitFunction(const SILDebugScope *DS, llvm::Function *Fn,
SILFunctionTypeRepresentation Rep,
SILType Ty, DeclContext *DeclCtx = nullptr,
StringRef outlinedFromName = StringRef());
llvm::DISubprogram *emitFunction(SILFunction &SILFn, llvm::Function *Fn);
void emitArtificialFunction(IRBuilder &Builder, llvm::Function *Fn,
SILType SILTy);
void emitOutlinedFunction(IRBuilder &Builder,
llvm::Function *Fn,
StringRef outlinedFromName);
/// Return false if we fail to create the right DW_OP_LLVM_fragment operand.
bool handleFragmentDIExpr(const SILDIExprOperand &CurDIExprOp,
llvm::DIExpression::FragmentInfo &Fragment);
/// Return false if we fail to create the right DW_OP_LLVM_fragment operand.
bool handleTupleFragmentDIExpr(const SILDIExprOperand &CurDIExprOp,
llvm::DIExpression::FragmentInfo &Fragment);
/// Return false if we fail to create the desired !DIExpression.
bool buildDebugInfoExpression(const SILDebugVariable &VarInfo,
SmallVectorImpl<uint64_t> &Operands,
llvm::DIExpression::FragmentInfo &Fragment);
/// Emit a dbg.declare at the current insertion point in Builder.
void emitVariableDeclaration(IRBuilder &Builder,
ArrayRef<llvm::Value *> Storage,
DebugTypeInfo Ty, const SILDebugScope *DS,
std::optional<SILLocation> VarLoc,
SILDebugVariable VarInfo,
IndirectionKind = DirectValue,
ArtificialKind = RealValue,
AddrDbgInstrKind = AddrDbgInstrKind::DbgDeclare);
void emitDbgIntrinsic(IRBuilder &Builder, llvm::Value *Storage,
llvm::DILocalVariable *Var, llvm::DIExpression *Expr,
unsigned Line, unsigned Col, llvm::DILocalScope *Scope,
const SILDebugScope *DS, bool InCoroContext,
AddrDbgInstrKind = AddrDbgInstrKind::DbgDeclare);
void emitGlobalVariableDeclaration(llvm::GlobalVariable *Storage,
StringRef Name, StringRef LinkageName,
DebugTypeInfo DebugType,
bool IsLocalToUnit,
std::optional<SILLocation> Loc);
void emitTypeMetadata(IRGenFunction &IGF, llvm::Value *Metadata,
unsigned Depth, unsigned Index, StringRef Name);
void emitPackCountParameter(IRGenFunction &IGF, llvm::Value *Metadata,
SILDebugVariable VarInfo);
/// Return flags which enable debug info emission for call sites, provided
/// that it is supported and enabled.
llvm::DINode::DIFlags getCallSiteRelatedAttrs() const;
/// Return the DIBuilder.
llvm::DIBuilder &getBuilder() { return DBuilder; }
/// Decode (and cache) a SourceLoc.
/// Resolves the file, presumed line, and presumed column of \p SL; for
/// macro-expansion buffers (DWARF >= 5 only) the generated source text is
/// captured so it can be embedded in the debug info.
FileAndLocation decodeSourceLoc(SourceLoc SL) {
  auto &Cached = FileAndLocationCache[SL.getOpaquePointerValue()];
  if (Cached.File)
    return Cached;
  // Invalid locations are attributed to the compiler-generated file.
  if (!SL.isValid()) {
    Cached.File = CompilerGeneratedFile;
    return Cached;
  }
  // If the source buffer is a macro, extract its full text.
  // DWARF versions before 5 cannot embed source text, so generated
  // sources must be written to disk instead.
  std::optional<StringRef> Source;
  bool ForceGeneratedSourceToDisk = Opts.DWARFVersion < 5;
  if (!ForceGeneratedSourceToDisk) {
    auto BufferID = SM.findBufferContainingLoc(SL);
    if (auto generatedInfo = SM.getGeneratedSourceInfo(BufferID)) {
      // We only care about macro expansion buffers,
      // so skip everything else.
      if (generatedInfo->kind != GeneratedSourceInfo::ReplacedFunctionBody &&
          generatedInfo->kind != GeneratedSourceInfo::PrettyPrinted &&
          generatedInfo->kind != GeneratedSourceInfo::DefaultArgument &&
          generatedInfo->kind != GeneratedSourceInfo::AttributeFromClang)
        if (auto *MemBuf = SM.getLLVMSourceMgr().getMemoryBuffer(BufferID)) {
          Source = MemBuf->getBuffer();
          // This is copying the buffer twice, but Xcode depends on this
          // comment in the file.
          auto origRange = generatedInfo->originalSourceRange;
          if (origRange.isValid()) {
            std::string s;
            {
              llvm::raw_string_ostream buffer(s);
              buffer << MemBuf->getBuffer() << "\n";
              auto originalFilename =
                  SM.getDisplayNameForLoc(origRange.getStart(), true);
              unsigned startLine, startColumn, endLine, endColumn;
              std::tie(startLine, startColumn) =
                  SM.getPresumedLineAndColumnForLoc(origRange.getStart());
              std::tie(endLine, endColumn) =
                  SM.getPresumedLineAndColumnForLoc(origRange.getEnd());
              buffer << "// original-source-range: "
                     << DebugPrefixMap.remapPath(originalFilename) << ":"
                     << startLine << ":" << startColumn << "-" << endLine
                     << ":" << endColumn << "\n";
            }
            Source = BumpAllocatedString(s);
          }
        }
    }
  }
  Cached.File = getOrCreateFile(
      SM.getDisplayNameForLoc(SL, ForceGeneratedSourceToDisk), Source);
  std::tie(Cached.Line, Cached.Column) =
      SM.getPresumedLineAndColumnForLoc(SL);
  // When WinDbg finds two locations with the same line but different
  // columns, the user must select an address when they break on that
  // line. Also, clang does not emit column locations in CodeView for C++.
  if (Opts.DebugInfoFormat == IRGenDebugInfoFormat::CodeView)
    Cached.Column = 0;
  return Cached;
}
IRGenDebugInfoFormat getDebugInfoFormat() { return Opts.DebugInfoFormat; }
private:
/// Convert a SILLocation into the corresponding LLVM Loc.
FileAndLocation computeLLVMLoc(const SILDebugScope *DS, SILLocation Loc);
/// Compute the LLVM DebugLoc when targeting CodeView. In CodeView, zero is
/// not an artificial line location; attempt to avoid those line locations near
/// user code to reduce the number of breaks in the linetables.
FileAndLocation computeLLVMLocCodeView(const SILDebugScope *DS,
SILLocation Loc);
/// Best-effort filename for a module-scope DeclContext (loaded file,
/// source file, or module); empty when the context is none of those.
static StringRef getFilenameFromDC(const DeclContext *DC) {
  if (auto *LF = dyn_cast<LoadedFile>(DC))
    return LF->getFilename();
  if (auto *SF = dyn_cast<SourceFile>(DC))
    return SF->getFilename();
  if (auto *M = dyn_cast<ModuleDecl>(DC))
    return M->getModuleFilename();
  return {};
}
// Deserialized patterns and expressions carry no usable location.
FileAndLocation getDeserializedLoc(Pattern *) { return {}; }
FileAndLocation getDeserializedLoc(Expr *) { return {}; }
/// For a deserialized decl, at least point at the file of its module-scope
/// context; no line/column information is available.
FileAndLocation getDeserializedLoc(Decl *D) {
  FileAndLocation L;
  const DeclContext *DC = D->getDeclContext()->getModuleScopeContext();
  StringRef Filename = getFilenameFromDC(DC);
  if (!Filename.empty())
    L.File = getOrCreateFile(Filename, {});
  return L;
}
/// Convert an explicit SILLocation filename/line/column into a
/// FileAndLocation, creating the DIFile on demand.
FileAndLocation
getFileAndLocation(const SILLocation::FilenameAndLocation &FL) {
  // When WinDbg finds two locations with the same line but different
  // columns, the user must select an address when they break on that
  // line. Also, clang does not emit column locations in CodeView for C++.
  bool CodeView = Opts.DebugInfoFormat == IRGenDebugInfoFormat::CodeView;
  return {FL.line, CodeView ? (uint16_t)0 : FL.column,
          getOrCreateFile(FL.filename, {})};
}
/// Use the Swift SM to figure out the actual line/column of a SourceLoc.
/// \param ASTNode any AST node with getStartLoc()/getEndLoc(); may be null.
/// \param End     pick the end location instead of the start location.
template <typename WithLoc>
FileAndLocation getSwiftFileAndLocation(WithLoc *ASTNode, bool End) {
  if (!ASTNode)
    return {};
  SourceLoc Loc = End ? ASTNode->getEndLoc() : ASTNode->getStartLoc();
  if (Loc.isInvalid())
    // This may be a deserialized or clang-imported decl. And modules
    // don't come with SourceLocs right now. Get at least the name of
    // the module.
    return getDeserializedLoc(ASTNode);
  return decodeSourceLoc(Loc);
}
FileAndLocation getFileAndLocation(Pattern *P, bool End = false) {
  return getSwiftFileAndLocation(P, End);
}
FileAndLocation getFileAndLocation(Expr *E, bool End = false) {
  return getSwiftFileAndLocation(E, End);
}
/// Decls may originate from Clang; in that case consult the Clang
/// SourceManager for the presumed location rather than the Swift one.
FileAndLocation getFileAndLocation(Decl *D, bool End = false) {
  FileAndLocation L;
  if (!D)
    return L;
  if (auto *ClangDecl = D->getClangDecl()) {
    clang::SourceLocation ClangSrcLoc = ClangDecl->getBeginLoc();
    clang::SourceManager &ClangSM =
        CI.getClangASTContext().getSourceManager();
    clang::PresumedLoc PresumedLoc = ClangSM.getPresumedLoc(ClangSrcLoc);
    if (!PresumedLoc.isValid())
      return L;
    L.Line = PresumedLoc.getLine();
    L.Column = PresumedLoc.getColumn();
    L.File = getOrCreateFile(PresumedLoc.getFilename(), {});
    return L;
  }
  return getSwiftFileAndLocation(D, End);
}
/// Start location of an optional SILLocation; empty when absent.
FileAndLocation getStartLocation(std::optional<SILLocation> OptLoc) {
  if (!OptLoc)
    return {};
  if (OptLoc->isFilenameAndLocation())
    return getFileAndLocation(*OptLoc->getFilenameAndLocation());
  return decodeSourceLoc(OptLoc->getStartSourceLoc());
}
/// Decode a SILLocation, preferring an explicitly attached
/// filename/line/column over decoding the SourceLoc.
FileAndLocation decodeFileAndLocation(SILLocation Loc) {
  if (Loc.isFilenameAndLocation())
    return getFileAndLocation(*Loc.getFilenameAndLocation());
  return decodeSourceLoc(Loc.getSourceLocForDebugging());
}
/// Strdup a raw char array using the bump pointer.
/// The copy is NUL-terminated and owned by DebugInfoNames.
StringRef BumpAllocatedString(const char *Data, size_t Length) {
  char *Ptr = DebugInfoNames.Allocate<char>(Length + 1);
  memcpy(Ptr, Data, Length);
  *(Ptr + Length) = 0;
  return StringRef(Ptr, Length);
}
/// Strdup S using the bump pointer.
StringRef BumpAllocatedString(std::string S) {
  if (S.empty())
    return {};
  return BumpAllocatedString(S.c_str(), S.length());
}
/// Strdup StringRef S using the bump pointer.
StringRef BumpAllocatedString(StringRef S) {
  if (S.empty())
    return {};
  return BumpAllocatedString(S.data(), S.size());
}
/// Return the size reported by a type, walking through derived types
/// (which may report no size of their own) until a size is found.
/// Returns 0 when no type in the chain reports a size.
static unsigned getSizeInBits(llvm::DIType *Ty) {
  while (Ty) {
    if (auto Size = Ty->getSizeInBits())
      return Size;
    // Zero-sized and not a derived type: nothing left to follow.
    auto *Derived = llvm::dyn_cast<llvm::DIDerivedType>(Ty);
    if (!Derived)
      break;
    Ty = Derived->getBaseType();
  }
  return 0;
}
#ifndef NDEBUG
/// Return the size reported by the variable's type.
/// (Compiled into debug builds only.)
static unsigned getSizeInBits(const llvm::DILocalVariable *Var) {
  llvm::DIType *Ty = Var->getType();
  return getSizeInBits(Ty);
}
#endif
/// Determine whether this debug scope belongs to an explicit closure,
/// i.e. the function's location is a ClosureExpr AST node.
static bool isExplicitClosure(const SILFunction *SILFn) {
  if (SILFn && SILFn->hasLocation())
    if (Expr *E = SILFn->getLocation().getAsASTNode<Expr>())
      if (isa<ClosureExpr>(E))
        return true;
  return false;
}
public:
/// Build (and cache) the recursive chain of DILocations describing where
/// the scope \p DS was inlined; returns nullptr for non-inlined scopes.
llvm::MDNode *createInlinedAt(const SILDebugScope *DS) {
  auto *CS = DS->InlinedCallSite;
  if (!CS)
    return nullptr;
  auto CachedInlinedAt = InlinedAtCache.find(CS);
  if (CachedInlinedAt != InlinedAtCache.end())
    return cast<llvm::MDNode>(CachedInlinedAt->second);
  auto L = decodeFileAndLocation(CS->Loc);
  // The call site's parent may be either another scope or a function.
  auto Scope = getOrCreateScope(CS->Parent.dyn_cast<const SILDebugScope *>());
  if (auto *Fn = CS->Parent.dyn_cast<SILFunction *>())
    Scope = getOrCreateScope(Fn->getDebugScope());
  // Pretend transparent functions don't exist.
  if (!Scope)
    return createInlinedAt(CS);
  auto InlinedAt = llvm::DILocation::getDistinct(
      IGM.getLLVMContext(), L.Line, L.Column, Scope, createInlinedAt(CS));
  InlinedAtCache.insert({CS, llvm::TrackingMDNodeRef(InlinedAt)});
  return InlinedAt;
}
private:
#ifndef NDEBUG
/// Perform a couple of soundness checks on scopes.
/// Asserts that a non-inlined scope never hangs below an inlined parent.
static bool parentScopesAreSane(const SILDebugScope *DS) {
  auto *Parent = DS;
  while ((Parent = Parent->Parent.dyn_cast<const SILDebugScope *>())) {
    if (!DS->InlinedCallSite)
      assert(!Parent->InlinedCallSite &&
             "non-inlined scope has an inlined parent");
  }
  return true;
}
/// Assert that within one lexical block, each location is only visited once.
bool lineEntryIsSane(FileAndLocation DL, const SILDebugScope *DS);
#endif
/// Return (fetching from the cache or creating) the llvm::DIFile for
/// \p Filename. An empty name maps to the compiler-generated file; a name
/// that resolves to the main file reuses the MainFile node so debug info
/// keeps a single node for it.
llvm::DIFile *getOrCreateFile(StringRef Filename,
                              std::optional<StringRef> Source) {
  if (Filename.empty())
    Filename = SILLocation::getCompilerGeneratedLoc()->filename;
  // Look in the cache first.
  auto CachedFile = DIFileCache.find(Filename);
  if (CachedFile != DIFileCache.end()) {
    // Verify that the information still exists.
    if (llvm::Metadata *V = CachedFile->second)
      return cast<llvm::DIFile>(V);
  }
  // Detect the main file. Only dereference MainFile after checking it for
  // null; the previous code called MainFile->getFilename() ahead of the
  // null test, making the subsequent check useless.
  if (MainFile) {
    StringRef MainFileName = MainFile->getFilename();
    if (Filename.ends_with(MainFileName)) {
      SmallString<256> AbsThisFile, AbsMainFile;
      AbsThisFile = Filename;
      llvm::sys::fs::make_absolute(AbsThisFile);
      if (llvm::sys::path::is_absolute(MainFileName))
        AbsMainFile = MainFileName;
      else
        llvm::sys::path::append(AbsMainFile, MainFile->getDirectory(),
                                MainFileName);
      if (AbsThisFile == DebugPrefixMap.remapPath(AbsMainFile)) {
        DIFileCache[Filename] = llvm::TrackingMDNodeRef(MainFile);
        return MainFile;
      }
    }
  }
  return createFile(Filename, std::nullopt, Source);
}
/// This is effectively \p clang::CGDebugInfo::createFile().
llvm::DIFile *
createFile(StringRef FileName,
std::optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
std::optional<StringRef> Source) {
StringRef File, Dir;
StringRef CurDir = Opts.DebugCompilationDir;
SmallString<128> NormalizedFile(FileName);
SmallString<128> FileBuf, DirBuf;
llvm::sys::path::remove_dots(NormalizedFile);
if (llvm::sys::path::is_absolute(NormalizedFile) &&
llvm::sys::path::is_absolute(CurDir)) {
// Strip the common prefix (if it is more than just "/") from current
// directory and FileName for a more space-efficient encoding.
auto FileIt = llvm::sys::path::begin(NormalizedFile);
auto FileE = llvm::sys::path::end(NormalizedFile);
auto CurDirIt = llvm::sys::path::begin(CurDir);
auto CurDirE = llvm::sys::path::end(CurDir);
for (; CurDirIt != CurDirE && *CurDirIt == *FileIt; ++CurDirIt, ++FileIt)
llvm::sys::path::append(DirBuf, *CurDirIt);
if (std::distance(llvm::sys::path::begin(CurDir), CurDirIt) == 1) {
// Don't strip the common prefix if it is only the root "/"
// since that would make LLVM diagnostic locations confusing.
Dir = {};
File = NormalizedFile;
} else {
for (; FileIt != FileE; ++FileIt)
llvm::sys::path::append(FileBuf, *FileIt);
Dir = DirBuf;
File = FileBuf;
}
} else {
File = NormalizedFile;
// Leave <compiler-generated> & friends as is, without directory.
if (!(File.starts_with("<") && File.ends_with(">")))
Dir = CurDir;
else
Dir = llvm::sys::path::root_directory(CurDir);
}
llvm::DIFile *F =
DBuilder.createFile(DebugPrefixMap.remapPath(File),
DebugPrefixMap.remapPath(Dir), CSInfo, Source);
DIFileCache[FileName].reset(F);
return F;
}
/// Return a user-facing name for a function declaration.
/// Accessors are anonymous, so a name is forged from the accessed storage
/// declaration plus a kind suffix (e.g. "x.get").
StringRef getName(const FuncDecl &FD) {
  // Getters and Setters are anonymous functions, so we forge a name
  // using its parent declaration.
  if (auto accessor = dyn_cast<AccessorDecl>(&FD))
    if (ValueDecl *VD = accessor->getStorage()) {
      const char *Kind;
      switch (accessor->getAccessorKind()) {
      case AccessorKind::Get:
        Kind = ".get";
        break;
      case AccessorKind::DistributedGet:
        Kind = "._distributed_get";
        break;
      case AccessorKind::Set:
        Kind = ".set";
        break;
      case AccessorKind::WillSet:
        Kind = ".willset";
        break;
      case AccessorKind::DidSet:
        Kind = ".didset";
        break;
      case AccessorKind::Address:
        Kind = ".addressor";
        break;
      case AccessorKind::MutableAddress:
        Kind = ".mutableAddressor";
        break;
      case AccessorKind::Read:
        Kind = ".read";
        break;
      case AccessorKind::Modify:
        Kind = ".modify";
        break;
      case AccessorKind::Init:
        Kind = ".init";
        break;
      case AccessorKind::YieldingMutate:
        Kind = ".yielding_mutate";
        break;
      case AccessorKind::YieldingBorrow:
        Kind = ".yielding_borrow";
        break;
      case AccessorKind::Borrow:
        Kind = ".borrow";
        break;
      case AccessorKind::Mutate:
        Kind = ".mutate";
        break;
      }
      SmallVector<char, 64> Buf;
      StringRef Name =
          (VD->getBaseName().userFacingName() + Twine(Kind)).toStringRef(Buf);
      // Copy out of the temporary buffer into stable storage.
      return BumpAllocatedString(Name);
    }
  if (FD.hasName())
    return FD.getBaseIdentifier().str();
  return StringRef();
}
/// Best-effort user-facing name for the entity at \p L; empty if none.
StringRef getName(SILLocation L) {
  if (L.isNull())
    return StringRef();
  // FuncDecl is checked before the more general ValueDecl case.
  if (FuncDecl *FD = L.getAsASTNode<FuncDecl>())
    return getName(*FD);
  if (ValueDecl *D = L.getAsASTNode<ValueDecl>())
    return D->getBaseName().userFacingName();
  if (auto *D = L.getAsASTNode<MacroExpansionDecl>())
    return D->getMacroName().getBaseIdentifier().str();
  if (auto *E = L.getAsASTNode<MacroExpansionExpr>())
    return E->getMacroName().getBaseIdentifier().str();
  return StringRef();
}
/// Extract the SILFunctionType from a SILType; returns a null type (and
/// emits a debug log) when \p SILTy is null or not a function type.
static CanSILFunctionType getFunctionType(SILType SILTy) {
  if (!SILTy)
    return CanSILFunctionType();
  auto FnTy = SILTy.getAs<SILFunctionType>();
  if (!FnTy) {
    LLVM_DEBUG(llvm::dbgs() << "Unexpected function type: ";
               SILTy.print(llvm::dbgs()); llvm::dbgs() << "\n");
    return CanSILFunctionType();
  }
  return FnTy;
}
/// Map a Swift DeclContext onto the DIScope that debug info entities in
/// that context should be parented under; falls back to the compile unit.
llvm::DIScope *getOrCreateContext(DeclContext *DC) {
  if (!DC)
    return TheCU;
  // Nominal types become forward-declared DITypes acting as scopes.
  auto createContext = [&](NominalTypeDecl &NTD) {
    GenericContextScope scope(
        IGM, NTD.getGenericSignature().getCanonicalSignature());
    auto Ty = NTD.getDeclaredInterfaceType();
    // Create a Forward-declared type.
    auto DbgTy = DebugTypeInfo::getForwardDecl(Ty);
    return getOrCreateType(DbgTy);
  };
  // Functions with a SIL counterpart use that function's debug scope.
  if (isa<FuncDecl>(DC))
    if (auto *Decl = IGM.getSILModule().lookUpFunction(SILDeclRef(
            cast<AbstractFunctionDecl>(DC), SILDeclRef::Kind::Func)))
      return getOrCreateScope(Decl->getDebugScope());
  switch (DC->getContextKind()) {
  // The interesting cases are already handled above.
  case DeclContextKind::AbstractFunctionDecl:
  case DeclContextKind::AbstractClosureExpr:
  case DeclContextKind::SerializedAbstractClosure:
  // We don't model these in DWARF.
  case DeclContextKind::Initializer:
  case DeclContextKind::SubscriptDecl:
  case DeclContextKind::EnumElementDecl:
  case DeclContextKind::TopLevelCodeDecl:
  case DeclContextKind::SerializedTopLevelCodeDecl:
    return getOrCreateContext(DC->getParent());
  case DeclContextKind::Package: {
    auto *pkg = cast<PackageUnit>(DC);
    return getOrCreateContext(pkg);
  }
  case DeclContextKind::Module:
    return getOrCreateModule(
        {ImportPath::Access(), cast<ModuleDecl>(DC)});
  case DeclContextKind::FileUnit:
    // A module may contain multiple files.
    return getOrCreateContext(DC->getParent());
  case DeclContextKind::MacroDecl:
    return getOrCreateContext(DC->getParent());
  case DeclContextKind::ExtensionDecl: {
    auto *ED = cast<ExtensionDecl>(DC);
    if (auto *NTD = ED->getExtendedNominal())
      return createContext(*NTD);
    return getOrCreateContext(DC->getParent());
  }
  case DeclContextKind::GenericTypeDecl: {
    // The generic signature of this nominal type has no relation to the
    // current function's generic signature.
    auto *NTD = cast<NominalTypeDecl>(DC);
    return createContext(*NTD);
  }
  }
  return TheCU;
}
/// Append the DIType for one function parameter to \p Parameters.
/// A forward declaration suffices for a parameter-list entry.
void createParameterType(llvm::SmallVectorImpl<llvm::Metadata *> &Parameters,
                         SILType type) {
  auto RealType = type.getASTType();
  auto DbgTy = DebugTypeInfo::getForwardDecl(RealType);
  Parameters.push_back(getOrCreateType(DbgTy));
}
// This is different from SILFunctionType::getAllResultsType() in some subtle
// ways.
static SILType getResultTypeForDebugInfo(IRGenModule &IGM,
                                         CanSILFunctionType fnTy) {
  if (fnTy->getNumResults() == 1) {
    // Single result: use its SIL storage type directly.
    return fnTy->getResults()[0].getSILStorageType(
        IGM.getSILModule(), fnTy, IGM.getMaximalTypeExpansionContext());
  } else if (!fnTy->getNumIndirectFormalResults()) {
    // Only direct results: the combined direct results type.
    return fnTy->getDirectFormalResultsType(
        IGM.getSILModule(), IGM.getMaximalTypeExpansionContext());
  } else {
    // Indirect results present: synthesize a tuple of all result types.
    SmallVector<TupleTypeElt, 4> eltTys;
    for (auto &result : fnTy->getResults()) {
      eltTys.push_back(result.getReturnValueType(
          IGM.getSILModule(), fnTy, IGM.getMaximalTypeExpansionContext()));
    }
    return SILType::getPrimitiveAddressType(
        CanType(TupleType::get(eltTys, fnTy->getASTContext())));
  }
}
/// Build the debug-info parameter type array for a SIL function type;
/// a null \p SILTy yields a null array.
llvm::DITypeRefArray createParameterTypes(SILType SILTy) {
  if (!SILTy)
    return nullptr;
  return createParameterTypes(SILTy.castTo<SILFunctionType>());
}
llvm::DITypeRefArray createParameterTypes(CanSILFunctionType FnTy) {
  SmallVector<llvm::Metadata *, 16> Parameters;
  GenericContextScope scope(IGM, FnTy->getInvocationGenericSignature());
  // The function return type is the first element in the list.
  createParameterType(Parameters, getResultTypeForDebugInfo(IGM, FnTy));
  for (auto &Param : FnTy->getParameters())
    createParameterType(
        Parameters, IGM.silConv.getSILType(
                        Param, FnTy, IGM.getMaximalTypeExpansionContext()));
  return DBuilder.getOrCreateTypeArray(Parameters);
}
/// FIXME: replace this condition with something more sound.
/// Heuristic: a constructor decl whose representation is not 'method'
/// is treated as the allocating entry point.
static bool isAllocatingConstructor(SILFunctionTypeRepresentation Rep,
                                    DeclContext *DeclCtx) {
  return Rep != SILFunctionTypeRepresentation::Method && DeclCtx &&
         isa<ConstructorDecl>(DeclCtx);
}
/// Emit a DWARF import of module \p M into \p Context.
void createImportedModule(llvm::DIScope *Context,
                          ImportedModule M, llvm::DIFile *File,
                          unsigned Line) {
  // For overlays of Clang modules also emit an import of the underlying Clang
  // module. The helps the debugger resolve types that are present only in the
  // underlying module.
  if (const clang::Module *UnderlyingClangModule =
          M.importedModule->findUnderlyingClangModule()) {
    DBuilder.createImportedModule(
        Context,
        getOrCreateModule(
            {*const_cast<clang::Module *>(UnderlyingClangModule)},
            UnderlyingClangModule),
        File, 0);
  }
  DBuilder.createImportedModule(Context, getOrCreateModule(M), File, Line);
}
/// Return (creating and caching on first use) the llvm::DIModule identified
/// by \p Key. For a root Clang module / PCH with a known AST file, a
/// skeleton compile unit pointing at the PCM/PCH is emitted as well.
llvm::DIModule *getOrCreateModule(const void *Key, llvm::DIScope *Parent,
                                  StringRef Name, StringRef IncludePath,
                                  uint64_t Signature = ~1ULL,
                                  StringRef ASTFile = {}) {
  // Look in the cache first.
  auto Val = DIModuleCache.find(Key);
  if (Val != DIModuleCache.end())
    return cast<llvm::DIModule>(Val->second);
  std::string RemappedIncludePath = DebugPrefixMap.remapPath(IncludePath);
  std::string RemappedASTFile = DebugPrefixMap.remapPath(ASTFile);
  // For Clang modules / PCH, create a Skeleton CU pointing to the PCM/PCH.
  if (!Opts.DisableClangModuleSkeletonCUs) {
    bool CreateSkeletonCU = !ASTFile.empty();
    bool IsRootModule = !Parent;
    if (CreateSkeletonCU && IsRootModule) {
      llvm::DIBuilder DIB(M);
      DIB.createCompileUnit(IGM.ObjCInterop ? llvm::dwarf::DW_LANG_ObjC
                                            : llvm::dwarf::DW_LANG_C99,
                            DIB.createFile(Name, RemappedIncludePath),
                            TheCU->getProducer(), true, StringRef(), 0,
                            RemappedASTFile, llvm::DICompileUnit::FullDebug,
                            Signature);
      // NOTE: not setting DebugInfoForProfiling here
      DIB.finalize();
    }
  }
  // Named DIMod (not M) to avoid shadowing the llvm::Module member 'M',
  // which the DIBuilder construction above refers to.
  llvm::DIModule *DIMod =
      DBuilder.createModule(Parent, Name, ConfigMacros, RemappedIncludePath);
  DIModuleCache.insert({Key, llvm::TrackingMDNodeRef(DIMod)});
  return DIMod;
}
using ASTSourceDescriptor = clang::ASTSourceDescriptor;
/// Create a DIModule from a clang module or PCH.
/// The clang::Module pointer is passed separately because the recursive case
/// needs to fudge the AST descriptor.
llvm::DIModule *getOrCreateModule(ASTSourceDescriptor Desc,
                                  const clang::Module *ClangModule) {
  // PCH files don't have a signature field in the control block,
  // but LLVM detects skeleton CUs by looking for a non-zero DWO id.
  // We use the lower 64 bits for debug info.
  uint64_t Signature =
      Desc.getSignature() ? Desc.getSignature().truncatedValue() : ~1ULL;
  // Clang modules using fmodule-file-home-is-cwd should have their
  // include path set to the working directory.
  auto &HSI =
      CI.getClangPreprocessor().getHeaderSearchInfo().getHeaderSearchOpts();
  StringRef IncludePath =
      HSI.ModuleFileHomeIsCwd ? Opts.DebugCompilationDir : Desc.getPath();
  // Prefer the CAS ID over the on-disk AST filename when one is present.
  StringRef ASTFile =
      Desc.getCASID().empty() ? Desc.getASTFile() : Desc.getCASID();
  // Handle Clang modules.
  if (ClangModule) {
    llvm::DIModule *Parent = nullptr;
    if (ClangModule->Parent) {
      // The loading of additional modules by Sema may trigger an out-of-date
      // PCM rebuild in the Clang module dependencies of the additional
      // module. A PCM rebuild causes the ModuleManager to unload previously
      // loaded ASTFiles. For this reason we must use the cached ASTFile
      // information here instead of the potentially dangling pointer to the
      // ASTFile that is stored in the clang::Module object.
      //
      // Note: The implementation here assumes that all clang submodules
      // belong to the same PCM file.
      ASTSourceDescriptor ParentDescriptor(*ClangModule->Parent);
      Parent = getOrCreateModule(
          {ParentDescriptor.getModuleName(), ParentDescriptor.getPath(),
           Desc.getASTFile(), Desc.getSignature(), Desc.getCASID()},
          ClangModule->Parent);
    }
    return getOrCreateModule(ClangModule, Parent, Desc.getModuleName(),
                             IncludePath, Signature, ASTFile);
  }
  // Handle PCH: keyed by the AST file's buffer address, no parent module.
  return getOrCreateModule(Desc.getASTFile().bytes_begin(), nullptr,
                           Desc.getModuleName(), IncludePath, Signature,
                           ASTFile);
};
/// Scan the files of \p M for a Clang module unit and return its AST
/// source descriptor, or std::nullopt if the module has none.
static std::optional<ASTSourceDescriptor>
getClangModule(const ModuleDecl &M) {
  for (auto *Unit : M.getFiles()) {
    auto *ClangUnit = dyn_cast_or_null<ClangModuleUnit>(Unit);
    if (!ClangUnit)
      continue;
    if (auto Descriptor = ClangUnit->getASTSourceDescriptor())
      return Descriptor;
  }
  return std::nullopt;
}
/// Return (creating if necessary) the DIModule for an imported module,
/// taking the Clang-module path when the import resolves to one.
llvm::DIModule *getOrCreateModule(ImportedModule IM) {
  ModuleDecl *M = IM.importedModule;
  // Clang modules are emitted with a skeleton-CU-backed DIModule.
  if (std::optional<ASTSourceDescriptor> ModuleDesc = getClangModule(*M))
    return getOrCreateModule(*ModuleDesc, ModuleDesc->getModuleOrNull());
  StringRef Path = getFilenameFromDC(M);
  // Use the module 'real' name, which can be different from the name if module
  // aliasing was used (swift modules only). For example, if a source file has
  // 'import Foo', and '-module-alias Foo=Bar' was passed in, the real name of
  // the module on disk is Bar (.swiftmodule or .swiftinterface), and is used
  // for loading and mangling.
  StringRef Name = M->getRealName().str();
  return getOrCreateModule(M, TheCU, Name, Path);
}
/// Return a cached TypeAliasDecl (underlying type: Builtin.RawPointer)
/// used to represent type metadata for the named archetype, creating and
/// caching it on first request.
TypeAliasDecl *getMetadataType(StringRef ArchetypeName) {
  TypeAliasDecl *&Cached = MetadataTypeDeclCache[ArchetypeName];
  if (!Cached) {
    SourceLoc Loc;
    Cached = new (IGM.Context) TypeAliasDecl(
        Loc, Loc, IGM.Context.getIdentifier(ArchetypeName), Loc,
        /*genericparams*/ nullptr, IGM.Context.TheBuiltinModule);
    Cached->setUnderlyingType(IGM.Context.TheRawPointerType);
  }
  return Cached;
}
/// Return the DIFile that is the ancestor of Scope.
///
/// Walks up lexical blocks and subprograms until a DIFile is reached;
/// falls back to MainFile for any other scope kind or a broken chain.
llvm::DIFile *getFile(llvm::DIScope *Scope) {
  while (!isa<llvm::DIFile>(Scope)) {
    switch (Scope->getTag()) {
    case llvm::dwarf::DW_TAG_lexical_block:
      Scope = cast<llvm::DILexicalBlock>(Scope)->getScope();
      break;
    case llvm::dwarf::DW_TAG_subprogram:
      Scope = cast<llvm::DISubprogram>(Scope)->getFile();
      break;
    default:
      return MainFile;
    }
    // Fixed inverted check: the original `if (Scope) return MainFile;`
    // returned MainFile for every scope that *did* have a parent, so the
    // walk could never reach the enclosing DIFile. We only want to bail
    // out when the chain runs out.
    if (!Scope)
      return MainFile;
  }
  if (Scope)
    return cast<llvm::DIFile>(Scope);
  return MainFile;
}
/// Sum the bit sizes of the types of all storage pieces.
static unsigned getStorageSizeInBits(const llvm::DataLayout &DL,
                                     ArrayRef<llvm::Value *> Storage) {
  unsigned TotalBits = 0;
  for (llvm::Value *Fragment : Storage) {
    auto *FragmentTy = Fragment->getType();
    TotalBits += DL.getTypeSizeInBits(FragmentTy);
  }
  return TotalBits;
}
/// The pair of mangled names produced for a type: the sugared spelling
/// (may be empty when identical to the canonical one) and the canonical
/// spelling.
struct MangledNames {
  StringRef Sugared, Canonical;
};
/// Mangle the type in \p DbgTy for the debugger, returning both the
/// sugared and canonical spellings (sugared is empty when they match).
/// Unless disabled, verifies that the mangled name can be demangled back
/// to an equal type, aborting with a diagnostic otherwise.
MangledNames getMangledName(DebugTypeInfo DbgTy) {
  // Metadata types were registered via getMetadataType(); reuse the
  // cached key as the canonical name.
  if (DbgTy.isMetadataType())
    return {{},
            MetadataTypeDeclCache.find(DbgTy.getDecl()->getName().str())
                ->getKey()};
  // This is a bit of a hack. We need a generic signature to use for mangling.
  // If we started with an interface type, just use IGM.getCurGenericContext(),
  // since callers that use interface types typically push a signature that way.
  //
  // Otherwise, if we have a contextual type, find an archetype and ask it for
  // its generic signature. The context generic signature from the IRGenModule
  // is unlikely to be useful here.
  GenericSignature Sig;
  Type Ty = DbgTy.getType();
  if (Ty->hasArchetype()) {
    // Probe for the first primary or pack archetype to recover a signature.
    Ty.findIf([&](Type t) -> bool {
      if (auto *archetypeTy = t->getAs<PrimaryArchetypeType>()) {
        Sig = archetypeTy->getGenericEnvironment()->getGenericSignature();
        return true;
      }
      if (auto *archetypeTy = t->getAs<PackArchetypeType>()) {
        Sig = archetypeTy->getGenericEnvironment()->getGenericSignature();
        return true;
      }
      return false;
    });
    Ty = Ty->mapTypeOutOfEnvironment();
  } else {
    Sig = IGM.getCurGenericContext();
  }
  // Strip off top level of type sugar (except for type aliases).
  // We don't want Optional<T> and T? to get different debug types.
  while (true) {
    if (auto *SugarTy = dyn_cast<SyntaxSugarType>(Ty.getPointer())) {
      Ty = SugarTy->getSinglyDesugaredType();
      continue;
    }
    break;
  }
  // TODO: Eliminate substitutions in SILFunctionTypes for now.
  // On platforms where the substitutions affect representation, we will need
  // to preserve this info and teach type reconstruction about it.
  Ty = Ty->replaceSubstitutedSILFunctionTypesWithUnsubstituted(
      IGM.getSILModule());
  Mangle::ASTMangler Mangler(IGM.Context);
  std::string SugaredName, CanonicalName;
  SugaredName = Mangler.mangleTypeForDebugger(Ty, Sig);
  // Only mangle a separate canonical name when canonicalization actually
  // changed the type.
  CanType CanTy = Ty->getCanonicalType();
  if (CanTy.getPointer() != Ty.getPointer()) {
    CanonicalName = Mangler.mangleTypeForDebugger(CanTy, Sig);
    if (SugaredName == CanonicalName)
      CanonicalName.clear();
  }
  bool IsTypeOriginallyDefinedIn = containsOriginallyDefinedIn(DbgTy.getType());
  bool IsCxxType = containsCxxType(DbgTy.getType());
  // There's no way to round trip when respecting @_originallyDefinedIn for a type.
  // TODO(https://github.com/apple/swift/issues/57699): We currently cannot round trip some C++ types.
  if (!Opts.DisableRoundTripDebugTypes && !IsTypeOriginallyDefinedIn && !IsCxxType) {
    // Make sure we can reconstruct mangled types for the debugger.
    auto &Ctx = Ty->getASTContext();
    Type Reconstructed = Demangle::getTypeForMangling(Ctx, SugaredName, Sig);
    if (!Reconstructed) {
      ABORT([&](auto &out) {
        out << "Failed to reconstruct type for " << SugaredName << "\n";
        out << "Original type:\n";
        Ty->dump(out);
        if (Sig)
          out << "Generic signature: " << Sig << "\n";
        out << SWIFT_CRASH_BUG_REPORT_MESSAGE << "\n"
            << "Pass '-Xfrontend -disable-round-trip-debug-types' to disable "
               "this assertion.";
      });
    } else if (!Reconstructed->isEqual(Ty) &&
               // FIXME: Some existential types are reconstructed without
               // an explicit ExistentialType wrapping the constraint.
               !equalWithoutExistentialTypes(Reconstructed, Ty) &&
               !EqualUpToClangTypes().check(Reconstructed, Ty)) {
      // [FIXME: Include-Clang-type-in-mangling] Remove second check
      ABORT([&](auto &out) {
        out << "Incorrect reconstructed type for " << SugaredName << "\n";
        out << "Original type:\n";
        Ty->dump(out);
        out << "Reconstructed type:\n";
        Reconstructed->dump(out);
        if (Sig)
          out << "Generic signature: " << Sig << "\n";
        out << SWIFT_CRASH_BUG_REPORT_MESSAGE << "\n"
            << "Pass '-Xfrontend -disable-round-trip-debug-types' to disable "
               "this assertion.";
      });
    }
  }
  // Only return a dedicated sugared name if it's different from the canonical
  // one.
  if (CanonicalName.empty())
    std::swap(SugaredName, CanonicalName);
  return {BumpAllocatedString(SugaredName),
          BumpAllocatedString(CanonicalName)};
}
/// Wrap \p DITy into a DW_TAG_member placed at \p OffsetInBits, then
/// advance the offset past the member, rounding up to \p AlignInBits
/// (when nonzero) so the next member starts aligned.
llvm::DIDerivedType *
createMemberType(llvm::DIType *DITy, StringRef Name, unsigned &OffsetInBits,
                 unsigned AlignInBits, llvm::DIScope *Scope,
                 llvm::DIFile *File, llvm::DINode::DIFlags Flags) {
  auto SizeInBits = getSizeInBits(DITy);
  llvm::DIDerivedType *DIMemberTy = DBuilder.createMemberType(
      Scope, Name, File, 0, SizeInBits, 0, OffsetInBits, Flags, DITy);
  OffsetInBits += SizeInBits;
  if (AlignInBits)
    OffsetInBits = llvm::alignTo(OffsetInBits, AlignInBits);
  return DIMemberTy;
}
/// Creates a temporary replaceable forward decl to protect against recursion.
/// The temporary is pre-registered in both DITypeCache and DIRefMap so a
/// recursive getOrCreateType for the same type finds it instead of looping;
/// the caller replaces it with the final type via replaceTemporary.
llvm::TempDIType createTemporaryReplaceableForwardDecl(
    TypeBase *Type, llvm::DIScope *Scope, llvm::DIFile *File, unsigned Line,
    unsigned SizeInBits, unsigned AlignInBits, llvm::DINode::DIFlags Flags,
    StringRef MangledName, StringRef Name) {
#ifndef NDEBUG
  {
    // Sanity-check the identifiers and ensure this type was not already
    // cached under the same UID.
    if (MangledName.empty())
      assert(!Name.empty() &&
             "no mangled name and no human readable name given");
    else
      assert(swift::Demangle::isMangledName(MangledName) &&
             "UID is not a mangled name");
    auto UID = llvm::MDString::get(IGM.getLLVMContext(), MangledName);
    assert(DIRefMap.count(UID) == 0 && "type is already cached");
  }
#endif
  auto ReplaceableType = DBuilder.createReplaceableCompositeType(
      llvm::dwarf::DW_TAG_structure_type, "", Scope, File, Line,
      llvm::dwarf::DW_LANG_Swift, SizeInBits, AlignInBits, Flags,
      MangledName);
  auto FwdDecl = llvm::TempDIType(ReplaceableType);
  // Register the temporary under both the AST type and the mangled UID.
  auto TH = llvm::TrackingMDNodeRef(FwdDecl.get());
  DITypeCache[Type] = TH;
  if (auto UID = ReplaceableType->getRawIdentifier())
    DIRefMap[UID] = llvm::TrackingMDNodeRef(TH);
  return FwdDecl;
}
using TrackingDIType = llvm::TypedTrackingMDRef<llvm::DIType>;
/// A member of an aggregate awaiting emission: its name, the alignment (in
/// bits) used to round up the running offset, and the member's debug type
/// (held via a tracking ref so RAUW of temporaries is followed).
struct MemberDIType {
  StringRef Name;
  unsigned AlignInBits;
  TrackingDIType DIType;
  MemberDIType(StringRef Name, unsigned AlignInBits, llvm::DIType *DIType)
      : Name(Name), AlignInBits(AlignInBits), DIType(DIType) {}
};
/// Number of bits in one target byte (the target's CHAR_BIT).
unsigned getByteSize() { return CI.getTargetInfo().getCharWidth(); }
/// Emit a DW_TAG_structure_type for a nominal type, including its stored
/// properties when the type is non-resilient and all member type info is
/// complete; otherwise falls back to a forward declaration.
llvm::DICompositeType *createStructType(
    NominalOrBoundGenericNominalType *Type, NominalTypeDecl *Decl,
    llvm::DIScope *Scope, llvm::DIFile *File, unsigned Line,
    unsigned SizeInBits, unsigned AlignInBits, llvm::DINode::DIFlags Flags,
    StringRef MangledName, llvm::DIType *SpecificationOf = nullptr) {
  StringRef Name = Decl->getName().str();
  // Guard against recursion through member types.
  auto FwdDecl = createTemporaryReplaceableForwardDecl(
      Type, Scope, File, Line, SizeInBits, AlignInBits, Flags, MangledName,
      Name);
  // Collect the members.
  SmallVector<MemberDIType, 16> MemberTypes;
  if (!IGM.isResilient(Decl, ResilienceExpansion::Maximal)) {
    for (VarDecl *VD : Decl->getStoredProperties()) {
      auto memberTy = Type->getTypeOfMember(VD);
      if (auto DbgTy = CompletedDebugTypeInfo::getFromTypeInfo(
              memberTy,
              IGM.getTypeInfoForUnlowered(
                  IGM.getSILTypes().getAbstractionPattern(VD), memberTy),
              IGM))
        MemberTypes.emplace_back(VD->getName().str(),
                                 getByteSize() *
                                     DbgTy->getAlignment().getValue(),
                                 getOrCreateType(*DbgTy));
      else
        // Without complete type info we can only create a forward decl.
        return DBuilder.createForwardDecl(
            llvm::dwarf::DW_TAG_structure_type, MangledName, Scope, File, Line,
            llvm::dwarf::DW_LANG_Swift, SizeInBits, 0);
    }
  }
  // Lay the members out sequentially, aligning each offset as we go.
  SmallVector<llvm::Metadata *, 16> Members;
  unsigned OffsetInBits = 0;
  for (auto &Member : MemberTypes)
    Members.push_back(createMemberType(Member.DIType, Member.Name,
                                       OffsetInBits, Member.AlignInBits,
                                       Scope, File, Flags));
  llvm::DINodeArray BoundParams = collectGenericParams(Type);
  llvm::DICompositeType *DITy = createStruct(
      Scope, Name, File, Line, SizeInBits, AlignInBits, Flags, MangledName,
      DBuilder.getOrCreateArray(Members), BoundParams, SpecificationOf);
  return DBuilder.replaceTemporary(std::move(FwdDecl), DITy);
}
/// Creates debug info for a generic struct or class with archetypes (e.g.:
/// Pair<τ_0_0, τ_0_1>). For types with unsubstituted generic type parameters,
/// debug info generation doesn't attempt to emit the size and aligment of the
/// type, as in the general case those are all dependent on substituting the
/// type parameters in (some exceptions exist, like generic types that are
/// class constrained). It also doesn't attempt to emit the members for the
/// same reason.
llvm::DICompositeType *createUnsubstitutedGenericStructOrClassType(
    DebugTypeInfo DbgTy, NominalTypeDecl *Decl, Type UnsubstitutedType,
    llvm::DIScope *Scope, llvm::DIFile *File, unsigned Line,
    llvm::DINode::DIFlags Flags, llvm::DIType *DerivedFrom,
    unsigned RuntimeLang, StringRef UniqueID) {
  // FIXME: ideally, we'd like to emit this type with no size and alignment at
  // all (instead of emitting them as 0). Fix this by changing DIBuilder to
  // allow for struct types that have optional size and alignment.
  unsigned SizeInBits = 0;
  unsigned AlignInBits = 0;
  StringRef Name = Decl->getName().str();
  // Guard against recursion through member types.
  auto FwdDecl = createTemporaryReplaceableForwardDecl(
      DbgTy.getType(), Scope, File, Line, SizeInBits, AlignInBits, Flags,
      UniqueID, Name);
  // Collect the members.
  SmallVector<MemberDIType, 16> MemberTypes;
  if (!IGM.isResilient(Decl, ResilienceExpansion::Maximal)) {
    for (VarDecl *VD : Decl->getStoredProperties()) {
      Type memberTy = UnsubstitutedType->getTypeOfMember(VD);
      auto DbgTy = DebugTypeInfo::getFromTypeInfo(
          memberTy,
          IGM.getTypeInfoForUnlowered(
              IGM.getSILTypes().getAbstractionPattern(VD), memberTy),
          IGM);
      MemberTypes.emplace_back(VD->getName().str(),
                               getByteSize() * DbgTy.getAlignment().getValue(),
                               getOrCreateType(DbgTy));
    }
  }
  SmallVector<llvm::Metadata *, 16> Members;
  for (auto &Member : MemberTypes) {
    // Offsets are meaningless without concrete sizes, so every member is
    // emitted at offset 0 (the per-iteration reset is deliberate).
    unsigned OffsetInBits = 0;
    auto *member = createMemberType(Member.DIType, Member.Name,
                                    OffsetInBits, Member.AlignInBits,
                                    Scope, File, Flags);
    Members.push_back(member);
  }
  llvm::DICompositeType *DITy = DBuilder.createStructType(
      Scope, Name, File, Line, SizeInBits, AlignInBits, Flags, DerivedFrom,
      DBuilder.getOrCreateArray(Members), RuntimeLang, nullptr, UniqueID);
  // Record the superclass (if any) via a retained inheritance node.
  if (auto SuperClassTy = UnsubstitutedType->getSuperclass()) {
    auto SuperClassDbgTy = DebugTypeInfo::getFromTypeInfo(
        SuperClassTy, IGM.getTypeInfoForUnlowered(SuperClassTy), IGM);
    if (llvm::DIType *SuperClassDITy = getOrCreateType(SuperClassDbgTy)) {
      DBuilder.retainType(DBuilder.createInheritance(
          DITy, SuperClassDITy, 0, 0, llvm::DINode::FlagZero));
    }
  }
  return DBuilder.replaceTemporary(std::move(FwdDecl), DITy);
}
/// Compute the unsubstituted (generic-parameter) form of \p Ty and report
/// whether \p MangledName already names that unsubstituted form.
/// Returns {false, Type()} when \p Ty is null or not nominal.
std::pair<bool, Type> getUnsubstitutedType(Type Ty, StringRef MangledName) {
  if (!Ty)
    return {false,{}};
  auto *Decl = Ty->getNominalOrBoundGenericNominal();
  if (!Decl)
    return {false, {}};
  // Go from Pair<Int, Double> to Pair<T, U>.
  Type InterfaceTy = Decl->getDeclaredInterfaceType();
  Type UnsubstitutedTy = Decl->mapTypeIntoEnvironment(InterfaceTy);
  // Mangle the unsubstituted form and compare against the caller's name to
  // detect whether Ty already *is* the unsubstituted type.
  Mangle::ASTMangler Mangler(IGM.Context);
  std::string DeclTypeMangledName = Mangler.mangleTypeForDebugger(
      UnsubstitutedTy->mapTypeOutOfEnvironment(), {});
  bool IsUnsubstituted = (DeclTypeMangledName == MangledName);
  return {IsUnsubstituted, UnsubstitutedTy};
}
/// Create debug info for a specialized (bound generic) enum. The members
/// live on the unsubstituted variant type; the specialization itself is an
/// opaque struct pointing at it.
llvm::DIType *
createSpecializedEnumType(NominalOrBoundGenericNominalType *EnumTy,
                          EnumDecl *Decl, StringRef MangledName,
                          unsigned SizeInBits, unsigned AlignInBits,
                          llvm::DIScope *Scope, llvm::DIFile *File,
                          unsigned Line, llvm::DINode::DIFlags Flags) {
  auto [IsUnsubstituted, UnsubstitutedTy] =
      getUnsubstitutedType(EnumTy, MangledName);
  auto UnsubstitutedDbgTy = DebugTypeInfo::getFromTypeInfo(
      UnsubstitutedTy, IGM.getTypeInfoForUnlowered(UnsubstitutedTy), IGM);
  // If the requested type *is* the unsubstituted one, emit it directly.
  if (IsUnsubstituted)
    return createUnsubstitutedVariantType(UnsubstitutedDbgTy, Decl,
                                          MangledName, Scope, File, 0, Flags);
  StringRef Name = Decl->getName().str();
  auto FwdDecl = createTemporaryReplaceableForwardDecl(
      EnumTy, Scope, File, Line, SizeInBits, AlignInBits, Flags, MangledName,
      Name);
  // Force the creation of the unsubstituted type, don't create it
  // directly so it goes through all the caching/verification logic.
  auto UnsubstitutedDITy = getOrCreateType(UnsubstitutedDbgTy);
  llvm::DICompositeType *DIType = createOpaqueStruct(
      Scope, "", File, 0, SizeInBits, AlignInBits, Flags, MangledName,
      collectGenericParams(EnumTy), UnsubstitutedDITy);
  return DBuilder.replaceTemporary(std::move(FwdDecl), DIType);
}
/// Create a DICompositeType from a specialized struct. A specialized type
/// is a generic type, or a child type whose parent is generic.
llvm::DIType *createSpecializedStructOrClassType(
    NominalOrBoundGenericNominalType *Type,
    llvm::DIScope *Scope, llvm::DIFile *File, unsigned Line,
    unsigned SizeInBits, unsigned AlignInBits, llvm::DINode::DIFlags Flags,
    StringRef MangledName, bool IsClass = false) {
  // To emit debug info of the DwarfTypes level for generic types, the
  // strategy is to emit a description of all the fields for the type with
  // archetypes, and still the same debug info as the ASTTypes level for the
  // specialized type. For example, given: struct Pair<T, U> {
  //   let t: T
  //   let u: U
  // }
  // When emitting debug information for a type such as Pair<Int, Double>,
  // emit a description of all the fields for Pair<T, U>, and emit the regular
  // debug information for Pair<Int, Double>.
  auto *Decl = Type->getNominalOrBoundGenericNominal();
  if (!Decl)
    return nullptr;
  auto [IsUnsubstitued, UnsubstitutedType] =
      getUnsubstitutedType(Type, MangledName);
  auto UnsubstitutedDbgTy = DebugTypeInfo::getFromTypeInfo(
      UnsubstitutedType, IGM.getTypeInfoForUnlowered(UnsubstitutedType), IGM);
  // If the requested type already is the unsubstituted form, emit it as such.
  if (IsUnsubstitued) {
    return createUnsubstitutedGenericStructOrClassType(
        UnsubstitutedDbgTy, Decl, UnsubstitutedType, Scope, File, Line, Flags,
        nullptr, llvm::dwarf::DW_LANG_Swift, MangledName);
  }
  // Force the creation of the unsubstituted type, don't create it
  // directly so it goes through all the caching/verification logic.
  auto UnsubstitutedDITy = getOrCreateType(UnsubstitutedDbgTy);
  // For classes, also wire up the superclass on the unsubstituted type.
  if (auto *ClassTy = llvm::dyn_cast<BoundGenericClassType>(Type)) {
    auto SuperClassTy = ClassTy->getSuperclass();
    if (SuperClassTy) {
      auto SuperClassDbgTy = DebugTypeInfo::getFromTypeInfo(
          SuperClassTy, IGM.getTypeInfoForUnlowered(SuperClassTy), IGM);
      llvm::DIType *SuperClassDITy = getOrCreateType(SuperClassDbgTy);
      assert(SuperClassDITy && "getOrCreateType should never return null!");
      DBuilder.createInheritance(UnsubstitutedDITy, SuperClassDITy, 0, 0,
                                 llvm::DINode::FlagZero);
    }
  }
  // Generally, we don't emit members of a specialized bound generic, because
  // these can be reconstructed by substituting the "template parameters" in
  // the unspecialized type. We make an exception for inline arrays, because
  // DWARF has special support for arrays.
  // NOTE(review): `is_InlineArray` presumably covers the underscored
  // `_InlineArray` spelling alongside `InlineArray` — confirm both
  // predicates exist on TypeBase.
  if ((Type->isInlineArray() || Type->is_InlineArray()) &&
      !Type->hasTypeParameter() &&
      !Type->hasPrimaryArchetype())
    // Create the substituted type.
    return createStructType(Type, Decl, Scope, File, Line, SizeInBits,
                            AlignInBits, Flags, MangledName,
                            UnsubstitutedDITy);
  // Create the substituted type (without members).
  llvm::DIType *SpecializedDITy = createOpaqueStructWithSizedContainer(
      Scope, Decl ? Decl->getNameStr() : "", File, Line, SizeInBits,
      AlignInBits, Flags, MangledName, collectGenericParams(Type),
      UnsubstitutedDITy);
  return SpecializedDITy;
}
/// Create debug information for an enum with a raw type (enum E : Int {}).
/// Emitted as a DW_TAG_enumeration_type whose underlying type is the raw
/// type; requires complete type info for the raw type, otherwise only a
/// forward declaration is produced.
llvm::DICompositeType *createRawEnumType(CompletedDebugTypeInfo DbgTy,
                                         EnumDecl *Decl,
                                         StringRef MangledName,
                                         llvm::DIScope *Scope,
                                         llvm::DIFile *File, unsigned Line,
                                         llvm::DINode::DIFlags Flags) {
  assert(
      Decl->hasRawType() &&
      "Trying to create a raw enum debug info from enum with no raw type!");
  StringRef Name = Decl->getName().str();
  unsigned SizeInBits = DbgTy.getSizeInBits();
  // Default, since Swift doesn't allow specifying a custom alignment.
  unsigned AlignInBits = 0;
  auto FwdDecl = createTemporaryReplaceableForwardDecl(
      DbgTy.getType(), Scope, File, Line, SizeInBits, AlignInBits, Flags,
      MangledName, Name);
  auto RawType = Decl->getRawType();
  auto &TI = IGM.getTypeInfoForUnlowered(RawType);
  std::optional<CompletedDebugTypeInfo> ElemDbgTy =
      CompletedDebugTypeInfo::getFromTypeInfo(RawType, TI, IGM);
  if (!ElemDbgTy)
    // Without complete type info we can only create a forward decl.
    return DBuilder.createForwardDecl(
        llvm::dwarf::DW_TAG_enumeration_type, Name, Scope, File, Line,
        llvm::dwarf::DW_LANG_Swift, SizeInBits, 0, MangledName);
  SmallVector<llvm::Metadata *, 16> Elements;
  for (auto *ElemDecl : Decl->getAllElements()) {
    // TODO: add the option to emit an enumerator with no value, and use that
    // instead of emitting a 0.
    auto MTy =
        DBuilder.createEnumerator(ElemDecl->getBaseIdentifier().str(), 0);
    Elements.push_back(MTy);
  }
  auto EnumType = getOrCreateType(*ElemDbgTy);
  llvm::DICompositeType *DITy = DBuilder.createEnumerationType(
      Scope, Name, File, Line, SizeInBits, AlignInBits,
      DBuilder.getOrCreateArray(Elements), EnumType,
      llvm::dwarf::DW_LANG_Swift, MangledName, false);
  return DBuilder.replaceTemporary(std::move(FwdDecl), DITy);
}
/// Create debug information for an enum with no raw type.
/// Emitted as a DW_TAG_structure_type wrapping a variant part with one
/// member per case; payload cases carry the payload's debug type. Falls
/// back to a forward declaration when a payload's type info is incomplete.
llvm::DICompositeType *createVariantType(CompletedDebugTypeInfo DbgTy,
                                         EnumDecl *Decl,
                                         StringRef MangledName,
                                         unsigned AlignInBits,
                                         llvm::DIScope *Scope,
                                         llvm::DIFile *File, unsigned Line,
                                         llvm::DINode::DIFlags Flags) {
  // (Removed a stray duplicate semicolon after this assert.)
  assert(!Decl->getRawType() &&
         "Attempting to create variant debug info from raw enum!");
  StringRef Name = Decl->getName().str();
  unsigned SizeInBits = DbgTy.getSizeInBits();
  auto NumExtraInhabitants = DbgTy.getNumExtraInhabitants();
  // A variant part should actually be a child to a DW_TAG_structure_type
  // according to the DWARF spec.
  auto FwdDecl = createTemporaryReplaceableForwardDecl(
      DbgTy.getType(), Scope, File, Line, SizeInBits, AlignInBits, Flags,
      MangledName, Name);
  SmallVector<MemberDIType, 16> MemberTypes;
  for (auto *ElemDecl : Decl->getAllElements()) {
    std::optional<CompletedDebugTypeInfo> ElemDbgTy;
    if (auto PayloadTy = ElemDecl->getPayloadInterfaceType()) {
      // A variant case which carries a payload.
      PayloadTy = ElemDecl->getParentEnum()->mapTypeIntoEnvironment(PayloadTy);
      auto &TI = IGM.getTypeInfoForUnlowered(PayloadTy);
      ElemDbgTy = CompletedDebugTypeInfo::getFromTypeInfo(PayloadTy, TI, IGM);
      // FIXME: This is not correct, but seems to be the only way to emit
      // children for opaque-sized payload-carrying enums.
      if (!ElemDbgTy)
        ElemDbgTy =
            CompletedDebugTypeInfo::getFromTypeInfo(PayloadTy, TI, IGM, 0);
      if (!ElemDbgTy) {
        // Without complete type info we can only create a forward decl.
        return DBuilder.createForwardDecl(
            llvm::dwarf::DW_TAG_structure_type, Name, Scope, File, Line,
            llvm::dwarf::DW_LANG_Swift, SizeInBits, 0, MangledName);
      }
      MemberTypes.emplace_back(ElemDecl->getBaseIdentifier().str(),
                               getByteSize() *
                                   ElemDbgTy->getAlignment().getValue(),
                               TrackingDIType(getOrCreateType(*ElemDbgTy)));
    } else {
      // A variant with no payload.
      MemberTypes.emplace_back(ElemDecl->getBaseIdentifier().str(), 0,
                               nullptr);
    }
  }
  SmallVector<llvm::Metadata *, 16> Members;
  for (auto &Member : MemberTypes) {
    // Every variant member lives at offset 0 within the variant part.
    unsigned Offset = 0;
    Members.push_back(createMemberType(Member.DIType, Member.Name, Offset,
                                       Member.AlignInBits, Scope, File,
                                       Flags));
  }
  auto VPTy = DBuilder.createVariantPart(
      Scope, {}, File, Line, SizeInBits, AlignInBits, Flags, nullptr,
      DBuilder.getOrCreateArray(Members), /*UniqueIdentifier=*/"");
  llvm::DICompositeType *DITy = DBuilder.createStructType(
      Scope, Name, File, Line, SizeInBits, AlignInBits, Flags, nullptr,
      DBuilder.getOrCreateArray(VPTy), llvm::dwarf::DW_LANG_Swift, nullptr,
      // value_or matches the sibling createUnsubstitutedVariantType.
      MangledName, nullptr, NumExtraInhabitants.value_or(0));
  return DBuilder.replaceTemporary(std::move(FwdDecl), DITy);
}
// Create debug information for the *unsubstituted* (generic) form of an
// enum with no raw type. Sizes/alignments are emitted as 0 because they
// depend on the substituted type parameters.
llvm::DICompositeType *
createUnsubstitutedVariantType(DebugTypeInfo DbgTy, EnumDecl *Decl,
                               StringRef MangledName,
                               llvm::DIScope *Scope, llvm::DIFile *File,
                               unsigned Line, llvm::DINode::DIFlags Flags) {
  assert(!Decl->getRawType() &&
         "Attempting to create variant debug info from raw enum!");
  StringRef Name = Decl->getName().str();
  auto NumExtraInhabitants = DbgTy.getNumExtraInhabitants();
  unsigned SizeInBits = 0;
  unsigned AlignInBits = 0;
  // A variant part should actually be a child to a DW_TAG_structure_type
  // according to the DWARF spec.
  auto FwdDecl = createTemporaryReplaceableForwardDecl(
      DbgTy.getType(), Scope, File, Line, SizeInBits, AlignInBits, Flags,
      MangledName, Name);
  SmallVector<MemberDIType, 16> MemberTypes;
  for (auto *ElemDecl : Decl->getAllElements()) {
    std::optional<DebugTypeInfo> ElemDbgTy;
    if (auto PayloadTy = ElemDecl->getPayloadInterfaceType()) {
      // A variant case which carries a payload.
      PayloadTy = ElemDecl->getParentEnum()->mapTypeIntoEnvironment(PayloadTy);
      ElemDbgTy = DebugTypeInfo::getFromTypeInfo(
          PayloadTy, IGM.getTypeInfoForUnlowered(PayloadTy), IGM);
      MemberTypes.emplace_back(ElemDecl->getBaseIdentifier().str(),
                               getByteSize() *
                                   ElemDbgTy->getAlignment().getValue(),
                               TrackingDIType(getOrCreateType(*ElemDbgTy)));
    } else {
      // A variant with no payload.
      MemberTypes.emplace_back(ElemDecl->getBaseIdentifier().str(), 0,
                               nullptr);
    }
  }
  SmallVector<llvm::Metadata *, 16> Members;
  for (auto &Member : MemberTypes) {
    // Every variant member lives at offset 0 within the variant part.
    unsigned Offset = 0;
    Members.push_back(createMemberType(Member.DIType, Member.Name, Offset,
                                       Member.AlignInBits, Scope, File,
                                       Flags));
  }
  auto VPTy = DBuilder.createVariantPart(Scope, {}, File, Line, SizeInBits,
                                         AlignInBits, Flags, nullptr,
                                         DBuilder.getOrCreateArray(Members));
  llvm::DICompositeType *DITy = DBuilder.createStructType(
      Scope, Name, File, Line, SizeInBits, AlignInBits, Flags, nullptr,
      DBuilder.getOrCreateArray(VPTy), llvm::dwarf::DW_LANG_Swift, nullptr,
      MangledName, nullptr, NumExtraInhabitants.value_or(0));
  return DBuilder.replaceTemporary(std::move(FwdDecl), DITy);
}
/// Dispatch enum debug-info creation: raw-valued enums become a DWARF
/// enumeration type, payload enums become a variant-part structure.
llvm::DICompositeType *createEnumType(CompletedDebugTypeInfo DbgTy,
                                      EnumDecl *Decl, StringRef MangledName,
                                      unsigned AlignInBits,
                                      llvm::DIScope *Scope,
                                      llvm::DIFile *File, unsigned Line,
                                      llvm::DINode::DIFlags Flags) {
  if (!Decl->hasRawType())
    return createVariantType(DbgTy, Decl, MangledName, AlignInBits, Scope,
                             File, Line, Flags);
  return createRawEnumType(DbgTy, Decl, MangledName, Scope, File, Line,
                           Flags);
}
/// Get or create debug info for the desugared type \p Ty, reusing the
/// layout properties of \p DbgTy (alignment, buffer-ness, extra
/// inhabitants) but with the metadata flag cleared.
llvm::DIType *getOrCreateDesugaredType(Type Ty, DebugTypeInfo DbgTy) {
  DebugTypeInfo BlandDbgTy(
      Ty, DbgTy.getAlignment(), DbgTy.hasDefaultAlignment(), false,
      DbgTy.isFixedBuffer(), DbgTy.getNumExtraInhabitants());
  return getOrCreateType(BlandDbgTy);
}
/// Bit width of a basic (fixed-size) type.
uint64_t getSizeOfBasicType(CompletedDebugTypeInfo DbgTy) {
  return DbgTy.getSizeInBits();
}
/// Collect the type parameters of a bound generic type. This is needed to
/// anchor any typedefs that may appear in parameters so they can be
/// resolved in the debugger without needing to query the Swift module.
llvm::DINodeArray collectGenericParams(NominalOrBoundGenericNominalType *BGT,
                                       bool AsForwardDeclarations = false) {
  // Collect the generic args from the type and its parent.
  std::vector<Type> GenericArgs;
  Type CurrentType = BGT;
  while (CurrentType && CurrentType->getAnyNominal()) {
    if (auto *BGT = CurrentType->getAs<BoundGenericType>())
      GenericArgs.insert(GenericArgs.end(), BGT->getGenericArgs().begin(),
                         BGT->getGenericArgs().end());
    CurrentType = CurrentType->getNominalParent();
  }
  SmallVector<llvm::Metadata *, 16> TemplateParams;
  for (auto Arg : GenericArgs) {
    DebugTypeInfo ParamDebugType;
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes &&
        !AsForwardDeclarations) {
      // Integer generic parameters carry no type info of their own.
      if (Arg->is<IntegerType>()) {
        ParamDebugType = DebugTypeInfo(Arg);
      } else {
        // For the DwarfTypes level don't generate just a forward declaration
        // for the generic type parameters.
        ParamDebugType = DebugTypeInfo::getFromTypeInfo(
            Arg, IGM.getTypeInfoForUnlowered(Arg), IGM);
      }
    } else {
      ParamDebugType = DebugTypeInfo::getForwardDecl(Arg);
    }
    TemplateParams.push_back(DBuilder.createTemplateTypeParameter(
        TheCU, "", getOrCreateType(ParamDebugType), false));
  }
  return DBuilder.getOrCreateArray(TemplateParams);
}
/// Create a sized container for a sizeless type. Used to represent
/// BoundGenericEnums that may have different sizes depending on what they are
/// bound to, but still share a mangled name.
llvm::DIType *createOpaqueStructWithSizedContainer(
    llvm::DIScope *Scope, StringRef Name, llvm::DIFile *File, unsigned Line,
    unsigned SizeInBits, unsigned AlignInBits, llvm::DINode::DIFlags Flags,
    StringRef MangledName, llvm::DINodeArray BoundParams,
    llvm::DIType *SpecificationOf = nullptr) {
  // This uses a separate cache and not DIRefMap for the inner type to avoid
  // associating the anonymous container (which is specific to the
  // variable/storage and not the type) with the MangledName.
  llvm::DICompositeType *UniqueType = nullptr;
  auto *UID = llvm::MDString::get(IGM.getLLVMContext(), MangledName);
  if (llvm::Metadata *V = InnerTypeCache.lookup(UID))
    UniqueType = cast<llvm::DICompositeType>(V);
  else {
    UniqueType = DBuilder.createForwardDecl(
        llvm::dwarf::DW_TAG_structure_type, MangledName, Scope, File, Line,
        llvm::dwarf::DW_LANG_Swift, 0, 0);
    if (BoundParams)
      DBuilder.replaceArrays(UniqueType, nullptr, BoundParams);
    InnerTypeCache[UID] = llvm::TrackingMDNodeRef(UniqueType);
  }
  // Wrap the shared inner type in an anonymous, concretely-sized member.
  llvm::Metadata *Elements[] = {DBuilder.createMemberType(
      Scope, "", File, 0, SizeInBits, AlignInBits, 0, Flags, UniqueType)};
  // FIXME: It's a limitation of LLVM that a forward declaration cannot have a
  // specificationOf, so this attribute is put on the sized container type
  // instead. This is confusing consumers, and LLDB has to go out of its way
  // to parse these confusing types as intended.
  return DBuilder.createStructType(
      Scope, "", File, Line, SizeInBits, AlignInBits, Flags,
      /* DerivedFrom */ nullptr, DBuilder.getOrCreateArray(Elements),
      llvm::dwarf::DW_LANG_Swift, nullptr, "", SpecificationOf, 0);
}
/// Create a pointer-sized struct for \p Name. At DwarfTypes level the
/// pointee is represented by a forward declaration; below that, an opaque
/// pointer-width struct suffices.
llvm::DIType *
createPointerSizedStruct(llvm::DIScope *Scope, StringRef Name,
                         llvm::DIFile *File, unsigned Line,
                         llvm::DINode::DIFlags Flags, StringRef MangledName,
                         llvm::DIType *SpecificationOf = nullptr) {
  if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) {
    auto FwdDecl = DBuilder.createForwardDecl(
        llvm::dwarf::DW_TAG_structure_type, Name, Scope, File, Line,
        llvm::dwarf::DW_LANG_Swift, 0, 0);
    return createPointerSizedStruct(Scope, Name, FwdDecl, File, Line, Flags,
                                    MangledName, SpecificationOf);
  } else {
    unsigned SizeInBits = CI.getTargetInfo().getPointerWidth(clang::LangAS::Default);
    return createOpaqueStruct(Scope, Name, File, Line, SizeInBits, 0, Flags,
                              MangledName);
  }
}
/// Create a pointer-sized struct holding a single "ptr" member that points
/// at \p PointeeTy.
llvm::DIType *createPointerSizedStruct(
    llvm::DIScope *Scope, StringRef Name, llvm::DIType *PointeeTy,
    llvm::DIFile *File, unsigned Line, llvm::DINode::DIFlags Flags,
    StringRef MangledName, llvm::DIType *SpecificationOf = nullptr) {
  unsigned PtrSize =
      CI.getTargetInfo().getPointerWidth(clang::LangAS::Default);
  auto PtrTy = DBuilder.createPointerType(PointeeTy, PtrSize, 0);
  llvm::Metadata *Elements[] = {DBuilder.createMemberType(
      Scope, "ptr", File, 0, PtrSize, 0, 0, Flags, PtrTy)};
  return DBuilder.createStructType(
      Scope, Name, File, Line, PtrSize, 0, Flags,
      /* DerivedFrom */ nullptr, DBuilder.getOrCreateArray(Elements),
      llvm::dwarf::DW_LANG_Swift, nullptr, MangledName, SpecificationOf);
}
/// Create a two-pointer struct: a "ptr" member pointing at \p PointeeTy
/// plus an anonymous "_" pointer member (e.g. a function pointer paired
/// with its context).
llvm::DIType *
createDoublePointerSizedStruct(llvm::DIScope *Scope, StringRef Name,
                               llvm::DIType *PointeeTy, llvm::DIFile *File,
                               unsigned Line, llvm::DINode::DIFlags Flags,
                               StringRef MangledName) {
  unsigned PtrSize = CI.getTargetInfo().getPointerWidth(clang::LangAS::Default);
  llvm::Metadata *Elements[] = {
      DBuilder.createMemberType(
          Scope, "ptr", File, 0, PtrSize, 0, 0, Flags,
          DBuilder.createPointerType(PointeeTy, PtrSize, 0)),
      DBuilder.createMemberType(
          Scope, "_", File, 0, PtrSize, 0, 0, Flags,
          DBuilder.createPointerType(nullptr, PtrSize, 0))};
  return DBuilder.createStructType(
      Scope, Name, File, Line, 2 * PtrSize, 0, Flags,
      /* DerivedFrom */ nullptr, DBuilder.getOrCreateArray(Elements),
      llvm::dwarf::DW_LANG_Swift, nullptr, MangledName);
}
/// Create the artificial "$swift.fixedbuffer" struct: a three-pointer-word
/// buffer whose "contents" member describes the stored value.
llvm::DIType *createFixedValueBufferStruct(llvm::DIType *PointeeTy) {
  unsigned Line = 0;
  unsigned PtrSize = CI.getTargetInfo().getPointerWidth(clang::LangAS::Default);
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagArtificial;
  llvm::DIFile *File = MainFile;
  llvm::DIScope *Scope = TheCU;
  llvm::Metadata *Elements[] = {DBuilder.createMemberType(
      Scope, "contents", File, 0, PtrSize, 0, 0, Flags, PointeeTy)};
  return DBuilder.createStructType(
      Scope, "$swift.fixedbuffer", File, Line, 3 * PtrSize, 0, Flags,
      /* DerivedFrom */ nullptr, DBuilder.getOrCreateArray(Elements),
      llvm::dwarf::DW_LANG_Swift, nullptr);
}
/// Create struct with a single member, used for Swift types that do not yet
/// have specialized DIDerivedTypes.
llvm::DIType *createSingleMemberStruct(
    llvm::DIScope *Scope, StringRef Name, llvm::DIFile *File, unsigned Line,
    unsigned SizeInBits, unsigned AlignInBits, llvm::DINode::DIFlags Flags,
    StringRef MangledName, StringRef MemberName, llvm::DIType *MemberType) {
  llvm::Metadata *Elements[] = {
      DBuilder.createMemberType(Scope, MemberName, File, 0, SizeInBits,
                                AlignInBits, 0, Flags, MemberType)};
  return DBuilder.createStructType(
      Scope, Name, File, Line, SizeInBits, AlignInBits, Flags,
      /* DerivedFrom */ nullptr, DBuilder.getOrCreateArray(Elements),
      llvm::dwarf::DW_LANG_Swift, nullptr, MangledName, nullptr, 0);
}
/// Create debug info for a function value.
///
/// Lowers the AST function type to a SILFunctionType to recover the
/// parameter list, then emits one of three representations: a two-word
/// struct for a thick function (function pointer + context), an opaque
/// struct for a thick function of unexpected size, or a pointer-sized
/// struct otherwise.
llvm::DIType *createFunctionPointer(DebugTypeInfo DbgTy, llvm::DIScope *Scope,
                                    unsigned SizeInBits, unsigned AlignInBits,
                                    llvm::DINode::DIFlags Flags,
                                    StringRef MangledName) {
  // Insert a temporary forward declaration first so that recursive
  // references through the parameter types can resolve; it is replaced
  // with the final type at the end.
  auto FwdDecl = createTemporaryReplaceableForwardDecl(
      DbgTy.getType(), Scope, MainFile, 0, SizeInBits, AlignInBits, Flags,
      MangledName, MangledName);
  CanSILFunctionType FunTy;
  TypeBase *BaseTy = DbgTy.getType();
  if (auto *SILFnTy = dyn_cast<SILFunctionType>(BaseTy))
    FunTy = CanSILFunctionType(SILFnTy);
  // FIXME: Handling of generic parameters in SIL type lowering is in flux.
  // DebugInfo doesn't appear to care about the generic context, so just
  // throw it away before lowering.
  else if (isa<GenericFunctionType>(BaseTy)) {
    auto *fTy = cast<AnyFunctionType>(BaseTy);
    auto *nongenericTy = FunctionType::get(fTy->getParams(), fTy->getResult(),
                                           fTy->getExtInfo());
    FunTy = IGM.getLoweredType(nongenericTy).castTo<SILFunctionType>();
  } else
    FunTy = IGM.getLoweredType(BaseTy).castTo<SILFunctionType>();
  auto Params = createParameterTypes(FunTy);
  auto FnTy = DBuilder.createSubroutineType(Params, Flags);
  llvm::DIType *DITy;
  if (FunTy->getRepresentation() == SILFunctionType::Representation::Thick) {
    if (SizeInBits ==
        2 * CI.getTargetInfo().getPointerWidth(clang::LangAS::Default))
      // This is a FunctionPairTy: { i8*, %swift.refcounted* }.
      DITy = createDoublePointerSizedStruct(Scope, MangledName, FnTy,
                                            MainFile, 0, Flags, MangledName);
    else
      // This is a generic function as noted above.
      DITy = createOpaqueStruct(Scope, MangledName, MainFile, 0, SizeInBits,
                                AlignInBits, Flags, MangledName);
  } else {
    // Thin/other representations are a single code pointer.
    assert(SizeInBits ==
           CI.getTargetInfo().getPointerWidth(clang::LangAS::Default));
    DITy = createPointerSizedStruct(Scope, MangledName, FnTy, MainFile, 0,
                                    Flags, MangledName);
  }
  return DBuilder.replaceTemporary(std::move(FwdDecl), DITy);
}
/// Create debug info for a tuple type, represented as a struct whose
/// members are the (unnamed) tuple elements.
llvm::DIType *createTuple(DebugTypeInfo DbgTy, llvm::DIScope *Scope,
                          unsigned SizeInBits, unsigned AlignInBits,
                          llvm::DINode::DIFlags Flags,
                          StringRef MangledName) {
  // Temporary forward declaration guards against recursive element types;
  // it is replaced with the finished struct below.
  auto FwdDecl = createTemporaryReplaceableForwardDecl(
      DbgTy.getType(), Scope, MainFile, 0, SizeInBits, AlignInBits, Flags,
      MangledName, MangledName);
  TypeBase *BaseTy = DbgTy.getType();
  auto *TupleTy = BaseTy->castTo<TupleType>();
  SmallVector<MemberDIType, 16> MemberTypes;
  auto genericSig = IGM.getCurGenericContext();
  for (auto ElemTy : TupleTy->getElementTypes()) {
    auto &elemTI = IGM.getTypeInfoForUnlowered(
        AbstractionPattern(genericSig, ElemTy->getCanonicalType()), ElemTy);
    auto DbgTy =
        DebugTypeInfo::getFromTypeInfo(ElemTy, elemTI, IGM);
    // NOTE(review): the second argument appears to convert the element's
    // byte alignment into bits via getByteSize() — confirm against
    // MemberDIType's field layout (Name, AlignInBits, DIType).
    MemberTypes.emplace_back("",
                             getByteSize() * DbgTy.getAlignment().getValue(),
                             getOrCreateType(DbgTy));
  }
  SmallVector<llvm::Metadata *, 16> Members;
  // Presumably OffsetInBits is passed by reference and advanced by
  // createMemberType for each member — TODO confirm (see FIXME below).
  unsigned OffsetInBits = 0;
  for (auto &Member : MemberTypes)
    Members.emplace_back(createMemberType(Member.DIType, Member.Name,
                                          OffsetInBits, Member.AlignInBits,
                                          Scope, MainFile, Flags));
  // FIXME: assert that SizeInBits == OffsetInBits.
  llvm::DICompositeType *DITy = DBuilder.createStructType(
      Scope, MangledName, MainFile, 0, SizeInBits, AlignInBits, Flags,
      nullptr, // DerivedFrom
      DBuilder.getOrCreateArray(Members), llvm::dwarf::DW_LANG_Swift, nullptr,
      MangledName);
  return DBuilder.replaceTemporary(std::move(FwdDecl), DITy);
}
/// Create a named composite (struct) type. Any bound generic parameters in
/// \p BoundParams are attached after creation via replaceArrays.
llvm::DICompositeType *
createStruct(llvm::DIScope *Scope, StringRef Name, llvm::DIFile *File,
             unsigned Line, unsigned SizeInBits, unsigned AlignInBits,
             llvm::DINode::DIFlags Flags, StringRef MangledName,
             llvm::DINodeArray Elements, llvm::DINodeArray BoundParams,
             llvm::DIType *SpecificationOf) {
  llvm::DICompositeType *DITy = DBuilder.createStructType(
      Scope, Name, File, Line, SizeInBits, AlignInBits, Flags,
      /* DerivedFrom */ nullptr, Elements, llvm::dwarf::DW_LANG_Swift,
      nullptr, MangledName, SpecificationOf);
  // Only the template-parameter array is replaced; the element array
  // created above stays as-is.
  if (BoundParams)
    DBuilder.replaceArrays(DITy, nullptr, BoundParams);
  return DITy;
}
/// Create an opaque struct: a named composite with the given size but an
/// empty member list. Used when only the layout footprint — not the
/// contents — is known or needed.
llvm::DICompositeType *
createOpaqueStruct(llvm::DIScope *Scope, StringRef Name, llvm::DIFile *File,
                   unsigned Line, unsigned SizeInBits, unsigned AlignInBits,
                   llvm::DINode::DIFlags Flags, StringRef MangledName,
                   llvm::DINodeArray BoundParams = {},
                   llvm::DIType *SpecificationOf = nullptr) {
  // Forward to createStruct with an empty element array.
  return createStruct(Scope, Name, File, Line, SizeInBits, AlignInBits, Flags,
                      MangledName, {}, BoundParams, SpecificationOf);
}
/// Decide whether a freshly created DIType may be stored in the cache.
bool shouldCacheDIType(llvm::DIType *DITy, DebugTypeInfo &DbgTy) {
  // Forward declarations and fixed buffers are provisional; don't cache
  // them (nor a type alias to a forward declaration).
  if (DITy->isForwardDecl() || DbgTy.isFixedBuffer())
    return false;
  // FIXME: Primary archetypes carry all sorts of auxiliary information
  // that isn't contained in their mangled name. See also
  // getMangledName().
  auto Ty = DbgTy.getType();
  if (Ty && Ty->getKind() == swift::TypeKind::PrimaryArchetype)
    return false;
  return true;
}
/// Try to compute full (sized) debug type info for \p DbgTy. Returns an
/// empty optional for missing types, types containing type parameters,
/// and builtin integer types.
std::optional<CompletedDebugTypeInfo> completeType(DebugTypeInfo DbgTy) {
  auto *Ty = DbgTy.getType();
  if (!Ty || Ty->hasTypeParameter() || isa<IntegerType>(Ty))
    return {};
  return CompletedDebugTypeInfo::getFromTypeInfo(
      Ty, IGM.getTypeInfoForUnlowered(Ty), IGM);
}
/// Construct a DIType for \p DbgTy, dispatching on the kind of its
/// underlying Swift type. \p MangledName serves as the unique identifier
/// and, for many kinds, also as the display name of the resulting type.
///
/// Fixes relative to the previous revision: the debug message for a null
/// TypeBase no longer dereferences the null pointer; a duplicated assert
/// in the Class case was removed; two identical forward-declaration
/// returns in the Struct case were merged.
llvm::DIType *createType(DebugTypeInfo DbgTy, StringRef MangledName,
                         llvm::DIScope *Scope, llvm::DIFile *File) {
  // FIXME: For SizeInBits, clang uses the actual size of the type on
  // the target machine instead of the storage size that is alloca'd
  // in the LLVM IR. For all types that are boxed in a struct, we are
  // emitting the storage size of the struct, but it may be necessary
  // to emit the (target!) size of the underlying basic type.
  uint64_t SizeOfByte = CI.getTargetInfo().getCharWidth();
  std::optional<CompletedDebugTypeInfo> CompletedDbgTy = completeType(DbgTy);
  std::optional<uint64_t> SizeInBitsOrNull;
  if (CompletedDbgTy)
    SizeInBitsOrNull = CompletedDbgTy->getSizeInBits();
  uint64_t SizeInBits = SizeInBitsOrNull.value_or(0);
  unsigned AlignInBits = DbgTy.hasDefaultAlignment()
                             ? 0
                             : DbgTy.getAlignment().getValue() * SizeOfByte;
  unsigned Encoding = 0;
  uint32_t NumExtraInhabitants = DbgTy.getNumExtraInhabitants().value_or(0);
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  TypeBase *BaseTy = DbgTy.getType();
  if (!BaseTy) {
    // BaseTy is null on this path, so it must not be dumped (the previous
    // revision called DbgTy.getType()->dump() here, a null dereference).
    LLVM_DEBUG(llvm::dbgs() << "Type without TypeBase\n");
    if (!InternalType) {
      StringRef Name = "<internal>";
      InternalType = DBuilder.createForwardDecl(
          llvm::dwarf::DW_TAG_structure_type, Name, Scope, File,
          /*Line*/ 0, llvm::dwarf::DW_LANG_Swift, SizeInBits, AlignInBits,
          MangledName);
    }
    return InternalType;
  }
  llvm::DIType *SpecificationOf = nullptr;
  // Here goes!
  switch (BaseTy->getKind()) {
  case TypeKind::BuiltinUnboundGeneric:
    llvm_unreachable("not a real type");
  case TypeKind::BuiltinBorrow: {
    llvm_unreachable("todo");
  }
  case TypeKind::BuiltinFixedArray: {
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) {
      auto *FixedArray = llvm::cast<swift::BuiltinFixedArrayType>(BaseTy);
      llvm::DIType *ElementTy = getOrCreateType(FixedArray->getElementType());
      llvm::SmallVector<llvm::Metadata *, 2> Subscripts;
      if (auto NumElts = FixedArray->getFixedInhabitedSize()) {
        auto *NumEltsNode = llvm::ConstantAsMetadata::get(
            llvm::ConstantInt::get(IGM.Int64Ty, *NumElts));
        Subscripts.push_back(DBuilder.getOrCreateSubrange(
            NumEltsNode /*count*/, nullptr /*lowerBound*/,
            nullptr /*upperBound*/, nullptr /*stride*/));
      }
      return DBuilder.createArrayType(SizeInBits, AlignInBits, ElementTy,
                                      DBuilder.getOrCreateArray(Subscripts));
    }
    unsigned FwdDeclLine = 0;
    return createOpaqueStruct(Scope, "Builtin.FixedArray", MainFile,
                              FwdDeclLine, SizeInBits, AlignInBits, Flags,
                              MangledName);
  }
  case TypeKind::BuiltinPackIndex:
  case TypeKind::BuiltinInteger: {
    Encoding = llvm::dwarf::DW_ATE_unsigned;
    if (CompletedDbgTy)
      SizeInBits = getSizeOfBasicType(*CompletedDbgTy);
    break;
  }
  case TypeKind::BuiltinIntegerLiteral: {
    Encoding = llvm::dwarf::DW_ATE_unsigned; // ?
    if (CompletedDbgTy)
      SizeInBits = getSizeOfBasicType(*CompletedDbgTy);
    break;
  }
  case TypeKind::BuiltinFloat: {
    auto *FloatTy = BaseTy->castTo<BuiltinFloatType>();
    // Assuming that the bitwidth and FloatTy->getFPKind() are identical.
    SizeInBits = FloatTy->getBitWidth();
    Encoding = llvm::dwarf::DW_ATE_float;
    break;
  }
  case TypeKind::BuiltinNativeObject:
  case TypeKind::BuiltinBridgeObject:
  case TypeKind::BuiltinRawPointer:
  case TypeKind::BuiltinRawUnsafeContinuation:
  case TypeKind::BuiltinJob: {
    unsigned PtrSize =
        CI.getTargetInfo().getPointerWidth(clang::LangAS::Default);
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) {
      Flags |= llvm::DINode::FlagArtificial;
      llvm::DICompositeType *PTy = DBuilder.createStructType(
          Scope, MangledName, File, 0, PtrSize, 0, Flags, nullptr, nullptr,
          llvm::dwarf::DW_LANG_Swift, nullptr, {}, nullptr,
          NumExtraInhabitants);
      return PTy;
    }
    llvm::DIDerivedType *PTy = DBuilder.createPointerType(
        nullptr, PtrSize, 0,
        /* DWARFAddressSpace */ std::nullopt, MangledName);
    // FIXME: Set DIFlagObjectPointer and make sure it is only set for `self`.
    return PTy;
  }
  case TypeKind::BuiltinExecutor: {
    return createDoublePointerSizedStruct(
        Scope, "Builtin.Executor", nullptr, MainFile, 0,
        llvm::DINode::FlagArtificial, MangledName);
  }
  case TypeKind::BuiltinImplicitActor: {
    return createDoublePointerSizedStruct(
        Scope, "Builtin.ImplicitActor", nullptr, MainFile, 0,
        llvm::DINode::FlagArtificial, MangledName);
  }
  case TypeKind::DynamicSelf: {
    // Self. We don't have a way to represent instancetype in DWARF,
    // so we emit the static type instead. This is similar to what we
    // do with instancetype in Objective-C.
    auto *DynamicSelfTy = BaseTy->castTo<DynamicSelfType>();
    auto SelfTy =
        getOrCreateDesugaredType(DynamicSelfTy->getSelfType(), DbgTy);
    return DBuilder.createTypedef(SelfTy, MangledName, File, 0, File);
  }
  // Even builtin swift types usually come boxed in a struct.
  case TypeKind::Struct: {
    auto *StructTy = BaseTy->castTo<StructType>();
    auto *Decl = StructTy->getDecl();
    auto L = getFileAndLocation(Decl);
    // No line numbers are attached to type forward declarations. This is
    // intentional: It interferes with the efficacy of incremental builds.
    // We don't want a whitespace change to an secondary file trigger a
    // recompilation of the debug info of a primary source file.
    unsigned FwdDeclLine = 0;
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) {
      if (StructTy->isSpecialized())
        return createSpecializedStructOrClassType(
            StructTy, Scope, L.File, L.Line, SizeInBits, AlignInBits,
            Flags, MangledName);
      return createStructType(StructTy, Decl, Scope, L.File, L.Line,
                              SizeInBits, AlignInBits, Flags, MangledName);
    }
    StringRef Name = Decl->getName().str();
    // Sizeless structs and fixed buffers both come out as forward
    // declarations (merged from two identical returns).
    if (!SizeInBitsOrNull || DbgTy.isFixedBuffer())
      return DBuilder.createForwardDecl(
          llvm::dwarf::DW_TAG_structure_type, MangledName, Scope, L.File,
          FwdDeclLine, llvm::dwarf::DW_LANG_Swift, 0, AlignInBits);
    return createOpaqueStruct(Scope, Name, L.File, FwdDeclLine, SizeInBits,
                              AlignInBits, Flags, MangledName, {},
                              SpecificationOf);
  }
  case TypeKind::Class: {
    // Classes are represented as DW_TAG_structure_type. This way the
    // DW_AT_APPLE_runtime_class(DW_LANG_Swift) attribute can be
    // used to differentiate them from C++ and ObjC classes.
    auto *ClassTy = BaseTy->castTo<ClassType>();
    auto *Decl = ClassTy->getDecl();
    auto L = getFileAndLocation(Decl);
    unsigned FwdDeclLine = 0;
    assert(SizeInBits ==
           CI.getTargetInfo().getPointerWidth(clang::LangAS::Default));
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) {
      if (ClassTy->isSpecialized())
        return createSpecializedStructOrClassType(
            ClassTy, Scope, L.File, L.Line, SizeInBits, AlignInBits,
            Flags, MangledName);
      auto *DIType =
          createStructType(ClassTy, Decl, Scope, File, L.Line, SizeInBits,
                           AlignInBits, Flags, MangledName);
      assert(DIType && "createStructType should never return null!");
      auto SuperClassTy = ClassTy->getSuperclass();
      if (SuperClassTy) {
        auto SuperClassDbgTy = DebugTypeInfo::getFromTypeInfo(
            SuperClassTy, IGM.getTypeInfoForUnlowered(SuperClassTy), IGM);
        llvm::DIType *SuperClassDITy = getOrCreateType(SuperClassDbgTy);
        assert(SuperClassDITy && "getOrCreateType should never return null!");
        DBuilder.retainType(DBuilder.createInheritance(
            DIType, SuperClassDITy, 0, 0, llvm::DINode::FlagZero));
      }
      return DIType;
    }
    return createPointerSizedStruct(Scope, Decl->getNameStr(), L.File,
                                    FwdDeclLine, Flags, MangledName,
                                    SpecificationOf);
  }
  case TypeKind::Existential: {
    auto *ExistentialTy = BaseTy->castTo<ExistentialType>();
    Type ConstraintTy = ExistentialTy->getConstraintType();
    TypeBase *TyPtr = ConstraintTy.getPointer();
    if (!isa<ProtocolType>(TyPtr) && !isa<ProtocolCompositionType>(TyPtr) &&
        !isa<ParameterizedProtocolType>(TyPtr)) {
      // This could be an alias type, which we need to anchor in DWARF.
      auto *Decl = DbgTy.getDecl();
      auto L = getFileAndLocation(Decl);
      unsigned FwdDeclLine = 0;
      return createSingleMemberStruct(
          Scope, Decl ? Decl->getNameStr() : MangledName, L.File, FwdDeclLine,
          SizeInBits, AlignInBits, Flags, MangledName, "$swift.constraint",
          getOrCreateType(ConstraintTy));
    }
    // If the existential is just a protocol type it shares its mangled name
    // with it, so we can just represent it directly as a protocol.
    BaseTy = TyPtr;
  }
    LLVM_FALLTHROUGH;
  // FIXME: (LLVM branch) This should probably be a DW_TAG_interface_type.
  case TypeKind::Protocol:
  case TypeKind::ProtocolComposition:
  case TypeKind::ParameterizedProtocol: {
    auto *Decl = DbgTy.getDecl();
    auto L = getFileAndLocation(Decl);
    unsigned FwdDeclLine = 0;
    return createOpaqueStruct(Scope, Decl ? Decl->getNameStr() : MangledName,
                              L.File, FwdDeclLine, SizeInBits, AlignInBits,
                              Flags, MangledName);
  }
  case TypeKind::UnboundGeneric: {
    auto *UnboundTy = BaseTy->castTo<UnboundGenericType>();
    auto *Decl = UnboundTy->getDecl();
    auto L = getFileAndLocation(Decl);
    unsigned FwdDeclLine = 0;
    assert(SizeInBits ==
           CI.getTargetInfo().getPointerWidth(clang::LangAS::Default));
    return createPointerSizedStruct(Scope,
                                    Decl ? Decl->getNameStr() : MangledName,
                                    L.File, FwdDeclLine, Flags, MangledName);
  }
  case TypeKind::BoundGenericStruct: {
    auto *StructTy = BaseTy->castTo<BoundGenericStructType>();
    auto *Decl = StructTy->getDecl();
    auto L = getFileAndLocation(Decl);
    unsigned FwdDeclLine = 0;
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes)
      return createSpecializedStructOrClassType(
          StructTy, Scope, L.File, L.Line, SizeInBits, AlignInBits,
          Flags, MangledName);
    return createOpaqueStructWithSizedContainer(
        Scope, Decl ? Decl->getNameStr() : "", L.File, FwdDeclLine,
        SizeInBits, AlignInBits, Flags, MangledName,
        collectGenericParams(StructTy), SpecificationOf);
  }
  case TypeKind::BoundGenericClass: {
    auto *ClassTy = BaseTy->castTo<BoundGenericClassType>();
    auto *Decl = ClassTy->getDecl();
    auto L = getFileAndLocation(Decl);
    return createSpecializedStructOrClassType(ClassTy, Scope, L.File,
                                              L.Line, SizeInBits, AlignInBits,
                                              Flags, MangledName);
  }
  case TypeKind::Pack:
  case TypeKind::PackElement:
  case TypeKind::SILPack:
  case TypeKind::PackExpansion:
    // assert(SizeInBits == CI.getTargetInfo().getPointerWidth(0));
    return createPointerSizedStruct(Scope, MangledName, MainFile, 0, Flags,
                                    MangledName);
  case TypeKind::BuiltinTuple:
    llvm_unreachable("BuiltinTupleType should not show up here");
  case TypeKind::Tuple: {
    // Tuples are also represented as structs. Since tuples are ephemeral
    // (not nominal) they don't have a source location.
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes)
      return createTuple(DbgTy, Scope, SizeInBits, AlignInBits, Flags,
                         MangledName);
    else
      return createOpaqueStruct(Scope, MangledName, MainFile, 0, SizeInBits,
                                AlignInBits, Flags, MangledName);
  }
  case TypeKind::InOut:
    break;
  case TypeKind::OpaqueTypeArchetype:
  case TypeKind::PrimaryArchetype:
  case TypeKind::ExistentialArchetype:
  case TypeKind::ElementArchetype:
  case TypeKind::PackArchetype: {
    auto *Archetype = BaseTy->castTo<ArchetypeType>();
    AssociatedTypeDecl *assocType = nullptr;
    if (auto depMemTy =
            Archetype->getInterfaceType()->getAs<DependentMemberType>())
      assocType = depMemTy->getAssocType();
    auto L = getFileAndLocation(assocType);
    if (!L.File)
      L.File = CompilerGeneratedFile;
    unsigned FwdDeclLine = 0;
    auto Superclass = Archetype->getSuperclass();
    auto DerivedFrom = Superclass.isNull()
                           ? nullptr
                           : getOrCreateDesugaredType(Superclass, DbgTy);
    llvm::TempDICompositeType FwdDecl(DBuilder.createReplaceableCompositeType(
        llvm::dwarf::DW_TAG_structure_type, MangledName, Scope, L.File,
        FwdDeclLine, llvm::dwarf::DW_LANG_Swift, SizeInBits, AlignInBits,
        Flags));
    // Emit the protocols the archetypes conform to.
    SmallVector<llvm::Metadata *, 4> Protocols;
    for (auto *ProtocolDecl : Archetype->getConformsTo()) {
      // Skip marker protocols, as they are not available at runtime.
      if (ProtocolDecl->isMarkerProtocol())
        continue;
      auto PTy =
          IGM.getLoweredType(ProtocolDecl->getInterfaceType()).getASTType();
      auto PDbgTy =
          DebugTypeInfo::getFromTypeInfo(ProtocolDecl->getInterfaceType(),
                                         IGM.getTypeInfoForLowered(PTy), IGM);
      auto PDITy = getOrCreateType(PDbgTy);
      Protocols.push_back(
          DBuilder.createInheritance(FwdDecl.get(), PDITy, 0, 0, Flags));
    }
    llvm::DICompositeType *DITy = DBuilder.createStructType(
        Scope, MangledName, L.File, FwdDeclLine, SizeInBits, AlignInBits,
        Flags, DerivedFrom, DBuilder.getOrCreateArray(Protocols),
        llvm::dwarf::DW_LANG_Swift, nullptr);
    return DBuilder.replaceTemporary(std::move(FwdDecl), DITy);
  }
  case TypeKind::ExistentialMetatype:
  case TypeKind::Metatype: {
    // Metatypes are (mostly) singleton type descriptors, often without
    // storage.
    Flags |= llvm::DINode::FlagArtificial;
    auto L = getFileAndLocation(DbgTy.getDecl());
    unsigned FwdDeclLine = 0;
    return DBuilder.createStructType(Scope, MangledName, L.File, FwdDeclLine,
                                     SizeInBits, AlignInBits, Flags, nullptr,
                                     nullptr, llvm::dwarf::DW_LANG_Swift,
                                     nullptr, MangledName);
  }
  case TypeKind::SILFunction:
  case TypeKind::Function:
  case TypeKind::GenericFunction: {
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes)
      return createFunctionPointer(DbgTy, Scope, SizeInBits, AlignInBits,
                                   Flags, MangledName);
    else
      return createOpaqueStruct(Scope, MangledName, MainFile, 0, SizeInBits,
                                AlignInBits, Flags, MangledName);
  }
  case TypeKind::Enum: {
    auto *EnumTy = BaseTy->castTo<EnumType>();
    auto *Decl = EnumTy->getDecl();
    auto L = getFileAndLocation(Decl);
    unsigned FwdDeclLine = 0;
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) {
      if (EnumTy->isSpecialized() && !Decl->hasRawType())
        return createSpecializedEnumType(EnumTy, Decl, MangledName,
                                         SizeInBits, AlignInBits, Scope, File,
                                         FwdDeclLine, Flags);
      if (CompletedDbgTy)
        return createEnumType(*CompletedDbgTy, Decl, MangledName, AlignInBits,
                              Scope, L.File, L.Line, Flags);
    }
    return createOpaqueStruct(Scope, Decl->getName().str(), L.File,
                              FwdDeclLine, SizeInBits, AlignInBits, Flags,
                              MangledName, {}, SpecificationOf);
  }
  case TypeKind::BoundGenericEnum: {
    auto *EnumTy = BaseTy->castTo<BoundGenericEnumType>();
    auto *Decl = EnumTy->getDecl();
    auto L = getFileAndLocation(Decl);
    unsigned FwdDeclLine = 0;
    if (Opts.DebugInfoLevel > IRGenDebugInfoLevel::ASTTypes) {
      if (EnumTy->isSpecialized() && !EnumTy->hasTypeParameter() &&
          !EnumTy->hasPrimaryArchetype())
        return createSpecializedEnumType(EnumTy, Decl, MangledName,
                                         SizeInBits, AlignInBits, Scope, File,
                                         FwdDeclLine, Flags);
      if (CompletedDbgTy)
        return createEnumType(*CompletedDbgTy, Decl, MangledName, AlignInBits,
                              Scope, L.File, L.Line, Flags);
    }
    return createOpaqueStructWithSizedContainer(
        Scope, Decl->getName().str(), L.File, FwdDeclLine, SizeInBits,
        AlignInBits, Flags, MangledName, collectGenericParams(EnumTy),
        SpecificationOf);
  }
  case TypeKind::BuiltinVector: {
    // FIXME: Emit the name somewhere.
    (void)MangledName;
    auto *BuiltinVectorTy = BaseTy->castTo<BuiltinVectorType>();
    auto ElemTy = BuiltinVectorTy->getElementType();
    auto ElemDbgTy = DebugTypeInfo::getFromTypeInfo(
        ElemTy, IGM.getTypeInfoForUnlowered(ElemTy), IGM);
    unsigned Count = BuiltinVectorTy->getNumElements();
    auto Subscript = DBuilder.getOrCreateSubrange(0, Count ? Count : -1);
    return DBuilder.createVectorType(SizeInBits, AlignInBits,
                                     getOrCreateType(ElemDbgTy),
                                     DBuilder.getOrCreateArray(Subscript));
  }
  // Reference storage types.
#define REF_STORAGE(Name, ...) case TypeKind::Name##Storage:
#include "swift/AST/ReferenceStorage.def"
    {
      auto *ReferenceTy = cast<ReferenceStorageType>(BaseTy);
      auto CanTy = ReferenceTy->getReferentType();
      auto L = getFileAndLocation(DbgTy.getDecl());
      unsigned CompilerGeneratedLine = 0;
      return DBuilder.createTypedef(getOrCreateDesugaredType(CanTy, DbgTy),
                                    MangledName, L.File,
                                    CompilerGeneratedLine, File);
    }
  // Sugared types.
  case TypeKind::TypeAlias: {
    auto *TypeAliasTy = cast<TypeAliasType>(BaseTy);
    auto *Decl = TypeAliasTy->getDecl();
    auto L = getFileAndLocation(Decl);
    auto AliasedTy = TypeAliasTy->getSinglyDesugaredType();
    // For TypeAlias types, the DeclContext for the aliased type is
    // in the decl of the alias type.
    DebugTypeInfo AliasedDbgTy(
        AliasedTy, DbgTy.getAlignment(), DbgTy.hasDefaultAlignment(),
        /* IsMetadataType = */ false, DbgTy.isFixedBuffer(),
        DbgTy.getNumExtraInhabitants());
    auto *TypeDef = DBuilder.createTypedef(getOrCreateType(AliasedDbgTy),
                                           MangledName, L.File, 0, Scope);
    // Bound generic types don't reference their type parameters in ASTTypes
    // mode, so we need to artificially keep typealiases alive, since they can
    // appear in reflection metadata.
    if (Opts.DebugInfoLevel < IRGenDebugInfoLevel::DwarfTypes)
      DBuilder.retainType(TypeDef);
    return TypeDef;
  }
  case TypeKind::Locatable: {
    auto *Sugar = cast<LocatableType>(BaseTy);
    auto *CanTy = Sugar->getSinglyDesugaredType();
    return getOrCreateDesugaredType(CanTy, DbgTy);
  }
  // SyntaxSugarType derivations.
  case TypeKind::Dictionary:
  case TypeKind::ArraySlice:
  case TypeKind::InlineArray:
  case TypeKind::Optional:
  case TypeKind::VariadicSequence: {
    auto *SyntaxSugarTy = cast<SyntaxSugarType>(BaseTy);
    auto *CanTy = SyntaxSugarTy->getSinglyDesugaredType();
    return getOrCreateDesugaredType(CanTy, DbgTy);
  }
  // SILBox should appear only inside of coroutine contexts.
  case TypeKind::SILBox:
  case TypeKind::DependentMember:
  case TypeKind::GenericTypeParam: {
    // FIXME: Provide a more meaningful debug type.
    return DBuilder.createStructType(
        Scope, MangledName, File, 0, SizeInBits, AlignInBits, Flags, nullptr,
        nullptr, llvm::dwarf::DW_LANG_Swift, nullptr, MangledName);
  }
  // The following types exist primarily for internal use by the type
  // checker.
  case TypeKind::Error:
  case TypeKind::LValue:
  case TypeKind::TypeVariable:
  case TypeKind::ErrorUnion:
  case TypeKind::Placeholder:
  case TypeKind::Module:
  case TypeKind::SILBlockStorage:
  case TypeKind::SILToken:
  case TypeKind::BuiltinUnsafeValueBuffer:
  case TypeKind::BuiltinDefaultActorStorage:
  case TypeKind::BuiltinNonDefaultDistributedActorStorage:
  case TypeKind::SILMoveOnlyWrapped:
  case TypeKind::Integer:
    LLVM_DEBUG(llvm::dbgs() << "Unhandled type: ";
               DbgTy.getType()->dump(llvm::dbgs()); llvm::dbgs() << "\n");
    MangledName = "<unknown>";
  }
  // Fallthrough cases (integers, floats, unhandled kinds) become a basic
  // type with whatever Encoding/SizeInBits the case above established.
  return DBuilder.createBasicType(MangledName, SizeInBits, Encoding,
                                  llvm::DINode::FlagZero,
                                  NumExtraInhabitants);
}
/// Determine if there exists a name mangling for the given type.
static bool canMangle(TypeBase *Ty) {
  auto Kind = Ty->getKind();
  // Generic function types are not yet supported; SIL block storage is not
  // supported at all. Everything else can be mangled.
  return Kind != TypeKind::GenericFunction &&
         Kind != TypeKind::SILBlockStorage;
}
/// Look up \p Ty in the DIType cache. Returns nullptr on a cache miss, or
/// when the cached metadata handle has been released.
llvm::DIType *getTypeOrNull(TypeBase *Ty) {
  auto It = DITypeCache.find(Ty);
  if (It == DITypeCache.end())
    return nullptr;
  // Verify that the information still exists before handing it out.
  llvm::Metadata *Val = It->second;
  if (!Val)
    return nullptr;
  return cast<llvm::DIType>(Val);
}
/// The private discriminator is represented as an inline namespace.
llvm::DIScope *getFilePrivateScope(llvm::DIScope *Parent, TypeDecl *Decl) {
  // Retrieve the private discriminator from the decl's enclosing file unit.
  auto *FileCtx =
      cast<FileUnit>(Decl->getDeclContext()->getModuleScopeContext());
  Identifier Discriminator = FileCtx->getDiscriminatorForPrivateDecl(Decl);
  return DBuilder.createNameSpace(Parent, Discriminator.str(),
                                  /*ExportSymbols=*/true);
}
#ifndef NDEBUG
/// Verify that the size of this type matches the one of the cached type.
/// Returns false (after dumping diagnostics) on a genuine mismatch.
bool sanityCheckCachedType(DebugTypeInfo DbgTy, llvm::DIType *CachedType) {
  // If this is a temporary, we're in the middle of creating a recursive type,
  // so skip the sanity check.
  if (CachedType->isTemporary())
    return true;
  // Only composite types carry a meaningful size to compare.
  if (!isa<llvm::DICompositeType>(CachedType))
    return true;
  bool IsUnsubstituted =
      getUnsubstitutedType(DbgTy.getType(), getMangledName(DbgTy).Canonical)
          .first;
  std::optional<uint64_t> SizeInBits;
  if (!IsUnsubstituted)
    if (auto CompletedDbgTy = completeType(DbgTy))
      SizeInBits = CompletedDbgTy->getSizeInBits();
  unsigned CachedSizeInBits = getSizeInBits(CachedType);
  if (SizeInBits && CachedSizeInBits != *SizeInBits) {
    // Note that CachedSizeInBits && !SizeInBits may happen and is benign,
    // because the cached copy would win. When the sizeless type is generated
    // it should be emitted as a forward declaration and thus never make it
    // into the cache.
    // In some situations a specialized type is emitted with size 0, even if
    // the real type has a size.
    // (SizeInBits is known to hold a value here, so the redundant re-check
    // of the optional was dropped.)
    if (DbgTy.getType()->isSpecialized() && *SizeInBits > 0 &&
        CachedSizeInBits == 0)
      return true;
    CachedType->dump();
    DbgTy.dump();
    llvm::errs() << "SizeInBits = " << *SizeInBits << "\n";
    llvm::errs() << "CachedSizeInBits = " << CachedSizeInBits << "\n";
    return false;
  }
  return true;
}
#endif
/// Emits the special builtin types into the debug info. These types are the
/// ones that are unconditionally emitted into the stdlib's metadata and are
/// needed to correctly calculate the layout of more complex types built on
/// top of them.
void createSpecialStlibBuiltinTypes() {
  // Only needed beyond AST-level debug info fidelity.
  if (Opts.DebugInfoLevel <= IRGenDebugInfoLevel::ASTTypes)
    return;
  for (auto BuiltinType : IGM.getOrCreateSpecialStlibBuiltinTypes()) {
    auto &TI = IGM.getTypeInfoForUnlowered(BuiltinType);
    auto DbgTy = DebugTypeInfo::getFromTypeInfo(BuiltinType, TI, IGM);
    // Retain so the emitted types survive even if otherwise unreferenced.
    DBuilder.retainType(getOrCreateType(DbgTy));
  }
}
/// A TypeWalker that finds if a given type's mangling is affected by an
/// @_originallyDefinedIn annotation.
struct OriginallyDefinedInFinder : public TypeWalker {
  // Set to true as soon as any visited type (or a type in an enclosing
  // function's signature) is annotated; the walk stops at that point.
  bool visitedOriginallyDefinedIn = false;
  TypeWalker::Action walkToTypePre(Type T) override {
    if (visitedOriginallyDefinedIn)
      return TypeWalker::Action::Stop;
    // Determine the DeclContext of the visited type, if it has one.
    DeclContext *D = nullptr;
    if (auto *TAT = llvm::dyn_cast<TypeAliasType>(T.getPointer()))
      D = TAT->getDecl()->getDeclContext();
    else if (auto *NT = llvm::dyn_cast<NominalOrBoundGenericNominalType>(
                 T.getPointer()))
      D = NT->getDecl()->getDeclContext();
    // A type inside a function uses that function's signature as part of
    // its mangling, so check if any types in the generic signature are
    // annotated with @_originallyDefinedIn.
    if (auto AFD = llvm::dyn_cast_or_null<AbstractFunctionDecl>(D)) {
      // Recurse over the enclosing function's interface type with a fresh
      // walker instance.
      OriginallyDefinedInFinder InnerWalker;
      AFD->getInterfaceType().walk(InnerWalker);
      if (InnerWalker.visitedOriginallyDefinedIn) {
        visitedOriginallyDefinedIn = true;
        return TypeWalker::Action::Stop;
      }
    }
    auto *TypeDecl = T->getNominalOrBoundGenericNominal();
    if (!TypeDecl)
      return TypeWalker::Action::Continue;
    // Walk up to the outermost nominal, since only those can carry the
    // @_originallyDefinedIn attribute.
    NominalTypeDecl *ParentDecl = TypeDecl;
    while (llvm::isa_and_nonnull<NominalTypeDecl>(ParentDecl->getParent()))
      ParentDecl = llvm::cast<NominalTypeDecl>(ParentDecl->getParent());
    if (ParentDecl->getAttrs().hasAttribute<OriginallyDefinedInAttr>()) {
      visitedOriginallyDefinedIn = true;
      return TypeWalker::Action::Stop;
    }
    return TypeWalker::Action::Continue;
  }
};
/// Returns true if the type's mangled name is affected by an
/// @_originallyDefinedIn annotation. This annotation can be on the type
/// itself, one of its generic arguments, etc.
bool containsOriginallyDefinedIn(Type T) {
  OriginallyDefinedInFinder Finder;
  T.walk(Finder);
  return Finder.visitedOriginallyDefinedIn;
}
/// Returns true if the type contains an imported C++ type. Due to
/// various unimplemented features these cannot round-trip through
/// the ASTDemangler.
///
/// FIXME: Get these cases working with the ASTDemangler instead.
bool containsCxxType(Type T) {
  return T.findIf([](Type t) -> bool {
    auto *decl = t->getAnyNominal();
    if (!decl)
      return false;
    auto *clangDecl = decl->getClangDecl();
    if (!clangDecl)
      return false;
    // Lookup of template instantiations is not implemented.
    if (isa<clang::ClassTemplateSpecializationDecl>(clangDecl))
      return true;
    // Lookup of types in weird contexts is not implemented. Only enums and
    // C++ records need the context check.
    if (!isa<clang::EnumDecl>(clangDecl) &&
        !isa<clang::CXXRecordDecl>(clangDecl))
      return false;
    for (auto *dc = clangDecl->getDeclContext();
         !isa<clang::TranslationUnitDecl>(dc); dc = dc->getParent()) {
      // ... in namespaces,
      if (isa<clang::NamespaceDecl>(dc))
        return true;
      // ... or inside other types.
      if (isa<clang::CXXRecordDecl>(dc))
        return true;
    }
    return false;
  });
}
/// Returns the decl of the type's parent chain annotated by
/// @_originallyDefinedIn. Returns null if no type is annotated.
NominalTypeDecl *getDeclAnnotatedByOriginallyDefinedIn(DebugTypeInfo DbgTy) {
  auto Type = DbgTy.getType();
  auto *TypeDecl = Type->getNominalOrBoundGenericNominal();
  if (!TypeDecl)
    return nullptr;
  // Find the outermost type, since only those can have @_originallyDefinedIn
  // attached to them.
  NominalTypeDecl *ParentDecl = TypeDecl;
  while (llvm::isa_and_nonnull<NominalTypeDecl>(ParentDecl->getParent()))
    ParentDecl = llvm::cast<NominalTypeDecl>(ParentDecl->getParent());
  // (Fixed a stray double semicolon on this return.)
  if (ParentDecl->getAttrs().hasAttribute<OriginallyDefinedInAttr>())
    return ParentDecl;
  return nullptr;
}
/// If this is a nominal type that has the @_originallyDefinedIn
/// attribute, IRGenDebugInfo emits an imported declaration of the type as
/// a child of the real module. We do this so LLDB has enough
/// information to both find the type in reflection metadata (the module name
/// in the type's mangled name), and find it in the swiftmodule (the type's
/// imported declaration's parent module name).
void handleOriginallyDefinedIn(DebugTypeInfo DbgTy, llvm::DIType *DITy,
                               StringRef MangledName, llvm::DIFile *File) {
  // Each mangled name is processed at most once.
  if (OriginallyDefinedInTypes.contains(MangledName))
    return;
  // Force the generation of the generic type parameters as forward
  // declarations, as those types might be annotated with
  // @_originallyDefinedIn.
  if (auto *BoundDecl = llvm::dyn_cast<BoundGenericType>(DbgTy.getType()))
    collectGenericParams(BoundDecl, /*AsForwardDeclarations=*/true);
  NominalTypeDecl *OriginallyDefinedInDecl = getDeclAnnotatedByOriginallyDefinedIn(DbgTy);
  if (!OriginallyDefinedInDecl)
    return;
  // Emit the imported declaration under the real swiftmodule the type lives on.
  auto RealModule = getOrCreateContext(OriginallyDefinedInDecl->getParent());
  DBuilder.createImportedDeclaration(RealModule, DITy, File, 0, MangledName);
  // Record the name so a second call for this type is a no-op.
  OriginallyDefinedInTypes.insert(MangledName);
}
/// Retrieve the context of the type, as opposed to the DeclContext
/// of the variable.
///
/// FIXME: Builtin and qualified types in LLVM have no parent
/// scope. TODO: This can be fixed by extending DIBuilder.
/// Make sure to retrieve the context of the type alias, not the pointee.
llvm::DIScope *updateScope(llvm::DIScope *Scope, DebugTypeInfo DbgTy) {
  DeclContext *Context = nullptr;
  const Decl *TypeDecl = nullptr;
  const clang::Decl *ClangDecl = nullptr;
  // Determine the owning decl / context of the type.
  if (auto Alias = dyn_cast<TypeAliasType>(DbgTy.getType())) {
    TypeAliasDecl *AliasDecl = Alias->getDecl();
    TypeDecl = AliasDecl;
    Context = AliasDecl->getParent();
    ClangDecl = AliasDecl->getClangDecl();
  } else if (auto *ND = DbgTy.getType()->getNominalOrBoundGenericNominal()) {
    TypeDecl = ND;
    // If this is an originally defined in type, we want to emit this type's
    // scope to be the ABI module.
    if (auto Attribute =
            ND->getAttrs().getAttribute<OriginallyDefinedInAttr>()) {
      auto Identifier = IGM.getSILModule().getASTContext().getIdentifier(
          Attribute->getManglingModuleName());
      // The identifier pointer doubles as the module cache key.
      void *Key = (void *)Identifier.get();
      Scope =
          getOrCreateModule(Key, TheCU, Attribute->getManglingModuleName(), {});
    } else {
      Context = ND->getParent();
    }
    ClangDecl = ND->getClangDecl();
  } else if (auto BNO = dyn_cast<BuiltinType>(DbgTy.getType())) {
    Context = BNO->getASTContext().TheBuiltinModule;
  }
  // For Clang-imported decls, scope the type into the owning Clang
  // (sub)module instead.
  if (ClangDecl) {
    clang::ASTReader &Reader = *CI.getClangInstance().getASTReader();
    auto Idx = ClangDecl->getOwningModuleID();
    auto SubModuleDesc = Reader.getSourceDescriptor(Idx);
    auto TopLevelModuleDesc = getClangModule(*TypeDecl->getModuleContext());
    if (SubModuleDesc) {
      if (TopLevelModuleDesc)
        // Describe the submodule, but substitute the cached ASTFile from
        // the toplevel module. The ASTFile pointer in SubModule may be
        // dangling and can't be trusted.
        Scope = getOrCreateModule({SubModuleDesc->getModuleName(),
                                   SubModuleDesc->getPath(),
                                   TopLevelModuleDesc->getASTFile(),
                                   TopLevelModuleDesc->getSignature(),
                                   TopLevelModuleDesc->getCASID()},
                                  SubModuleDesc->getModuleOrNull());
      else if (SubModuleDesc->getModuleOrNull() == nullptr)
        // This is (bridging header) PCH.
        Scope = getOrCreateModule(*SubModuleDesc, nullptr);
    }
  }
  if (!Scope)
    Scope = getOrCreateContext(Context);
  // Scope outermost fileprivate decls in an inline private discriminator
  // namespace.
  //
  // We don't do this for decls imported from Clang modules, because the
  // scopes of C/C++ symbols are not restricted to a particular file unit.
  if (auto *Decl = DbgTy.getDecl())
    if (Decl->isOutermostPrivateOrFilePrivateScope() &&
        !isa<ClangModuleUnit>(
            Decl->getDeclContext()->getModuleScopeContext()))
      Scope = getFilePrivateScope(Scope, Decl);
  return Scope;
}
/// Return true if DbgTy is an existential type whose constraint type is a
/// type alias (e.g. `any T` where `T` is a typealias of a protocol type).
static bool isExistentialTypeAlias(DebugTypeInfo DbgTy) {
  if (auto *Existential = DbgTy.getType()->getAs<ExistentialType>())
    return isa<TypeAliasType>(Existential->getConstraintType().getPointer());
  return false;
}
/// Return the llvm::DIType for DbgTy, creating and caching it on first use.
/// Caching goes through two layers: a TypeBase* cache (DITypeCache) and a
/// mangled-name cache (RefMap), since TypeBase pointers are not unique.
llvm::DIType *getOrCreateType(DebugTypeInfo DbgTy,
                              llvm::DIScope *Scope = nullptr) {
  // Is this an empty type?
  if (DbgTy.isNull())
    // We can't use the empty type as an index into DenseMap.
    return createType(DbgTy, "", TheCU, MainFile);
  // Look in the cache first.
  if (auto *DITy = getTypeOrNull(DbgTy.getType())) {
    assert(sanityCheckCachedType(DbgTy, DITy));
    return DITy;
  }
  // Use a separate DIRefMap cache for existential typealiases
  // because they have identical mangled names as their inner
  // protocol types and cause conflicts in the cache. For example,
  //   protocol P<Value> {
  //     associatedtype Value
  //   }
  //   actor A<Value> {
  //     public typealias T = P<Value>
  //     let t: any T
  //     ...
  //   }
  // "any T" (existential type) and "P<Value>" (parameterized protocol type)
  // have the same mangled name but distinct DI types.
  TrackingDIRefMap &RefMap =
      isExistentialTypeAlias(DbgTy) ? ExistentialTypeAliasMap : DIRefMap;
  // Second line of defense: Look up the mangled name. TypeBase*'s are
  // not necessarily unique, but name mangling is too expensive to do
  // every time.
  MangledNames Mangled;
  llvm::MDString *UID = nullptr;
  if (canMangle(DbgTy.getType())) {
    Mangled = getMangledName(DbgTy);
    if (!Mangled.Sugared.empty()) {
      UID = llvm::MDString::get(IGM.getLLVMContext(), Mangled.Sugared);
      if (llvm::Metadata *CachedTy = RefMap.lookup(UID))
        return cast<llvm::DIType>(CachedTy);
      if (DbgTy.getType()->getKind() != swift::TypeKind::TypeAlias) {
        // A type with the same canonical type already exists, emit a typedef.
        // This extra step is necessary to break out of loops: We don't
        // canonicalize types before mangling to preserve sugared types. But
        // some types can also have different equivalent non-canonical
        // representations with no sugar involved, for example a type
        // that appears recursively inside itself. To deal with the latter we
        // directly emit a type alias to the canonical type.
        UID = llvm::MDString::get(IGM.getLLVMContext(), Mangled.Canonical);
        if (llvm::Metadata *CachedTy = RefMap.lookup(UID)) {
          Scope = updateScope(Scope, DbgTy);
          llvm::DIType *DITy = cast<llvm::DIType>(CachedTy);
          llvm::DIType *TypeDef = DBuilder.createTypedef(
              DITy, Mangled.Sugared, MainFile, 0, Scope);
          return TypeDef;
        }
        UID = llvm::MDString::get(IGM.getLLVMContext(), Mangled.Sugared);
      }
      // Fall through and create the sugared type.
    } else if (auto *AliasTy =
                   llvm::dyn_cast<TypeAliasType>(DbgTy.getType())) {
      // An alias type, but the mangler failed to produce a sugared type, just
      // return the desugared type.
      llvm::DIType *Desugared =
          getOrCreateDesugaredType(AliasTy->getSinglyDesugaredType(), DbgTy);
      StringRef Name;
      if (auto *AliasDecl = AliasTy->getDecl())
        Name = AliasDecl->getName().str();
      if (!Name.empty())
        return DBuilder.createTypedef(Desugared, Name, MainFile, 0,
                                      updateScope(Scope, DbgTy));
      return Desugared;
    } else if (llvm::Metadata *CachedTy = RefMap.lookup(UID)) {
      auto *DITy = cast<llvm::DIType>(CachedTy);
      assert(sanityCheckCachedType(DbgTy, DITy));
      return DITy;
    } else {
      UID = llvm::MDString::get(IGM.getLLVMContext(), Mangled.Canonical);
      if (llvm::Metadata *CachedTy = RefMap.lookup(UID))
        return cast<llvm::DIType>(CachedTy);
    }
  }
  Scope = updateScope(Scope, DbgTy);
  StringRef MangledName =
      !Mangled.Sugared.empty() ? Mangled.Sugared : Mangled.Canonical;
  StringRef Name = MangledName;
  if (auto *Decl = DbgTy.getDecl())
    Name = Decl->getName().str();
  // If this is a forward decl, create one for this mangled name and don't
  // cache it.
  if (!isa<PrimaryArchetypeType>(DbgTy.getType()) &&
      !isa<TypeAliasType>(DbgTy.getType()) &&
      (DbgTy.isForwardDecl() || DbgTy.isFixedBuffer() ||
       !completeType(DbgTy))) {
    // In LTO type uniquing is performed based on the UID. Forward
    // declarations may not have a unique ID to avoid a forward declaration
    // winning over a full definition.
    auto *FwdDecl = DBuilder.createReplaceableCompositeType(
        llvm::dwarf::DW_TAG_structure_type, Name, Scope, 0, 0,
        llvm::dwarf::DW_LANG_Swift, 0, 0, llvm::DINode::FlagFwdDecl,
        MangledName);
    // Remember the forward declaration so finalize() can RAUW it with a
    // full definition if one is emitted later.
    FwdDeclTypes.emplace_back(
        std::piecewise_construct, std::make_tuple(MangledName),
        std::make_tuple(static_cast<llvm::Metadata *>(FwdDecl)));
    handleOriginallyDefinedIn(DbgTy, FwdDecl, MangledName, getFile(Scope));
    return FwdDecl;
  }
  llvm::DIType *DITy = createType(DbgTy, MangledName, Scope, getFile(Scope));
  if (!shouldCacheDIType(DITy, DbgTy))
    return DITy;
  // Incrementally build the DIRefMap.
  if (auto *CTy = dyn_cast<llvm::DICompositeType>(DITy)) {
#ifndef NDEBUG
    // Soundness check.
    if (llvm::Metadata *V = RefMap.lookup(UID)) {
      auto *CachedTy = cast<llvm::DIType>(V);
      assert(CachedTy == DITy && "conflicting types for one UID");
    }
#endif
    // If this type supports a UID, enter it to the cache.
    if (auto UID = CTy->getRawIdentifier()) {
      assert(UID->getString() == MangledName &&
             "Unique identifier is different from mangled name ");
      RefMap[UID] = llvm::TrackingMDNodeRef(DITy);
    }
  }
  // Store it in the cache.
  DITypeCache.insert({DbgTy.getType(), llvm::TrackingMDNodeRef(DITy)});
  handleOriginallyDefinedIn(DbgTy, DITy, MangledName, getFile(Scope));
  return DITy;
}
};
IRGenDebugInfoImpl::IRGenDebugInfoImpl(const IRGenOptions &Opts,
                                       ClangImporter &CI, IRGenModule &IGM,
                                       llvm::Module &M,
                                       StringRef MainOutputFilenameForDebugInfo,
                                       StringRef PD)
    : Opts(Opts), CI(CI), SM(IGM.Context.SourceMgr), M(M), DBuilder(M),
      IGM(IGM), DebugPrefixMap(Opts.DebugPrefixMap) {
  assert(Opts.DebugInfoLevel > IRGenDebugInfoLevel::None &&
         "no debug info should be generated");
  llvm::SmallString<256> SourcePath;
  if (MainOutputFilenameForDebugInfo.empty())
    SourcePath = "<unknown>";
  else
    SourcePath = MainOutputFilenameForDebugInfo;

  unsigned Lang = llvm::dwarf::DW_LANG_Swift;
  std::string Producer = version::getSwiftFullVersion(
      IGM.Context.LangOpts.EffectiveLanguageVersion);
  unsigned Major, Minor;
  std::tie(Major, Minor) = version::getSwiftNumericVersion();
  unsigned MajorRuntimeVersion = Major;

  // No split DWARF on Darwin.
  StringRef SplitName = StringRef();
  // Note that File + Dir need not result in a valid path.
  // The directory part of the main file is the current working directory.
  std::string RemappedFile = DebugPrefixMap.remapPath(SourcePath);
  std::string RemappedDir = DebugPrefixMap.remapPath(Opts.DebugCompilationDir);
  bool RelFile = llvm::sys::path::is_relative(RemappedFile);
  bool RelDir = llvm::sys::path::is_relative(RemappedDir);
  // If both remapped parts are relative, fall back to the unremapped path.
  MainFile = (RelFile && RelDir)
                 ? createFile(SourcePath, {}, {})
                 : DBuilder.createFile(RemappedFile, RemappedDir);
  CompilerGeneratedFile = getOrCreateFile("", {});
  StringRef Sysroot = IGM.Context.SearchPathOpts.getSDKPath();
  // Extract the SDK name: the last path component ending in ".sdk".
  StringRef SDK;
  {
    auto B = llvm::sys::path::rbegin(Sysroot);
    auto E = llvm::sys::path::rend(Sysroot);
    auto It = std::find_if(B, E, [](auto SDK) { return SDK.ends_with(".sdk"); });
    if (It != E)
      SDK = *It;
  }
  bool EnableCXXInterop =
      IGM.getSILModule().getASTContext().LangOpts.EnableCXXInterop;
  bool EnableEmbeddedSwift =
      IGM.getSILModule().getASTContext().LangOpts.hasFeature(Feature::Embedded);
  TheCU = DBuilder.createCompileUnit(
      Lang, MainFile, Producer, Opts.shouldOptimize(),
      Opts.getDebugFlags(PD, EnableCXXInterop, EnableEmbeddedSwift),
      MajorRuntimeVersion, SplitName,
      Opts.DebugInfoLevel > IRGenDebugInfoLevel::LineTables
          ? llvm::DICompileUnit::FullDebug
          : llvm::DICompileUnit::LineTablesOnly,
      /* DWOId */ 0, /* SplitDebugInlining */ true,
      /* DebugInfoForProfiling */ Opts.DebugInfoForProfiling,
      llvm::DICompileUnit::DebugNameTableKind::Default,
      /* RangesBaseAddress */ false, DebugPrefixMap.remapPath(Sysroot), SDK);

  // Because the swift compiler relies on Clang to setup the Module,
  // the clang CU is always created first. Several dwarf-reading
  // tools (older versions of ld64, and lldb) can get confused if the
  // first CU in an object is empty, so ensure that the Swift CU comes
  // first by rearranging the list of CUs in the LLVM module.
  llvm::NamedMDNode *CU_Nodes = M.getNamedMetadata("llvm.dbg.cu");
  SmallVector<llvm::DICompileUnit *, 2> CUs;
  for (auto *N : CU_Nodes->operands())
    CUs.push_back(cast<llvm::DICompileUnit>(N));
  CU_Nodes->dropAllReferences();
  for (auto CU = CUs.rbegin(), CE = CUs.rend(); CU != CE; ++CU)
    CU_Nodes->addOperand(*CU);

  // Create a module for the current compile unit.
  auto *MDecl = IGM.getSwiftModule();
  StringRef Path = Opts.DebugModulePath;
  if (Path.empty()) {
    // Default to the directory of the main output file.
    llvm::sys::path::remove_filename(SourcePath);
    Path = SourcePath;
  }
  MainModule = getOrCreateModule(MDecl, TheCU, Opts.ModuleName, Path);
  DBuilder.createImportedModule(MainFile, MainModule, MainFile, 0);

  // Macro definitions that were defined by the user with "-Xcc -D" on the
  // command line. This does not include any macros defined by ClangImporter.
  llvm::raw_svector_ostream OS(ConfigMacros);
  unsigned I = 0;
  // Translate the macro definitions back into a command line.
  for (auto &Macro : Opts.ClangDefines) {
    if (++I > 1)
      OS << ' ';
    OS << '"';
    // Escape backslashes and quotes so the result parses as one argument.
    for (char c : Macro)
      switch (c) {
      case '\\':
        OS << "\\\\";
        break;
      case '"':
        OS << "\\\"";
        break;
      default:
        OS << c;
      }
    OS << '"';
  }
  createSpecialStlibBuiltinTypes();
}
/// Emit any remaining module imports, resolve all outstanding forward
/// declarations, and finalize the DIBuilder.
void IRGenDebugInfoImpl::finalize() {
  assert(LocationStack.empty() && "Mismatch of pushLoc() and popLoc().");

  // Get the list of imported modules (which may actually be different
  // from all ImportDecls).
  SmallVector<ImportedModule, 8> ModuleWideImports;
  IGM.getSwiftModule()->getImportedModules(ModuleWideImports,
                                           ModuleDecl::getImportFilterLocal());
  for (auto M : ModuleWideImports)
    if (!ImportedModules.count(M.importedModule))
      createImportedModule(MainFile, M, MainFile, 0);

  // Finalize all replaceable forward declarations.
  auto finalize = [&](llvm::MDNode *FwdDeclType, llvm::MDNode *FullType,
                      llvm::MDString *UID = nullptr) {
    llvm::TempDICompositeType FwdDecl(cast<llvm::DICompositeType>(FwdDeclType));
    // If no full definition was ever emitted, the forward declaration
    // replaces itself (becomes permanent).
    llvm::Metadata *Replacement = FullType ? FullType : FwdDeclType;
    llvm::Metadata *Replaced = DBuilder.replaceTemporary(
        std::move(FwdDecl), cast<llvm::MDNode>(Replacement));
    // Unique all identical forward declarations.
    if (UID && !FullType)
      DIRefMap[UID] = llvm::TrackingMDNodeRef(cast<llvm::MDNode>(Replaced));
  };
  for (auto &Ty : FwdDeclTypes) {
    auto *UID = llvm::MDString::get(IGM.getLLVMContext(), Ty.first);
    finalize(cast<llvm::MDNode>(Ty.second),
             llvm::cast_or_null<llvm::DIType>(DIRefMap.lookup(UID)), UID);
  }
  FwdDeclTypes.clear();

  // Finalize the DIBuilder.
  DBuilder.finalize();
}
#ifndef NDEBUG
/// Debug-build check that -Onone line tables are contiguous: within one
/// lexical scope, the same non-zero source location should not be re-entered
/// after other locations were emitted in between.
bool IRGenDebugInfoImpl::lineEntryIsSane(FileAndLocation DL,
                                         const SILDebugScope *DS) {
  // All bets are off for optimized code.
  if (!VerifyLineTable || Opts.shouldOptimize())
    return true;

  // We entered a new lexical block.
  if (DS != LastScope)
    PreviousLineEntries.clear();
  // Line 0 (artificial) and exact repeats of the previous location are
  // always fine.
  if (DL.Line == 0 || DL == PreviousFileAndLocation)
    return true;

  // Save the last non-zero line entry.
  PreviousFileAndLocation = DL;
  auto ItNew = PreviousLineEntries.insert(FileAndLocationKey(DL));
  // Return true iff DL was not yet in PreviousLineEntries.
  return ItNew.second;
}
#endif
IRGenDebugInfoImpl::FileAndLocation
IRGenDebugInfoImpl::computeLLVMLocCodeView(const SILDebugScope *DS,
                                           SILLocation Loc) {
  // CodeView: if the scope has not changed and the new location carries no
  // usable line information (artificial, line zero, or hidden from debug
  // info), keep the most recent debug location.
  bool NoUsableLine = Loc.is<ArtificialUnreachableLocation>() ||
                      Loc.isLineZero(SM) || Loc.isHiddenFromDebugInfo();
  if (DS == LastScope && NoUsableLine)
    return LastFileAndLocation;
  // Otherwise decode the location.
  return decodeFileAndLocation(Loc);
}
IRGenDebugInfoImpl::FileAndLocation
IRGenDebugInfoImpl::computeLLVMLoc(const SILDebugScope *DS, SILLocation Loc) {
  // Inlined thunks and transparent functions get a compiler-generated,
  // line-0 location.
  SILFunction *InlinedFn = DS->getInlinedFunction();
  if (InlinedFn && (InlinedFn->isThunk() || InlinedFn->isTransparent()))
    return {0, 0, CompilerGeneratedFile};

  // CodeView has its own location policy.
  if (Opts.DebugInfoFormat == IRGenDebugInfoFormat::CodeView)
    return computeLLVMLocCodeView(DS, Loc);

  FileAndLocation Result;
  if (!Loc.isInPrologue())
    Result = decodeFileAndLocation(Loc);
  // Hidden locations become an artificial line-0 entry, but keep the file
  // from the location.
  if (Loc.isHiddenFromDebugInfo()) {
    Result.Line = 0;
    Result.Column = 0;
  }
  return Result;
}
/// Compute the debug location for (DS, Loc) and install it on the IRBuilder,
/// updating the cached LastFileAndLocation/LastScope state.
void IRGenDebugInfoImpl::setCurrentLoc(IRBuilder &Builder,
                                       const SILDebugScope *DS,
                                       SILLocation Loc) {
  assert(DS && "empty scope");
  auto *Scope = getOrCreateScope(DS);
  if (!Scope)
    return;
  FileAndLocation L = computeLLVMLoc(DS, Loc);
  if (L.getFilename() != Scope->getFilename()) {
    // We changed files in the middle of a scope. This happens, for
    // example, when constructors are inlined. Create a new scope to
    // reflect this.
    Scope = DBuilder.createLexicalBlockFile(Scope, L.File);
  }
  assert(lineEntryIsSane(L, DS) &&
         "non-contiguous debug location in same scope at -Onone");
  LastFileAndLocation = L;
  LastScope = DS;

  auto *InlinedAt = createInlinedAt(DS);
  assert(((!InlinedAt) || (InlinedAt && Scope)) && "inlined w/o scope");
  assert(parentScopesAreSane(DS) && "parent scope sanity check failed");
  auto DL = llvm::DILocation::get(IGM.getLLVMContext(), L.Line, L.Column, Scope,
                                  InlinedAt);
#ifndef NDEBUG
  {
    // Verify the location describes the function currently being emitted.
    llvm::DILocalScope *Scope = DL->getInlinedAtScope();
    llvm::DISubprogram *SP = Scope->getSubprogram();
    llvm::Function *F = Builder.GetInsertBlock()->getParent();
    assert((!F || SP->describes(F)) && "location points to different function");
  }
#endif
  Builder.SetCurrentDebugLocation(DL);
}
/// Wrap the current debug location in an artificial inline frame whose
/// function name carries the runtime failure message, so debuggers show the
/// reason for a trap.
void IRGenDebugInfoImpl::addFailureMessageToCurrentLoc(IRBuilder &Builder,
                                                       StringRef failureMsg) {
  auto TrapLoc = Builder.getCurrentDebugLocation();

  // Create a function in the debug info which has failureMsg as name.
  // TrapSc is the SIL debug scope which corresponds to TrapSP in the LLVM debug
  // info.
  RegularLocation ALoc = RegularLocation::getAutoGeneratedLocation();
  const SILDebugScope *TrapSc = new (IGM.getSILModule()) SILDebugScope(ALoc);

  llvm::DISubroutineType *DIFnTy = DBuilder.createSubroutineType(nullptr);

  llvm::DISubprogram *TrapSP;
  // Reuse a previously created failure function for the same message if
  // possible.
  auto It = RuntimeErrorFnCache.find(failureMsg);
  if (It != RuntimeErrorFnCache.end())
    TrapSP = llvm::cast<llvm::DISubprogram>(It->second);
  else {
    std::string FuncName = "Swift runtime failure: ";
    FuncName += failureMsg;
    // CodeView consumers do not correctly handle an artificially generated
    // file, thus use the original location's file as the file for the debug
    // function, and prevent reuse of this debug function.
    bool useCompilerGeneratedFile = !Opts.isDebugInfoCodeView();
    llvm::DIFile *File =
        useCompilerGeneratedFile ? getOrCreateFile({}, {}) : TrapLoc->getFile();
    TrapSP = DBuilder.createFunction(
        File, FuncName, StringRef(), File, 0,
        DIFnTy, 0, llvm::DINode::FlagArtificial,
        llvm::DISubprogram::SPFlagDefinition, nullptr, nullptr, nullptr);
    if (useCompilerGeneratedFile)
      RuntimeErrorFnCache.insert({failureMsg, llvm::TrackingMDNodeRef(TrapSP)});
  }

  ScopeCache[TrapSc] = llvm::TrackingMDNodeRef(TrapSP);
  LastScope = TrapSc;
  assert(parentScopesAreSane(TrapSc) && "parent scope sanity check failed");

  // Wrap the existing TrapLoc into the failure function.
  // Line 0 is invalid in CodeView, so use the line and column from the original
  // trap location.
  auto DL = llvm::DILocation::get(
      IGM.getLLVMContext(), Opts.isDebugInfoCodeView() ? TrapLoc.getLine() : 0,
      Opts.isDebugInfoCodeView() ? TrapLoc.getCol() : 0, TrapSP, TrapLoc);
  Builder.SetCurrentDebugLocation(DL);
}
/// Reset the cached location state and detach the builder's debug location.
void IRGenDebugInfoImpl::clearLoc(IRBuilder &Builder) {
  LastScope = nullptr;
  LastFileAndLocation = {};
  Builder.SetCurrentDebugLocation(llvm::DebugLoc());
}
/// Save the current debug location on a stack and reset the location state
/// to empty.
void IRGenDebugInfoImpl::pushLoc() {
  LocationStack.emplace_back(LastFileAndLocation, LastScope);
  LastFileAndLocation = {};
  LastScope = nullptr;
}
/// Restore the most recently pushed debug location.
void IRGenDebugInfoImpl::popLoc() {
  auto Saved = LocationStack.pop_back_val();
  LastFileAndLocation = Saved.first;
  LastScope = Saved.second;
}
/// This is done for WinDbg to avoid having two non-contiguous sets of
/// instructions because the ``@llvm.trap`` instruction gets placed at the end
/// of the function.
void IRGenDebugInfoImpl::setInlinedTrapLocation(IRBuilder &Builder,
                                                const SILDebugScope *Scope) {
  // Only relevant for CodeView output.
  if (Opts.DebugInfoFormat != IRGenDebugInfoFormat::CodeView)
    return;

  // The @llvm.trap could be inlined into a chunk of code that was also inlined.
  // If this is the case then simply using the LastScope's location would
  // generate debug info that claimed Function A owned Block X and Block X
  // thought it was owned by Function B. Therefore, we need to find the last
  // inlined scope to point to.
  const SILDebugScope *TheLastScope = LastScope;
  while (TheLastScope->InlinedCallSite &&
         TheLastScope->InlinedCallSite != TheLastScope) {
    TheLastScope = TheLastScope->InlinedCallSite;
  }
  auto LastLocation = llvm::DILocation::get(
      IGM.getLLVMContext(), LastFileAndLocation.Line,
      LastFileAndLocation.Column, getOrCreateScope(TheLastScope));
  // FIXME: This location should point to stdlib instead of being artificial.
  auto DL = llvm::DILocation::get(IGM.getLLVMContext(), 0, 0,
                                  getOrCreateScope(Scope), LastLocation);
  Builder.SetCurrentDebugLocation(DL);
}
/// Install an artificial line-0 location scoped to the entry point function.
void IRGenDebugInfoImpl::setEntryPointLoc(IRBuilder &Builder) {
  llvm::DIScope *EntryScope = getEntryPointFn();
  Builder.SetCurrentDebugLocation(
      llvm::DILocation::get(IGM.getLLVMContext(), 0, 0, EntryScope, nullptr));
}
/// Return the scope for the program entry point, lazily creating a
/// replaceable forward declaration on first use.
llvm::DIScope *IRGenDebugInfoImpl::getEntryPointFn() {
  if (EntryPointFn)
    return EntryPointFn;
  auto EntryName =
      IGM.getSILModule().getASTContext().getEntryPointFunctionName();
  EntryPointFn = DBuilder.createReplaceableCompositeType(
      llvm::dwarf::DW_TAG_subroutine_type, EntryName, MainFile, MainFile, 0);
  return EntryPointFn;
}
/// Translate a SILDebugScope into a (cached) llvm::DIScope, creating the
/// enclosing function's DISubprogram or a DILexicalBlock on demand.
llvm::DIScope *IRGenDebugInfoImpl::getOrCreateScope(const SILDebugScope *DS) {
  if (DS == nullptr)
    return MainFile;

  // Try to find it in the cache first.
  auto CachedScope = ScopeCache.find(DS);
  if (CachedScope != ScopeCache.end())
    return cast<llvm::DIScope>(CachedScope->second);

  // If this is an (inlined) function scope, the function may
  // not have been created yet.
  if (auto *SILFn = DS->Parent.dyn_cast<SILFunction *>()) {
    auto *FnScope = SILFn->getDebugScope();
    // FIXME: This is a bug in the SIL deserialization.
    if (!FnScope)
      SILFn->setDebugScope(DS);

    auto CachedScope = ScopeCache.find(FnScope);
    if (CachedScope != ScopeCache.end())
      return cast<llvm::DIScope>(CachedScope->second);

    // Force the debug info for the function to be emitted, even if it
    // is external or has been inlined.
    llvm::Function *Fn = nullptr;
    // Avoid materializing generic functions in embedded Swift mode.
    bool genericInEmbedded =
        IGM.Context.LangOpts.hasFeature(Feature::Embedded) &&
        SILFn->isGeneric();
    if (!SILFn->getName().empty() && !SILFn->isZombie() && !genericInEmbedded)
      Fn = IGM.getAddrOfSILFunction(SILFn, NotForDefinition);
    auto *SP = emitFunction(*SILFn, Fn);

    // Cache it.
    ScopeCache[DS] = llvm::TrackingMDNodeRef(SP);
    return SP;
  }

  // Otherwise this is a lexical block nested in a parent scope.
  auto *ParentScope = cast<const SILDebugScope *>(DS->Parent);
  llvm::DIScope *Parent = getOrCreateScope(ParentScope);
  assert(isa<llvm::DILocalScope>(Parent) && "not a local scope");

  // Lexical blocks are only modeled at full debug-info level.
  if (Opts.DebugInfoLevel <= IRGenDebugInfoLevel::LineTables)
    return Parent;

  assert(DS->Parent && "lexical block must have a parent subprogram");
  auto L = getStartLocation(DS->Loc);
  auto *DScope = DBuilder.createLexicalBlock(Parent, L.File, L.Line, L.Column);

  // Cache it.
  ScopeCache[DS] = llvm::TrackingMDNodeRef(DScope);
  return DScope;
}
/// Record an import declaration as a DIImportedEntity.
void IRGenDebugInfoImpl::emitImport(ImportDecl *D) {
  // Imports are only modeled at full debug-info level.
  if (Opts.DebugInfoLevel <= IRGenDebugInfoLevel::LineTables)
    return;
  assert(D->getModule() && "compiler-synthesized ImportDecl is incomplete");

  auto DL = getFileAndLocation(D);
  ImportedModule M = {D->getAccessPath(), D->getModule()};
  createImportedModule(DL.File, M, DL.File, DL.Line);
  ImportedModules.insert(M.importedModule);
}
/// This is effectively \p clang::CGDebugInfo::getCallSiteRelatedAttrs().
llvm::DINode::DIFlags IRGenDebugInfoImpl::getCallSiteRelatedAttrs() const {
  // Do not generate callsite attributes unless the -gen-callsite-info flag
  // is passed.
  if (!Opts.DebugCallsiteInfo)
    return llvm::DINode::FlagZero;

  auto SwiftLangOpts = IGM.Context.LangOpts;
  auto Loader = IGM.getSILModule().getASTContext().getClangModuleLoader();
  auto *Importer = static_cast<ClangImporter *>(&*Loader);
  auto &CGO = Importer->getCodeGenOpts();

  // Do not generate callsite attributes if there is no debug info to be
  // emitted.
  if (CGO.getDebugInfo() == llvm::codegenoptions::NoDebugInfo ||
      CGO.getDebugInfo() == llvm::codegenoptions::LocTrackingOnly)
    return llvm::DINode::FlagZero;

  // Callsite attributes are available in DWARFv5. However, for swift, lldb can
  // accept these attributes as if they were part of DWARFv4.
  if (Opts.DWARFVersion < 4)
    return llvm::DINode::FlagZero;

  return llvm::DINode::FlagAllCallsDescribed;
}
/// Convenience overload: emit debug info for a SIL function using its own
/// debug scope, representation, type, and decl context.
llvm::DISubprogram *IRGenDebugInfoImpl::emitFunction(SILFunction &SILFn,
                                                     llvm::Function *Fn) {
  assert(SILFn.getDebugScope() && "SIL function has no debug scope");
  return emitFunction(SILFn.getDebugScope(), Fn, SILFn.getRepresentation(),
                      SILFn.getLoweredType(), SILFn.getDeclContext());
}
/// Create (or return the cached) DISubprogram for the function described by
/// DS/Fn. A temporary forward declaration is installed in the ScopeCache
/// first so that recursive type emission can reference the function, then
/// RAUW'ed with the final subprogram.
llvm::DISubprogram *
IRGenDebugInfoImpl::emitFunction(const SILDebugScope *DS, llvm::Function *Fn,
                                 SILFunctionTypeRepresentation Rep,
                                 SILType SILTy, DeclContext *DeclCtx,
                                 StringRef outlinedFromName) {
  auto Cached = ScopeCache.find(DS);
  if (Cached != ScopeCache.end()) {
    auto SP = cast<llvm::DISubprogram>(Cached->second);
    // If we created the DISubprogram for a forward declaration,
    // attach it to the function now.
    if (!Fn->getSubprogram() && !Fn->isDeclaration())
      Fn->setSubprogram(SP);
    return SP;
  }

  // Some IRGen-generated helper functions don't have a corresponding
  // SIL function, hence the dyn_cast.
  auto *SILFn = DS ? DS->Parent.dyn_cast<SILFunction *>() : nullptr;

  // Pick the linkage name: outlined name > LLVM function name > SIL name.
  StringRef LinkageName;
  if (!outlinedFromName.empty())
    LinkageName = outlinedFromName;
  else if (Fn)
    LinkageName = Fn->getName();
  else if (DS)
    LinkageName = SILFn->getName();
  else
    llvm_unreachable("function has no mangled name");

  StringRef Name;
  if (DS) {
    if (DS->Loc.isSILFile())
      Name = SILFn->getName();
    else
      Name = getName(DS->Loc);
  }

  /// The source line used for the function prologue.
  unsigned ScopeLine = 0;
  FileAndLocation L;
  if (!DS || (SILFn && (SILFn->isBare() || SILFn->isThunk() ||
                        SILFn->isTransparent()))) {
    // Bare functions and thunks should not have any line numbers. This
    // is especially important for shared functions like reabstraction
    // thunk helpers, where DS->Loc is an arbitrary location of whichever use
    // was emitted first.
    L = {0, 0, CompilerGeneratedFile};
  } else {
    L = decodeFileAndLocation(DS->Loc);
    ScopeLine = L.Line;
  }

  auto Line = L.Line;
  auto File = L.File;
  llvm::DIScope *Scope = MainModule;
  if (SILFn && SILFn->getDeclContext())
    Scope = getOrCreateContext(SILFn->getDeclContext()->getParent());

  // We know that main always comes from MainFile.
  if (LinkageName ==
      IGM.getSILModule().getASTContext().getEntryPointFunctionName()) {
    File = MainFile;
    Line = 1;
    Name = LinkageName;
  }

  // Install a temporary forward declaration so recursive references (e.g.
  // while emitting parameter types below) resolve.
  llvm::DISubprogram *ReplaceableType = DBuilder.createTempFunctionFwdDecl(
      Scope, Name, LinkageName, File, Line, /*Type=*/nullptr, ScopeLine);
  auto FwdDecl = llvm::TempDISubprogram(ReplaceableType);
  ScopeCache[DS] = llvm::TrackingMDNodeRef(FwdDecl.get());

  CanSILFunctionType FnTy = getFunctionType(SILTy);
  auto Params = Opts.DebugInfoLevel > IRGenDebugInfoLevel::LineTables
                    ? createParameterTypes(SILTy)
                    : nullptr;
  llvm::DISubroutineType *DIFnTy = DBuilder.createSubroutineType(Params);
  llvm::DITemplateParameterArray TemplateParameters = nullptr;
  llvm::DISubprogram *Decl = nullptr;

  // Various flags.
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  // Mark everything that is not visible from the source code (i.e.,
  // does not have a Swift name) as artificial, so the debugger can
  // ignore it. Explicit closures are exempt from this rule. We also
  // make an exception for toplevel code, which, although it does not
  // have a Swift name, does appear prominently in the source code.
  // ObjC thunks should also not show up in the linetable, because we
  // never want to set a breakpoint there.
  if ((Name.empty() &&
       LinkageName !=
           IGM.getSILModule().getASTContext().getEntryPointFunctionName() &&
       !isExplicitClosure(SILFn)) ||
      (Rep == SILFunctionTypeRepresentation::ObjCMethod) ||
      isAllocatingConstructor(Rep, DeclCtx)) {
    Flags |= llvm::DINode::FlagArtificial;
    ScopeLine = 0;
  }

  if (FnTy &&
      FnTy->getRepresentation() == SILFunctionType::Representation::Block)
    Flags |= llvm::DINode::FlagAppleBlock;

  // Get the throws information.
  llvm::DITypeArray Error = nullptr;
  if (FnTy && (Opts.DebugInfoLevel > IRGenDebugInfoLevel::LineTables))
    if (auto ErrorInfo = FnTy->getOptionalErrorResult()) {
      GenericContextScope scope(IGM, FnTy->getInvocationGenericSignature());
      CanType errorResultTy = ErrorInfo->getReturnValueType(
          IGM.getSILModule(), FnTy,
          IGM.getMaximalTypeExpansionContext());
      SILType SILTy = IGM.silConv.getSILType(
          *ErrorInfo, FnTy, IGM.getMaximalTypeExpansionContext());
      // Map interface types into the function's generic environment.
      errorResultTy = SILFn->mapTypeIntoEnvironment(errorResultTy)
                          ->getCanonicalType();
      SILTy = SILFn->mapTypeIntoEnvironment(SILTy);
      auto DTI = DebugTypeInfo::getFromTypeInfo(
          errorResultTy,
          IGM.getTypeInfo(SILTy), IGM);
      Error = DBuilder.getOrCreateArray({getOrCreateType(DTI)}).get();
    }

  llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::toSPFlags(
      /*IsLocalToUnit=*/Fn ? Fn->hasInternalLinkage() : true,
      /*IsDefinition=*/true, /*IsOptimized=*/Opts.shouldOptimize());

  // When the function is a method, we want a DW_AT_declaration there.
  // Because there's no good way to cross the CU boundary to insert a nested
  // DISubprogram definition in one CU into a type defined in another CU when
  // doing LTO builds.
  if (llvm::isa<llvm::DICompositeType>(Scope) &&
      (Rep == SILFunctionTypeRepresentation::Method ||
       Rep == SILFunctionTypeRepresentation::ObjCMethod ||
       Rep == SILFunctionTypeRepresentation::WitnessMethod ||
       Rep == SILFunctionTypeRepresentation::CXXMethod ||
       Rep == SILFunctionTypeRepresentation::CFunctionPointer ||
       Rep == SILFunctionTypeRepresentation::Thin)) {
    // Note: this deliberately shadows the outer SPFlags with a
    // non-definition variant for the method declaration.
    llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::toSPFlags(
        /*IsLocalToUnit=*/Fn ? Fn->hasInternalLinkage() : true,
        /*IsDefinition=*/false, /*IsOptimized=*/Opts.shouldOptimize());
    Decl = DBuilder.createMethod(Scope, Name, LinkageName, File, Line, DIFnTy,
                                 0, 0, nullptr, Flags, SPFlags,
                                 TemplateParameters, Error);
  }

  // Construct the DISubprogram.
  llvm::DISubprogram *SP =
      DBuilder.createFunction(Scope, Name, LinkageName, File, Line, DIFnTy,
                              ScopeLine, Flags | getCallSiteRelatedAttrs(),
                              SPFlags, TemplateParameters, Decl, Error);
  if (Fn && !Fn->isDeclaration())
    Fn->setSubprogram(SP);

  // RAUW the entry point function forward declaration with the real thing.
  if (LinkageName ==
      IGM.getSILModule().getASTContext().getEntryPointFunctionName()) {
    if (EntryPointFn) {
      assert(EntryPointFn->isTemporary() &&
             "more than one entry point function");
      EntryPointFn->replaceAllUsesWith(SP);
      llvm::MDNode::deleteTemporary(EntryPointFn);
    }
    EntryPointFn = SP;
  }

  if (!DS)
    return nullptr;

  DBuilder.replaceTemporary(std::move(FwdDecl), SP);
  ScopeCache[DS] = llvm::TrackingMDNodeRef(SP);
  return SP;
}
/// Emit debug info for a compiler-generated helper function that has no
/// SIL counterpart.
void IRGenDebugInfoImpl::emitArtificialFunction(IRBuilder &Builder,
                                                llvm::Function *Fn,
                                                SILType SILTy) {
  RegularLocation AutoLoc = RegularLocation::getAutoGeneratedLocation();
  auto *ArtificialScope = new (IGM.getSILModule()) SILDebugScope(AutoLoc);
  emitFunction(ArtificialScope, Fn, SILFunctionTypeRepresentation::Thin,
               SILTy);
  /// Reusing the current file would be wrong: An objc thunk, for example, could
  /// be triggered from any random location. Use a placeholder name instead.
  setCurrentLoc(Builder, ArtificialScope, AutoLoc);
}
/// Emit debug info for an outlined helper, naming it after the function it
/// was outlined from.
void IRGenDebugInfoImpl::emitOutlinedFunction(IRBuilder &Builder,
                                              llvm::Function *Fn,
                                              StringRef outlinedFromName) {
  RegularLocation AutoLoc = RegularLocation::getAutoGeneratedLocation();
  auto *OutlinedScope = new (IGM.getSILModule()) SILDebugScope(AutoLoc);
  emitFunction(OutlinedScope, Fn, SILFunctionTypeRepresentation::Thin,
               SILType(), nullptr, outlinedFromName);
  /// Reusing the current file would be wrong: An objc thunk, for example, could
  /// be triggered from any random location. Use a placeholder name instead.
  setCurrentLoc(Builder, OutlinedScope, AutoLoc);
}
/// Translate a SIL Fragment (or TupleFragment) DIExpr operand into an LLVM
/// DW_OP_LLVM_fragment (size, offset) pair. Returns false if the fragment
/// cannot be described (non-fixed or empty field type).
bool IRGenDebugInfoImpl::handleFragmentDIExpr(
    const SILDIExprOperand &CurDIExprOp,
    llvm::DIExpression::FragmentInfo &Fragment) {
  if (CurDIExprOp.getOperator() == SILDIExprOperator::TupleFragment)
    return handleTupleFragmentDIExpr(CurDIExprOp, Fragment);

  assert(CurDIExprOp.getOperator() == SILDIExprOperator::Fragment);
  // Expecting a VarDecl that points to a field in a struct
  auto DIExprArgs = CurDIExprOp.args();
  auto *VD = dyn_cast_or_null<VarDecl>(DIExprArgs.size() ?
                                       DIExprArgs[0].getAsDecl() : nullptr);
  assert(VD && "Expecting a VarDecl as the operand for "
               "DIExprOperator::Fragment");
  // Translate the based type
  DeclContext *ParentDecl = VD->getDeclContext();
  assert(ParentDecl && "VarDecl has no parent context?");
  SILType ParentSILType =
      IGM.getLoweredType(ParentDecl->getDeclaredTypeInContext());
  // Retrieve the offset & size of the field
  llvm::Constant *Offset =
      emitPhysicalStructMemberFixedOffset(IGM, ParentSILType, VD);
  auto *FieldTypeInfo = getPhysicalStructFieldTypeInfo(IGM, ParentSILType, VD);
  // FIXME: This will only happen if IRGen hasn't processed ParentSILType
  // (into its own representation) but we probably should ask IRGen to process
  // it right now.
  if (!FieldTypeInfo)
    return false;
  llvm::Type *FieldTy = FieldTypeInfo->getStorageType();
  // Doesn't support non-fixed or empty types right now.
  if (!Offset || !FieldTy || !FieldTy->isSized())
    return false;

  uint64_t SizeOfByte = CI.getTargetInfo().getCharWidth();
  uint64_t SizeInBits = IGM.DataLayout.getTypeSizeInBits(FieldTy);
  uint64_t OffsetInBits =
      Offset->getUniqueInteger().getLimitedValue() * SizeOfByte;

  // Translate to DW_OP_LLVM_fragment operands
  Fragment = {SizeInBits, OffsetInBits};
  return true;
}
/// Translate a SIL TupleFragment DIExpr operand (a tuple type plus an element
/// index) into an LLVM DW_OP_LLVM_fragment (size, offset) pair. Returns false
/// if the element cannot be described (non-fixed or empty type).
bool IRGenDebugInfoImpl::handleTupleFragmentDIExpr(
    const SILDIExprOperand &CurDIExprOp,
    llvm::DIExpression::FragmentInfo &Fragment) {
  assert(CurDIExprOp.getOperator() == SILDIExprOperator::TupleFragment);
  // Expecting a TupleType followed by an index
  auto DIExprArgs = CurDIExprOp.args();
  assert(DIExprArgs.size() >= 2 && "Expecting two arguments for "
                                   "DIExprOperator::TupleFragment");
  auto *TT = dyn_cast<TupleType>(DIExprArgs[0].getAsType().getPointer());
  assert(TT && "Expecting a TupleType as the first operand for "
               "DIExprOperator::TupleFragment");
  auto Idx = DIExprArgs[1].getAsConstInt();
  assert(Idx && "Expecting an index as the second operand for "
                "DIExprOperator::TupleFragment");

  // Translate the based type
  SILType ParentSILType = IGM.getLoweredType(TT);
  // Retrieve the offset & size of the field
  auto Offset = getFixedTupleElementOffset(IGM, ParentSILType, *Idx);
  auto ElementType = TT->getElement(*Idx).getType()->getCanonicalType();
  llvm::Type *FieldTy = IGM.getStorageTypeForLowered(ElementType);
  // Doesn't support non-fixed or empty types right now.
  if (!Offset || !FieldTy || !FieldTy->isSized())
    return false;

  uint64_t SizeInBits = IGM.DataLayout.getTypeSizeInBits(FieldTy);
  uint64_t OffsetInBits = Offset->getValueInBits();

  // Translate to DW_OP_LLVM_fragment operands
  Fragment = {SizeInBits, OffsetInBits};
  return true;
}
/// Lower a SIL debug info expression into DWARF expression operands.
///
/// \param VarInfo   The SIL debug variable; VarInfo.DIExpr must be non-null.
/// \param Operands  Receives the DWARF opcodes (DW_OP_*) in order.
/// \param Fragment  Receives the accumulated fragment (offset/size in bits)
///                  from any Fragment/TupleFragment operators; {0, 0} if none.
/// \return false if an operand could not be lowered (e.g. a fragment over a
///         non-fixed type, or an INVALID operator).
bool IRGenDebugInfoImpl::buildDebugInfoExpression(
    const SILDebugVariable &VarInfo, SmallVectorImpl<uint64_t> &Operands,
    llvm::DIExpression::FragmentInfo &Fragment) {
  assert(VarInfo.DIExpr && "SIL debug info expression not found");
  const auto &DIExpr = VarInfo.DIExpr;
  for (const SILDIExprOperand &ExprOperand : DIExpr.operands()) {
    llvm::DIExpression::FragmentInfo SubFragment = {0, 0};
    switch (ExprOperand.getOperator()) {
    case SILDIExprOperator::Fragment:
    case SILDIExprOperator::TupleFragment:
      if (!handleFragmentDIExpr(ExprOperand, SubFragment))
        return false;
      // A nested fragment must fit inside the enclosing one. Note: the whole
      // disjunction is parenthesized so the message literal attaches to the
      // assert condition as a whole, not just the second operand.
      assert((!Fragment.SizeInBits ||
              SubFragment.OffsetInBits + SubFragment.SizeInBits <=
                  Fragment.SizeInBits) &&
             "Invalid nested fragments");
      // Nested fragments accumulate their offsets; the innermost fragment
      // determines the size.
      Fragment.OffsetInBits += SubFragment.OffsetInBits;
      Fragment.SizeInBits = SubFragment.SizeInBits;
      break;
    case SILDIExprOperator::Dereference:
      Operands.push_back(llvm::dwarf::DW_OP_deref);
      break;
    case SILDIExprOperator::Plus:
      Operands.push_back(llvm::dwarf::DW_OP_plus);
      break;
    case SILDIExprOperator::Minus:
      Operands.push_back(llvm::dwarf::DW_OP_minus);
      break;
    case SILDIExprOperator::ConstUInt:
      Operands.push_back(llvm::dwarf::DW_OP_constu);
      Operands.push_back(*ExprOperand[1].getAsConstInt());
      break;
    case SILDIExprOperator::ConstSInt:
      Operands.push_back(llvm::dwarf::DW_OP_consts);
      Operands.push_back(*ExprOperand[1].getAsConstInt());
      break;
    case SILDIExprOperator::INVALID:
      return false;
    }
  }
  // Unless the expression ends in a dereference (i.e. computes a memory
  // location), its result is a value, which DWARF marks with
  // DW_OP_stack_value.
  if (Operands.size() && Operands.back() != llvm::dwarf::DW_OP_deref) {
    Operands.push_back(llvm::dwarf::DW_OP_stack_value);
  }
  return true;
}
/// Emit debug info for a local variable or function argument.
///
/// \param Storage        The LLVM values backing the variable; more than one
///                       entry means the variable was exploded into pieces,
///                       each described with a DW_OP_LLVM_fragment.
/// \param DbgTy          Debug type information for the variable.
/// \param DS             The SIL debug scope (must be non-null).
/// \param DbgInstLoc     Source location of the defining instruction.
/// \param VarInfo        Name, argument number, scope, and SIL DIExpression.
/// \param Indirection    Whether Storage holds the value or its address.
/// \param Artificial     Whether to mark the variable compiler-generated.
/// \param AddrDInstrKind Preferred debug intrinsic kind for address storage.
void IRGenDebugInfoImpl::emitVariableDeclaration(
    IRBuilder &Builder, ArrayRef<llvm::Value *> Storage, DebugTypeInfo DbgTy,
    const SILDebugScope *DS, std::optional<SILLocation> DbgInstLoc,
    SILDebugVariable VarInfo, IndirectionKind Indirection,
    ArtificialKind Artificial, AddrDbgInstrKind AddrDInstrKind) {
  assert(DS && "variable has no scope");
  if (Opts.DebugInfoLevel <= IRGenDebugInfoLevel::LineTables)
    return;
  // We cannot yet represent local archetypes.
  if (DbgTy.getType()->hasLocalArchetype())
    return;
  auto *Scope = dyn_cast_or_null<llvm::DILocalScope>(getOrCreateScope(DS));
  assert(Scope && "variable has no local scope");
  auto DInstLoc = getStartLocation(DbgInstLoc);
  // FIXME: this should be the scope of the type's declaration.
  // If this is an argument, attach it to the current function scope.
  uint16_t ArgNo = VarInfo.ArgNo;
  if (ArgNo > 0) {
    // Walk out of lexical blocks so parameters land on the function scope.
    while (isa<llvm::DILexicalBlock>(Scope))
      Scope = cast<llvm::DILexicalBlock>(Scope)->getScope();
  }
  assert(isa_and_nonnull<llvm::DIScope>(Scope) && "variable has no scope");
  llvm::DIFile *Unit = getFile(Scope);
  llvm::DIType *DITy = getOrCreateType(DbgTy);
  assert(DITy && "could not determine debug type of variable");
  if (VarInfo.Constant)
    DITy = DBuilder.createQualifiedType(llvm::dwarf::DW_TAG_const_type, DITy);
  unsigned DInstLine = DInstLoc.Line;
  // Self is always an artificial argument, so are variables without location.
  if (!DInstLine || (ArgNo > 0 && VarInfo.Name == IGM.Context.Id_self.str()))
    Artificial = ArtificialValue;
  llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
  if (Artificial || DITy->isArtificial() || DITy == InternalType)
    Flags |= llvm::DINode::FlagArtificial;
  // Create the descriptor for the variable.
  // Prefer the location attached to the debug variable itself (when it
  // differs from the instruction's location) for the variable's line/column.
  unsigned DVarLine = DInstLine;
  uint16_t DVarCol = DInstLoc.Column;
  auto VarInfoLoc = VarInfo.Loc ? VarInfo.Loc : DbgInstLoc;
  if (VarInfoLoc) {
    auto VarLoc = VarInfoLoc->strippedForDebugVariable();
    if (VarLoc != DbgInstLoc) {
      auto DVarLoc = getStartLocation(VarLoc);
      DVarLine = DVarLoc.Line;
      DVarCol = DVarLoc.Column;
    }
  }
  // Non-argument variables may carry their own SIL scope.
  llvm::DIScope *VarScope = Scope;
  if (ArgNo == 0 && VarInfo.Scope) {
    if (auto *VS = dyn_cast_or_null<llvm::DILocalScope>(
            getOrCreateScope(VarInfo.Scope))) {
      VarScope = VS;
    }
  }
  // Get or create the DILocalVariable.
  llvm::DILocalVariable *Var;
  // VarInfo.Name points into tail-allocated storage in debug_value insns.
  llvm::StringRef UniqueName = VarNames.insert(VarInfo.Name).first->getKey();
  VarID Key(VarScope, UniqueName, DVarLine, DVarCol);
  auto CachedVar = LocalVarCache.find(Key);
  if (CachedVar != LocalVarCache.end()) {
    Var = cast<llvm::DILocalVariable>(CachedVar->second);
  } else {
    // The llvm.dbg.value(undef) emitted for zero-sized variables get filtered
    // out by DwarfDebug::collectEntityInfo(), so all variables need to be
    // preserved even at -Onone.
    bool Preserve = true;
    if (ArgNo > 0)
      Var = DBuilder.createParameterVariable(
          VarScope, VarInfo.Name, ArgNo, Unit, DVarLine, DITy, Preserve, Flags);
    else
      Var = DBuilder.createAutoVariable(VarScope, VarInfo.Name, Unit, DVarLine,
                                        DITy, Preserve, Flags);
    LocalVarCache.insert({Key, llvm::TrackingMDNodeRef(Var)});
  }
  // Running variables for the current/previous piece.
  bool IsPiece = Storage.size() > 1;
  uint64_t SizeOfByte = CI.getTargetInfo().getCharWidth();
  unsigned AlignInBits = SizeOfByte;
  unsigned OffsetInBits = 0;
  unsigned SizeInBits = 0;
  llvm::DIExpression::FragmentInfo Fragment = {0, 0};
  // Combine the base DIExpression with the variable's SIL DIExpression (if
  // any) and the per-piece fragment; returns null if the result is invalid.
  auto appendDIExpression =
      [&VarInfo, this](llvm::DIExpression *DIExpr,
                       llvm::DIExpression::FragmentInfo PieceFragment,
                       bool IsFirstAndOnlyPiece) -> llvm::DIExpression * {
    if (!VarInfo.DIExpr) {
      if (!PieceFragment.SizeInBits || IsFirstAndOnlyPiece)
        return DIExpr;
      return llvm::DIExpression::createFragmentExpression(
                 DIExpr, PieceFragment.OffsetInBits, PieceFragment.SizeInBits)
          .value_or(nullptr);
    }
    llvm::SmallVector<uint64_t, 2> Operands;
    llvm::DIExpression::FragmentInfo VarFragment = {0, 0};
    if (!buildDebugInfoExpression(VarInfo, Operands, VarFragment))
      return nullptr;
    if (!Operands.empty())
      DIExpr = llvm::DIExpression::append(DIExpr, Operands);
    // Add the fragment of the SIL variable.
    if (VarFragment.SizeInBits && !IsFirstAndOnlyPiece)
      DIExpr = llvm::DIExpression::createFragmentExpression(
                   DIExpr, VarFragment.OffsetInBits, VarFragment.SizeInBits)
                   .value_or(nullptr);
    if (!DIExpr)
      return nullptr;
    // When the fragment of the SIL variable is further split into other
    // fragments (PieceFragment), merge them into one DW_OP_LLVM_Fragment
    // expression.
    if (PieceFragment.SizeInBits)
      return llvm::DIExpression::createFragmentExpression(
                 DIExpr, PieceFragment.OffsetInBits, PieceFragment.SizeInBits)
          .value_or(nullptr);
    return DIExpr;
  };
  for (llvm::Value *Piece : Storage) {
    SmallVector<uint64_t, 3> Operands;
    if (DbgTy.getType()->isForeignReferenceType())
      Operands.push_back(llvm::dwarf::DW_OP_deref);
    if (Indirection == IndirectValue || Indirection == CoroIndirectValue)
      Operands.push_back(llvm::dwarf::DW_OP_deref);
    if (IsPiece) {
      // Advance the offset for the next piece.
      OffsetInBits += SizeInBits;
      SizeInBits = IGM.DataLayout.getTypeSizeInBits(Piece->getType());
      AlignInBits = IGM.DataLayout.getABITypeAlign(Piece->getType()).value();
      if (!AlignInBits)
        AlignInBits = SizeOfByte;
      // Soundness checks.
#ifndef NDEBUG
      assert(SizeInBits && "zero-sized piece");
      if (getSizeInBits(Var)) {
        assert(SizeInBits < getSizeInBits(Var) && "piece covers entire var");
        assert(OffsetInBits + SizeInBits <= getSizeInBits(Var) &&
               "pars > totum");
      }
#endif
      // Add the piece DW_OP_LLVM_fragment operands
      Fragment.OffsetInBits = OffsetInBits;
      Fragment.SizeInBits = SizeInBits;
    }
    // LLVM complains if a single fragment covers the entire variable. This can
    // happen if, e.g., the optimizer takes the _value out of an Int
    // struct. Detect this case and don't emit a fragment.
    bool IsFirstAndOnlyPiece =
        !IsPiece && Fragment.OffsetInBits == 0 &&
        Fragment.SizeInBits == getSizeInBits(Var->getType());
    llvm::DIExpression *DIExpr = DBuilder.createExpression(Operands);
    DIExpr = appendDIExpression(DIExpr, Fragment, IsFirstAndOnlyPiece);
    if (DIExpr)
      emitDbgIntrinsic(
          Builder, Piece, Var, DIExpr, DInstLine, DInstLoc.Column, Scope, DS,
          Indirection == CoroDirectValue || Indirection == CoroIndirectValue,
          AddrDInstrKind);
  }
  // Emit locationless intrinsic for variables that were optimized away.
  if (Storage.empty()) {
    llvm::DIExpression::FragmentInfo NoFragment = {0, 0};
    if (auto *DIExpr =
            appendDIExpression(DBuilder.createExpression(), NoFragment, false))
      emitDbgIntrinsic(Builder, llvm::ConstantInt::get(IGM.Int64Ty, 0), Var,
                       DIExpr, DInstLine, DInstLoc.Column, Scope, DS,
                       Indirection == CoroDirectValue ||
                           Indirection == CoroIndirectValue,
                       AddrDInstrKind);
  }
}
namespace {
/// A helper struct that is used by emitDbgIntrinsic to factor redundant code.
struct DbgIntrinsicEmitter {
  /// Either the instruction to insert before, or the block to append to.
  PointerUnion<llvm::BasicBlock *, llvm::Instruction *> InsertPt;
  irgen::IRBuilder &IRBuilder;
  llvm::DIBuilder &DIBuilder;
  /// Which debug intrinsic kind to emit (declare, declare-value, or
  /// dbg.value with an added deref).
  AddrDbgInstrKind ForceDbgDeclareOrDeclareValue;
  /// Initialize the emitter, capturing the IRBuilder's current "generalized
  /// insertion point": the instruction at the builder's insertion point if
  /// there is one, otherwise the basic block itself (append at the end).
  DbgIntrinsicEmitter(irgen::IRBuilder &IRBuilder, llvm::DIBuilder &DIBuilder,
                      AddrDbgInstrKind AddrDInstrKind)
      : InsertPt(), IRBuilder(IRBuilder), DIBuilder(DIBuilder),
        ForceDbgDeclareOrDeclareValue(AddrDInstrKind) {
    auto *ParentBB = IRBuilder.GetInsertBlock();
    auto InsertBefore = IRBuilder.GetInsertPoint();
    if (InsertBefore != ParentBB->end())
      InsertPt = &*InsertBefore;
    else
      InsertPt = ParentBB;
  }
  /// Insert at the insertion point captured by the constructor.
  llvm::DbgInstPtr insert(llvm::Value *Addr, llvm::DILocalVariable *VarInfo,
                          llvm::DIExpression *Expr,
                          const llvm::DILocation *DL) {
    if (auto *Inst = InsertPt.dyn_cast<llvm::Instruction *>()) {
      return insert(Addr, VarInfo, Expr, DL, Inst);
    } else {
      return insert(Addr, VarInfo, Expr, DL,
                    cast<llvm::BasicBlock *>(InsertPt));
    }
  }
  /// Insert before a specific instruction.
  llvm::DbgInstPtr insert(llvm::Value *Addr, llvm::DILocalVariable *VarInfo,
                          llvm::DIExpression *Expr,
                          const llvm::DILocation *DL,
                          llvm::Instruction *InsertBefore) {
    if (ForceDbgDeclareOrDeclareValue == AddrDbgInstrKind::DbgDeclare)
      return DIBuilder.insertDeclare(Addr, VarInfo, Expr, DL,
                                     InsertBefore->getIterator());
    if (ForceDbgDeclareOrDeclareValue == AddrDbgInstrKind::DbgDeclareValue)
      return DIBuilder.insertDeclareValue(Addr, VarInfo, Expr, DL,
                                          InsertBefore->getIterator());
    // For a dbg.value of an address, add a deref so the expression yields
    // the variable's value rather than its location.
    Expr = llvm::DIExpression::append(Expr, llvm::dwarf::DW_OP_deref);
    return DIBuilder.insertDbgValueIntrinsic(Addr, VarInfo, Expr, DL,
                                             InsertBefore->getIterator());
  }
  /// Insert at the end of a basic block.
  llvm::DbgInstPtr insert(llvm::Value *Addr, llvm::DILocalVariable *VarInfo,
                          llvm::DIExpression *Expr,
                          const llvm::DILocation *DL,
                          llvm::BasicBlock *Block) {
    if (ForceDbgDeclareOrDeclareValue == AddrDbgInstrKind::DbgDeclare)
      return DIBuilder.insertDeclare(Addr, VarInfo, Expr, DL, Block);
    if (ForceDbgDeclareOrDeclareValue == AddrDbgInstrKind::DbgDeclareValue)
      return DIBuilder.insertDeclareValue(Addr, VarInfo, Expr, DL, Block);
    // See the instruction overload: deref turns the address into a value.
    Expr = llvm::DIExpression::append(Expr, llvm::dwarf::DW_OP_deref);
    return DIBuilder.insertDbgValueIntrinsic(Addr, VarInfo, Expr, DL, Block);
  }
};
} // namespace
/// Emit one debug intrinsic (dbg.declare / dbg.value / declare-value) for a
/// single storage value, choosing the intrinsic kind and insertion point
/// based on what kind of storage this is (undef, alloca, coroutine slot,
/// function argument, or a plain value).
void IRGenDebugInfoImpl::emitDbgIntrinsic(
    IRBuilder &Builder, llvm::Value *Storage, llvm::DILocalVariable *Var,
    llvm::DIExpression *Expr, unsigned Line, unsigned Col,
    llvm::DILocalScope *Scope, const SILDebugScope *DS, bool InCoroContext,
    AddrDbgInstrKind AddrDInstKind) {
  Storage = Storage->stripPointerCasts();
  // Set the location/scope of the intrinsic.
  auto *InlinedAt = createInlinedAt(DS);
  auto DL =
      llvm::DILocation::get(IGM.getLLVMContext(), Line, Col, Scope, InlinedAt);
  // Fragment DIExpression cannot cover the whole variable
  // or going out-of-bound.
  if (auto Fragment = Expr->getFragmentInfo()) {
    if (auto VarSize = Var->getSizeInBits()) {
      unsigned FragSize = Fragment->SizeInBits;
      unsigned FragOffset = Fragment->OffsetInBits;
      if (FragOffset + FragSize > *VarSize || FragSize == *VarSize) {
        // Drop the fragment part
        assert(Expr->isValid());
        // Since this expression is valid, DW_OP_LLVM_fragment
        // and its arguments must be the last 3 elements.
        auto OrigElements = Expr->getElements();
        Expr = DBuilder.createExpression(OrigElements.drop_back(3));
      }
    }
  }
  auto *ParentBlock = Builder.GetInsertBlock();
  // First before we do anything, check if we have an Undef. In this case, we
  // /always/ emit an llvm.dbg.value of undef.
  // If we have undef, always emit a llvm.dbg.value in the current position.
  if (isa<llvm::UndefValue>(Storage)) {
    if (Expr->getNumElements() &&
        (Expr->getElement(0) == llvm::dwarf::DW_OP_consts
         || Expr->getElement(0) == llvm::dwarf::DW_OP_constu)) {
      /// Convert `undef, expr op_consts:N:...` to `N, expr ...`
      Storage = llvm::ConstantInt::get(
          llvm::IntegerType::getInt64Ty(Builder.getContext()),
          Expr->getElement(1));
      Expr = llvm::DIExpression::get(Builder.getContext(),
                                     Expr->getElements().drop_front(2));
    }
    DBuilder.insertDbgValueIntrinsic(Storage, Var, Expr, DL, ParentBlock);
    return;
  }
  // In optimized code (outside coroutine parameters), fall back to
  // dbg.value-with-deref since storage may not stay put.
  bool optimized = DS->getParentFunction()->shouldOptimize();
  if (optimized && (!InCoroContext || !Var->isParameter()))
    AddrDInstKind = AddrDbgInstrKind::DbgValueDeref;
  if (InCoroContext && AddrDInstKind != AddrDbgInstrKind::DbgValueDeref)
    AddrDInstKind = AddrDbgInstrKind::DbgDeclareValue;
  DbgIntrinsicEmitter inserter{Builder, DBuilder, AddrDInstKind};
  // If we have a single alloca...
  if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Storage)) {
    auto InsertBefore = Builder.GetInsertPoint();
    // Declares are placed right after the alloca itself.
    if (AddrDInstKind == AddrDbgInstrKind::DbgDeclare ||
        AddrDInstKind == AddrDbgInstrKind::DbgDeclareValue) {
      ParentBlock = Alloca->getParent();
      InsertBefore = std::next(Alloca->getIterator());
    }
    if (InsertBefore != ParentBlock->end()) {
      inserter.insert(Alloca, Var, Expr, DL, &*InsertBefore);
    } else {
      inserter.insert(Alloca, Var, Expr, DL, ParentBlock);
    }
    return;
  }
  // Coroutine frame slots obtained via coro.alloca.get.
  if ((isa<llvm::IntrinsicInst>(Storage) &&
       cast<llvm::IntrinsicInst>(Storage)->getIntrinsicID() ==
           llvm::Intrinsic::coro_alloca_get)) {
    inserter.insert(Storage, Var, Expr, DL, ParentBlock);
    return;
  }
  if (InCoroContext && (Var->isParameter() || !optimized)) {
    PointerUnion<llvm::BasicBlock *, llvm::Instruction *> InsertPt;
    // If we have a dbg.declare, we are relying on a contract with the coroutine
    // splitter that in split coroutines we always create debug info for values
    // in the coroutine context by creating a llvm.dbg.declare for the variable
    // in the entry block of each funclet.
    if (AddrDInstKind == AddrDbgInstrKind::DbgDeclare ||
        AddrDInstKind == AddrDbgInstrKind::DbgDeclareValue) {
      // Function arguments in async functions are emitted without a shadow copy
      // (that would interfere with coroutine splitting) but with a
      // llvm.dbg.declare to give CoroSplit.cpp license to emit a shadow copy
      // for them pointing inside the Swift Context argument that is valid
      // throughout the function.
      auto &EntryBlock = ParentBlock->getParent()->getEntryBlock();
      if (auto *InsertBefore = &*EntryBlock.getFirstInsertionPt()) {
        InsertPt = InsertBefore;
      } else {
        InsertPt = &EntryBlock;
      }
    } else {
      // For llvm.dbg.value, we just want to insert the intrinsic at the current
      // insertion point. This is because our contract with the coroutine
      // splitter is that the coroutine splitter just needs to emit the
      // llvm.dbg.value where we placed them. It shouldn't move them or do
      // anything special with it. Instead, we have previously inserted extra
      // debug_value clones previously after each instruction at the SIL level
      // that corresponds with a funclet edge. This operation effectively sets
      // up the rest of the pipeline to be stupid and just emit the
      // llvm.dbg.value in the correct places. This is done by the SILOptimizer
      // pass DebugInfoCanonicalizer.
      auto InsertBefore = Builder.GetInsertPoint();
      if (InsertBefore != ParentBlock->end()) {
        InsertPt = &*InsertBefore;
      } else {
        InsertPt = ParentBlock;
      }
    }
    // Ok, we now have our insert pt. Call the appropriate operations.
    assert(InsertPt);
    if (auto *InsertBefore = InsertPt.dyn_cast<llvm::Instruction *>()) {
      inserter.insert(Storage, Var, Expr, DL, InsertBefore);
    } else {
      inserter.insert(Storage, Var, Expr, DL,
                      cast<llvm::BasicBlock *>(InsertPt));
    }
    return;
  }
  // Insert a dbg.value at the current insertion point.
  if (isa<llvm::Argument>(Storage) && !Var->getArg()) {
    const auto InsertPt = ParentBlock->getFirstNonPHIOrDbg();
    if (InsertPt != ParentBlock->end()) {
      // SelectionDAGISel only generates debug info for a dbg.value
      // that is associated with a llvm::Argument if either its !DIVariable
      // is marked as argument or there is no non-debug intrinsic instruction
      // before it. So In the case of associating a llvm::Argument with a
      // non-argument debug variable -- usually via a !DIExpression -- we
      // need to make sure that dbg.value is before any non-phi / no-dbg
      // instruction.
      DBuilder.insertDbgValueIntrinsic(Storage, Var, Expr, DL, InsertPt);
      return;
    }
  }
  DBuilder.insertDbgValueIntrinsic(Storage, Var, Expr, DL, ParentBlock->end());
}
/// Emit debug info for a global variable. A null \p Var means the variable
/// was optimized away; it is then described by a constant-value expression.
void IRGenDebugInfoImpl::emitGlobalVariableDeclaration(
    llvm::GlobalVariable *Var, StringRef Name, StringRef LinkageName,
    DebugTypeInfo DbgTy, bool IsLocalToUnit, std::optional<SILLocation> Loc) {
  // Globals are only described above line-tables level.
  if (Opts.DebugInfoLevel <= IRGenDebugInfoLevel::LineTables)
    return;

  llvm::DIType *DITy = getOrCreateType(DbgTy);

  // Immutable globals (lets, or anything without a VarDecl) get a
  // const-qualified debug type.
  VarDecl *VD = nullptr;
  if (Loc)
    VD = dyn_cast_or_null<VarDecl>(Loc->getAsASTNode<Decl>());
  const bool IsImmutable = !VD || VD->isLet();
  if (IsImmutable)
    DITy = DBuilder.createQualifiedType(llvm::dwarf::DW_TAG_const_type, DITy);

  // FIXME: Really these should be marked as artificial, but LLVM
  // currently has no support for flags to be put on global
  // variables. In the mean time, elide these variables, they
  // would confuse both the user and LLDB.
  if (DITy->isArtificial() || DITy == InternalType || !Loc)
    return;

  if (DbgTy.isFixedBuffer())
    DITy = createFixedValueBufferStruct(DITy);

  auto L = getStartLocation(Loc);

  // Emit it as global variable of the current module. An optimized-away
  // variable has no storage, so it is represented by a constant value.
  llvm::DIExpression *Expr =
      Var ? nullptr : DBuilder.createConstantValueExpression(0);
  auto *GV = DBuilder.createGlobalVariableExpression(
      MainModule, Name, LinkageName, L.File, L.Line, DITy, IsLocalToUnit, true,
      Expr);
  if (Var)
    Var->addDebugInfo(GV);
}
/// Emit an artificial local variable "$τ_<depth>_<index>" holding the type
/// metadata pointer for an archetype, so the debugger can reconstruct it.
void IRGenDebugInfoImpl::emitTypeMetadata(IRGenFunction &IGF,
                                          llvm::Value *Metadata, unsigned Depth,
                                          unsigned Index, StringRef Name) {
  if (Opts.DebugInfoLevel <= IRGenDebugInfoLevel::LineTables)
    return;

  // Don't emit debug info in transparent functions.
  auto *Scope = IGF.getDebugScope();
  if (!Scope || Scope->getInlinedFunction()->isTransparent())
    return;

  // Build the "$τ_<depth>_<index>" name.
  static const char *Tau = SWIFT_UTF8("\u03C4");
  llvm::SmallString<8> NameBuf;
  llvm::raw_svector_ostream NameOS(NameBuf);
  NameOS << '$' << Tau << '_' << Depth << '_' << Index;

  uint64_t PtrWidthInBits =
      CI.getTargetInfo().getPointerWidth(clang::LangAS::Default);
  assert(PtrWidthInBits % 8 == 0);
  auto DbgTy = DebugTypeInfo::getTypeMetadata(
      getMetadataType(Name)->getDeclaredInterfaceType().getPointer(),
      Size(PtrWidthInBits / 8),
      Alignment(CI.getTargetInfo().getPointerAlign(clang::LangAS::Default)));
  // swift.type is already a pointer type; having a shadow copy doesn't add
  // another layer of indirection.
  emitVariableDeclaration(IGF.Builder, Metadata, DbgTy, IGF.getDebugScope(),
                          {}, {NameOS.str().str(), 0, false},
                          IGF.isAsync() ? CoroDirectValue : DirectValue,
                          ArtificialValue);
}
/// Emit an artificial variable for a pack-count parameter, typed as a
/// word-sized builtin integer.
void IRGenDebugInfoImpl::emitPackCountParameter(IRGenFunction &IGF,
                                                llvm::Value *Metadata,
                                                SILDebugVariable VarInfo) {
  if (Opts.DebugInfoLevel <= IRGenDebugInfoLevel::LineTables)
    return;

  // Don't emit debug info in transparent functions.
  auto *Scope = IGF.getDebugScope();
  if (!Scope || Scope->getInlinedFunction()->isTransparent())
    return;

  // Pack counts are pointer-width builtin integers.
  auto PtrWidth = CI.getTargetInfo().getPointerWidth(clang::LangAS::Default);
  Type WordTy =
      BuiltinIntegerType::get(PtrWidth, IGM.getSwiftModule()->getASTContext());
  auto &TI = IGM.getTypeInfoForUnlowered(WordTy);
  auto DbgTy = *CompletedDebugTypeInfo::getFromTypeInfo(WordTy, TI, IGM);
  emitVariableDeclaration(
      IGF.Builder, Metadata, DbgTy, IGF.getDebugScope(), {}, VarInfo,
      IGF.isAsync() ? CoroDirectValue : DirectValue, ArtificialValue);
}
} // anonymous namespace
/// Factory for the concrete implementation behind the public
/// IRGenDebugInfo interface.
std::unique_ptr<IRGenDebugInfo> IRGenDebugInfo::createIRGenDebugInfo(
    const IRGenOptions &Opts, ClangImporter &CI, IRGenModule &IGM,
    llvm::Module &M, StringRef MainOutputFilenameForDebugInfo,
    StringRef PrivateDiscriminator) {
  auto Impl = std::make_unique<IRGenDebugInfoImpl>(
      Opts, CI, IGM, M, MainOutputFilenameForDebugInfo, PrivateDiscriminator);
  // unique_ptr<Impl> converts implicitly to unique_ptr<IRGenDebugInfo>.
  return Impl;
}
// Out-of-line destructor, kept with the rest of the implementation.
IRGenDebugInfo::~IRGenDebugInfo() {}
// Forwarding to the private implementation.
// Every public IRGenDebugInfo entry point simply casts to the
// IRGenDebugInfoImpl defined above and delegates.
void IRGenDebugInfo::finalize() {
  static_cast<IRGenDebugInfoImpl *>(this)->finalize();
}
void IRGenDebugInfo::setCurrentLoc(IRBuilder &Builder, const SILDebugScope *DS,
                                   SILLocation Loc) {
  static_cast<IRGenDebugInfoImpl *>(this)->setCurrentLoc(Builder, DS, Loc);
}
void IRGenDebugInfo::addFailureMessageToCurrentLoc(IRBuilder &Builder,
                                                   StringRef failureMsg) {
  static_cast<IRGenDebugInfoImpl *>(this)->addFailureMessageToCurrentLoc(
      Builder, failureMsg);
}
void IRGenDebugInfo::clearLoc(IRBuilder &Builder) {
  static_cast<IRGenDebugInfoImpl *>(this)->clearLoc(Builder);
}
void IRGenDebugInfo::pushLoc() {
  static_cast<IRGenDebugInfoImpl *>(this)->pushLoc();
}
void IRGenDebugInfo::popLoc() {
  static_cast<IRGenDebugInfoImpl *>(this)->popLoc();
}
void IRGenDebugInfo::setInlinedTrapLocation(IRBuilder &Builder,
                                            const SILDebugScope *Scope) {
  static_cast<IRGenDebugInfoImpl *>(this)->setInlinedTrapLocation(Builder,
                                                                  Scope);
}
void IRGenDebugInfo::setEntryPointLoc(IRBuilder &Builder) {
  static_cast<IRGenDebugInfoImpl *>(this)->setEntryPointLoc(Builder);
}
llvm::DIScope *IRGenDebugInfo::getEntryPointFn() {
  return static_cast<IRGenDebugInfoImpl *>(this)->getEntryPointFn();
}
llvm::DIScope *IRGenDebugInfo::getOrCreateScope(const SILDebugScope *DS) {
  return static_cast<IRGenDebugInfoImpl *>(this)->getOrCreateScope(DS);
}
void IRGenDebugInfo::emitImport(ImportDecl *D) {
  static_cast<IRGenDebugInfoImpl *>(this)->emitImport(D);
}
llvm::DISubprogram *
IRGenDebugInfo::emitFunction(const SILDebugScope *DS, llvm::Function *Fn,
                             SILFunctionTypeRepresentation Rep, SILType Ty,
                             DeclContext *DeclCtx, GenericEnvironment *GE) {
  // NOTE(review): the GE parameter is not forwarded to the implementation —
  // confirm the impl's signature defaults it or that this is intentional.
  return static_cast<IRGenDebugInfoImpl *>(this)->emitFunction(DS, Fn, Rep, Ty,
                                                               DeclCtx);
}
llvm::DISubprogram *IRGenDebugInfo::emitFunction(SILFunction &SILFn,
                                                 llvm::Function *Fn) {
  return static_cast<IRGenDebugInfoImpl *>(this)->emitFunction(SILFn, Fn);
}
void IRGenDebugInfo::emitArtificialFunction(IRBuilder &Builder,
                                            llvm::Function *Fn, SILType SILTy) {
  static_cast<IRGenDebugInfoImpl *>(this)->emitArtificialFunction(Builder, Fn,
                                                                  SILTy);
}
void IRGenDebugInfo::emitOutlinedFunction(IRBuilder &Builder,
                                          llvm::Function *Fn, StringRef name) {
  static_cast<IRGenDebugInfoImpl *>(this)->emitOutlinedFunction(Builder, Fn,
                                                                name);
}
void IRGenDebugInfo::emitVariableDeclaration(
    IRBuilder &Builder, ArrayRef<llvm::Value *> Storage, DebugTypeInfo Ty,
    const SILDebugScope *DS, std::optional<SILLocation> VarLoc,
    SILDebugVariable VarInfo, IndirectionKind Indirection,
    ArtificialKind Artificial, AddrDbgInstrKind AddrDInstKind) {
  static_cast<IRGenDebugInfoImpl *>(this)->emitVariableDeclaration(
      Builder, Storage, Ty, DS, VarLoc, VarInfo, Indirection, Artificial,
      AddrDInstKind);
}
void IRGenDebugInfo::emitDbgIntrinsic(IRBuilder &Builder, llvm::Value *Storage,
                                      llvm::DILocalVariable *Var,
                                      llvm::DIExpression *Expr, unsigned Line,
                                      unsigned Col, llvm::DILocalScope *Scope,
                                      const SILDebugScope *DS,
                                      bool InCoroContext,
                                      AddrDbgInstrKind AddrDInstKind) {
  static_cast<IRGenDebugInfoImpl *>(this)->emitDbgIntrinsic(
      Builder, Storage, Var, Expr, Line, Col, Scope, DS, InCoroContext,
      AddrDInstKind);
}
void IRGenDebugInfo::emitGlobalVariableDeclaration(
    llvm::GlobalVariable *Storage, StringRef Name, StringRef LinkageName,
    DebugTypeInfo DebugType, bool IsLocalToUnit,
    std::optional<SILLocation> Loc) {
  static_cast<IRGenDebugInfoImpl *>(this)->emitGlobalVariableDeclaration(
      Storage, Name, LinkageName, DebugType, IsLocalToUnit, Loc);
}
void IRGenDebugInfo::emitTypeMetadata(IRGenFunction &IGF, llvm::Value *Metadata,
                                      unsigned Depth, unsigned Index,
                                      StringRef Name) {
  static_cast<IRGenDebugInfoImpl *>(this)->emitTypeMetadata(IGF, Metadata,
                                                            Depth, Index, Name);
}
void IRGenDebugInfo::emitPackCountParameter(IRGenFunction &IGF,
                                            llvm::Value *Metadata,
                                            SILDebugVariable VarInfo) {
  static_cast<IRGenDebugInfoImpl *>(this)->emitPackCountParameter(IGF, Metadata,
                                                                  VarInfo);
}
llvm::DIBuilder &IRGenDebugInfo::getBuilder() {
  return static_cast<IRGenDebugInfoImpl *>(this)->getBuilder();
}
/// Remember the builder's current debug location so the destructor can
/// restore it. A null DI disables both the save and the restore.
AutoRestoreLocation::AutoRestoreLocation(IRGenDebugInfo *DI, IRBuilder &Builder)
    : DI(DI), Builder(Builder) {
  if (DI)
    SavedLocation = Builder.getCurrentDebugLocation();
}
/// Autorestore everything back to normal.
AutoRestoreLocation::~AutoRestoreLocation() {
  if (DI)
    Builder.SetCurrentDebugLocation(SavedLocation);
}
/// Temporarily switch the builder to an artificial (line 0) location in the
/// given scope; the base class restores the previous location on destruction.
ArtificialLocation::ArtificialLocation(const SILDebugScope *DS,
                                       IRGenDebugInfo *DI, IRBuilder &Builder)
    : AutoRestoreLocation(DI, Builder) {
  if (DI) {
    unsigned Line = 0;
    auto *Scope = DI->getOrCreateScope(DS);
    auto DII = static_cast<IRGenDebugInfoImpl *>(DI);
    if (DII->getDebugInfoFormat() == IRGenDebugInfoFormat::CodeView) {
      // In CodeView, line zero is not an artificial line location and so we
      // try to use the location of the scope.
      if (auto *LB = dyn_cast<llvm::DILexicalBlock>(Scope))
        Line = LB->getLine();
      else if (auto *SP = dyn_cast<llvm::DISubprogram>(Scope))
        Line = SP->getLine();
    }
    auto DL = llvm::DILocation::get(Scope->getContext(), Line, 0, Scope,
                                    DII->createInlinedAt(DS));
    Builder.SetCurrentDebugLocation(DL);
  }
}
/// Clear the builder's current debug location for prologue code; the base
/// class restores the previous location on destruction. No-op when DI is
/// null.
PrologueLocation::PrologueLocation(IRGenDebugInfo *DI, IRBuilder &Builder)
    : AutoRestoreLocation(DI, Builder) {
  if (DI)
    DI->clearLoc(Builder);
} | cpp | github | https://github.com/apple/swift | lib/IRGen/IRGenDebugInfo.cpp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.