code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
/// Scale `x` by a power of two: computes `x * 2^n` (the standard libm
/// `scalbn` operation, specialized for `f16`).
///
/// All the work — overflow/underflow handling, subnormal results — is done
/// by the shared generic implementation in `super::generic::scalbn`; this
/// wrapper only fixes the concrete float type.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)] pub fn scalbnf16(x: f16, n: i32) -> f16 { super::generic::scalbn(x, n) }
rust
github
https://github.com/nodejs/node
deps/crates/vendor/libm/src/math/scalbnf16.rs
"""Functions that listen for event signals and queue up emails. All triggered emails live here. """ from datetime import datetime from modularodm import Q from website import mails, settings from framework.auth import signals as auth_signals from website.project import signals as project_signals from website.conferences import signals as conference_signals @auth_signals.unconfirmed_user_created.connect def queue_no_addon_email(user): """Queue an email for user who has not connected an addon after `settings.NO_ADDON_WAIT_TIME` months of signing up for the OSF. """ mails.queue_mail( to_addr=user.username, mail=mails.NO_ADDON, send_at=datetime.utcnow() + settings.NO_ADDON_WAIT_TIME, user=user, fullname=user.fullname ) @project_signals.privacy_set_public.connect def queue_first_public_project_email(user, node, meeting_creation): """Queue and email after user has made their first non-OSF4M project public. """ if not meeting_creation: sent_mail = mails.QueuedMail.find(Q('user', 'eq', user) & Q('sent_at', 'ne', None) & Q('email_type', 'eq', mails.NEW_PUBLIC_PROJECT_TYPE)) if not sent_mail.count(): mails.queue_mail( to_addr=user.username, mail=mails.NEW_PUBLIC_PROJECT, send_at=datetime.utcnow() + settings.NEW_PUBLIC_PROJECT_WAIT_TIME, user=user, nid=node._id, fullname=user.fullname, project_title=node.title ) @conference_signals.osf4m_user_created.connect def queue_osf4m_welcome_email(user, conference, node): """Queue an email once a new user is created for OSF for Meetings""" root = (node.get_addon('osfstorage')).get_root() root_children = [child for child in root.children if child.is_file] mails.queue_mail( to_addr=user.username, mail=mails.WELCOME_OSF4M, send_at=datetime.utcnow() + settings.WELCOME_OSF4M_WAIT_TIME, user=user, conference=conference.name, fullname=user.fullname, fid=root_children[0]._id if len(root_children) else None )
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Tests\Database; use Illuminate\Database\Connection; use Illuminate\Database\Query\Expression; use Illuminate\Database\Schema\Blueprint; use Illuminate\Database\Schema\ForeignIdColumnDefinition; use Illuminate\Database\Schema\Grammars\MariaDbGrammar; use Illuminate\Database\Schema\MariaDbBuilder; use Illuminate\Tests\Database\Fixtures\Enums\Foo; use Mockery as m; use PHPUnit\Framework\TestCase; class DatabaseMariaDbSchemaGrammarTest extends TestCase { public function testBasicCreateTable() { $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->once()->with('charset')->andReturn('utf8'); $conn->shouldReceive('getConfig')->once()->with('collation')->andReturn('utf8_unicode_ci'); $conn->shouldReceive('getConfig')->once()->with('engine')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->increments('id'); $blueprint->string('email'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`id` int unsigned not null auto_increment primary key, `email` varchar(255) not null) default character set utf8 collate 'utf8_unicode_ci'", $statements[0]); $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->increments('id'); $blueprint->string('email'); $statements = $blueprint->toSql(); $this->assertCount(2, $statements); $this->assertSame([ 'alter table `users` add `id` int unsigned not null auto_increment primary key', 'alter table `users` add `email` varchar(255) not null', ], $statements); $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->andReturn(null); $conn->shouldReceive('getServerVersion')->andReturn('10.7.0'); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->uuid('id')->primary(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('create table `users` (`id` uuid 
not null, primary key (`id`))', $statements[0]); } public function testAutoIncrementStartingValue() { $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->once()->with('charset')->andReturn('utf8'); $conn->shouldReceive('getConfig')->once()->with('collation')->andReturn('utf8_unicode_ci'); $conn->shouldReceive('getConfig')->once()->with('engine')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->increments('id')->startingValue(1000); $blueprint->string('email'); $statements = $blueprint->toSql(); $this->assertCount(2, $statements); $this->assertSame("create table `users` (`id` int unsigned not null auto_increment primary key, `email` varchar(255) not null) default character set utf8 collate 'utf8_unicode_ci'", $statements[0]); $this->assertSame('alter table `users` auto_increment = 1000', $statements[1]); } public function testAddColumnsWithMultipleAutoIncrementStartingValue() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->id()->from(100); $blueprint->string('name')->from(200); $statements = $blueprint->toSql(); $this->assertEquals([ 'alter table `users` add `id` bigint unsigned not null auto_increment primary key', 'alter table `users` add `name` varchar(255) not null', 'alter table `users` auto_increment = 100', ], $statements); } public function testEngineCreateTable() { $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->once()->with('charset')->andReturn('utf8'); $conn->shouldReceive('getConfig')->once()->with('collation')->andReturn('utf8_unicode_ci'); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->increments('id'); $blueprint->string('email'); $blueprint->engine('InnoDB'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`id` int unsigned not null auto_increment primary key, `email` varchar(255) not null) default character set utf8 collate 'utf8_unicode_ci' engine 
= InnoDB", $statements[0]); $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->once()->with('charset')->andReturn('utf8'); $conn->shouldReceive('getConfig')->once()->with('collation')->andReturn('utf8_unicode_ci'); $conn->shouldReceive('getConfig')->once()->with('engine')->andReturn('InnoDB'); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->increments('id'); $blueprint->string('email'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`id` int unsigned not null auto_increment primary key, `email` varchar(255) not null) default character set utf8 collate 'utf8_unicode_ci' engine = InnoDB", $statements[0]); } public function testCharsetCollationCreateTable() { $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->once()->with('engine')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->increments('id'); $blueprint->string('email'); $blueprint->charset('utf8mb4'); $blueprint->collation('utf8mb4_unicode_ci'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`id` int unsigned not null auto_increment primary key, `email` varchar(255) not null) default character set utf8mb4 collate 'utf8mb4_unicode_ci'", $statements[0]); $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->once()->with('charset')->andReturn('utf8'); $conn->shouldReceive('getConfig')->once()->with('collation')->andReturn('utf8_unicode_ci'); $conn->shouldReceive('getConfig')->once()->with('engine')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->increments('id'); $blueprint->string('email')->charset('utf8mb4')->collation('utf8mb4_unicode_ci'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`id` int unsigned not null auto_increment primary key, `email` 
varchar(255) character set utf8mb4 collate 'utf8mb4_unicode_ci' not null) default character set utf8 collate 'utf8_unicode_ci'", $statements[0]); } public function testBasicCreateTableWithPrefix() { $conn = $this->getConnection(prefix: 'prefix_'); $conn->shouldReceive('getConfig')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->increments('id'); $blueprint->string('email'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('create table `prefix_users` (`id` int unsigned not null auto_increment primary key, `email` varchar(255) not null)', $statements[0]); } public function testCreateTemporaryTable() { $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->temporary(); $blueprint->increments('id'); $blueprint->string('email'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('create temporary table `users` (`id` int unsigned not null auto_increment primary key, `email` varchar(255) not null)', $statements[0]); } public function testDropTable() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->drop(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('drop table `users`', $statements[0]); } public function testDropTableIfExists() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dropIfExists(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('drop table if exists `users`', $statements[0]); } public function testDropColumn() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dropColumn('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` drop `foo`', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 
'users'); $blueprint->dropColumn(['foo', 'bar']); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` drop `foo`, drop `bar`', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dropColumn('foo', 'bar'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` drop `foo`, drop `bar`', $statements[0]); } public function testDropPrimary() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dropPrimary(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` drop primary key', $statements[0]); } public function testDropUnique() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dropUnique('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` drop index `foo`', $statements[0]); } public function testDropIndex() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dropIndex('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` drop index `foo`', $statements[0]); } public function testDropSpatialIndex() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->dropSpatialIndex(['coordinates']); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` drop index `geo_coordinates_spatialindex`', $statements[0]); } public function testDropForeign() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dropForeign('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` drop foreign key `foo`', $statements[0]); } public function testDropTimestamps() { $blueprint = new Blueprint($this->getConnection(), 'users'); 
$blueprint->dropTimestamps(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` drop `created_at`, drop `updated_at`', $statements[0]); } public function testDropTimestampsTz() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dropTimestampsTz(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` drop `created_at`, drop `updated_at`', $statements[0]); } public function testDropMorphs() { $blueprint = new Blueprint($this->getConnection(), 'photos'); $blueprint->dropMorphs('imageable'); $statements = $blueprint->toSql(); $this->assertCount(2, $statements); $this->assertSame('alter table `photos` drop index `photos_imageable_type_imageable_id_index`', $statements[0]); $this->assertSame('alter table `photos` drop `imageable_type`, drop `imageable_id`', $statements[1]); } public function testRenameTable() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->rename('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('rename table `users` to `foo`', $statements[0]); } public function testRenameIndex() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->renameIndex('foo', 'bar'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` rename index `foo` to `bar`', $statements[0]); } public function testAddingPrimaryKey() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->primary('foo', 'bar'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add primary key (`foo`)', $statements[0]); } public function testAddingPrimaryKeyWithAlgorithm() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->primary('foo', 'bar', 'hash'); $statements = $blueprint->toSql(); $this->assertCount(1, 
$statements); $this->assertSame('alter table `users` add primary key using hash(`foo`)', $statements[0]); } public function testAddingUniqueKey() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->unique('foo', 'bar'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add unique `bar`(`foo`)', $statements[0]); } public function testAddingIndex() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->index(['foo', 'bar'], 'baz'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add index `baz`(`foo`, `bar`)', $statements[0]); } public function testAddingIndexWithAlgorithm() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->index(['foo', 'bar'], 'baz', 'hash'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add index `baz` using hash(`foo`, `bar`)', $statements[0]); } public function testAddingFulltextIndex() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->fulltext('body'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add fulltext `users_body_fulltext`(`body`)', $statements[0]); } public function testAddingSpatialIndex() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->spatialIndex('coordinates'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add spatial index `geo_coordinates_spatialindex`(`coordinates`)', $statements[0]); } public function testAddingFluentSpatialIndex() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'point')->spatialIndex(); $statements = $blueprint->toSql(); $this->assertCount(2, $statements); $this->assertSame('alter table `geo` add spatial index 
`geo_coordinates_spatialindex`(`coordinates`)', $statements[1]); } public function testAddingRawIndex() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->rawIndex('(function(column))', 'raw_index'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add index `raw_index`((function(column)))', $statements[0]); } public function testAddingForeignKey() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->foreign('foo_id')->references('id')->on('orders'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add constraint `users_foo_id_foreign` foreign key (`foo_id`) references `orders` (`id`)', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->foreign('foo_id')->references('id')->on('orders')->cascadeOnDelete(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add constraint `users_foo_id_foreign` foreign key (`foo_id`) references `orders` (`id`) on delete cascade', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->foreign('foo_id')->references('id')->on('orders')->cascadeOnUpdate(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add constraint `users_foo_id_foreign` foreign key (`foo_id`) references `orders` (`id`) on update cascade', $statements[0]); } public function testAddingIncrementingID() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->increments('id'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `id` int unsigned not null auto_increment primary key', $statements[0]); } public function testAddingSmallIncrementingID() { $blueprint = new Blueprint($this->getConnection(), 'users'); 
$blueprint->smallIncrements('id'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `id` smallint unsigned not null auto_increment primary key', $statements[0]); } public function testAddingID() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->id(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `id` bigint unsigned not null auto_increment primary key', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->id('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` bigint unsigned not null auto_increment primary key', $statements[0]); } public function testAddingForeignID() { $blueprint = new Blueprint($this->getConnection(), 'users'); $foreignId = $blueprint->foreignId('foo'); $blueprint->foreignId('company_id')->constrained(); $blueprint->foreignId('laravel_idea_id')->constrained(); $blueprint->foreignId('team_id')->references('id')->on('teams'); $blueprint->foreignId('team_column_id')->constrained('teams'); $statements = $blueprint->toSql(); $this->assertInstanceOf(ForeignIdColumnDefinition::class, $foreignId); $this->assertSame([ 'alter table `users` add `foo` bigint unsigned not null', 'alter table `users` add `company_id` bigint unsigned not null', 'alter table `users` add constraint `users_company_id_foreign` foreign key (`company_id`) references `companies` (`id`)', 'alter table `users` add `laravel_idea_id` bigint unsigned not null', 'alter table `users` add constraint `users_laravel_idea_id_foreign` foreign key (`laravel_idea_id`) references `laravel_ideas` (`id`)', 'alter table `users` add `team_id` bigint unsigned not null', 'alter table `users` add constraint `users_team_id_foreign` foreign key (`team_id`) references `teams` (`id`)', 'alter table `users` add `team_column_id` bigint unsigned 
not null', 'alter table `users` add constraint `users_team_column_id_foreign` foreign key (`team_column_id`) references `teams` (`id`)', ], $statements); } public function testAddingForeignIdSpecifyingIndexNameInConstraint() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->foreignId('company_id')->constrained(indexName: 'my_index'); $statements = $blueprint->toSql(); $this->assertSame([ 'alter table `users` add `company_id` bigint unsigned not null', 'alter table `users` add constraint `my_index` foreign key (`company_id`) references `companies` (`id`)', ], $statements); } public function testAddingBigIncrementingID() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->bigIncrements('id'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `id` bigint unsigned not null auto_increment primary key', $statements[0]); } public function testAddingColumnInTableFirst() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->string('name')->first(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `name` varchar(255) not null first', $statements[0]); } public function testAddingColumnAfterAnotherColumn() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->string('name')->after('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `name` varchar(255) not null after `foo`', $statements[0]); } public function testAddingMultipleColumnsAfterAnotherColumn() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->after('foo', function ($blueprint) { $blueprint->string('one'); $blueprint->string('two'); }); $blueprint->string('three'); $statements = $blueprint->toSql(); $this->assertCount(3, $statements); $this->assertSame([ 'alter table `users` add `one` varchar(255) not null 
after `foo`', 'alter table `users` add `two` varchar(255) not null after `one`', 'alter table `users` add `three` varchar(255) not null', ], $statements); } public function testAddingGeneratedColumn() { $blueprint = new Blueprint($this->getConnection(), 'products'); $blueprint->integer('price'); $blueprint->integer('discounted_virtual')->virtualAs('price - 5'); $blueprint->integer('discounted_stored')->storedAs('price - 5'); $statements = $blueprint->toSql(); $this->assertCount(3, $statements); $this->assertSame([ 'alter table `products` add `price` int not null', 'alter table `products` add `discounted_virtual` int as (price - 5)', 'alter table `products` add `discounted_stored` int as (price - 5) stored', ], $statements); $blueprint = new Blueprint($this->getConnection(), 'products'); $blueprint->integer('price'); $blueprint->integer('discounted_virtual')->virtualAs('price - 5')->nullable(false); $blueprint->integer('discounted_stored')->storedAs('price - 5')->nullable(false); $statements = $blueprint->toSql(); $this->assertCount(3, $statements); $this->assertSame([ 'alter table `products` add `price` int not null', 'alter table `products` add `discounted_virtual` int as (price - 5) not null', 'alter table `products` add `discounted_stored` int as (price - 5) stored not null', ], $statements); } public function testAddingGeneratedColumnWithCharset() { $blueprint = new Blueprint($this->getConnection(), 'links'); $blueprint->string('url', 2083)->charset('ascii'); $blueprint->string('url_hash_virtual', 64)->virtualAs('sha2(url, 256)')->charset('ascii'); $blueprint->string('url_hash_stored', 64)->storedAs('sha2(url, 256)')->charset('ascii'); $statements = $blueprint->toSql(); $this->assertCount(3, $statements); $this->assertSame([ 'alter table `links` add `url` varchar(2083) character set ascii not null', 'alter table `links` add `url_hash_virtual` varchar(64) character set ascii as (sha2(url, 256))', 'alter table `links` add `url_hash_stored` varchar(64) character 
set ascii as (sha2(url, 256)) stored', ], $statements); } public function testAddingGeneratedColumnByExpression() { $blueprint = new Blueprint($this->getConnection(), 'products'); $blueprint->integer('price'); $blueprint->integer('discounted_virtual')->virtualAs(new Expression('price - 5')); $blueprint->integer('discounted_stored')->storedAs(new Expression('price - 5')); $statements = $blueprint->toSql(); $this->assertCount(3, $statements); $this->assertSame([ 'alter table `products` add `price` int not null', 'alter table `products` add `discounted_virtual` int as (price - 5)', 'alter table `products` add `discounted_stored` int as (price - 5) stored', ], $statements); } public function testAddingInvisibleColumn() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->string('secret', 64)->nullable(false)->invisible(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `secret` varchar(64) not null invisible', $statements[0]); } public function testAddingString() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->string('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` varchar(255) not null', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->string('foo', 100); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` varchar(100) not null', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->string('foo', 100)->nullable()->default('bar'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` varchar(100) null default \'bar\'', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->string('foo', 100)->nullable()->default(new Expression('CURRENT 
TIMESTAMP')); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` varchar(100) null default CURRENT TIMESTAMP', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->string('foo', 100)->nullable()->default(Foo::BAR); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` varchar(100) null default \'bar\'', $statements[0]); } public function testAddingText() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->text('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` text not null', $statements[0]); } public function testAddingBigInteger() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->bigInteger('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` bigint not null', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->bigInteger('foo', true); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` bigint not null auto_increment primary key', $statements[0]); } public function testAddingInteger() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->integer('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` int not null', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->integer('foo', true); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` int not null auto_increment primary key', $statements[0]); } public function testAddingIncrementsWithStartingValues() { $blueprint = new 
Blueprint($this->getConnection(), 'users'); $blueprint->id()->startingValue(1000); $statements = $blueprint->toSql(); $this->assertCount(2, $statements); $this->assertSame('alter table `users` add `id` bigint unsigned not null auto_increment primary key', $statements[0]); $this->assertSame('alter table `users` auto_increment = 1000', $statements[1]); } public function testAddingMediumInteger() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->mediumInteger('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` mediumint not null', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->mediumInteger('foo', true); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` mediumint not null auto_increment primary key', $statements[0]); } public function testAddingSmallInteger() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->smallInteger('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` smallint not null', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->smallInteger('foo', true); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` smallint not null auto_increment primary key', $statements[0]); } public function testAddingTinyInteger() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->tinyInteger('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` tinyint not null', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->tinyInteger('foo', true); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); 
$this->assertSame('alter table `users` add `foo` tinyint not null auto_increment primary key', $statements[0]); } public function testAddingFloat() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->float('foo', 5); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` float(5) not null', $statements[0]); } public function testAddingDouble() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->double('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` double not null', $statements[0]); } public function testAddingDecimal() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->decimal('foo', 5, 2); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` decimal(5, 2) not null', $statements[0]); } public function testAddingBoolean() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->boolean('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` tinyint(1) not null', $statements[0]); } public function testAddingEnum() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->enum('role', ['member', 'admin']); $blueprint->enum('status', Foo::cases()); $statements = $blueprint->toSql(); $this->assertCount(2, $statements); $this->assertSame('alter table `users` add `role` enum(\'member\', \'admin\') not null', $statements[0]); $this->assertSame('alter table `users` add `status` enum(\'bar\') not null', $statements[1]); } public function testAddingSet() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->set('role', ['member', 'admin']); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `role` 
set(\'member\', \'admin\') not null', $statements[0]); } public function testAddingJson() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->json('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` json not null', $statements[0]); } public function testAddingJsonb() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->jsonb('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` json not null', $statements[0]); } public function testAddingDate() { $conn = $this->getConnection(); $conn->shouldReceive('isMaria')->andReturn(true); $conn->shouldReceive('getServerVersion')->andReturn('10.3.0'); $blueprint = new Blueprint($conn, 'users'); $blueprint->date('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` date not null', $statements[0]); } public function testAddingDateWithDefaultCurrent() { $conn = $this->getConnection(); $conn->shouldReceive('isMaria')->andReturn(true); $conn->shouldReceive('getServerVersion')->andReturn('10.3.0'); $blueprint = new Blueprint($conn, 'users'); $blueprint->date('foo')->useCurrent(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` date not null default (CURDATE())', $statements[0]); } public function testAddingYear() { $conn = $this->getConnection(); $conn->shouldReceive('isMaria')->andReturn(true); $conn->shouldReceive('getServerVersion')->andReturn('10.3.0'); $blueprint = new Blueprint($conn, 'users'); $blueprint->year('birth_year'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `birth_year` year not null', $statements[0]); } public function testAddingYearWithDefaultCurrent() { $conn = $this->getConnection(); 
$conn->shouldReceive('isMaria')->andReturn(true); $conn->shouldReceive('getServerVersion')->andReturn('10.3.0'); $blueprint = new Blueprint($conn, 'users'); $blueprint->year('birth_year')->useCurrent(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `birth_year` year not null default (YEAR(CURDATE()))', $statements[0]); } public function testAddingDateTime() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dateTime('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` datetime not null', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dateTime('foo', 1); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` datetime(1) not null', $statements[0]); } public function testAddingDateTimeWithDefaultCurrent() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dateTime('foo')->useCurrent(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` datetime not null default CURRENT_TIMESTAMP', $statements[0]); } public function testAddingDateTimeWithOnUpdateCurrent() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dateTime('foo')->useCurrentOnUpdate(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` datetime not null on update CURRENT_TIMESTAMP', $statements[0]); } public function testAddingDateTimeWithDefaultCurrentAndOnUpdateCurrent() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dateTime('foo')->useCurrent()->useCurrentOnUpdate(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` datetime not null default CURRENT_TIMESTAMP 
on update CURRENT_TIMESTAMP', $statements[0]); } public function testAddingDateTimeWithDefaultCurrentOnUpdateCurrentAndPrecision() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dateTime('foo', 3)->useCurrent()->useCurrentOnUpdate(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` datetime(3) not null default CURRENT_TIMESTAMP(3) on update CURRENT_TIMESTAMP(3)', $statements[0]); } public function testAddingDateTimeTz() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dateTimeTz('foo', 1); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` datetime(1) not null', $statements[0]); $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->dateTimeTz('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` datetime not null', $statements[0]); } public function testAddingTime() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->time('created_at'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` time not null', $statements[0]); } public function testAddingTimeWithPrecision() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->time('created_at', 1); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` time(1) not null', $statements[0]); } public function testAddingTimeTz() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timeTz('created_at'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` time not null', $statements[0]); } public function testAddingTimeTzWithPrecision() { $blueprint = new 
Blueprint($this->getConnection(), 'users'); $blueprint->timeTz('created_at', 1); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` time(1) not null', $statements[0]); } public function testAddingTimestamp() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestamp('created_at'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` timestamp not null', $statements[0]); } public function testAddingTimestampWithPrecision() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestamp('created_at', 1); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` timestamp(1) not null', $statements[0]); } public function testAddingTimestampWithDefault() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestamp('created_at')->default('2015-07-22 11:43:17'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("alter table `users` add `created_at` timestamp not null default '2015-07-22 11:43:17'", $statements[0]); } public function testAddingTimestampWithDefaultCurrentSpecifyingPrecision() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestamp('created_at', 1)->useCurrent(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` timestamp(1) not null default CURRENT_TIMESTAMP(1)', $statements[0]); } public function testAddingTimestampWithOnUpdateCurrentSpecifyingPrecision() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestamp('created_at', 1)->useCurrentOnUpdate(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` timestamp(1) not null on 
update CURRENT_TIMESTAMP(1)', $statements[0]); } public function testAddingTimestampWithDefaultCurrentAndOnUpdateCurrentSpecifyingPrecision() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestamp('created_at', 1)->useCurrent()->useCurrentOnUpdate(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` timestamp(1) not null default CURRENT_TIMESTAMP(1) on update CURRENT_TIMESTAMP(1)', $statements[0]); } public function testAddingTimestampTz() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestampTz('created_at'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` timestamp not null', $statements[0]); } public function testAddingTimestampTzWithPrecision() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestampTz('created_at', 1); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `created_at` timestamp(1) not null', $statements[0]); } public function testAddingTimeStampTzWithDefault() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestampTz('created_at')->default('2015-07-22 11:43:17'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("alter table `users` add `created_at` timestamp not null default '2015-07-22 11:43:17'", $statements[0]); } public function testAddingTimestamps() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->timestamps(); $statements = $blueprint->toSql(); $this->assertCount(2, $statements); $this->assertSame([ 'alter table `users` add `created_at` timestamp null', 'alter table `users` add `updated_at` timestamp null', ], $statements); } public function testAddingTimestampsTz() { $blueprint = new Blueprint($this->getConnection(), 'users'); 
$blueprint->timestampsTz(); $statements = $blueprint->toSql(); $this->assertCount(2, $statements); $this->assertSame([ 'alter table `users` add `created_at` timestamp null', 'alter table `users` add `updated_at` timestamp null', ], $statements); } public function testAddingRememberToken() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->rememberToken(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `remember_token` varchar(100) null', $statements[0]); } public function testAddingBinary() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->binary('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` blob not null', $statements[0]); } public function testAddingUuid() { $conn = $this->getConnection(); $conn->shouldReceive('getServerVersion')->andReturn('10.7.0'); $blueprint = new Blueprint($conn, 'users'); $blueprint->uuid('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` uuid not null', $statements[0]); } public function testAddingUuidOn106() { $conn = $this->getConnection(); $conn->shouldReceive('getServerVersion')->andReturn('10.6.21'); $blueprint = new Blueprint($conn, 'users'); $blueprint->uuid('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` char(36) not null', $statements[0]); } public function testAddingUuidDefaultsColumnName() { $conn = $this->getConnection(); $conn->shouldReceive('getServerVersion')->andReturn('10.7.0'); $blueprint = new Blueprint($conn, 'users'); $blueprint->uuid(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `uuid` uuid not null', $statements[0]); } public function testAddingForeignUuid() { $conn = $this->getConnection(); 
$conn->shouldReceive('getServerVersion')->andReturn('10.7.0'); $blueprint = new Blueprint($conn, 'users'); $foreignUuid = $blueprint->foreignUuid('foo'); $blueprint->foreignUuid('company_id')->constrained(); $blueprint->foreignUuid('laravel_idea_id')->constrained(); $blueprint->foreignUuid('team_id')->references('id')->on('teams'); $blueprint->foreignUuid('team_column_id')->constrained('teams'); $statements = $blueprint->toSql(); $this->assertInstanceOf(ForeignIdColumnDefinition::class, $foreignUuid); $this->assertSame([ 'alter table `users` add `foo` uuid not null', 'alter table `users` add `company_id` uuid not null', 'alter table `users` add constraint `users_company_id_foreign` foreign key (`company_id`) references `companies` (`id`)', 'alter table `users` add `laravel_idea_id` uuid not null', 'alter table `users` add constraint `users_laravel_idea_id_foreign` foreign key (`laravel_idea_id`) references `laravel_ideas` (`id`)', 'alter table `users` add `team_id` uuid not null', 'alter table `users` add constraint `users_team_id_foreign` foreign key (`team_id`) references `teams` (`id`)', 'alter table `users` add `team_column_id` uuid not null', 'alter table `users` add constraint `users_team_column_id_foreign` foreign key (`team_column_id`) references `teams` (`id`)', ], $statements); } public function testAddingIpAddress() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->ipAddress('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` varchar(45) not null', $statements[0]); } public function testAddingIpAddressDefaultsColumnName() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->ipAddress(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `ip_address` varchar(45) not null', $statements[0]); } public function testAddingMacAddress() { $blueprint = new 
Blueprint($this->getConnection(), 'users'); $blueprint->macAddress('foo'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `foo` varchar(17) not null', $statements[0]); } public function testAddingMacAddressDefaultsColumnName() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->macAddress(); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `users` add `mac_address` varchar(17) not null', $statements[0]); } public function testAddingGeometry() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` geometry not null', $statements[0]); } public function testAddingGeography() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geography('coordinates'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` geometry ref_system_id=4326 not null', $statements[0]); } public function testAddingPoint() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'point'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` point not null', $statements[0]); } public function testAddingPointWithSrid() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'point', 4326); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` point ref_system_id=4326 not null', $statements[0]); } public function testAddingPointWithSridColumn() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'point', 4326)->after('id'); $statements 
= $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` point ref_system_id=4326 not null after `id`', $statements[0]); } public function testAddingLineString() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'linestring'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` linestring not null', $statements[0]); } public function testAddingPolygon() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'polygon'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` polygon not null', $statements[0]); } public function testAddingGeometryCollection() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'geometrycollection'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` geometrycollection not null', $statements[0]); } public function testAddingMultiPoint() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'multipoint'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` multipoint not null', $statements[0]); } public function testAddingMultiLineString() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'multilinestring'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` multilinestring not null', $statements[0]); } public function testAddingMultiPolygon() { $blueprint = new Blueprint($this->getConnection(), 'geo'); $blueprint->geometry('coordinates', 'multipolygon'); $statements = $blueprint->toSql(); 
$this->assertCount(1, $statements); $this->assertSame('alter table `geo` add `coordinates` multipolygon not null', $statements[0]); } public function testAddingComment() { $blueprint = new Blueprint($this->getConnection(), 'users'); $blueprint->string('foo')->comment("Escape ' when using words like it's"); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("alter table `users` add `foo` varchar(255) not null comment 'Escape \\' when using words like it\\'s'", $statements[0]); } public function testCreateDatabase() { $connection = $this->getConnection(); $connection->shouldReceive('getConfig')->once()->once()->with('charset')->andReturn('utf8mb4_foo'); $connection->shouldReceive('getConfig')->once()->once()->with('collation')->andReturn('utf8mb4_unicode_ci_foo'); $statement = $this->getGrammar($connection)->compileCreateDatabase('my_database_a'); $this->assertSame( 'create database `my_database_a` default character set `utf8mb4_foo` default collate `utf8mb4_unicode_ci_foo`', $statement ); $connection = $this->getConnection(); $connection->shouldReceive('getConfig')->once()->once()->with('charset')->andReturn('utf8mb4_bar'); $connection->shouldReceive('getConfig')->once()->once()->with('collation')->andReturn('utf8mb4_unicode_ci_bar'); $statement = $this->getGrammar($connection)->compileCreateDatabase('my_database_b'); $this->assertSame( 'create database `my_database_b` default character set `utf8mb4_bar` default collate `utf8mb4_unicode_ci_bar`', $statement ); } public function testCreateTableWithVirtualAsColumn() { $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->once()->with('charset')->andReturn('utf8'); $conn->shouldReceive('getConfig')->once()->with('collation')->andReturn('utf8_unicode_ci'); $conn->shouldReceive('getConfig')->once()->with('engine')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->string('my_column'); 
$blueprint->string('my_other_column')->virtualAs('my_column'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`my_column` varchar(255) not null, `my_other_column` varchar(255) as (my_column)) default character set utf8 collate 'utf8_unicode_ci'", $statements[0]); $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->string('my_json_column'); $blueprint->string('my_other_column')->virtualAsJson('my_json_column->some_attribute'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`my_json_column` varchar(255) not null, `my_other_column` varchar(255) as (json_value(`my_json_column`, '$.\"some_attribute\"')))", $statements[0]); $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->string('my_json_column'); $blueprint->string('my_other_column')->virtualAsJson('my_json_column->some_attribute->nested'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`my_json_column` varchar(255) not null, `my_other_column` varchar(255) as (json_value(`my_json_column`, '$.\"some_attribute\".\"nested\"')))", $statements[0]); } public function testCreateTableWithVirtualAsColumnWhenJsonColumnHasArrayKey() { $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->string('my_json_column')->virtualAsJson('my_json_column->foo[0][1]'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`my_json_column` varchar(255) as (json_value(`my_json_column`, '$.\"foo\"[0][1]')))", $statements[0]); } public function 
testCreateTableWithStoredAsColumn() { $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->once()->with('charset')->andReturn('utf8'); $conn->shouldReceive('getConfig')->once()->with('collation')->andReturn('utf8_unicode_ci'); $conn->shouldReceive('getConfig')->once()->with('engine')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->string('my_column'); $blueprint->string('my_other_column')->storedAs('my_column'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`my_column` varchar(255) not null, `my_other_column` varchar(255) as (my_column) stored) default character set utf8 collate 'utf8_unicode_ci'", $statements[0]); $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->string('my_json_column'); $blueprint->string('my_other_column')->storedAsJson('my_json_column->some_attribute'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`my_json_column` varchar(255) not null, `my_other_column` varchar(255) as (json_value(`my_json_column`, '$.\"some_attribute\"')) stored)", $statements[0]); $conn = $this->getConnection(); $conn->shouldReceive('getConfig')->andReturn(null); $blueprint = new Blueprint($conn, 'users'); $blueprint->create(); $blueprint->string('my_json_column'); $blueprint->string('my_other_column')->storedAsJson('my_json_column->some_attribute->nested'); $statements = $blueprint->toSql(); $this->assertCount(1, $statements); $this->assertSame("create table `users` (`my_json_column` varchar(255) not null, `my_other_column` varchar(255) as (json_value(`my_json_column`, '$.\"some_attribute\".\"nested\"')) stored)", $statements[0]); } public function testDropDatabaseIfExists() { $statement = $this->getGrammar()->compileDropDatabaseIfExists('my_database_a'); 
$this->assertSame( 'drop database if exists `my_database_a`', $statement ); $statement = $this->getGrammar()->compileDropDatabaseIfExists('my_database_b'); $this->assertSame( 'drop database if exists `my_database_b`', $statement ); } public function testDropAllTables() { $connection = $this->getConnection(); $statement = $this->getGrammar($connection)->compileDropAllTables(['alpha', 'beta', 'gamma']); $this->assertSame('drop table `alpha`, `beta`, `gamma`', $statement); } public function testDropAllViews() { $statement = $this->getGrammar()->compileDropAllViews(['alpha', 'beta', 'gamma']); $this->assertSame('drop view `alpha`, `beta`, `gamma`', $statement); } public function testGrammarsAreMacroable() { // compileReplace macro. $this->getGrammar()::macro('compileReplace', function () { return true; }); $c = $this->getGrammar()::compileReplace(); $this->assertTrue($c); } protected function getConnection( ?MariaDbGrammar $grammar = null, ?MariaDbBuilder $builder = null, string $prefix = '' ) { $connection = m::mock(Connection::class) ->shouldReceive('getTablePrefix')->andReturn($prefix) ->shouldReceive('getConfig')->with('prefix_indexes')->andReturn(null) ->getMock(); $grammar ??= $this->getGrammar($connection); $builder ??= $this->getBuilder(); return $connection ->shouldReceive('getSchemaGrammar')->andReturn($grammar) ->shouldReceive('getSchemaBuilder')->andReturn($builder) ->getMock(); } public function getGrammar(?Connection $connection = null) { return new MariaDbGrammar($connection ?? $this->getConnection()); } public function getBuilder() { return mock(MariaDbBuilder::class); } }
php
github
https://github.com/laravel/framework
tests/Database/DatabaseMariaDbSchemaGrammarTest.php
import AsyncHTTPClient extension Application.Clients.Provider { public static var http: Self { .init { $0.clients.use { $0.http.client.shared.delegating(to: $0.eventLoopGroup.next(), logger: $0.logger, byteBufferAllocator: $0.core.storage.allocator) } } } } extension Application.HTTP { public var client: Client { .init(application: self.application) } public struct Client { let application: Application public var shared: HTTPClient { self.application.locks.lock(for: Key.self).withLock { if let existing = self.application.storage[Key.self] { return existing } let new = HTTPClient( eventLoopGroupProvider: .shared(self.application.eventLoopGroup), configuration: self.configuration, backgroundActivityLogger: self.application.logger ) self.application.storage.setFirstTime(Key.self, to: new, onShutdown: { try $0.syncShutdown() }) { try await $0.shutdown() } return new } } public var configuration: HTTPClient.Configuration { get { self.application.storage[ConfigurationKey.self] ?? .init() } nonmutating set { if self.application.storage.contains(Key.self) { self.application.logger.warning("Cannot modify client configuration after client has been used.") } else { self.application.storage[ConfigurationKey.self] = newValue } } } struct Key: StorageKey, LockKey { typealias Value = HTTPClient } struct ConfigurationKey: StorageKey { typealias Value = HTTPClient.Configuration } } }
swift
github
https://github.com/vapor/vapor
Sources/Vapor/HTTP/Client/Application+HTTP+Client.swift
<?php /* * This file is part of the Symfony package. * * (c) Fabien Potencier <fabien@symfony.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Symfony\Component\DependencyInjection\Loader\Configurator; use Symfony\Component\Form\Extension\Csrf\Type\FormTypeCsrfExtension; return static function (ContainerConfigurator $container) { $container->services() ->set('form.type_extension.csrf', FormTypeCsrfExtension::class) ->args([ service('security.csrf.token_manager'), param('form.type_extension.csrf.enabled'), param('form.type_extension.csrf.field_name'), service('translator')->nullOnInvalid(), param('validator.translation_domain'), service('form.server_params'), param('form.type_extension.csrf.field_attr'), param('.form.type_extension.csrf.token_id'), ]) ->tag('form.type_extension') ; };
php
github
https://github.com/symfony/symfony
src/Symfony/Bundle/FrameworkBundle/Resources/config/form_csrf.php
<?php /* * This file is part of the Symfony package. * * (c) Fabien Potencier <fabien@symfony.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Symfony\Bridge\Twig\Tests\Extension; use PHPUnit\Framework\Attributes\DataProvider; use PHPUnit\Framework\TestCase; use Symfony\Bridge\Twig\Extension\DumpExtension; use Symfony\Component\VarDumper\Cloner\VarCloner; use Symfony\Component\VarDumper\Dumper\HtmlDumper; use Symfony\Component\VarDumper\VarDumper; use Twig\Environment; use Twig\Loader\ArrayLoader; class DumpExtensionTest extends TestCase { #[DataProvider('getDumpTags')] public function testDumpTag($template, $debug, $expectedOutput, $expectedDumped) { $extension = new DumpExtension(new VarCloner()); $twig = new Environment(new ArrayLoader(['template' => $template]), [ 'debug' => $debug, 'cache' => false, 'optimizations' => 0, ]); $twig->addExtension($extension); $dumped = null; $exception = null; $prevDumper = VarDumper::setHandler(static function ($var) use (&$dumped) { $dumped = $var; }); try { $this->assertEquals($expectedOutput, $twig->render('template')); } catch (\Exception $exception) { } VarDumper::setHandler($prevDumper); if (null !== $exception) { throw $exception; } $this->assertSame($expectedDumped, $dumped); } public static function getDumpTags() { return [ ['A{% dump %}B', true, 'AB', []], ['A{% set foo="bar"%}B{% dump %}C', true, 'ABC', ['foo' => 'bar']], ['A{% dump %}B', false, 'AB', null], ]; } #[DataProvider('getDumpArgs')] public function testDump($context, $args, $expectedOutput, $debug = true) { $extension = new DumpExtension(new VarCloner()); $twig = new Environment(new ArrayLoader(), [ 'debug' => $debug, 'cache' => false, 'optimizations' => 0, ]); array_unshift($args, $context); array_unshift($args, $twig); $dump = $extension->dump(...$args); if ($debug) { $this->assertStringStartsWith('<script>', $dump); $dump = preg_replace('/^.*?<pre/', '<pre', 
$dump); $dump = preg_replace('/sf-dump-\d+/', 'sf-dump', $dump); $dump = preg_replace('/<samp [^>]++>/', '<samp>', $dump); } $this->assertEquals($expectedOutput, $dump); } public static function getDumpArgs() { return [ [[], [], '', false], [[], [], "<pre class=sf-dump id=sf-dump data-indent-pad=\" \">[]\n</pre><script>Sfdump(\"sf-dump\")</script>\n"], [ [], [123, 456], "<pre class=sf-dump id=sf-dump data-indent-pad=\" \"><span class=sf-dump-num>123</span>\n</pre><script>Sfdump(\"sf-dump\")</script>\n" ."<pre class=sf-dump id=sf-dump data-indent-pad=\" \"><span class=sf-dump-num>456</span>\n</pre><script>Sfdump(\"sf-dump\")</script>\n", ], [ ['foo' => 'bar'], [], "<pre class=sf-dump id=sf-dump data-indent-pad=\" \"><span class=sf-dump-note>array:1</span> [<samp>\n" ." \"<span class=sf-dump-key>foo</span>\" => \"<span class=sf-dump-str title=\"3 characters\">bar</span>\"\n" ."</samp>]\n" ."</pre><script>Sfdump(\"sf-dump\")</script>\n", ], ]; } public function testCustomDumper() { $output = ''; $lineDumper = static function ($line) use (&$output) { $output .= $line; }; $dumper = new HtmlDumper($lineDumper); $dumper->setDumpHeader(''); $dumper->setDumpBoundaries( '<pre class=sf-dump-test id=%s data-indent-pad="%s">', '</pre><script>Sfdump("%s")</script>' ); $extension = new DumpExtension(new VarCloner(), $dumper); $twig = new Environment(new ArrayLoader(), [ 'debug' => true, 'cache' => false, 'optimizations' => 0, ]); $dump = $extension->dump($twig, [], 'foo'); $dump = preg_replace('/sf-dump-\d+/', 'sf-dump', $dump); $this->assertEquals( '<pre class=sf-dump-test id=sf-dump data-indent-pad=" ">"'. "<span class=sf-dump-str title=\"3 characters\">foo</span>\"\n". "</pre><script>Sfdump(\"sf-dump\")</script>\n", $dump, 'Custom dumper should be used to dump data.' ); $this->assertSame('', $output, 'Dumper output should be ignored.'); } }
php
github
https://github.com/symfony/symfony
src/Symfony/Bridge/Twig/Tests/Extension/DumpExtensionTest.php
from __future__ import annotations import importlib import sys from typing import ( TYPE_CHECKING, Literal, overload, ) import warnings from pandas.util._exceptions import find_stack_level from pandas.util.version import Version if TYPE_CHECKING: import types # Update install.rst, actions-311-minimum_versions.yaml, # deps_minimum.toml & pyproject.toml when updating versions! VERSIONS = { "adbc-driver-postgresql": "1.2.0", "adbc-driver-sqlite": "1.2.0", "bs4": "4.12.3", "bottleneck": "1.4.2", "fastparquet": "2024.11.0", "fsspec": "2024.10.0", "html5lib": "1.1", "hypothesis": "6.116.0", "gcsfs": "2024.10.0", "jinja2": "3.1.5", "lxml.etree": "5.3.0", "matplotlib": "3.9.3", "numba": "0.60.0", "numexpr": "2.10.2", "odfpy": "1.4.1", "openpyxl": "3.1.5", "psycopg2": "2.9.10", # (dt dec pq3 ext lo64) "pymysql": "1.1.1", "pyarrow": "13.0.0", "pyiceberg": "0.8.1", "pyreadstat": "1.2.8", "pytest": "8.3.4", "python-calamine": "0.3.0", "pytz": "2024.2", "pyxlsb": "1.0.10", "s3fs": "2024.10.0", "scipy": "1.14.1", "sqlalchemy": "2.0.36", "tables": "3.10.1", "tabulate": "0.9.0", "xarray": "2024.10.0", "xlrd": "2.0.1", "xlsxwriter": "3.2.0", "zstandard": "0.23.0", "qtpy": "2.4.2", "pyqt5": "5.15.9", } # A mapping from import name to package name (on PyPI) for packages where # these two names are different. 
INSTALL_MAPPING = { "bs4": "beautifulsoup4", "bottleneck": "Bottleneck", "jinja2": "Jinja2", "lxml.etree": "lxml", "odf": "odfpy", "python_calamine": "python-calamine", "sqlalchemy": "SQLAlchemy", "tables": "pytables", } def get_version(module: types.ModuleType) -> str: version = getattr(module, "__version__", None) if version is None: raise ImportError(f"Can't determine version for {module.__name__}") if module.__name__ == "psycopg2": # psycopg2 appends " (dt dec pq3 ext lo64)" to it's version version = version.split()[0] return version @overload def import_optional_dependency( name: str, extra: str = ..., min_version: str | None = ..., *, errors: Literal["raise"] = ..., ) -> types.ModuleType: ... @overload def import_optional_dependency( name: str, extra: str = ..., min_version: str | None = ..., *, errors: Literal["warn", "ignore"], ) -> types.ModuleType | None: ... def import_optional_dependency( name: str, extra: str = "", min_version: str | None = None, *, errors: Literal["raise", "warn", "ignore"] = "raise", ) -> types.ModuleType | None: """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. 
Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'`` or ``'ignore'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"`Import {install_name}` failed. {extra} " f"Use pip or conda to install the {install_name} package." ) try: module = importlib.import_module(name) except ImportError as err: if errors == "raise": raise ImportError(msg) from err return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) else: return None return module
python
github
https://github.com/pandas-dev/pandas
pandas/compat/_optional.py
"""SCons.Defaults Builders and other things for the local site. Here's where we'll duplicate the functionality of autoconf until we move it into the installation procedure or use something like qmconf. The code that reads the registry to find MSVC components was borrowed from distutils.msvccompiler. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # from __future__ import division __revision__ = "src/engine/SCons/Defaults.py 5134 2010/08/16 23:02:40 bdeegan" import os import errno import shutil import stat import time import sys import SCons.Action import SCons.Builder import SCons.CacheDir import SCons.Environment import SCons.PathList import SCons.Subst import SCons.Tool # A placeholder for a default Environment (for fetching source files # from source code management systems and the like). 
This must be # initialized later, after the top-level directory is set by the calling # interface. _default_env = None # Lazily instantiate the default environment so the overhead of creating # it doesn't apply when it's not needed. def _fetch_DefaultEnvironment(*args, **kw): """ Returns the already-created default construction environment. """ global _default_env return _default_env def DefaultEnvironment(*args, **kw): """ Initial public entry point for creating the default construction Environment. After creating the environment, we overwrite our name (DefaultEnvironment) with the _fetch_DefaultEnvironment() function, which more efficiently returns the initialized default construction environment without checking for its existence. (This function still exists with its _default_check because someone else (*cough* Script/__init__.py *cough*) may keep a reference to this function. So we can't use the fully functional idiom of having the name originally be a something that *only* creates the construction environment and then overwrites the name.) """ global _default_env if not _default_env: import SCons.Util _default_env = SCons.Environment.Environment(*args, **kw) if SCons.Util.md5: _default_env.Decider('MD5') else: _default_env.Decider('timestamp-match') global DefaultEnvironment DefaultEnvironment = _fetch_DefaultEnvironment _default_env._CacheDir_path = None return _default_env # Emitters for setting the shared attribute on object files, # and an action for checking that all of the source files # going into a shared library are, in fact, shared. 
def StaticObjectEmitter(target, source, env): for tgt in target: tgt.attributes.shared = None return (target, source) def SharedObjectEmitter(target, source, env): for tgt in target: tgt.attributes.shared = 1 return (target, source) def SharedFlagChecker(source, target, env): same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME') if same == '0' or same == '' or same == 'False': for src in source: try: shared = src.attributes.shared except AttributeError: shared = None if not shared: raise SCons.Errors.UserError("Source file: %s is static and is not compatible with shared target: %s" % (src, target[0])) SharedCheck = SCons.Action.Action(SharedFlagChecker, None) # Some people were using these variable name before we made # SourceFileScanner part of the public interface. Don't break their # SConscript files until we've given them some fair warning and a # transition period. CScan = SCons.Tool.CScanner DScan = SCons.Tool.DScanner LaTeXScan = SCons.Tool.LaTeXScanner ObjSourceScan = SCons.Tool.SourceFileScanner ProgScan = SCons.Tool.ProgramScanner # These aren't really tool scanners, so they don't quite belong with # the rest of those in Tool/__init__.py, but I'm not sure where else # they should go. Leave them here for now. import SCons.Scanner.Dir DirScanner = SCons.Scanner.Dir.DirScanner() DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner() # Actions for common languages. 
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR") ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR") CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR") ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR") ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR") ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR") LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR") ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR") LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR") # Common tasks that we allow users to perform in platform-independent # ways by creating ActionFactory instances. ActionFactory = SCons.Action.ActionFactory def get_paths_str(dest): # If dest is a list, we need to manually call str() on each element if SCons.Util.is_List(dest): elem_strs = [] for element in dest: elem_strs.append('"' + str(element) + '"') return '[' + ', '.join(elem_strs) + ']' else: return '"' + str(dest) + '"' def chmod_func(dest, mode): SCons.Node.FS.invalidate_node_memos(dest) if not SCons.Util.is_List(dest): dest = [dest] for element in dest: os.chmod(str(element), mode) def chmod_strfunc(dest, mode): return 'Chmod(%s, 0%o)' % (get_paths_str(dest), mode) Chmod = ActionFactory(chmod_func, chmod_strfunc) def copy_func(dest, src): SCons.Node.FS.invalidate_node_memos(dest) if SCons.Util.is_List(src) and os.path.isdir(dest): for file in src: shutil.copy2(file, dest) return 0 elif os.path.isfile(src): return shutil.copy2(src, dest) else: return shutil.copytree(src, dest, 1) Copy = ActionFactory(copy_func, lambda dest, src: 'Copy("%s", "%s")' % (dest, src), convert=str) def delete_func(dest, must_exist=0): SCons.Node.FS.invalidate_node_memos(dest) if not SCons.Util.is_List(dest): dest = [dest] for entry in dest: entry = str(entry) if not must_exist and not os.path.exists(entry): continue if not os.path.exists(entry) or os.path.isfile(entry): os.unlink(entry) continue else: shutil.rmtree(entry, 1) continue def 
delete_strfunc(dest, must_exist=0): return 'Delete(%s)' % get_paths_str(dest) Delete = ActionFactory(delete_func, delete_strfunc) def mkdir_func(dest): SCons.Node.FS.invalidate_node_memos(dest) if not SCons.Util.is_List(dest): dest = [dest] for entry in dest: try: os.makedirs(str(entry)) except os.error, e: p = str(entry) if (e.args[0] == errno.EEXIST or (sys.platform=='win32' and e.args[0]==183)) \ and os.path.isdir(str(entry)): pass # not an error if already exists else: raise Mkdir = ActionFactory(mkdir_func, lambda dir: 'Mkdir(%s)' % get_paths_str(dir)) def move_func(dest, src): SCons.Node.FS.invalidate_node_memos(dest) SCons.Node.FS.invalidate_node_memos(src) shutil.move(src, dest) Move = ActionFactory(move_func, lambda dest, src: 'Move("%s", "%s")' % (dest, src), convert=str) def touch_func(dest): SCons.Node.FS.invalidate_node_memos(dest) if not SCons.Util.is_List(dest): dest = [dest] for file in dest: file = str(file) mtime = int(time.time()) if os.path.exists(file): atime = os.path.getatime(file) else: open(file, 'w') atime = mtime os.utime(file, (atime, mtime)) Touch = ActionFactory(touch_func, lambda file: 'Touch(%s)' % get_paths_str(file)) # Internal utility functions def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None): """ Creates a new list from 'list' by first interpolating each element in the list using the 'env' dictionary and then calling f on the list, and finally calling _concat_ixes to concatenate 'prefix' and 'suffix' onto each element of the list. """ if not list: return list l = f(SCons.PathList.PathList(list).subst_path(env, target, source)) if l is not None: list = l return _concat_ixes(prefix, list, suffix, env) def _concat_ixes(prefix, list, suffix, env): """ Creates a new list from 'list' by concatenating the 'prefix' and 'suffix' arguments onto each element of the list. 
A trailing space on 'prefix' or leading space on 'suffix' will cause them to be put into separate list elements rather than being concatenated. """ result = [] # ensure that prefix and suffix are strings prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW)) suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW)) for x in list: if isinstance(x, SCons.Node.FS.File): result.append(x) continue x = str(x) if x: if prefix: if prefix[-1] == ' ': result.append(prefix[:-1]) elif x[:len(prefix)] != prefix: x = prefix + x result.append(x) if suffix: if suffix[0] == ' ': result.append(suffix[1:]) elif x[-len(suffix):] != suffix: result[-1] = result[-1]+suffix return result def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None): """ This is a wrapper around _concat()/_concat_ixes() that checks for the existence of prefixes or suffixes on list items and strips them where it finds them. This is used by tools (like the GNU linker) that need to turn something like 'libfoo.a' into '-lfoo'. """ if not itms: return itms if not callable(c): env_c = env['_concat'] if env_c != _concat and callable(env_c): # There's a custom _concat() method in the construction # environment, and we've allowed people to set that in # the past (see test/custom-concat.py), so preserve the # backwards compatibility. 
c = env_c else: c = _concat_ixes stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes))) stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes))) stripped = [] for l in SCons.PathList.PathList(itms).subst_path(env, None, None): if isinstance(l, SCons.Node.FS.File): stripped.append(l) continue if not SCons.Util.is_String(l): l = str(l) for stripprefix in stripprefixes: lsp = len(stripprefix) if l[:lsp] == stripprefix: l = l[lsp:] # Do not strip more than one prefix break for stripsuffix in stripsuffixes: lss = len(stripsuffix) if l[-lss:] == stripsuffix: l = l[:-lss] # Do not strip more than one suffix break stripped.append(l) return c(prefix, stripped, suffix, env) def processDefines(defs): """process defines, resolving strings, lists, dictionaries, into a list of strings """ if SCons.Util.is_List(defs): l = [] for d in defs: if SCons.Util.is_List(d) or isinstance(d, tuple): l.append(str(d[0]) + '=' + str(d[1])) else: l.append(str(d)) elif SCons.Util.is_Dict(defs): # The items in a dictionary are stored in random order, but # if the order of the command-line options changes from # invocation to invocation, then the signature of the command # line will change and we'll get random unnecessary rebuilds. # Consequently, we have to sort the keys to ensure a # consistent order... l = [] for k,v in sorted(defs.items()): if v is None: l.append(str(k)) else: l.append(str(k) + '=' + str(v)) else: l = [str(defs)] return l def _defines(prefix, defs, suffix, env, c=_concat_ixes): """A wrapper around _concat_ixes that turns a list or string into a list of C preprocessor command-line definitions. """ return c(prefix, env.subst_path(processDefines(defs)), suffix, env) class NullCmdGenerator(object): """This is a callable class that can be used in place of other command generators if you don't want them to do anything. The __call__ method for this class simply returns the thing you instantiated it with. 
Example usage: env["DO_NOTHING"] = NullCmdGenerator env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}" """ def __init__(self, cmd): self.cmd = cmd def __call__(self, target, source, env, for_signature=None): return self.cmd class Variable_Method_Caller(object): """A class for finding a construction variable on the stack and calling one of its methods. We use this to support "construction variables" in our string eval()s that actually stand in for methods--specifically, use of "RDirs" in call to _concat that should actually execute the "TARGET.RDirs" method. (We used to support this by creating a little "build dictionary" that mapped RDirs to the method, but this got in the way of Memoizing construction environments, because we had to create new environment objects to hold the variables.) """ def __init__(self, variable, method): self.variable = variable self.method = method def __call__(self, *args, **kw): try: 1//0 except ZeroDivisionError: # Don't start iterating with the current stack-frame to # prevent creating reference cycles (f_back is safe). 
frame = sys.exc_info()[2].tb_frame.f_back variable = self.variable while frame: if variable in frame.f_locals: v = frame.f_locals[variable] if v: method = getattr(v, self.method) return method(*args, **kw) frame = frame.f_back return None ConstructionEnvironment = { 'BUILDERS' : {}, 'SCANNERS' : [], 'CONFIGUREDIR' : '#/.sconf_temp', 'CONFIGURELOG' : '#/config.log', 'CPPSUFFIXES' : SCons.Tool.CSuffixes, 'DSUFFIXES' : SCons.Tool.DSuffixes, 'ENV' : {}, 'IDLSUFFIXES' : SCons.Tool.IDLSuffixes, # 'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes, # moved to the TeX tools generate functions '_concat' : _concat, '_defines' : _defines, '_stripixes' : _stripixes, '_LIBFLAGS' : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}', '_LIBDIRFLAGS' : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)', '_CPPINCFLAGS' : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)', '_CPPDEFFLAGS' : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}', 'TEMPFILE' : NullCmdGenerator, 'Dir' : Variable_Method_Caller('TARGET', 'Dir'), 'Dirs' : Variable_Method_Caller('TARGET', 'Dirs'), 'File' : Variable_Method_Caller('TARGET', 'File'), 'RDirs' : Variable_Method_Caller('TARGET', 'RDirs'), } # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
unknown
codeparrot/codeparrot-clean
import datetime import sys from types import MethodType from datashape import dshape import pandas as pd import pandas.util.testing as tm import pytest import numpy as np from odo import into, append from odo.backends.csv import CSV from blaze import discover, transform from blaze.compatibility import pickle from blaze.expr import symbol from blaze.interactive import Data, compute, concrete_head, expr_repr, to_html from blaze.utils import tmpfile, example data = (('Alice', 100), ('Bob', 200)) L = [[1, 'Alice', 100], [2, 'Bob', -200], [3, 'Charlie', 300], [4, 'Denis', 400], [5, 'Edith', -500]] t = Data(data, fields=['name', 'amount']) x = np.ones((2, 2)) def test_table_raises_on_inconsistent_inputs(): with pytest.raises(ValueError): t = Data(data, schema='{name: string, amount: float32}', dshape=dshape("{name: string, amount: float32}")) def test_resources(): assert t._resources() == {t: t.data} def test_resources_fail(): t = symbol('t', 'var * {x: int, y: int}') d = t[t['x'] > 100] with pytest.raises(ValueError): compute(d) def test_compute_on_Data_gives_back_data(): assert compute(Data([1, 2, 3])) == [1, 2, 3] def test_len(): assert len(t) == 2 assert len(t.name) == 2 def test_compute(): assert list(compute(t['amount'] + 1)) == [101, 201] def test_create_with_schema(): t = Data(data, schema='{name: string, amount: float32}') assert t.schema == dshape('{name: string, amount: float32}') def test_create_with_raw_data(): t = Data(data, fields=['name', 'amount']) assert t.schema == dshape('{name: string, amount: int64}') assert t.name assert t.data == data def test_repr(): result = expr_repr(t['name']) print(result) assert isinstance(result, str) assert 'Alice' in result assert 'Bob' in result assert '...' 
not in result result = expr_repr(t['amount'] + 1) print(result) assert '101' in result t2 = Data(tuple((i, i**2) for i in range(100)), fields=['x', 'y']) assert t2.dshape == dshape('100 * {x: int64, y: int64}') result = expr_repr(t2) print(result) assert len(result.split('\n')) < 20 assert '...' in result def test_str_does_not_repr(): # see GH issue #1240. d = Data([('aa', 1), ('b', 2)], name="ZZZ", dshape='2 * {a: string, b: int64}') expr = transform(d, c=d.a.strlen() + d.b) assert str( expr) == "Merge(_child=ZZZ, children=(ZZZ, label(strlen(_child=ZZZ.a) + ZZZ.b, 'c')))" def test_repr_of_scalar(): assert repr(t.amount.sum()) == '300' def test_mutable_backed_repr(): mutable_backed_table = Data([[0]], fields=['col1']) repr(mutable_backed_table) def test_dataframe_backed_repr(): df = pd.DataFrame(data=[0], columns=['col1']) dataframe_backed_table = Data(df) repr(dataframe_backed_table) def test_dataframe_backed_repr_complex(): df = pd.DataFrame([(1, 'Alice', 100), (2, 'Bob', -200), (3, 'Charlie', 300), (4, 'Denis', 400), (5, 'Edith', -500)], columns=['id', 'name', 'balance']) t = Data(df) repr(t[t['balance'] < 0]) def test_repr_html_on_no_resources_symbol(): t = symbol('t', '5 * {id: int, name: string, balance: int}') assert to_html(t) == 't' def test_expr_repr_empty(): s = repr(t[t.amount > 1e9]) assert isinstance(s, str) assert 'amount' in s def test_to_html(): s = to_html(t) assert s assert 'Alice' in s assert '<table' in s assert to_html(1) == '1' assert to_html(t.count()) == '2' def test_to_html_on_arrays(): s = to_html(Data(np.ones((2, 2)))) assert '1' in s assert 'br>' in s def test_repr_html(): assert '<table' in t._repr_html_() assert '<table' in t.name._repr_html_() def test_into(): assert into(list, t) == into(list, data) def test_serialization(): import pickle t2 = pickle.loads(pickle.dumps(t)) assert t.schema == t2.schema assert t._name == t2._name def test_table_resource(): with tmpfile('csv') as filename: ds = dshape('var * {a: int, b: int}') csv = 
CSV(filename) append(csv, [[1, 2], [10, 20]], dshape=ds) t = Data(filename) assert isinstance(t.data, CSV) assert into(list, compute(t)) == into(list, csv) def test_concretehead_failure(): t = symbol('t', 'var * {x:int, y:int}') d = t[t['x'] > 100] with pytest.raises(ValueError): concrete_head(d) def test_into_np_ndarray_column(): t = Data(L, fields=['id', 'name', 'balance']) expr = t[t.balance < 0].name colarray = into(np.ndarray, expr) assert len(list(compute(expr))) == len(colarray) def test_into_nd_array_selection(): t = Data(L, fields=['id', 'name', 'balance']) expr = t[t['balance'] < 0] selarray = into(np.ndarray, expr) assert len(list(compute(expr))) == len(selarray) def test_into_nd_array_column_failure(): tble = Data(L, fields=['id', 'name', 'balance']) expr = tble[tble['balance'] < 0] colarray = into(np.ndarray, expr) assert len(list(compute(expr))) == len(colarray) def test_Data_attribute_repr(): t = Data(CSV(example('accounts-datetimes.csv'))) result = t.when.day expected = pd.DataFrame({'when_day': [1, 2, 3, 4, 5]}) assert repr(result) == repr(expected) def test_can_trivially_create_csv_Data(): Data(example('iris.csv')) # in context with Data(example('iris.csv')) as d: assert d is not None def test_can_trivially_create_csv_Data_with_unicode(): if sys.version[0] == '2': assert isinstance(Data(example(u'iris.csv')).data, CSV) def test_can_trivially_create_sqlite_table(): pytest.importorskip('sqlalchemy') Data('sqlite:///'+example('iris.db')+'::iris') # in context with Data('sqlite:///'+example('iris.db')+'::iris') as d: assert d is not None @pytest.mark.xfail(sys.platform != 'darwin', reason="h5py/pytables mismatch") @pytest.mark.skipif(sys.version_info[:2] == (3, 4) and sys.platform == 'win32', reason='PyTables + Windows + Python 3.4 crashes') def test_can_trivially_create_pytables(): pytest.importorskip('tables') with Data(example('accounts.h5')+'::/accounts') as d: assert d is not None def test_data_passes_kwargs_to_resource(): assert 
Data(example('iris.csv'), encoding='ascii').data.encoding == 'ascii' def test_data_on_iterator_refies_data(): data = [1, 2, 3] d = Data(iter(data)) assert into(list, d) == data assert into(list, d) == data # in context with Data(iter(data)) as d: assert d is not None def test_Data_on_json_is_concrete(): d = Data(example('accounts-streaming.json')) assert compute(d.amount.sum()) == 100 - 200 + 300 + 400 - 500 assert compute(d.amount.sum()) == 100 - 200 + 300 + 400 - 500 def test_repr_on_nd_array_doesnt_err(): d = Data(np.ones((2, 2, 2))) repr(d + 1) def test_generator_reprs_concretely(): x = [1, 2, 3, 4, 5, 6] d = Data(x) expr = d[d > 2] + 1 assert '4' in repr(expr) def test_incompatible_types(): d = Data(pd.DataFrame(L, columns=['id', 'name', 'amount'])) with pytest.raises(ValueError): d.id == 'foo' result = compute(d.id == 3) expected = pd.Series([False, False, True, False, False], name='id') tm.assert_series_equal(result, expected) def test___array__(): x = np.ones(4) d = Data(x) assert (np.array(d + 1) == x + 1).all() d = Data(x[:2]) x[2:] = d + 1 assert x.tolist() == [1, 1, 2, 2] def test_python_scalar_protocols(): d = Data(1) assert int(d + 1) == 2 assert float(d + 1.0) == 2.0 assert bool(d > 0) is True assert complex(d + 1.0j) == 1 + 1.0j def test_iter(): x = np.ones(4) d = Data(x) assert list(d + 1) == [2, 2, 2, 2] @pytest.mark.xfail( reason="DataFrame constructor doesn't yet support __array__" ) def test_DataFrame(): x = np.array([(1, 2), (1., 2.)], dtype=[('a', 'i4'), ('b', 'f4')]) d = Data(x) assert isinstance(pd.DataFrame(d), pd.DataFrame) def test_head_compute(): data = tm.makeMixedDataFrame() t = symbol('t', discover(data)) db = into('sqlite:///:memory:::t', data, dshape=t.dshape) n = 2 d = Data(db) # skip the header and the ... at the end of the repr expr = d.head(n) s = repr(expr) assert '...' 
not in s result = s.split('\n')[1:] assert len(result) == n def test_scalar_sql_compute(): t = into('sqlite:///:memory:::t', data, dshape=dshape('var * {name: string, amount: int}')) d = Data(t) assert repr(d.amount.sum()) == '300' def test_no_name_for_simple_data(): d = Data([1, 2, 3]) assert repr(d) == ' \n0 1\n1 2\n2 3' assert not d._name d = Data(1) assert not d._name assert repr(d) == '1' def test_coerce_date_and_datetime(): x = datetime.datetime.now().date() d = Data(x) assert repr(d) == repr(x) x = pd.Timestamp.now() d = Data(x) assert repr(d) == repr(x) x = np.nan d = Data(x, dshape='datetime') assert repr(d) == repr(pd.NaT) x = float('nan') d = Data(x, dshape='datetime') assert repr(d) == repr(pd.NaT) def test_highly_nested_repr(): data = [[0, [[1, 2], [3]], 'abc']] d = Data(data) assert 'abc' in repr(d.head()) def test_asarray_fails_on_different_column_names(): vs = {'first': [2., 5., 3.], 'second': [4., 1., 4.], 'third': [6., 4., 3.]} df = pd.DataFrame(vs) with pytest.raises(ValueError): Data(df, fields=list('abc')) def test_functions_as_bound_methods(): """ Test that all functions on an InteractiveSymbol are instance methods of that object. """ # Filter out __class__ and friends that are special, these can be # callables without being instance methods. callable_attrs = filter( callable, (getattr(t, a, None) for a in dir(t) if not a.startswith('__')), ) for attr in callable_attrs: assert isinstance(attr, MethodType) # Make sure this is bound to the correct object. 
assert attr.__self__ is t def test_all_string_infer_header(): data = """x,tl,z Be careful driving.,hy,en Be careful.,hy,en Can you translate this for me?,hy,en Chicago is very different from Boston.,hy,en Don't worry.,hy,en""" with tmpfile('.csv') as fn: with open(fn, 'w') as f: f.write(data) data = Data(fn, has_header=True) assert data.data.has_header assert data.fields == ['x', 'tl', 'z'] def test_csv_with_trailing_commas(): with tmpfile('.csv') as fn: with open(fn, 'wt') as f: # note the trailing space in the header f.write('a,b,c, \n1, 2, 3, ') csv = CSV(fn) assert repr(Data(fn)) assert discover(csv).measure.names == [ 'a', 'b', 'c', '' ] with tmpfile('.csv') as fn: with open(fn, 'wt') as f: f.write('a,b,c,\n1, 2, 3, ') # NO trailing space in the header csv = CSV(fn) assert repr(Data(fn)) assert discover(csv).measure.names == [ 'a', 'b', 'c', 'Unnamed: 3' ] def test_pickle_roundtrip(): ds = Data(1) assert ds.isidentical(pickle.loads(pickle.dumps(ds))) assert (ds + 1).isidentical(pickle.loads(pickle.dumps(ds + 1))) es = Data(np.array([1, 2, 3])) assert es.isidentical(pickle.loads(pickle.dumps(es))) assert (es + 1).isidentical(pickle.loads(pickle.dumps(es + 1))) def test_nameless_data(): data = [('a', 1)] assert repr(data) in repr(Data(data)) def test_partially_bound_expr(): df = pd.DataFrame([(1, 'Alice', 100), (2, 'Bob', -200), (3, 'Charlie', 300), (4, 'Denis', 400), (5, 'Edith', -500)], columns=['id', 'name', 'balance']) data = Data(df, name='data') a = symbol('a', 'int') expr = data.name[data.balance > a] assert repr(expr) == 'data[data.balance > a].name'
unknown
codeparrot/codeparrot-clean
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vultr import ( "github.com/prometheus/prometheus/discovery" ) var _ discovery.DiscovererMetrics = (*vultrMetrics)(nil) type vultrMetrics struct { refreshMetrics discovery.RefreshMetricsInstantiator } // Register implements discovery.DiscovererMetrics. func (*vultrMetrics) Register() error { return nil } // Unregister implements discovery.DiscovererMetrics. func (*vultrMetrics) Unregister() {}
go
github
https://github.com/prometheus/prometheus
discovery/vultr/metrics.go
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file import os,re,traceback,sys try: import threading except ImportError: pass else: wlock=threading.Lock() class sync_stream(object): def __init__(self,stream): self.stream=stream self.encoding=self.stream.encoding def write(self,txt): try: wlock.acquire() self.stream.write(txt) self.stream.flush() finally: wlock.release() def fileno(self): return self.stream.fileno() def flush(self): self.stream.flush() def isatty(self): return self.stream.isatty() _nocolor=os.environ.get('NOCOLOR','no')not in('no','0','false') try: if not _nocolor: import waflib.ansiterm except ImportError: pass if not os.environ.get('NOSYNC',False): if id(sys.stdout)==id(sys.__stdout__): sys.stdout=sync_stream(sys.stdout) sys.stderr=sync_stream(sys.stderr) import logging LOG_FORMAT="%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s" HOUR_FORMAT="%H:%M:%S" zones='' verbose=0 colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',} got_tty=not os.environ.get('TERM','dumb')in['dumb','emacs'] if got_tty: try: got_tty=sys.stderr.isatty()and sys.stdout.isatty() except AttributeError: got_tty=False if(not got_tty and os.environ.get('TERM','dumb')!='msys')or _nocolor: colors_lst['USE']=False def get_term_cols(): return 80 try: import struct,fcntl,termios except ImportError: pass else: if got_tty: def get_term_cols_real(): dummy_lines,cols=struct.unpack("HHHH",fcntl.ioctl(sys.stderr.fileno(),termios.TIOCGWINSZ,struct.pack("HHHH",0,0,0,0)))[:2] return cols try: get_term_cols_real() except Exception: pass else: get_term_cols=get_term_cols_real get_term_cols.__doc__=""" Get the console width in characters. 
:return: the number of characters per line :rtype: int """ def get_color(cl): if not colors_lst['USE']:return'' return colors_lst.get(cl,'') class color_dict(object): def __getattr__(self,a): return get_color(a) def __call__(self,a): return get_color(a) colors=color_dict() re_log=re.compile(r'(\w+): (.*)',re.M) class log_filter(logging.Filter): def __init__(self,name=None): pass def filter(self,rec): rec.c1=colors.PINK rec.c2=colors.NORMAL rec.zone=rec.module if rec.levelno>=logging.INFO: if rec.levelno>=logging.ERROR: rec.c1=colors.RED elif rec.levelno>=logging.WARNING: rec.c1=colors.YELLOW else: rec.c1=colors.GREEN return True m=re_log.match(rec.msg) if m: rec.zone=m.group(1) rec.msg=m.group(2) if zones: return getattr(rec,'zone','')in zones or'*'in zones elif not verbose>2: return False return True class formatter(logging.Formatter): def __init__(self): logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT) def format(self,rec): if rec.levelno>=logging.WARNING or rec.levelno==logging.INFO: try: msg=rec.msg.decode('utf-8') except Exception: msg=rec.msg return'%s%s%s'%(rec.c1,msg,rec.c2) return logging.Formatter.format(self,rec) log=None def debug(*k,**kw): if verbose: k=list(k) k[0]=k[0].replace('\n',' ') global log log.debug(*k,**kw) def error(*k,**kw): global log log.error(*k,**kw) if verbose>2: st=traceback.extract_stack() if st: st=st[:-1] buf=[] for filename,lineno,name,line in st: buf.append(' File "%s", line %d, in %s'%(filename,lineno,name)) if line: buf.append(' %s'%line.strip()) if buf:log.error("\n".join(buf)) def warn(*k,**kw): global log log.warn(*k,**kw) def info(*k,**kw): global log log.info(*k,**kw) def init_log(): global log log=logging.getLogger('waflib') log.handlers=[] log.filters=[] hdlr=logging.StreamHandler() hdlr.setFormatter(formatter()) log.addHandler(hdlr) log.addFilter(log_filter()) log.setLevel(logging.DEBUG) def make_logger(path,name): logger=logging.getLogger(name) hdlr=logging.FileHandler(path,'w') 
formatter=logging.Formatter('%(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG) return logger def make_mem_logger(name,to_log,size=10000): from logging.handlers import MemoryHandler logger=logging.getLogger(name) hdlr=MemoryHandler(size,target=to_log) formatter=logging.Formatter('%(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.memhandler=hdlr logger.setLevel(logging.DEBUG) return logger def pprint(col,str,label='',sep='\n'): sys.stderr.write("%s%s%s %s%s"%(colors(col),str,colors.NORMAL,label,sep))
unknown
codeparrot/codeparrot-clean
import re

from tkinter import *
from idlelib import macosxSupport


class ScrolledList:
    """A Listbox with an attached vertical scrollbar, keyboard navigation
    and a right-click context menu.

    Subclasses customize behavior by overriding fill_menu(), on_select()
    and on_double().
    """

    # Placeholder text shown while the list is empty.
    default = "(None)"

    def __init__(self, master, **options):
        # Create top frame, with scrollbar and listbox.
        self.master = master
        self.frame = frame = Frame(master)
        self.frame.pack(fill="both", expand=1)
        self.vbar = vbar = Scrollbar(frame, name="vbar")
        self.vbar.pack(side="right", fill="y")
        self.listbox = listbox = Listbox(frame, exportselection=0,
                                         background="white")
        if options:
            listbox.configure(options)
        listbox.pack(expand=1, fill="both")
        # Tie listbox and scrollbar together.
        vbar["command"] = listbox.yview
        listbox["yscrollcommand"] = vbar.set
        # Bind events to the list box.
        listbox.bind("<ButtonRelease-1>", self.click_event)
        listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
        if macosxSupport.isAquaTk():
            # Aqua uses button 2 / Control-click for context menus.
            listbox.bind("<ButtonPress-2>", self.popup_event)
            listbox.bind("<Control-Button-1>", self.popup_event)
        else:
            listbox.bind("<ButtonPress-3>", self.popup_event)
        listbox.bind("<Key-Up>", self.up_event)
        listbox.bind("<Key-Down>", self.down_event)
        # Mark as empty.
        self.clear()

    def close(self):
        """Destroy the widget tree."""
        self.frame.destroy()

    def clear(self):
        """Remove all items and show the placeholder entry."""
        self.listbox.delete(0, "end")
        self.empty = 1
        self.listbox.insert("end", self.default)

    def append(self, item):
        """Append str(item), replacing the placeholder on first use."""
        if self.empty:
            self.listbox.delete(0, "end")
            self.empty = 0
        self.listbox.insert("end", str(item))

    def get(self, index):
        """Return the text of the item at *index*."""
        return self.listbox.get(index)

    def click_event(self, event):
        # Select the item under the mouse and notify the subclass.
        self.listbox.activate("@%d,%d" % (event.x, event.y))
        index = self.listbox.index("active")
        self.select(index)
        self.on_select(index)
        return "break"

    def double_click_event(self, event):
        index = self.listbox.index("active")
        self.select(index)
        self.on_double(index)
        return "break"

    menu = None

    def popup_event(self, event):
        # Lazily build the context menu, then pop it up at the pointer.
        if not self.menu:
            self.make_menu()
        menu = self.menu
        self.listbox.activate("@%d,%d" % (event.x, event.y))
        index = self.listbox.index("active")
        self.select(index)
        menu.tk_popup(event.x_root, event.y_root)

    def make_menu(self):
        menu = Menu(self.listbox, tearoff=0)
        self.menu = menu
        self.fill_menu()

    def up_event(self, event):
        # Move the selection up one item, wrapping from the top to nothing
        # selected -> last item; ring the bell at the boundary.
        index = self.listbox.index("active")
        if self.listbox.selection_includes(index):
            index = index - 1
        else:
            index = self.listbox.size() - 1
        if index < 0:
            self.listbox.bell()
        else:
            self.select(index)
            self.on_select(index)
        return "break"

    def down_event(self, event):
        # Move the selection down one item; ring the bell at the boundary.
        index = self.listbox.index("active")
        if self.listbox.selection_includes(index):
            index = index + 1
        else:
            index = 0
        if index >= self.listbox.size():
            self.listbox.bell()
        else:
            self.select(index)
            self.on_select(index)
        return "break"

    def select(self, index):
        """Focus, activate and highlight the item at *index*."""
        self.listbox.focus_set()
        self.listbox.activate(index)
        self.listbox.selection_clear(0, "end")
        self.listbox.selection_set(index)
        self.listbox.see(index)

    # Methods to override for specific actions.

    def fill_menu(self):
        """Populate self.menu with context-menu entries."""
        pass

    def on_select(self, index):
        """Called whenever an item is (keyboard- or mouse-) selected."""
        pass

    def on_double(self, index):
        """Called when an item is double-clicked."""
        pass


def _scrolled_list(parent):
    # Human-test driver (idlelib htest): show a demo list near *parent*.
    root = Tk()
    root.title("Test ScrolledList")
    # parent.geometry() returns "WIDTHxHEIGHT+X+Y"; split on 'x' and '+'.
    # BUG FIX: 're' was used here without ever being imported; the import
    # has been added at the top of the file.
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d" % (x, y + 150))

    class MyScrolledList(ScrolledList):
        def fill_menu(self):
            self.menu.add_command(label="right click")

        def on_select(self, index):
            print("select", self.get(index))

        def on_double(self, index):
            print("double", self.get(index))

    scrolled_list = MyScrolledList(root)
    for i in range(30):
        scrolled_list.append("Item %02d" % i)

    root.mainloop()

if __name__ == '__main__':
    from idlelib.idle_test.htest import run
    run(_scrolled_list)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
"""
    pygments.lexers.configs
    ~~~~~~~~~~~~~~~~~~~~~~~

    Lexers for configuration file formats.

    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexer import RegexLexer, default, words, bygroups, include, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
    Number, Punctuation, Whitespace
from pygments.lexers.shell import BashLexer

__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
           'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
           'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer']


class IniLexer(RegexLexer):
    """
    Lexer for configuration files in INI style.
    """

    name = 'INI'
    aliases = ['ini', 'cfg', 'dosini']
    filenames = ['*.ini', '*.cfg']
    mimetypes = ['text/x-ini']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'[;#].*', Comment.Single),
            (r'\[.*?\]$', Keyword),
            # key = value; the value may continue on indented lines.
            (r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
             bygroups(Name.Attribute, Text, Operator, Text, String))
        ]
    }

    # Note: analyse_text is intentionally defined without 'self' —
    # pygments calls it as an unbound/static heuristic.
    def analyse_text(text):
        # Heuristic: the file must start with a "[section]" header line.
        npos = text.find('\n')
        if npos < 3:
            return False
        return text[0] == '[' and text[npos-1] == ']'


class RegeditLexer(RegexLexer):
    """
    Lexer for `Windows Registry
    <http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
    by regedit.

    .. versionadded:: 1.6
    """

    name = 'reg'
    aliases = ['registry']
    filenames = ['*.reg']
    mimetypes = ['text/x-windows-registry']

    tokens = {
        'root': [
            (r'Windows Registry Editor.*', Text),
            (r'\s+', Text),
            (r'[;#].*', Comment.Single),
            # Registry key header, e.g. [-HKEY_LOCAL_MACHINE\...]
            (r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
             bygroups(Keyword, Operator, Name.Builtin, Keyword)),
            # String keys, which obey somewhat normal escaping
            (r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
             bygroups(Name.Attribute, Text, Operator, Text),
             'value'),
            # Bare keys (includes @)
            (r'(.*?)([ \t]*)(=)([ \t]*)',
             bygroups(Name.Attribute, Text, Operator, Text),
             'value'),
        ],
        'value': [
            (r'-', Operator, '#pop'),  # delete value
            (r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
             bygroups(Name.Variable, Punctuation, Number), '#pop'),
            # As far as I know, .reg files do not support line continuation.
            (r'.+', String, '#pop'),
            default('#pop'),
        ]
    }

    def analyse_text(text):
        # .reg exports always start with this editor banner.
        return text.startswith('Windows Registry Editor')


class PropertiesLexer(RegexLexer):
    """
    Lexer for configuration files in Java's properties format.

    .. versionadded:: 1.4
    """

    name = 'Properties'
    aliases = ['properties', 'jproperties']
    filenames = ['*.properties']
    mimetypes = ['text/x-java-properties']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(?:[;#]|//).*$', Comment),
            # key = value / key : value; backslash continues the value
            # on the next line.
            (r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
             bygroups(Name.Attribute, Text, Operator, Text, String)),
        ],
    }


def _rx_indent(level):
    """Build a regex matching one Kconfig help line indented at *level*."""
    # Kconfig *always* interprets a tab as 8 spaces, so this is the default.
    # Edit this if you are in an environment where KconfigLexer gets expanded
    # input (tabs expanded to spaces) and the expansion tab width is != 8,
    # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
    # Value range here is 2 <= {tab_width} <= 8.
    tab_width = 8
    # Regex matching a given indentation {level}, assuming that indentation is
    # a multiple of {tab_width}. In other cases there might be problems.
    if tab_width == 2:
        space_repeat = '+'
    else:
        space_repeat = '{1,%d}' % (tab_width - 1)
    if level == 1:
        level_repeat = ''
    else:
        level_repeat = '{%s}' % level
    return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)


class KconfigLexer(RegexLexer):
    """
    For Linux-style Kconfig files.

    .. versionadded:: 1.6
    """

    name = 'Kconfig'
    aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
    # Adjust this if new kconfig file names appear in your environment
    filenames = ['Kconfig', '*Config.in*', 'external.in*',
                 'standard-modules.in']
    mimetypes = ['text/x-kconfig']
    # No re.MULTILINE, indentation-aware help text needs line-by-line handling
    flags = 0

    def call_indent(level):
        # If indentation >= {level} is detected, enter state 'indent{level}'
        return (_rx_indent(level), String.Doc, 'indent%s' % level)

    def do_indent(level):
        # Print paragraphs of indentation level >= {level} as String.Doc,
        # ignoring blank lines. Then return to 'root' state.
        return [
            (_rx_indent(level), String.Doc),
            (r'\s*\n', Text),
            default('#pop:2')
        ]

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (words((
                'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
                'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
                'source', 'prompt', 'select', 'depends on', 'default',
                'range', 'option'), suffix=r'\b'),
             Keyword),
            (r'(---help---|help)[\t ]*\n', Keyword, 'help'),
            (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
             Name.Builtin),
            (r'[!=&|]', Operator),
            (r'[()]', Punctuation),
            (r'[0-9]+', Number.Integer),
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Double),
            (r'\S+', Text),
        ],
        # Help text is indented, multi-line and ends when a lower indentation
        # level is detected.
        'help': [
            # Skip blank lines after help token, if any
            (r'\s*\n', Text),
            # Determine the first help line's indentation level heuristically(!).
            # Attention: this is not perfect, but works for 99% of "normal"
            # indentation schemes up to a max. indentation level of 7.
            call_indent(7),
            call_indent(6),
            call_indent(5),
            call_indent(4),
            call_indent(3),
            call_indent(2),
            call_indent(1),
            default('#pop'),  # for incomplete help sections without text
        ],
        # Handle text for indentation levels 7 to 1
        'indent7': do_indent(7),
        'indent6': do_indent(6),
        'indent5': do_indent(5),
        'indent4': do_indent(4),
        'indent3': do_indent(3),
        'indent2': do_indent(2),
        'indent1': do_indent(1),
    }


class Cfengine3Lexer(RegexLexer):
    """
    Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.

    .. versionadded:: 1.5
    """

    name = 'CFEngine3'
    aliases = ['cfengine3', 'cf3']
    filenames = ['*.cf']
    mimetypes = []

    tokens = {
        'root': [
            (r'#.*?\n', Comment),
            (r'(body)(\s+)(\S+)(\s+)(control)',
             bygroups(Keyword, Text, Keyword, Text, Keyword)),
            # body/bundle definitions with and without an argument list
            (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
             bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation),
             'arglist'),
            (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
             bygroups(Punctuation, Name.Variable, Punctuation,
                      Text, Keyword.Type, Text, Operator, Text)),
            (r'(\S+)(\s*)(=>)(\s*)',
             bygroups(Keyword.Reserved, Text, Operator, Text)),
            (r'"', String, 'string'),
            (r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
            (r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
            (r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
            (r'@[{(][^)}]+[})]', Name.Variable),
            (r'[(){},;]', Punctuation),
            (r'=>', Operator),
            (r'->', Operator),
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer),
            (r'\w+', Name.Function),
            (r'\s+', Text),
        ],
        'string': [
            (r'\$[{(]', String.Interpol, 'interpol'),
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),
            (r'\n', String),
            (r'.', String),
        ],
        'interpol': [
            (r'\$[{(]', String.Interpol, '#push'),
            (r'[})]', String.Interpol, '#pop'),
            (r'[^${()}]+', String.Interpol),
        ],
        'arglist': [
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'\w+', Name.Variable),
            (r'\s+', Text),
        ],
    }


class ApacheConfLexer(RegexLexer):
    """
    Lexer for configuration files following the Apache config file
    format.

    .. versionadded:: 0.6
    """

    name = 'ApacheConf'
    aliases = ['apacheconf', 'aconf', 'apache']
    filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
    mimetypes = ['text/x-apacheconf']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#.*?)$', Comment),
            # <Section args> style container tags
            (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
             bygroups(Name.Tag, Text, String, Name.Tag)),
            (r'([a-z]\w*)(\s+)',
             bygroups(Name.Builtin, Text), 'value'),
            (r'\.+', Text),
        ],
        'value': [
            (r'\\\n', Text),
            (r'$', Text, '#pop'),
            (r'\\', Text),
            (r'[^\S\n]+', Text),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'\d+', Number),
            (r'/([a-z0-9][\w./-]+)', String.Other),
            (r'(on|off|none|any|all|double|email|dns|min|minimal|'
             r'os|productonly|full|emerg|alert|crit|error|warn|'
             r'notice|info|debug|registry|script|inetd|standalone|'
             r'user|group)\b', Keyword),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'[^\s"\\]+', Text)
        ],
    }


class SquidConfLexer(RegexLexer):
    """
    Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.

    .. versionadded:: 0.9
    """

    name = 'SquidConf'
    aliases = ['squidconf', 'squid.conf', 'squid']
    filenames = ['squid.conf']
    mimetypes = ['text/x-squidconf']
    flags = re.IGNORECASE

    # Known squid.conf directive names (historical list, includes the
    # upstream "cahce_replacement_policy" typo — kept verbatim).
    keywords = (
        "access_log", "acl", "always_direct", "announce_host",
        "announce_period", "announce_port", "announce_to",
        "anonymize_headers", "append_domain", "as_whois_server",
        "auth_param_basic", "authenticate_children", "authenticate_program",
        "authenticate_ttl", "broken_posts", "buffered_logs",
        "cache_access_log", "cache_announce", "cache_dir", "cache_dns_program",
        "cache_effective_group", "cache_effective_user", "cache_host",
        "cache_host_acl", "cache_host_domain", "cache_log", "cache_mem",
        "cache_mem_high", "cache_mem_low", "cache_mgr", "cachemgr_passwd",
        "cache_peer", "cache_peer_access", "cahce_replacement_policy",
        "cache_stoplist", "cache_stoplist_pattern", "cache_store_log",
        "cache_swap", "cache_swap_high", "cache_swap_log", "cache_swap_low",
        "client_db", "client_lifetime", "client_netmask", "connect_timeout",
        "coredump_dir", "dead_peer_timeout", "debug_options", "delay_access",
        "delay_class", "delay_initial_bucket_level", "delay_parameters",
        "delay_pools", "deny_info", "dns_children", "dns_defnames",
        "dns_nameservers", "dns_testnames", "emulate_httpd_log",
        "err_html_text", "fake_user_agent", "firewall_ip", "forwarded_for",
        "forward_snmpd_port", "fqdncache_size", "ftpget_options",
        "ftpget_program", "ftp_list_width", "ftp_passive", "ftp_user",
        "half_closed_clients", "header_access", "header_replace",
        "hierarchy_stoplist", "high_response_time_warning",
        "high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
        "http_anonymizer", "httpd_accel", "httpd_accel_host",
        "httpd_accel_port", "httpd_accel_uses_host_header",
        "httpd_accel_with_proxy", "http_port", "http_reply_access",
        "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
        "ident_lookup", "ident_lookup_access", "ident_timeout",
        "incoming_http_average", "incoming_icp_average", "inside_firewall",
        "ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
        "local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
        "log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
        "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
        "mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
        "memory_pools_limit", "memory_replacement_policy", "mime_table",
        "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
        "minimum_object_size", "minimum_retry_timeout", "miss_access",
        "negative_dns_ttl", "negative_ttl", "neighbor_timeout",
        "neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
        "netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
        "pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
        "prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
        "quick_abort", "quick_abort_max", "quick_abort_min",
        "quick_abort_pct", "range_offset_limit", "read_timeout",
        "redirect_children", "redirect_program",
        "redirect_rewrites_host_header", "reference_age", "refresh_pattern",
        "reload_into_ims", "request_body_max_size", "request_size",
        "request_timeout", "shutdown_lifetime", "single_parent_bypass",
        "siteselect_timeout", "snmp_access", "snmp_incoming_address",
        "snmp_port", "source_ping", "ssl_proxy", "store_avg_object_size",
        "store_objects_per_bucket", "strip_query_terms", "swap_level1_dirs",
        "swap_level2_dirs", "tcp_incoming_address", "tcp_outgoing_address",
        "tcp_recv_bufsize", "test_reachability", "udp_hit_obj",
        "udp_hit_obj_size", "udp_incoming_address", "udp_outgoing_address",
        "unique_hostname", "unlinkd_program", "uri_whitespace",
        "useragent_log", "visible_hostname", "wais_relay", "wais_relay_host",
        "wais_relay_port",
    )

    opts = (
        "proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
        "multicast-responder", "on", "off", "all", "deny", "allow", "via",
        "parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
        "credentialsttl", "none", "disable", "offline_toggle", "diskd",
    )

    actions = (
        "shutdown", "info", "parameter", "server_list", "client_list",
        r'squid.conf',
    )

    actions_stats = (
        "objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
        "redirector", "io", "reply_headers", "filedescriptors", "netdb",
    )

    actions_log = ("status", "enable", "disable", "clear")

    acls = (
        "url_regex", "urlpath_regex", "referer_regex", "port", "proto",
        "req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
        "dst", "time", "dstdomain", "ident", "snmp_community",
    )

    # Very permissive address regex; appears to cover IPv4 (including
    # octal/hex octets) and IPv6 forms — TODO confirm against RFC grammar.
    ip_re = (
        r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
        r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
        r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
        r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
        r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
        r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
        r'[1-9]?\d)){3}))'
    )

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#', Comment, 'comment'),
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
            # Actions
            (words(actions, prefix=r'\b', suffix=r'\b'), String),
            (words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
            (words(actions_log, prefix=r'log/', suffix=r'='), String),
            (words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
            (ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
            (r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
            (r'\S+', Text),
        ],
        'comment': [
            (r'\s*TAG:.*', String.Escape, '#pop'),
            (r'.+', Comment, '#pop'),
            default('#pop'),
        ],
    }


class NginxConfLexer(RegexLexer):
    """
    Lexer for `Nginx <http://nginx.net/>`_ configuration files.

    .. versionadded:: 0.11
    """
    name = 'Nginx configuration file'
    aliases = ['nginx']
    filenames = []
    mimetypes = ['text/x-nginx-conf']

    tokens = {
        'root': [
            (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
            (r'[^\s;#]+', Keyword, 'stmt'),
            include('base'),
        ],
        'block': [
            (r'\}', Punctuation, '#pop:2'),
            (r'[^\s;#]+', Keyword.Namespace, 'stmt'),
            include('base'),
        ],
        'stmt': [
            (r'\{', Punctuation, 'block'),
            (r';', Punctuation, '#pop'),
            include('base'),
        ],
        'base': [
            (r'#.*\n', Comment.Single),
            (r'on|off', Name.Constant),
            (r'\$[^\s;#()]+', Name.Variable),
            (r'([a-z0-9.-]+)(:)([0-9]+)',
             bygroups(Name, Punctuation, Number.Integer)),
            (r'[a-z-]+/[a-z-+]+', String),  # mimetype
            # (r'[a-zA-Z._-]+', Keyword),
            (r'[0-9]+[km]?\b', Number.Integer),
            (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
            (r'[:=~]', Punctuation),
            (r'[^\s;#{}$]+', String),  # catch all
            (r'/[^\s;#]*', Name),  # pathname
            (r'\s+', Text),
            (r'[$;]', Text),  # leftover characters
        ],
    }


class LighttpdConfLexer(RegexLexer):
    """
    Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.

    .. versionadded:: 0.11
    """
    name = 'Lighttpd configuration file'
    aliases = ['lighty', 'lighttpd']
    filenames = []
    mimetypes = ['text/x-lighttpd-conf']

    tokens = {
        'root': [
            (r'#.*\n', Comment.Single),
            (r'/\S*', Name),  # pathname
            (r'[a-zA-Z._-]+', Keyword),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'[0-9]+', Number),
            (r'=>|=~|\+=|==|=|\+', Operator),
            (r'\$[A-Z]+', Name.Builtin),
            (r'[(){}\[\],]', Punctuation),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'\s+', Text),
        ],
    }


class DockerLexer(RegexLexer):
    """
    Lexer for `Docker <http://docker.io>`_ configuration files.

    .. versionadded:: 2.0
    """
    name = 'Docker'
    aliases = ['docker', 'dockerfile']
    filenames = ['Dockerfile', '*.docker']
    mimetypes = ['text/x-dockerfile-config']

    # Instruction names recognized at line start (RUN is handled below so
    # its argument can be highlighted as shell).
    _keywords = (r'(?:FROM|MAINTAINER|CMD|EXPOSE|ENV|ADD|ENTRYPOINT|'
                 r'VOLUME|WORKDIR)')

    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            (r'^(ONBUILD)(\s+)(%s)\b' % (_keywords,),
             bygroups(Name.Keyword, Whitespace, Keyword)),
            (r'^(%s)\b(.*)' % (_keywords,), bygroups(Keyword, String)),
            (r'#.*', Comment),
            (r'RUN', Keyword),  # Rest of line falls through
            (r'(.*\\\n)*.+', using(BashLexer)),
        ],
    }
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python from twisted.internet import reactor from twisted.web import http from twisted.web.proxy import Proxy, ProxyRequest, ProxyClientFactory, ProxyClient from ImageFile import Parser from StringIO import StringIO class InterceptingProxyClient(ProxyClient): def __init__(self, *args, **kwargs): ProxyClient.__init__(self, *args, **kwargs) self.overrides = [] self.restricted_headers = [ 'accept-charset', 'accept-encoding', 'access-control-request-headers', 'access-control-request-method', 'connection', 'content-length', 'cookie', 'cookie2', 'content-transfer-encoding', 'date', 'expect', 'host', 'keep-alive', 'origin', 'referer', 'te', 'trailer', 'transfer-encoding', 'upgrade', 'user-agent', 'via' ] self.all_headers = [] self.unsent_restricted_headers = [] def sendHeader(self, name, value): if "postman-" in name: new_header = name[8:] print "Header %s, %s, %s" % (name, value, new_header) name = new_header header = { "name": name, "value": value } self.all_headers.append(name) ProxyClient.sendHeader(self, name, value) elif name in self.restricted_headers: header = { "name": name, "value": value } print "Restricted header %s" % name self.unsent_restricted_headers.append(header) else: ProxyClient.sendHeader(self, name, value) def endHeaders(self): for header in self.unsent_restricted_headers: if not header["name"] in self.all_headers: ProxyClient.sendHeader(self, header["name"], header["value"]) ProxyClient.endHeaders(self) class InterceptingProxyClientFactory(ProxyClientFactory): protocol = InterceptingProxyClient class InterceptingProxyRequest(ProxyRequest): protocols = {'http': InterceptingProxyClientFactory, 'https': InterceptingProxyClientFactory} class InterceptingProxy(Proxy): requestFactory = InterceptingProxyRequest factory = http.HTTPFactory() factory.protocol = InterceptingProxy port = 8000 reactor.listenTCP(8000, factory) reactor.run() print "Listening on port %d" % port
unknown
codeparrot/codeparrot-clean
""" Creates permissions for all installed apps that need permissions. """ from django.contrib.auth import models as auth_app from django.db.models import get_models, signals def _get_permission_codename(action, opts): return u'%s_%s' % (action, opts.object_name.lower()) def _get_all_permissions(opts): "Returns (codename, name) for all permissions in the given opts." perms = [] for action in ('add', 'change', 'delete'): perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw))) return perms + list(opts.permissions) def create_permissions(app, created_models, verbosity, **kwargs): from django.contrib.contenttypes.models import ContentType app_models = get_models(app) # This will hold the permissions we're looking for as # (content_type, (codename, name)) searched_perms = list() # The codenames and ctypes that should exist. ctypes = set() for klass in app_models: ctype = ContentType.objects.get_for_model(klass) ctypes.add(ctype) for perm in _get_all_permissions(klass._meta): searched_perms.append((ctype, perm)) # Find all the Permissions that have a context_type for a model we're # looking for. We don't need to check for codenames since we already have # a list of the ones we're going to create. all_perms = set(auth_app.Permission.objects.filter( content_type__in=ctypes, ).values_list( "content_type", "codename" )) for ctype, (codename, name) in searched_perms: # If the permissions exists, move on. if (ctype.pk, codename) in all_perms: continue p = auth_app.Permission.objects.create( codename=codename, name=name, content_type=ctype ) if verbosity >= 2: print "Adding permission '%s'" % p def create_superuser(app, created_models, verbosity, **kwargs): from django.core.management import call_command if auth_app.User in created_models and kwargs.get('interactive', True): msg = ("\nYou just installed Django's auth system, which means you " "don't have any superusers defined.\nWould you like to create one " "now? 
(yes/no): ") confirm = raw_input(msg) while 1: if confirm not in ('yes', 'no'): confirm = raw_input('Please enter either "yes" or "no": ') continue if confirm == 'yes': call_command("createsuperuser", interactive=True) break signals.post_syncdb.connect(create_permissions, dispatch_uid = "django.contrib.auth.management.create_permissions") signals.post_syncdb.connect(create_superuser, sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
unknown
codeparrot/codeparrot-clean
""" Views for a student's profile information. """ from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django_countries import countries from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from django.http import Http404 from django.views.decorators.http import require_http_methods from edxmako.shortcuts import render_to_response from openedx.core.djangoapps.user_api.accounts.api import get_account_settings from openedx.core.djangoapps.user_api.accounts.serializers import PROFILE_IMAGE_KEY_PREFIX from openedx.core.djangoapps.user_api.errors import UserNotFound, UserNotAuthorized from openedx.core.djangoapps.user_api.preferences.api import get_user_preferences from student.models import User from microsite_configuration import microsite from django.utils.translation import ugettext as _ @login_required @require_http_methods(['GET']) def learner_profile(request, username): """Render the profile page for the specified username. Args: request (HttpRequest) username (str): username of user whose profile is requested. Returns: HttpResponse: 200 if the page was sent successfully HttpResponse: 302 if not logged in (redirect to login page) HttpResponse: 405 if using an unsupported HTTP method Raises: Http404: 404 if the specified user is not authorized or does not exist Example usage: GET /account/profile """ try: return render_to_response( 'student_profile/learner_profile.html', learner_profile_context(request.user, username, request.user.is_staff, request.build_absolute_uri) ) except (UserNotAuthorized, UserNotFound, ObjectDoesNotExist): raise Http404 def learner_profile_context(logged_in_user, profile_username, user_is_staff, build_absolute_uri_func): """Context for the learner profile page. Args: logged_in_user (object): Logged In user. profile_username (str): username of user whose profile is requested. user_is_staff (bool): Logged In user has staff access. 
build_absolute_uri_func (): Returns: dict Raises: ObjectDoesNotExist: the specified profile_username does not exist. """ profile_user = User.objects.get(username=profile_username) own_profile = (logged_in_user.username == profile_username) account_settings_data = get_account_settings(logged_in_user, profile_username) # Account for possibly relative URLs. for key, value in account_settings_data['profile_image'].items(): if key.startswith(PROFILE_IMAGE_KEY_PREFIX): account_settings_data['profile_image'][key] = build_absolute_uri_func(value) preferences_data = get_user_preferences(profile_user, profile_username) context = { 'data': { 'profile_user_id': profile_user.id, 'default_public_account_fields': settings.ACCOUNT_VISIBILITY_CONFIGURATION['public_fields'], 'default_visibility': settings.ACCOUNT_VISIBILITY_CONFIGURATION['default_visibility'], 'accounts_api_url': reverse("accounts_api", kwargs={'username': profile_username}), 'preferences_api_url': reverse('preferences_api', kwargs={'username': profile_username}), 'preferences_data': preferences_data, 'account_settings_data': account_settings_data, 'profile_image_upload_url': reverse('profile_image_upload', kwargs={'username': profile_username}), 'profile_image_remove_url': reverse('profile_image_remove', kwargs={'username': profile_username}), 'profile_image_max_bytes': settings.PROFILE_IMAGE_MAX_BYTES, 'profile_image_min_bytes': settings.PROFILE_IMAGE_MIN_BYTES, 'account_settings_page_url': reverse('account_settings'), 'has_preferences_access': (logged_in_user.username == profile_username or user_is_staff), 'own_profile': own_profile, 'country_options': list(countries), 'language_options': settings.ALL_LANGUAGES, 'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME), } } return context
unknown
codeparrot/codeparrot-clean
""" Copyright 2009 55 Minutes (http://www.55minutes.com) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.conf import settings from django.core.management import call_command from django.core.management.commands import test from django_coverage import settings as coverage_settings class Command(test.Command): help = ("Runs the test suite for the specified applications, or the " "entire site if no apps are specified. Then generates coverage " "report both onscreen and as HTML.") def handle(self, *test_labels, **options): """ Replaces the original test runner with the coverage test runner, but keeps track of what the original runner was so that the coverage runner can inherit from it. Then, call the test command. This plays well with apps that override the test command, such as South. """ coverage_settings.ORIG_TEST_RUNNER = settings.TEST_RUNNER settings.TEST_RUNNER = coverage_settings.COVERAGE_TEST_RUNNER call_command('test', *test_labels, **options)
unknown
codeparrot/codeparrot-clean
"""Unit testing for a CLI utility to initialize the local development database. Unit testing for the command line based utility to initialize a local development database. Requires pymox to run. @author: A. Samuel Pottinger (samnsparky - Gleap LLC, 2013) @author: Joanne Cheng (joannecheng - Colorado Code for Communities, 2013) @license: Apache v2 """ import subprocess import unittest import mox import sqlalchemy import database import initalize_environment DATABASE_TYPE = 'testtype' DATABASE_NAME = 'testdb' DATABASE_USER = 'user' DATABASE_PASSWORD = 'pass' ENGINE_URL = 'postgresql://user:pass@localhost/postgres' EXPECTED_CREATE_COMMAND = 'CREATE DATABASE testdb WITH ENCODING=\'UNICODE\'' EXPECTED_ROLE_COMMAND = 'CREATE ROLE user LOGIN PASSWORD \'pass\';' EXPECTED_CREATE_COMMANDS = [EXPECTED_CREATE_COMMAND, 'commit'] EXPECTED_ROLE_COMMANDS = [EXPECTED_ROLE_COMMAND, 'commit'] TEST_CONFIG_SETTINGS = { 'database': { DATABASE_TYPE: { 'user': DATABASE_USER, 'pass': DATABASE_PASSWORD, 'db': DATABASE_NAME } } } # TODO(samnsparky): These could ultimately be mocked using pymox. class MockEngine: """Mock sqlalchemy engine.""" def __init__(self, connection): """Create a new engine that returns the given connection. @param connection: The connection to return to the client when client code attempts to connect to the DB. @type connection: MockDBConnection """ self.connection = connection def connect(self): """Stubbed out connect routine that returns a preloaded connection. @return: Mocked connection. 
@rtype: MockDBConnection """ return self.connection class MockDBInnerConnection: """Mock inner db-specific connection.""" def __init__(self): """Create a new mocked PostgreSQL native connection.""" self.isolation_levels = [] def set_isolation_level(self, level): """Set the PostgreSQL-native isloation level.""" self.isolation_levels.append(level) class MockDBConnection: """Mock sqlalchemy wrapped DB connection.""" def __init__(self): """Create a new Mock sqlalchemy wrapped DB connection. Create a new Mock sqlalchemy wrapped DB connection with an empty commands history. """ self.last_commands = [] self.connection = MockDBInnerConnection() def execute(self, command): """Simulate executing a command on this connection. @param command: The command to execute. @type command: str """ self.last_commands.append(command) # TODO(samnsparky): Should check if connection is actually closed. def close(self): """Simulate closing the connection.""" pass class TestInitEnvironment(mox.MoxTestBase): """Test suite for initializing the local test environment.""" def test_create_user(self): """Test creating a new user on the local PostgreSQL server.""" expected_elements = ['psql', '-c', EXPECTED_ROLE_COMMAND] self.mox.StubOutWithMock(subprocess, 'call') subprocess.call(expected_elements).AndReturn(0) self.mox.ReplayAll() initalize_environment.create_user(DATABASE_USER, DATABASE_PASSWORD) def test_create_db_lang(self): """Test creating a database language on a local development database.""" expected_elements = ['createlang', 'plpgsql', DATABASE_NAME] self.mox.StubOutWithMock(subprocess, 'call') subprocess.call(expected_elements).AndReturn(0) self.mox.ReplayAll() initalize_environment.create_db_lang(DATABASE_NAME) def test_install_postgis(self): """Test installing the PostGIS extension.""" expected_elements = [ 'psql', '-d', DATABASE_NAME, '-c', 'CREATE EXTENSION IF NOT EXISTS postgis' ] self.mox.StubOutWithMock(subprocess, 'call') subprocess.call(expected_elements).AndReturn(0) 
self.mox.ReplayAll() initalize_environment.install_postgis(DATABASE_NAME) def test_create_db_engine(self): """Test creating the sqlalchemy DB engine.""" self.mox.StubOutWithMock(sqlalchemy, 'create_engine') sqlalchemy.create_engine(ENGINE_URL).AndReturn(True) self.mox.ReplayAll() engine = initalize_environment.create_db_engine(DATABASE_USER, DATABASE_PASSWORD) self.assertTrue(engine, True) def test_create_db(self): """Test creating a local development database through sqlalchemy.""" mock_connection = MockDBConnection() initalize_environment.create_db(mock_connection, DATABASE_NAME) self.assertEqual(mock_connection.last_commands, EXPECTED_CREATE_COMMANDS) self.assertEqual(mock_connection.connection.isolation_levels, [0, 1]) def test_main(self): """Test the high level environment initialization logic.""" self.mox.StubOutWithMock(initalize_environment, 'create_user') self.mox.StubOutWithMock(initalize_environment, 'create_db_engine') self.mox.StubOutWithMock(initalize_environment, 'get_env_config') self.mox.StubOutWithMock(initalize_environment, 'create_db') self.mox.StubOutWithMock(initalize_environment, 'create_db_lang') self.mox.StubOutWithMock(initalize_environment, 'install_postgis') self.mox.StubOutWithMock(database, 'init_db') mock_connection = MockDBConnection() mock_engine = MockEngine(mock_connection) initalize_environment.create_user(DATABASE_USER, DATABASE_PASSWORD).AndReturn(None) initalize_environment.create_db_engine(DATABASE_USER, DATABASE_PASSWORD).AndReturn(mock_engine) initalize_environment.get_env_config().AndReturn(TEST_CONFIG_SETTINGS) initalize_environment.create_db(mock_connection, DATABASE_NAME).AndReturn(None) initalize_environment.create_db_lang(DATABASE_NAME).AndReturn(None) initalize_environment.install_postgis(DATABASE_NAME).AndReturn(None) database.init_db().AndReturn(mock_connection) self.mox.ReplayAll() initalize_environment.main(DATABASE_TYPE) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- from __future__ import print_function import copy import shutil from datetime import timedelta from functools import wraps from json import loads from textwrap import dedent from unittest import SkipTest from uuid import uuid4 import ddt import django import lxml.html import mock from django.conf import settings from django.contrib.auth.models import User from django.test import TestCase from django.test.utils import override_settings from edxval.api import create_video, get_videos_for_course from fs.osfs import OSFS from lxml import etree from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey, UsageKey from opaque_keys.edx.locations import AssetLocation, CourseLocator from path import Path as path from six import text_type from waffle.testutils import override_switch from contentstore.tests.utils import AjaxEnabledTestClient, CourseTestCase, get_url, parse_json from contentstore.utils import delete_course, reverse_course_url, reverse_url from contentstore.views.component import ADVANCED_COMPONENT_TYPES from contentstore.config import waffle from course_action_state.managers import CourseActionStateItemNotFoundError from course_action_state.models import CourseRerunState, CourseRerunUIStateManager from django_comment_common.utils import are_permissions_roles_seeded from openedx.core.lib.tempdir import mkdtemp_clean from student import auth from student.models import CourseEnrollment from student.roles import CourseCreatorRole, CourseInstructorRole from xmodule.capa_module import CapaDescriptor from xmodule.contentstore.content import StaticContent from xmodule.contentstore.django import contentstore from xmodule.contentstore.utils import empty_asset_trashcan, restore_asset_from_trashcan from xmodule.course_module import CourseDescriptor, Textbook from xmodule.exceptions import InvalidVersionError from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from 
xmodule.modulestore.exceptions import ItemNotFoundError from xmodule.modulestore.inheritance import own_metadata from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls from xmodule.modulestore.xml_exporter import export_course_to_xml from xmodule.modulestore.xml_importer import import_course_from_xml, perform_xlint from xmodule.seq_module import SequenceDescriptor TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE) TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT def requires_pillow_jpeg(func): """ A decorator to indicate that the function requires JPEG support for Pillow, otherwise it cannot be run """ @wraps(func) def decorated_func(*args, **kwargs): """ Execute the function if we have JPEG support in Pillow. """ try: from PIL import Image except ImportError: raise SkipTest("Pillow is not installed (or not found)") if not getattr(Image.core, "jpeg_decoder", False): raise SkipTest("Pillow cannot open JPEG files") return func(*args, **kwargs) return decorated_func @override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE) class ContentStoreTestCase(CourseTestCase): """ Base class for Content Store Test Cases """ class ImportRequiredTestCases(ContentStoreTestCase): """ Tests which legitimately need to import a course """ def test_no_static_link_rewrites_on_import(self): course_items = import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True ) course = course_items[0] handouts_usage_key = course.id.make_usage_key('course_info', 'handouts') handouts = self.store.get_item(handouts_usage_key) self.assertIn('/static/', handouts.data) handouts_usage_key = course.id.make_usage_key('html', 'toyhtml') handouts = self.store.get_item(handouts_usage_key) self.assertIn('/static/', handouts.data) def test_xlint_fails(self): err_cnt = perform_xlint(TEST_DATA_DIR, ['toy']) self.assertGreater(err_cnt, 0) def 
test_invalid_asset_overwrite(self): """ Tests that an asset with invalid displayname can be overwritten if multiple assets have same displayname. It Verifies that: During import, if ('/') or ('\') is present in displayname of an asset, it is replaced with underscores '_'. Export does not fail when an asset has '/' in its displayname. If the converted display matches with any other asset, then it will be replaced. Asset name in XML: "/invalid\\displayname/subs-esLhHcdKGWvKs.srt" """ content_store = contentstore() expected_displayname = u'_invalid_displayname_subs-esLhHcdKGWvKs.srt' import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['import_draft_order'], static_content_store=content_store, verbose=True, create_if_not_present=True ) # Verify the course has imported successfully course = self.store.get_course(self.store.make_course_key( 'test_org', 'import_draft_order', 'import_draft_order' )) self.assertIsNotNone(course) # Add a new asset in the course, and make sure to name it such that it overwrite the one existing # asset in the course. (i.e. _invalid_displayname_subs-esLhHcdKGWvKs.srt) asset_key = course.id.make_asset_key('asset', 'sample_asset.srt') content = StaticContent( asset_key, expected_displayname, 'application/text', 'test', ) content_store.save(content) # Get & verify that course actually has two assets assets, count = content_store.get_all_content_for_course(course.id) self.assertEqual(count, 2) # Verify both assets have similar `displayname` after saving. for asset in assets: self.assertEquals(asset['displayname'], expected_displayname) # Test course export does not fail root_dir = path(mkdtemp_clean()) print('Exporting to tempdir = {0}'.format(root_dir)) export_course_to_xml(self.store, content_store, course.id, root_dir, u'test_export') filesystem = OSFS(text_type(root_dir / 'test_export/static')) exported_static_files = filesystem.listdir(u'/') # Verify that asset have been overwritten during export. 
self.assertEqual(len(exported_static_files), 1) self.assertTrue(filesystem.exists(expected_displayname)) self.assertEqual(exported_static_files[0], expected_displayname) # Remove exported course shutil.rmtree(root_dir) def test_about_overrides(self): """ This test case verifies that a course can use specialized override for about data, e.g. /about/Fall_2012/effort.html while there is a base definition in /about/effort.html """ course_items = import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True ) course_key = course_items[0].id effort = self.store.get_item(course_key.make_usage_key('about', 'effort')) self.assertEqual(effort.data, '6 hours') # this one should be in a non-override folder effort = self.store.get_item(course_key.make_usage_key('about', 'end_date')) self.assertEqual(effort.data, 'TBD') @requires_pillow_jpeg def test_asset_import(self): """ This test validates that an image asset is imported and a thumbnail was generated for a .gif """ content_store = contentstore() import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['toy'], static_content_store=content_store, verbose=True, create_if_not_present=True ) course = self.store.get_course(self.store.make_course_key('edX', 'toy', '2012_Fall')) self.assertIsNotNone(course) # make sure we have some assets in our contentstore all_assets, __ = content_store.get_all_content_for_course(course.id) self.assertGreater(len(all_assets), 0) # make sure we have some thumbnails in our contentstore all_thumbnails = content_store.get_all_content_thumbnails_for_course(course.id) self.assertGreater(len(all_thumbnails), 0) location = AssetLocation.from_deprecated_string('/c4x/edX/toy/asset/just_a_test.jpg') content = content_store.find(location) self.assertIsNotNone(content) self.assertIsNotNone(content.thumbnail_location) thumbnail = content_store.find(content.thumbnail_location) self.assertIsNotNone(thumbnail) def test_course_info_updates_import_export(self): """ 
Test that course info updates are imported and exported with all content fields ('data', 'items') """ content_store = contentstore() data_dir = TEST_DATA_DIR courses = import_course_from_xml( self.store, self.user.id, data_dir, ['course_info_updates'], static_content_store=content_store, verbose=True, create_if_not_present=True ) course = courses[0] self.assertIsNotNone(course) course_updates = self.store.get_item(course.id.make_usage_key('course_info', 'updates')) self.assertIsNotNone(course_updates) # check that course which is imported has files 'updates.html' and 'updates.items.json' filesystem = OSFS(text_type(data_dir + '/course_info_updates/info')) self.assertTrue(filesystem.exists(u'updates.html')) self.assertTrue(filesystem.exists(u'updates.items.json')) # verify that course info update module has same data content as in data file from which it is imported # check 'data' field content with filesystem.open(u'updates.html', 'r') as course_policy: on_disk = course_policy.read() self.assertEqual(course_updates.data, on_disk) # check 'items' field content with filesystem.open(u'updates.items.json', 'r') as course_policy: on_disk = loads(course_policy.read()) self.assertEqual(course_updates.items, on_disk) # now export the course to a tempdir and test that it contains files 'updates.html' and 'updates.items.json' # with same content as in course 'info' directory root_dir = path(mkdtemp_clean()) print('Exporting to tempdir = {0}'.format(root_dir)) export_course_to_xml(self.store, content_store, course.id, root_dir, u'test_export') # check that exported course has files 'updates.html' and 'updates.items.json' filesystem = OSFS(text_type(root_dir / 'test_export/info')) self.assertTrue(filesystem.exists(u'updates.html')) self.assertTrue(filesystem.exists(u'updates.items.json')) # verify that exported course has same data content as in course_info_update module with filesystem.open(u'updates.html', 'r') as grading_policy: on_disk = grading_policy.read() 
self.assertEqual(on_disk, course_updates.data) with filesystem.open(u'updates.items.json', 'r') as grading_policy: on_disk = loads(grading_policy.read()) self.assertEqual(on_disk, course_updates.items) def test_rewrite_nonportable_links_on_import(self): content_store = contentstore() import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['toy'], static_content_store=content_store, create_if_not_present=True ) # first check a static asset link course_key = self.store.make_course_key('edX', 'toy', 'run') html_module_location = course_key.make_usage_key('html', 'nonportable') html_module = self.store.get_item(html_module_location) self.assertIn('/static/foo.jpg', html_module.data) # then check a intra courseware link html_module_location = course_key.make_usage_key('html', 'nonportable_link') html_module = self.store.get_item(html_module_location) self.assertIn('/jump_to_id/nonportable_link', html_module.data) def verify_content_existence(self, store, root_dir, course_id, dirname, category_name, filename_suffix=''): filesystem = OSFS(root_dir / 'test_export') self.assertTrue(filesystem.exists(dirname)) items = store.get_items(course_id, qualifiers={'category': category_name}) for item in items: filesystem = OSFS(root_dir / ('test_export/' + dirname)) self.assertTrue(filesystem.exists(item.location.block_id + filename_suffix)) @mock.patch('xmodule.course_module.requests.get') def test_export_course_roundtrip(self, mock_get): mock_get.return_value.text = dedent(""" <?xml version="1.0"?><table_of_contents> <entry page="5" page_label="ii" name="Table of Contents"/> </table_of_contents> """).strip() content_store = contentstore() course_id = self.import_and_populate_course() root_dir = path(mkdtemp_clean()) print('Exporting to tempdir = {0}'.format(root_dir)) # export out to a tempdir export_course_to_xml(self.store, content_store, course_id, root_dir, u'test_export') # check for static tabs self.verify_content_existence(self.store, root_dir, course_id, u'tabs', 
'static_tab', '.html') # check for about content self.verify_content_existence(self.store, root_dir, course_id, u'about', 'about', '.html') # assert that there is an html and video directory in drafts: draft_dir = OSFS(root_dir / 'test_export/drafts') self.assertTrue(draft_dir.exists(u'html')) self.assertTrue(draft_dir.exists(u'video')) # and assert that they contain the created modules self.assertIn(self.DRAFT_HTML + ".xml", draft_dir.listdir(u'html')) self.assertIn(self.DRAFT_VIDEO + ".xml", draft_dir.listdir(u'video')) # and assert the child of the orphaned draft wasn't exported self.assertNotIn(self.ORPHAN_DRAFT_HTML + ".xml", draft_dir.listdir(u'html')) # check for grading_policy.json filesystem = OSFS(root_dir / 'test_export/policies/2012_Fall') self.assertTrue(filesystem.exists(u'grading_policy.json')) course = self.store.get_course(course_id) # compare what's on disk compared to what we have in our course with filesystem.open(u'grading_policy.json', 'r') as grading_policy: on_disk = loads(grading_policy.read()) self.assertEqual(on_disk, course.grading_policy) # check for policy.json self.assertTrue(filesystem.exists(u'policy.json')) # compare what's on disk to what we have in the course module with filesystem.open(u'policy.json', 'r') as course_policy: on_disk = loads(course_policy.read()) self.assertIn('course/2012_Fall', on_disk) self.assertEqual(on_disk['course/2012_Fall'], own_metadata(course)) # remove old course self.store.delete_course(course_id, self.user.id) # reimport over old course self.check_import(root_dir, content_store, course_id) # import to different course id new_course_id = self.store.make_course_key('anotherX', 'anotherToy', 'Someday') self.check_import(root_dir, content_store, new_course_id) self.assertCoursesEqual(course_id, new_course_id) shutil.rmtree(root_dir) def check_import(self, root_dir, content_store, course_id): """Imports the course in root_dir into the given course_id and verifies its content""" # reimport 
import_course_from_xml( self.store, self.user.id, root_dir, ['test_export'], static_content_store=content_store, target_id=course_id, ) # verify content of the course self.check_populated_course(course_id) # verify additional export attributes def verify_export_attrs_removed(attributes): """Verifies all temporary attributes added during export are removed""" self.assertNotIn('index_in_children_list', attributes) self.assertNotIn('parent_sequential_url', attributes) self.assertNotIn('parent_url', attributes) vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL)) verify_export_attrs_removed(vertical.xml_attributes) for child in vertical.get_children(): verify_export_attrs_removed(child.xml_attributes) if hasattr(child, 'data'): verify_export_attrs_removed(child.data) def test_export_course_with_metadata_only_video(self): content_store = contentstore() import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True) course_id = self.store.make_course_key('edX', 'toy', '2012_Fall') # create a new video module and add it as a child to a vertical # this re-creates a bug whereby since the video template doesn't have # anything in 'data' field, the export was blowing up verticals = self.store.get_items(course_id, qualifiers={'category': 'vertical'}) self.assertGreater(len(verticals), 0) parent = verticals[0] ItemFactory.create(parent_location=parent.location, category="video", display_name="untitled") root_dir = path(mkdtemp_clean()) print('Exporting to tempdir = {0}'.format(root_dir)) # export out to a tempdir export_course_to_xml(self.store, content_store, course_id, root_dir, u'test_export') shutil.rmtree(root_dir) def test_export_course_with_metadata_only_word_cloud(self): """ Similar to `test_export_course_with_metadata_only_video`. 
""" content_store = contentstore() import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['word_cloud'], create_if_not_present=True) course_id = self.store.make_course_key('HarvardX', 'ER22x', '2013_Spring') verticals = self.store.get_items(course_id, qualifiers={'category': 'vertical'}) self.assertGreater(len(verticals), 0) parent = verticals[0] ItemFactory.create(parent_location=parent.location, category="word_cloud", display_name="untitled") root_dir = path(mkdtemp_clean()) print('Exporting to tempdir = {0}'.format(root_dir)) # export out to a tempdir export_course_to_xml(self.store, content_store, course_id, root_dir, u'test_export') shutil.rmtree(root_dir) def test_import_after_renaming_xml_data(self): """ Test that import works fine on split mongo after renaming the blocks url. """ split_store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.split) # pylint: disable=W0212 import_course_from_xml( split_store, self.user.id, TEST_DATA_DIR, ['course_before_rename'], create_if_not_present=True ) course_after_rename = import_course_from_xml( split_store, self.user.id, TEST_DATA_DIR, ['course_after_rename'], create_if_not_present=True ) all_items = split_store.get_items(course_after_rename[0].id, qualifiers={'category': 'chapter'}) renamed_chapter = [item for item in all_items if item.location.block_id == 'renamed_chapter'][0] self.assertIsNotNone(renamed_chapter.published_on) self.assertIsNotNone(renamed_chapter.parent) self.assertIn(renamed_chapter.location, course_after_rename[0].children) original_chapter = [item for item in all_items if item.location.block_id == 'b9870b9af59841a49e6e02765d0e3bbf'][0] self.assertIsNone(original_chapter.published_on) self.assertIsNone(original_chapter.parent) self.assertNotIn(original_chapter.location, course_after_rename[0].children) def test_empty_data_roundtrip(self): """ Test that an empty `data` field is preserved through export/import. 
""" content_store = contentstore() import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True) course_id = self.store.make_course_key('edX', 'toy', '2012_Fall') verticals = self.store.get_items(course_id, qualifiers={'category': 'vertical'}) self.assertGreater(len(verticals), 0) parent = verticals[0] # Create a module, and ensure that its `data` field is empty word_cloud = ItemFactory.create(parent_location=parent.location, category="word_cloud", display_name="untitled") del word_cloud.data self.assertEquals(word_cloud.data, '') # Export the course root_dir = path(mkdtemp_clean()) export_course_to_xml(self.store, content_store, course_id, root_dir, u'test_roundtrip') # Reimport and get the video back import_course_from_xml(self.store, self.user.id, root_dir) imported_word_cloud = self.store.get_item(course_id.make_usage_key('word_cloud', 'untitled')) # It should now contain empty data self.assertEquals(imported_word_cloud.data, '') def test_html_export_roundtrip(self): """ Test that a course which has HTML that has style formatting is preserved in export/import """ content_store = contentstore() import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True) course_id = self.store.make_course_key('edX', 'toy', '2012_Fall') # Export the course root_dir = path(mkdtemp_clean()) export_course_to_xml(self.store, content_store, course_id, root_dir, u'test_roundtrip') # Reimport and get the video back import_course_from_xml(self.store, self.user.id, root_dir, create_if_not_present=True) # get the sample HTML with styling information html_module = self.store.get_item(course_id.make_usage_key('html', 'with_styling')) self.assertIn('<p style="font:italic bold 72px/30px Georgia, serif; color: red; ">', html_module.data) # get the sample HTML with just a simple <img> tag information html_module = self.store.get_item(course_id.make_usage_key('html', 'just_img')) self.assertIn('<img 
src="/static/foo_bar.jpg" />', html_module.data) def test_export_course_without_content_store(self): # Create toy course course_items = import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True ) course_id = course_items[0].id root_dir = path(mkdtemp_clean()) print('Exporting to tempdir = {0}'.format(root_dir)) export_course_to_xml(self.store, None, course_id, root_dir, u'test_export_no_content_store') # Delete the course from module store and reimport it self.store.delete_course(course_id, self.user.id) import_course_from_xml( self.store, self.user.id, root_dir, ['test_export_no_content_store'], static_content_store=None, target_id=course_id ) # Verify reimported course items = self.store.get_items( course_id, qualifiers={ 'category': 'sequential', 'name': 'vertical_sequential', } ) self.assertEqual(len(items), 1) def test_export_course_no_xml_attributes(self): """ Test that a module without an `xml_attributes` attr will still be exported successfully """ content_store = contentstore() import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True) course_id = self.store.make_course_key('edX', 'toy', '2012_Fall') verticals = self.store.get_items(course_id, qualifiers={'category': 'vertical'}) vertical = verticals[0] # create OpenAssessmentBlock: open_assessment = ItemFactory.create( parent_location=vertical.location, category="openassessment", display_name="untitled", ) # convert it to draft draft_open_assessment = self.store.convert_to_draft( open_assessment.location, self.user.id ) # note that it has no `xml_attributes` attribute self.assertFalse(hasattr(draft_open_assessment, "xml_attributes")) # export should still complete successfully root_dir = path(mkdtemp_clean()) export_course_to_xml( self.store, content_store, course_id, root_dir, u'test_no_xml_attributes' ) @ddt.ddt class MiscCourseTests(ContentStoreTestCase): """ Tests that rely on the toy courses. 
""" def setUp(self): super(MiscCourseTests, self).setUp() # save locs not items b/c the items won't have the subsequently created children in them until refetched self.chapter_loc = self.store.create_child( self.user.id, self.course.location, 'chapter', 'test_chapter' ).location self.seq_loc = self.store.create_child( self.user.id, self.chapter_loc, 'sequential', 'test_seq' ).location self.vert_loc = self.store.create_child(self.user.id, self.seq_loc, 'vertical', 'test_vert').location # now create some things quasi like the toy course had self.problem = self.store.create_child( self.user.id, self.vert_loc, 'problem', 'test_problem', fields={ "data": "<problem>Test</problem>" } ) self.store.create_child( self.user.id, self.vert_loc, 'video', fields={ "youtube_id_0_75": "JMD_ifUUfsU", "youtube_id_1_0": "OEoXaMPEzfM", "youtube_id_1_25": "AKqURZnYqpk", "youtube_id_1_5": "DYpADpL7jAY", "name": "sample_video", } ) self.store.create_child( self.user.id, self.vert_loc, 'video', fields={ "youtube_id_0_75": "JMD_ifUUfsU", "youtube_id_1_0": "OEoXaMPEzfM", "youtube_id_1_25": "AKqURZnYqpk", "youtube_id_1_5": "DYpADpL7jAY", "name": "truncated_video", "end_time": timedelta(hours=10), } ) self.store.create_child( self.user.id, self.vert_loc, 'poll_question', fields={ "name": "T1_changemind_poll_foo_2", "display_name": "Change your answer", "question": "Have you changed your mind?", "answers": [{"id": "yes", "text": "Yes"}, {"id": "no", "text": "No"}], } ) self.course = self.store.publish(self.course.location, self.user.id) def check_components_on_page(self, component_types, expected_types): """ Ensure that the right types end up on the page. component_types is the list of advanced components. expected_types is the list of elements that should appear on the page. expected_types and component_types should be similar, but not exactly the same -- for example, 'video' in component_types should cause 'Video' to be present. 
""" self.course.advanced_modules = component_types self.store.update_item(self.course, self.user.id) # just pick one vertical resp = self.client.get_html(get_url('container_handler', self.vert_loc)) self.assertEqual(resp.status_code, 200) for expected in expected_types: self.assertIn(expected, resp.content) @ddt.data("<script>alert(1)</script>", "alert('hi')", "</script><script>alert(1)</script>") def test_container_handler_xss_prevent(self, malicious_code): """ Test that XSS attack is prevented """ resp = self.client.get_html(get_url('container_handler', self.vert_loc) + '?action=' + malicious_code) self.assertEqual(resp.status_code, 200) # Test that malicious code does not appear in html self.assertNotIn(malicious_code, resp.content) def test_advanced_components_in_edit_unit(self): # This could be made better, but for now let's just assert that we see the advanced modules mentioned in the # page response HTML self.check_components_on_page( ADVANCED_COMPONENT_TYPES, ['Word cloud', 'Annotation', 'Text Annotation', 'Video Annotation', 'Image Annotation', 'split_test'], ) @ddt.data('/Fake/asset/displayname', '\\Fake\\asset\\displayname') def test_export_on_invalid_displayname(self, invalid_displayname): """ Tests that assets with invalid 'displayname' does not cause export to fail """ content_store = contentstore() exported_asset_name = u'_Fake_asset_displayname' # Create an asset with slash `invalid_displayname` ' asset_key = self.course.id.make_asset_key('asset', "fake_asset.txt") content = StaticContent( asset_key, invalid_displayname, 'application/text', 'test', ) content_store.save(content) # Verify that the course has only one asset and it has been added with an invalid asset name. assets, count = content_store.get_all_content_for_course(self.course.id) self.assertEqual(count, 1) display_name = assets[0]['displayname'] self.assertEqual(display_name, invalid_displayname) # Now export the course to a tempdir and test that it contains assets. 
The export should pass root_dir = path(mkdtemp_clean()) print('Exporting to tempdir = {0}'.format(root_dir)) export_course_to_xml(self.store, content_store, self.course.id, root_dir, u'test_export') filesystem = OSFS(root_dir / 'test_export/static') exported_static_files = filesystem.listdir(u'/') # Verify that only single asset has been exported with the expected asset name. self.assertTrue(filesystem.exists(exported_asset_name)) self.assertEqual(len(exported_static_files), 1) # Remove tempdir shutil.rmtree(root_dir) @mock.patch( 'lms.djangoapps.ccx.modulestore.CCXModulestoreWrapper.get_item', mock.Mock(return_value=mock.Mock(children=[])) ) def test_export_with_orphan_vertical(self): """ Tests that, export does not fail when a parent xblock does not have draft child xblock information but the draft child xblock has parent information. """ # Make an existing unit a draft self.store.convert_to_draft(self.problem.location, self.user.id) root_dir = path(mkdtemp_clean()) export_course_to_xml(self.store, None, self.course.id, root_dir, u'test_export') # Verify that problem is exported in the drafts. This is expected because we are # mocking get_item to for drafts. Expect no draft is exported. # Specifically get_item is used in `xmodule.modulestore.xml_exporter._export_drafts` export_draft_dir = OSFS(root_dir / 'test_export/drafts') self.assertEqual(len(export_draft_dir.listdir(u'/')), 0) # Remove tempdir shutil.rmtree(root_dir) def test_assets_overwrite(self): """ Tests that assets will similar 'displayname' will be overwritten during export """ content_store = contentstore() asset_displayname = u'Fake_asset.txt' # Create two assets with similar 'displayname' for i in range(2): asset_path = 'sample_asset_{}.txt'.format(i) asset_key = self.course.id.make_asset_key('asset', asset_path) content = StaticContent( asset_key, asset_displayname, 'application/text', 'test', ) content_store.save(content) # Fetch & verify course assets to be equal to 2. 
assets, count = content_store.get_all_content_for_course(self.course.id) self.assertEqual(count, 2) # Verify both assets have similar 'displayname' after saving. for asset in assets: self.assertEquals(asset['displayname'], asset_displayname) # Now export the course to a tempdir and test that it contains assets. root_dir = path(mkdtemp_clean()) print('Exporting to tempdir = {0}'.format(root_dir)) export_course_to_xml(self.store, content_store, self.course.id, root_dir, u'test_export') # Verify that asset have been overwritten during export. filesystem = OSFS(root_dir / 'test_export/static') exported_static_files = filesystem.listdir(u'/') self.assertTrue(filesystem.exists(asset_displayname)) self.assertEqual(len(exported_static_files), 1) # Remove tempdir shutil.rmtree(root_dir) def test_advanced_components_require_two_clicks(self): self.check_components_on_page(['word_cloud'], ['Word cloud']) def test_malformed_edit_unit_request(self): # just pick one vertical usage_key = self.course.id.make_usage_key('vertical', None) resp = self.client.get_html(get_url('container_handler', usage_key)) self.assertEqual(resp.status_code, 400) def test_edit_unit(self): """Verifies rendering the editor in all the verticals in the given test course""" self._check_verticals([self.vert_loc]) def _get_draft_counts(self, item): cnt = 1 if getattr(item, 'is_draft', False) else 0 for child in item.get_children(): cnt = cnt + self._get_draft_counts(child) return cnt def test_get_items(self): """ This verifies a bug we had where the None setting in get_items() meant 'wildcard' Unfortunately, None = published for the revision field, so get_items() would return both draft and non-draft copies. """ self.store.convert_to_draft(self.problem.location, self.user.id) # Query get_items() and find the html item. This should just return back a single item (not 2). 
direct_store_items = self.store.get_items( self.course.id, revision=ModuleStoreEnum.RevisionOption.published_only ) items_from_direct_store = [item for item in direct_store_items if item.location == self.problem.location] self.assertEqual(len(items_from_direct_store), 1) self.assertFalse(getattr(items_from_direct_store[0], 'is_draft', False)) # Fetch from the draft store. draft_store_items = self.store.get_items( self.course.id, revision=ModuleStoreEnum.RevisionOption.draft_only ) items_from_draft_store = [item for item in draft_store_items if item.location == self.problem.location] self.assertEqual(len(items_from_draft_store), 1) # TODO the below won't work for split mongo self.assertTrue(getattr(items_from_draft_store[0], 'is_draft', False)) def test_draft_metadata(self): """ This verifies a bug we had where inherited metadata was getting written to the module as 'own-metadata' when publishing. Also verifies the metadata inheritance is properly computed """ # refetch course so it has all the children correct course = self.store.update_item(self.course, self.user.id) course.graceperiod = timedelta(days=1, hours=5, minutes=59, seconds=59) course = self.store.update_item(course, self.user.id) problem = self.store.get_item(self.problem.location) self.assertEqual(problem.graceperiod, course.graceperiod) self.assertNotIn('graceperiod', own_metadata(problem)) self.store.convert_to_draft(problem.location, self.user.id) # refetch to check metadata problem = self.store.get_item(problem.location) self.assertEqual(problem.graceperiod, course.graceperiod) self.assertNotIn('graceperiod', own_metadata(problem)) # publish module self.store.publish(problem.location, self.user.id) # refetch to check metadata problem = self.store.get_item(problem.location) self.assertEqual(problem.graceperiod, course.graceperiod) self.assertNotIn('graceperiod', own_metadata(problem)) # put back in draft and change metadata and see if it's now marked as 'own_metadata' 
self.store.convert_to_draft(problem.location, self.user.id) problem = self.store.get_item(problem.location) new_graceperiod = timedelta(hours=1) self.assertNotIn('graceperiod', own_metadata(problem)) problem.graceperiod = new_graceperiod # Save the data that we've just changed to the underlying # MongoKeyValueStore before we update the mongo datastore. problem.save() self.assertIn('graceperiod', own_metadata(problem)) self.assertEqual(problem.graceperiod, new_graceperiod) self.store.update_item(problem, self.user.id) # read back to make sure it reads as 'own-metadata' problem = self.store.get_item(problem.location) self.assertIn('graceperiod', own_metadata(problem)) self.assertEqual(problem.graceperiod, new_graceperiod) # republish self.store.publish(problem.location, self.user.id) # and re-read and verify 'own-metadata' self.store.convert_to_draft(problem.location, self.user.id) problem = self.store.get_item(problem.location) self.assertIn('graceperiod', own_metadata(problem)) self.assertEqual(problem.graceperiod, new_graceperiod) def test_get_depth_with_drafts(self): # make sure no draft items have been returned num_drafts = self._get_draft_counts(self.course) self.assertEqual(num_drafts, 0) # put into draft self.store.convert_to_draft(self.problem.location, self.user.id) # make sure we can query that item and verify that it is a draft draft_problem = self.store.get_item(self.problem.location) self.assertTrue(getattr(draft_problem, 'is_draft', False)) # now requery with depth course = self.store.get_course(self.course.id, depth=None) # make sure just one draft item have been returned num_drafts = self._get_draft_counts(course) self.assertEqual(num_drafts, 1) @mock.patch('xmodule.course_module.requests.get') def test_import_textbook_as_content_element(self, mock_get): mock_get.return_value.text = dedent(""" <?xml version="1.0"?><table_of_contents> <entry page="5" page_label="ii" name="Table of Contents"/> </table_of_contents> """).strip() self.course.textbooks = 
[Textbook("Textbook", "https://s3.amazonaws.com/edx-textbooks/guttag_computation_v3/")] course = self.store.update_item(self.course, self.user.id) self.assertGreater(len(course.textbooks), 0) def test_import_polls(self): items = self.store.get_items(self.course.id, qualifiers={'category': 'poll_question'}) self.assertGreater(len(items), 0) # check that there's actually content in the 'question' field self.assertGreater(len(items[0].question), 0) def test_module_preview_in_whitelist(self): """ Tests the ajax callback to render an XModule """ with override_settings(COURSES_WITH_UNSAFE_CODE=[text_type(self.course.id)]): # also try a custom response which will trigger the 'is this course in whitelist' logic resp = self.client.get_json( get_url('xblock_view_handler', self.vert_loc, kwargs={'view_name': 'container_preview'}) ) self.assertEqual(resp.status_code, 200) vertical = self.store.get_item(self.vert_loc) for child in vertical.children: self.assertContains(resp, text_type(child)) def test_delete(self): # make sure the parent points to the child object which is to be deleted # need to refetch chapter b/c at the time it was assigned it had no children chapter = self.store.get_item(self.chapter_loc) self.assertIn(self.seq_loc, chapter.children) self.client.delete(get_url('xblock_handler', self.seq_loc)) with self.assertRaises(ItemNotFoundError): self.store.get_item(self.seq_loc) chapter = self.store.get_item(self.chapter_loc) # make sure the parent no longer points to the child object which was deleted self.assertNotIn(self.seq_loc, chapter.children) def test_asset_delete_and_restore(self): """ This test will exercise the soft delete/restore functionality of the assets """ asset_key = self._delete_asset_in_course() # now try to find it in store, but they should not be there any longer content = contentstore().find(asset_key, throw_on_not_found=False) self.assertIsNone(content) # now try to find it and the thumbnail in trashcan - should be in there content = 
contentstore('trashcan').find(asset_key, throw_on_not_found=False) self.assertIsNotNone(content) # let's restore the asset restore_asset_from_trashcan(text_type(asset_key)) # now try to find it in courseware store, and they should be back after restore content = contentstore('trashcan').find(asset_key, throw_on_not_found=False) self.assertIsNotNone(content) def _delete_asset_in_course(self): """ Helper method for: 1) importing course from xml 2) finding asset in course (verifying non-empty) 3) computing thumbnail location of asset 4) deleting the asset from the course """ asset_key = self.course.id.make_asset_key('asset', 'sample_static.html') content = StaticContent( asset_key, "Fake asset", "application/text", "test", ) contentstore().save(content) # go through the website to do the delete, since the soft-delete logic is in the view url = reverse_course_url( 'assets_handler', self.course.id, kwargs={'asset_key_string': text_type(asset_key)} ) resp = self.client.delete(url) self.assertEqual(resp.status_code, 204) return asset_key def test_empty_trashcan(self): """ This test will exercise the emptying of the asset trashcan """ self._delete_asset_in_course() # make sure there's something in the trashcan all_assets, __ = contentstore('trashcan').get_all_content_for_course(self.course.id) self.assertGreater(len(all_assets), 0) # empty the trashcan empty_asset_trashcan([self.course.id]) # make sure trashcan is empty all_assets, count = contentstore('trashcan').get_all_content_for_course(self.course.id) self.assertEqual(len(all_assets), 0) self.assertEqual(count, 0) def test_illegal_draft_crud_ops(self): # this test presumes old mongo and split_draft not full split with self.assertRaises(InvalidVersionError): self.store.convert_to_draft(self.chapter_loc, self.user.id) chapter = self.store.get_item(self.chapter_loc) chapter.data = 'chapter data' self.store.update_item(chapter, self.user.id) newobject = self.store.get_item(self.chapter_loc) 
        # updating a direct-only item must not have produced a draft copy
        self.assertFalse(getattr(newobject, 'is_draft', False))

        with self.assertRaises(InvalidVersionError):
            self.store.unpublish(self.chapter_loc, self.user.id)

    def test_bad_contentstore_request(self):
        """
        Test that user get proper responses for urls with invalid url or
        asset/course key
        """
        resp = self.client.get_html('/c4x/CDX/123123/asset/&invalid.png')
        self.assertEqual(resp.status_code, 400)

        resp = self.client.get_html('/c4x/CDX/123123/asset/invalid.png')
        self.assertEqual(resp.status_code, 404)

        # Now test that 404 response is returned when user tries to access
        # asset of some invalid course from split ModuleStore
        with self.store.default_store(ModuleStoreEnum.Type.split):
            resp = self.client.get_html('/c4x/InvalidOrg/InvalidCourse/asset/invalid.png')
            self.assertEqual(resp.status_code, 404)

    @override_switch(
        '{}.{}'.format(waffle.WAFFLE_NAMESPACE, waffle.ENABLE_ACCESSIBILITY_POLICY_PAGE),
        active=False)
    def test_disabled_accessibility_page(self):
        """
        Test that accessibility page returns 404 when waffle switch is disabled
        """
        resp = self.client.get_html('/accessibility')
        self.assertEqual(resp.status_code, 404)

    def test_delete_course(self):
        """
        This test creates a course, makes a draft item, and deletes the course.
        This will also assert that the draft content is also deleted
        """
        # add an asset
        asset_key = self.course.id.make_asset_key('asset', 'sample_static.html')
        content = StaticContent(
            asset_key, "Fake asset", "application/text", "test",
        )
        contentstore().save(content)
        assets, count = contentstore().get_all_content_for_course(self.course.id)
        self.assertGreater(len(assets), 0)
        self.assertGreater(count, 0)

        self.store.convert_to_draft(self.vert_loc, self.user.id)

        # delete the course
        self.store.delete_course(self.course.id, self.user.id)

        # assert that there's absolutely no non-draft modules in the course
        # this should also include all draft items
        items = self.store.get_items(self.course.id)
        self.assertEqual(len(items), 0)

        # assert that all content in the asset library is also deleted
        assets, count = contentstore().get_all_content_for_course(self.course.id)
        self.assertEqual(len(assets), 0)
        self.assertEqual(count, 0)

    def test_course_handouts_rewrites(self):
        """
        Test that the xblock_handler rewrites static handout links
        """
        handouts = self.store.create_item(
            self.user.id, self.course.id, 'course_info', 'handouts',
            fields={
                "data": "<a href='/static/handouts/sample_handout.txt'>Sample</a>",
            }
        )

        # get module info (json)
        resp = self.client.get(get_url('xblock_handler', handouts.location))

        # make sure we got a successful response
        self.assertEqual(resp.status_code, 200)
        # check that /static/ has been converted to the full path
        # note, we know the link it should be because that's what in the 'toy' course in the test data
        asset_key = self.course.id.make_asset_key('asset', 'handouts_sample_handout.txt')
        self.assertContains(resp, text_type(asset_key))

    def test_prefetch_children(self):
        """Verify get_course(depth=2) prefetches to sequential depth with a bounded query count."""
        # make sure we haven't done too many round trips to DB:
        # 1) the course,
        # 2 & 3) for the chapters and sequentials
        # Because we're querying from the top of the tree, we cache information needed for inheritance,
        # so we don't need to make an extra query to compute it.
# set the branch to 'publish' in order to prevent extra lookups of draft versions with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, self.course.id): with check_mongo_calls(3): course = self.store.get_course(self.course.id, depth=2) # make sure we pre-fetched a known sequential which should be at depth=2 self.assertIn(self.seq_loc, course.system.module_data) # make sure we don't have a specific vertical which should be at depth=3 self.assertNotIn(self.vert_loc, course.system.module_data) # Now, test with the branch set to draft. No extra round trips b/c it doesn't go deep enough to get # beyond direct only categories with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.course.id): with check_mongo_calls(3): self.store.get_course(self.course.id, depth=2) def _check_verticals(self, locations): """ Test getting the editing HTML for each vertical. """ # Assert is here to make sure that the course being tested actually has verticals (units) to check. self.assertGreater(len(locations), 0) for loc in locations: resp = self.client.get_html(get_url('container_handler', loc)) self.assertEqual(resp.status_code, 200) @ddt.ddt class ContentStoreTest(ContentStoreTestCase): """ Tests for the CMS ContentStore application. """ duplicate_course_error = ("There is already a course defined with the same organization and course number. " "Please change either organization or course number to be unique.") def setUp(self): super(ContentStoreTest, self).setUp() self.course_data = { 'org': 'MITx', 'number': '111', 'display_name': 'Robot Super Course', 'run': '2013_Spring' } def assert_created_course(self, number_suffix=None): """ Checks that the course was created properly. 
""" test_course_data = {} test_course_data.update(self.course_data) if number_suffix: test_course_data['number'] = '{0}_{1}'.format(test_course_data['number'], number_suffix) course_key = _get_course_id(self.store, test_course_data) _create_course(self, course_key, test_course_data) # Verify that the creator is now registered in the course. self.assertTrue(CourseEnrollment.is_enrolled(self.user, course_key)) return test_course_data def assert_create_course_failed(self, error_message): """ Checks that the course not created. """ resp = self.client.ajax_post('/course/', self.course_data) self.assertEqual(resp.status_code, 400) data = parse_json(resp) self.assertEqual(data['error'], error_message) def test_create_course(self): """Test new course creation - happy path""" self.assert_created_course() @override_settings(DEFAULT_COURSE_LANGUAGE='hr') def test_create_course_default_language(self): """Test new course creation and verify default language""" test_course_data = self.assert_created_course() course_id = _get_course_id(self.store, test_course_data) course_module = self.store.get_course(course_id) self.assertEquals(course_module.language, 'hr') def test_create_course_with_dots(self): """Test new course creation with dots in the name""" self.course_data['org'] = 'org.foo.bar' self.course_data['number'] = 'course.number' self.course_data['run'] = 'run.name' self.assert_created_course() @ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo) def test_course_with_different_cases(self, default_store): """ Tests that course can not be created with different case using an AJAX request to course handler. """ course_number = '99x' with self.store.default_store(default_store): # Verify create a course passes with lower case. self.course_data['number'] = course_number.lower() self.assert_created_course() # Verify create a course fail when same course number is provided with different case. 
            self.course_data['number'] = course_number.upper()
            self.assert_course_creation_failed(self.duplicate_course_error)

    def test_create_course_check_forum_seeding(self):
        """Test new course creation and verify forum seeding """
        test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
        self.assertTrue(are_permissions_roles_seeded(_get_course_id(self.store, test_course_data)))

    def test_forum_unseeding_on_delete(self):
        """Test new course creation and verify forum unseeding """
        test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
        course_id = _get_course_id(self.store, test_course_data)
        self.assertTrue(are_permissions_roles_seeded(course_id))
        delete_course(course_id, self.user.id)
        # should raise an exception for checking permissions on deleted course
        with self.assertRaises(ItemNotFoundError):
            are_permissions_roles_seeded(course_id)

    def test_forum_unseeding_with_multiple_courses(self):
        """Test new course creation and verify forum unseeding when there are multiple courses"""
        test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
        second_course_data = self.assert_created_course(number_suffix=uuid4().hex)

        # unseed the forums for the first course
        course_id = _get_course_id(self.store, test_course_data)
        delete_course(course_id, self.user.id)
        # should raise an exception for checking permissions on deleted course
        with self.assertRaises(ItemNotFoundError):
            are_permissions_roles_seeded(course_id)

        second_course_id = _get_course_id(self.store, second_course_data)
        # permissions should still be there for the other course
        self.assertTrue(are_permissions_roles_seeded(second_course_id))

    def test_course_enrollments_and_roles_on_delete(self):
        """
        Test that course deletion doesn't remove course enrollments or user's roles
        """
        test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
        course_id = _get_course_id(self.store, test_course_data)

        # test that a user gets his enrollment and its 'student' role as default on creating a course
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, course_id))
        self.assertTrue(self.user.roles.filter(name="Student", course_id=course_id))

        delete_course(course_id, self.user.id)

        # check that user's enrollment for this course is not deleted
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, course_id))
        # check that user has form role "Student" for this course even after deleting it
        self.assertTrue(self.user.roles.filter(name="Student", course_id=course_id))

    def test_course_access_groups_on_delete(self):
        """
        Test that course deletion removes users from 'instructor' and 'staff' groups of this course
        of all format e.g, 'instructor_edX/Course/Run', 'instructor_edX.Course.Run', 'instructor_Course'
        """
        test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
        course_id = _get_course_id(self.store, test_course_data)

        # Add user in possible groups and check that user in instructor groups of this course
        instructor_role = CourseInstructorRole(course_id)

        auth.add_users(self.user, instructor_role, self.user)

        self.assertGreater(len(instructor_role.users_with_role()), 0)

        # Now delete course and check that user not in instructor groups of this course
        delete_course(course_id, self.user.id)

        # Update our cached user since its roles have changed
        self.user = User.objects.get_by_natural_key(self.user.natural_key()[0])

        self.assertFalse(instructor_role.has_user(self.user))
        self.assertEqual(len(instructor_role.users_with_role()), 0)

    def test_delete_course_with_keep_instructors(self):
        """
        Tests that when you delete a course with 'keep_instructors',
        it does not remove any permissions of users/groups from the course
        """
        test_course_data = self.assert_created_course(number_suffix=uuid4().hex)
        course_id = _get_course_id(self.store, test_course_data)

        # Add and verify instructor role for the course
        instructor_role = CourseInstructorRole(course_id)
        instructor_role.add_users(self.user)
        self.assertTrue(instructor_role.has_user(self.user))

        delete_course(course_id, self.user.id, keep_instructors=True)

        # Update our cached user so if any change in roles can be captured
        self.user = User.objects.get_by_natural_key(self.user.natural_key()[0])

        self.assertTrue(instructor_role.has_user(self.user))

    def test_create_course_after_delete(self):
        """
        Test that course creation works after deleting a course with the same URL
        """
        test_course_data = self.assert_created_course()
        course_id = _get_course_id(self.store, test_course_data)

        delete_course(course_id, self.user.id)

        self.assert_created_course()

    def test_create_course_duplicate_course(self):
        """Test new course creation - error path"""
        self.client.ajax_post('/course/', self.course_data)
        self.assert_course_creation_failed(self.duplicate_course_error)

    def assert_course_creation_failed(self, error_message):
        """
        Checks that the course did not get created
        """
        test_enrollment = False
        try:
            course_id = _get_course_id(self.store, self.course_data)
            initially_enrolled = CourseEnrollment.is_enrolled(self.user, course_id)
            test_enrollment = True
        except InvalidKeyError:
            # b/c the intent of the test with bad chars isn't to test auth but to test the handler, ignore
            pass

        resp = self.client.ajax_post('/course/', self.course_data)
        self.assertEqual(resp.status_code, 200)
        data = parse_json(resp)
        self.assertRegexpMatches(data['ErrMsg'], error_message)
        if test_enrollment:
            # One test case involves trying to create the same course twice. Hence for that course,
            # the user will be enrolled. In the other cases, initially_enrolled will be False.
self.assertEqual(initially_enrolled, CourseEnrollment.is_enrolled(self.user, course_id)) def test_create_course_duplicate_number(self): """Test new course creation - error path""" self.client.ajax_post('/course/', self.course_data) self.course_data['display_name'] = 'Robot Super Course Two' self.course_data['run'] = '2013_Summer' self.assert_course_creation_failed(self.duplicate_course_error) @ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo) def test_create_course_case_change(self, default_store): """Test new course creation - error path due to case insensitive name equality""" self.course_data['number'] = '99x' with self.store.default_store(default_store): # Verify that the course was created properly. self.assert_created_course() # Keep the copy of original org cache_current = self.course_data['org'] # Change `org` to lower case and verify that course did not get created self.course_data['org'] = self.course_data['org'].lower() self.assert_course_creation_failed(self.duplicate_course_error) # Replace the org with its actual value, and keep the copy of course number. self.course_data['org'] = cache_current cache_current = self.course_data['number'] self.course_data['number'] = self.course_data['number'].upper() self.assert_course_creation_failed(self.duplicate_course_error) # Replace the org with its actual value, and keep the copy of course number. 
self.course_data['number'] = cache_current __ = self.course_data['run'] self.course_data['run'] = self.course_data['run'].upper() self.assert_course_creation_failed(self.duplicate_course_error) def test_course_substring(self): """ Test that a new course can be created whose name is a substring of an existing course """ self.client.ajax_post('/course/', self.course_data) cache_current = self.course_data['number'] self.course_data['number'] = '{}a'.format(self.course_data['number']) resp = self.client.ajax_post('/course/', self.course_data) self.assertEqual(resp.status_code, 200) self.course_data['number'] = cache_current self.course_data['org'] = 'a{}'.format(self.course_data['org']) resp = self.client.ajax_post('/course/', self.course_data) self.assertEqual(resp.status_code, 200) def test_create_course_with_bad_organization(self): """Test new course creation - error path for bad organization name""" self.course_data['org'] = 'University of California, Berkeley' self.assert_course_creation_failed(r"(?s)Unable to create course 'Robot Super Course'.*") def test_create_course_with_course_creation_disabled_staff(self): """Test new course creation -- course creation disabled, but staff access.""" with mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_COURSE_CREATION': True}): self.assert_created_course() def test_create_course_with_course_creation_disabled_not_staff(self): """Test new course creation -- error path for course creation disabled, not staff access.""" with mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_COURSE_CREATION': True}): self.user.is_staff = False self.user.save() self.assert_course_permission_denied() def test_create_course_no_course_creators_staff(self): """Test new course creation -- course creation group enabled, staff, group is empty.""" with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}): self.assert_created_course() def test_create_course_no_course_creators_not_staff(self): """Test new 
course creation -- error path for course creator group enabled, not staff, group is empty.""" with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}): self.user.is_staff = False self.user.save() self.assert_course_permission_denied() def test_create_course_with_course_creator(self): """Test new course creation -- use course creator group""" with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}): auth.add_users(self.user, CourseCreatorRole(), self.user) self.assert_created_course() def test_create_course_with_unicode_in_id_disabled(self): """ Test new course creation with feature setting: ALLOW_UNICODE_COURSE_ID disabled. """ with mock.patch.dict('django.conf.settings.FEATURES', {'ALLOW_UNICODE_COURSE_ID': False}): error_message = "Special characters not allowed in organization, course number, and course run." self.course_data['org'] = u'��������������' self.assert_create_course_failed(error_message) self.course_data['number'] = u'��chantillon' self.assert_create_course_failed(error_message) self.course_data['run'] = u'����������' self.assert_create_course_failed(error_message) def assert_course_permission_denied(self): """ Checks that the course did not get created due to a PermissionError. 
""" resp = self.client.ajax_post('/course/', self.course_data) self.assertEqual(resp.status_code, 403) def test_course_index_view_with_no_courses(self): """Test viewing the index page with no courses""" resp = self.client.get_html('/home/') self.assertContains( resp, '<h1 class="page-header">Studio Home</h1>', status_code=200, html=True ) def test_course_factory(self): """Test that the course factory works correctly.""" course = CourseFactory.create() self.assertIsInstance(course, CourseDescriptor) def test_item_factory(self): """Test that the item factory works correctly.""" course = CourseFactory.create() item = ItemFactory.create(parent_location=course.location) self.assertIsInstance(item, SequenceDescriptor) def test_course_overview_view_with_course(self): """Test viewing the course overview page with an existing course""" course = CourseFactory.create() resp = self._show_course_overview(course.id) self.assertContains( resp, '<article class="outline outline-complex outline-course" data-locator="{locator}" data-course-key="{course_key}">'.format( locator=text_type(course.location), course_key=text_type(course.id), ), status_code=200, html=True ) def test_create_item(self): """Test creating a new xblock instance.""" course = CourseFactory.create() section_data = { 'parent_locator': text_type(course.location), 'category': 'chapter', 'display_name': 'Section One', } resp = self.client.ajax_post(reverse_url('xblock_handler'), section_data) self.assertEqual(resp.status_code, 200) data = parse_json(resp) retarget = text_type(course.id.make_usage_key('chapter', 'REPLACE')).replace('REPLACE', r'([0-9]|[a-f]){3,}') self.assertRegexpMatches(data['locator'], retarget) def test_capa_module(self): """Test that a problem treats markdown specially.""" course = CourseFactory.create() problem_data = { 'parent_locator': text_type(course.location), 'category': 'problem' } resp = self.client.ajax_post(reverse_url('xblock_handler'), problem_data) self.assertEqual(resp.status_code, 
200) payload = parse_json(resp) problem_loc = UsageKey.from_string(payload['locator']) problem = self.store.get_item(problem_loc) # should be a CapaDescriptor self.assertIsInstance(problem, CapaDescriptor, "New problem is not a CapaDescriptor") context = problem.get_context() self.assertIn('markdown', context, "markdown is missing from context") self.assertNotIn('markdown', problem.editable_metadata_fields, "Markdown slipped into the editable metadata fields") def test_cms_imported_course_walkthrough(self): """ Import and walk through some common URL endpoints. This just verifies non-500 and no other correct behavior, so it is not a deep test """ def test_get_html(handler): # Helper function for getting HTML for a page in Studio and # checking that it does not error. resp = self.client.get_html( get_url(handler, course_key, 'course_key_string') ) self.assertEqual(resp.status_code, 200) course_items = import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['simple'], create_if_not_present=True ) course_key = course_items[0].id resp = self._show_course_overview(course_key) self.assertEqual(resp.status_code, 200) self.assertContains(resp, 'Chapter 2') # go to various pages test_get_html('import_handler') test_get_html('export_handler') test_get_html('course_team_handler') test_get_html('course_info_handler') test_get_html('assets_handler') test_get_html('tabs_handler') test_get_html('settings_handler') test_get_html('grading_handler') test_get_html('advanced_settings_handler') test_get_html('textbooks_list_handler') # go look at the Edit page unit_key = course_key.make_usage_key('vertical', 'test_vertical') resp = self.client.get_html(get_url('container_handler', unit_key)) self.assertEqual(resp.status_code, 200) def delete_item(category, name): """ Helper method for testing the deletion of an xblock item. 
""" item_key = course_key.make_usage_key(category, name) resp = self.client.delete(get_url('xblock_handler', item_key)) self.assertEqual(resp.status_code, 204) # delete a component delete_item(category='html', name='test_html') # delete a unit delete_item(category='vertical', name='test_vertical') # delete a unit delete_item(category='sequential', name='test_sequence') # delete a chapter delete_item(category='chapter', name='chapter_2') def test_import_into_new_course_id(self): target_id = _get_course_id(self.store, self.course_data) _create_course(self, target_id, self.course_data) import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], target_id=target_id) modules = self.store.get_items(target_id) # we should have a number of modules in there # we can't specify an exact number since it'll always be changing self.assertGreater(len(modules), 10) # # test various re-namespacing elements # # first check PDF textbooks, to make sure the url paths got updated course_module = self.store.get_course(target_id) self.assertEqual(len(course_module.pdf_textbooks), 1) self.assertEqual(len(course_module.pdf_textbooks[0]["chapters"]), 2) self.assertEqual(course_module.pdf_textbooks[0]["chapters"][0]["url"], '/static/Chapter1.pdf') self.assertEqual(course_module.pdf_textbooks[0]["chapters"][1]["url"], '/static/Chapter2.pdf') def test_import_into_new_course_id_wiki_slug_renamespacing(self): # If reimporting into the same course do not change the wiki_slug. 
target_id = self.store.make_course_key('edX', 'toy', '2012_Fall') course_data = { 'org': target_id.org, 'number': target_id.course, 'display_name': 'Robot Super Course', 'run': target_id.run } _create_course(self, target_id, course_data) course_module = self.store.get_course(target_id) course_module.wiki_slug = 'toy' course_module.save() # Import a course with wiki_slug == location.course import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], target_id=target_id) course_module = self.store.get_course(target_id) self.assertEquals(course_module.wiki_slug, 'toy') # But change the wiki_slug if it is a different course. target_id = self.store.make_course_key('MITx', '111', '2013_Spring') course_data = { 'org': target_id.org, 'number': target_id.course, 'display_name': 'Robot Super Course', 'run': target_id.run } _create_course(self, target_id, course_data) # Import a course with wiki_slug == location.course import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], target_id=target_id) course_module = self.store.get_course(target_id) self.assertEquals(course_module.wiki_slug, 'MITx.111.2013_Spring') # Now try importing a course with wiki_slug == '{0}.{1}.{2}'.format(location.org, location.course, location.run) import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['two_toys'], target_id=target_id) course_module = self.store.get_course(target_id) self.assertEquals(course_module.wiki_slug, 'MITx.111.2013_Spring') def test_import_metadata_with_attempts_empty_string(self): import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['simple'], create_if_not_present=True) did_load_item = False try: course_key = self.store.make_course_key('edX', 'simple', 'problem') usage_key = course_key.make_usage_key('problem', 'ps01-simple') self.store.get_item(usage_key) did_load_item = True except ItemNotFoundError: pass # make sure we found the item (e.g. 
it didn't error while loading) self.assertTrue(did_load_item) @ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo) def test_forum_id_generation(self, default_store): """ Test that a discussion item, even if it doesn't set its discussion_id, consistently generates the same one """ course = CourseFactory.create(default_store=default_store) # create a discussion item discussion_item = self.store.create_item(self.user.id, course.id, 'discussion', 'new_component') # now fetch it from the modulestore to instantiate its descriptor fetched = self.store.get_item(discussion_item.location) # refetch it to be safe refetched = self.store.get_item(discussion_item.location) # and make sure the same discussion items have the same discussion ids self.assertEqual(fetched.discussion_id, discussion_item.discussion_id) self.assertEqual(fetched.discussion_id, refetched.discussion_id) # and make sure that the id isn't the old "$$GUID$$" self.assertNotEqual(discussion_item.discussion_id, '$$GUID$$') def test_metadata_inheritance(self): course_items = import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True ) course = course_items[0] verticals = self.store.get_items(course.id, qualifiers={'category': 'vertical'}) # let's assert on the metadata_inheritance on an existing vertical for vertical in verticals: self.assertEqual(course.xqa_key, vertical.xqa_key) self.assertEqual(course.start, vertical.start) self.assertGreater(len(verticals), 0) # crate a new module and add it as a child to a vertical parent = verticals[0] new_block = self.store.create_child( self.user.id, parent.location, 'html', 'new_component' ) # flush the cache new_block = self.store.get_item(new_block.location) # check for grace period definition which should be defined at the course level self.assertEqual(parent.graceperiod, new_block.graceperiod) self.assertEqual(parent.start, new_block.start) self.assertEqual(course.start, new_block.start) 
self.assertEqual(course.xqa_key, new_block.xqa_key) # # now let's define an override at the leaf node level # new_block.graceperiod = timedelta(1) self.store.update_item(new_block, self.user.id) # flush the cache and refetch new_block = self.store.get_item(new_block.location) self.assertEqual(timedelta(1), new_block.graceperiod) def test_default_metadata_inheritance(self): course = CourseFactory.create() vertical = ItemFactory.create(parent_location=course.location) course.children.append(vertical) # in memory self.assertIsNotNone(course.start) self.assertEqual(course.start, vertical.start) self.assertEqual(course.textbooks, []) self.assertIn('GRADER', course.grading_policy) self.assertIn('GRADE_CUTOFFS', course.grading_policy) # by fetching fetched_course = self.store.get_item(course.location) fetched_item = self.store.get_item(vertical.location) self.assertIsNotNone(fetched_course.start) self.assertEqual(course.start, fetched_course.start) self.assertEqual(fetched_course.start, fetched_item.start) self.assertEqual(course.textbooks, fetched_course.textbooks) def test_image_import(self): """Test backwards compatibilty of course image.""" content_store = contentstore() # Use conditional_and_poll, as it's got an image already courses = import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['conditional_and_poll'], static_content_store=content_store, create_if_not_present=True ) course = courses[0] # Make sure the course image is set to the right place self.assertEqual(course.course_image, 'images_course_image.jpg') # Ensure that the imported course image is present -- this shouldn't raise an exception asset_key = course.id.make_asset_key('asset', course.course_image) content_store.find(asset_key) def _show_course_overview(self, course_key): """ Show the course overview page. 
""" resp = self.client.get_html(get_url('course_handler', course_key, 'course_key_string')) return resp def test_wiki_slug(self): """When creating a course a unique wiki_slug should be set.""" course_key = _get_course_id(self.store, self.course_data) _create_course(self, course_key, self.course_data) course_module = self.store.get_course(course_key) self.assertEquals(course_module.wiki_slug, 'MITx.111.2013_Spring') def test_course_handler_with_invalid_course_key_string(self): """Test viewing the course overview page with invalid course id""" response = self.client.get_html('/course/edX/test') self.assertEquals(response.status_code, 404) class MetadataSaveTestCase(ContentStoreTestCase): """Test that metadata is correctly cached and decached.""" def setUp(self): super(MetadataSaveTestCase, self).setUp() course = CourseFactory.create() video_sample_xml = """ <video display_name="Test Video" youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8" show_captions="false" from="00:00:01" to="00:01:00"> <source src="http://www.example.com/file.mp4"/> <track src="http://www.example.com/track"/> </video> """ self.video_descriptor = ItemFactory.create( parent_location=course.location, category='video', data={'data': video_sample_xml} ) def test_metadata_not_persistence(self): """ Test that descriptors which set metadata fields in their constructor are correctly deleted. 
""" self.assertIn('html5_sources', own_metadata(self.video_descriptor)) attrs_to_strip = { 'show_captions', 'youtube_id_1_0', 'youtube_id_0_75', 'youtube_id_1_25', 'youtube_id_1_5', 'start_time', 'end_time', 'source', 'html5_sources', 'track' } location = self.video_descriptor.location for field_name in attrs_to_strip: delattr(self.video_descriptor, field_name) self.assertNotIn('html5_sources', own_metadata(self.video_descriptor)) self.store.update_item(self.video_descriptor, self.user.id) module = self.store.get_item(location) self.assertNotIn('html5_sources', own_metadata(module)) def test_metadata_persistence(self): # TODO: create the same test as `test_metadata_not_persistence`, # but check persistence for some other module. pass class RerunCourseTest(ContentStoreTestCase): """ Tests for Rerunning a course via the view handler """ def setUp(self): super(RerunCourseTest, self).setUp() self.destination_course_data = { 'org': 'MITx', 'number': '111', 'display_name': 'Robot Super Course', 'run': '2013_Spring' } def post_rerun_request( self, source_course_key, destination_course_data=None, response_code=200, expect_error=False ): """Create and send an ajax post for the rerun request""" # create data to post rerun_course_data = {'source_course_key': text_type(source_course_key)} if not destination_course_data: destination_course_data = self.destination_course_data rerun_course_data.update(destination_course_data) destination_course_key = _get_course_id(self.store, destination_course_data) # post the request course_url = get_url('course_handler', destination_course_key, 'course_key_string') response = self.client.ajax_post(course_url, rerun_course_data) # verify response self.assertEqual(response.status_code, response_code) if not expect_error: json_resp = parse_json(response) self.assertNotIn('ErrMsg', json_resp) destination_course_key = CourseKey.from_string(json_resp['destination_course_key']) return destination_course_key def 
get_unsucceeded_course_action_elements(self, html, course_key): """Returns the elements in the unsucceeded course action section that have the given course_key""" return html.cssselect('.courses-processing li[data-course-key="{}"]'.format(text_type(course_key))) def assertInCourseListing(self, course_key): """ Asserts that the given course key is NOT in the unsucceeded course action section of the html. """ course_listing = lxml.html.fromstring(self.client.get_html('/home/').content) self.assertEqual(len(self.get_unsucceeded_course_action_elements(course_listing, course_key)), 0) def assertInUnsucceededCourseActions(self, course_key): """ Asserts that the given course key is in the unsucceeded course action section of the html. """ course_listing = lxml.html.fromstring(self.client.get_html('/home/').content) self.assertEqual(len(self.get_unsucceeded_course_action_elements(course_listing, course_key)), 1) def verify_rerun_course(self, source_course_key, destination_course_key, destination_display_name): """ Verify the contents of the course rerun action """ rerun_state = CourseRerunState.objects.find_first(course_key=destination_course_key) expected_states = { 'state': CourseRerunUIStateManager.State.SUCCEEDED, 'display_name': destination_display_name, 'source_course_key': source_course_key, 'course_key': destination_course_key, 'should_display': True, } for field_name, expected_value in expected_states.iteritems(): self.assertEquals(getattr(rerun_state, field_name), expected_value) # Verify that the creator is now enrolled in the course. 
self.assertTrue(CourseEnrollment.is_enrolled(self.user, destination_course_key)) # Verify both courses are in the course listing section self.assertInCourseListing(source_course_key) self.assertInCourseListing(destination_course_key) def test_rerun_course_no_videos_in_val(self): """ Test when rerunning a course with no videos, VAL copies nothing """ source_course = CourseFactory.create() destination_course_key = self.post_rerun_request(source_course.id) self.verify_rerun_course(source_course.id, destination_course_key, self.destination_course_data['display_name']) videos = list(get_videos_for_course(text_type(destination_course_key))) self.assertEqual(0, len(videos)) self.assertInCourseListing(destination_course_key) def test_rerun_course_success(self): source_course = CourseFactory.create() create_video( dict( edx_video_id="tree-hugger", courses=[text_type(source_course.id)], status='test', duration=2, encoded_videos=[] ) ) destination_course_key = self.post_rerun_request(source_course.id) self.verify_rerun_course(source_course.id, destination_course_key, self.destination_course_data['display_name']) # Verify that the VAL copies videos to the rerun source_videos = list(get_videos_for_course(text_type(source_course.id))) target_videos = list(get_videos_for_course(text_type(destination_course_key))) self.assertEqual(1, len(source_videos)) self.assertEqual(source_videos, target_videos) def test_rerun_course_resets_advertised_date(self): source_course = CourseFactory.create(advertised_start="01-12-2015") destination_course_key = self.post_rerun_request(source_course.id) destination_course = self.store.get_course(destination_course_key) self.assertEqual(None, destination_course.advertised_start) def test_rerun_of_rerun(self): source_course = CourseFactory.create() rerun_course_key = self.post_rerun_request(source_course.id) rerun_of_rerun_data = { 'org': rerun_course_key.org, 'number': rerun_course_key.course, 'display_name': 'rerun of rerun', 'run': 'rerun2' } 
rerun_of_rerun_course_key = self.post_rerun_request(rerun_course_key, rerun_of_rerun_data) self.verify_rerun_course(rerun_course_key, rerun_of_rerun_course_key, rerun_of_rerun_data['display_name']) def test_rerun_course_fail_no_source_course(self): existent_course_key = CourseFactory.create().id non_existent_course_key = CourseLocator("org", "non_existent_course", "non_existent_run") destination_course_key = self.post_rerun_request(non_existent_course_key) # Verify that the course rerun action is marked failed rerun_state = CourseRerunState.objects.find_first(course_key=destination_course_key) self.assertEquals(rerun_state.state, CourseRerunUIStateManager.State.FAILED) self.assertIn("Cannot find a course at", rerun_state.message) # Verify that the creator is not enrolled in the course. self.assertFalse(CourseEnrollment.is_enrolled(self.user, non_existent_course_key)) # Verify that the existing course continues to be in the course listings self.assertInCourseListing(existent_course_key) # Verify that the failed course is NOT in the course listings self.assertInUnsucceededCourseActions(destination_course_key) def test_rerun_course_fail_duplicate_course(self): existent_course_key = CourseFactory.create().id destination_course_data = { 'org': existent_course_key.org, 'number': existent_course_key.course, 'display_name': 'existing course', 'run': existent_course_key.run } destination_course_key = self.post_rerun_request( existent_course_key, destination_course_data, expect_error=True ) # Verify that the course rerun action doesn't exist with self.assertRaises(CourseActionStateItemNotFoundError): CourseRerunState.objects.find_first(course_key=destination_course_key) # Verify that the existing course continues to be in the course listing self.assertInCourseListing(existent_course_key) def test_rerun_with_permission_denied(self): with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}): source_course = CourseFactory.create() 
auth.add_users(self.user, CourseCreatorRole(), self.user) self.user.is_staff = False self.user.save() self.post_rerun_request(source_course.id, response_code=403, expect_error=True) def test_rerun_error(self): error_message = "Mock Error Message" with mock.patch( 'xmodule.modulestore.mixed.MixedModuleStore.clone_course', mock.Mock(side_effect=Exception(error_message)) ): source_course = CourseFactory.create() destination_course_key = self.post_rerun_request(source_course.id) rerun_state = CourseRerunState.objects.find_first(course_key=destination_course_key) self.assertEquals(rerun_state.state, CourseRerunUIStateManager.State.FAILED) self.assertIn(error_message, rerun_state.message) def test_rerun_error_trunc_message(self): """ CourseActionUIState.message is sometimes populated with the contents of Python tracebacks. This test ensures we don't crash when attempting to insert a value exceeding its max_length (note that sqlite does not complain if this happens, but MySQL throws an error). """ with mock.patch( 'xmodule.modulestore.mixed.MixedModuleStore.clone_course', mock.Mock(side_effect=Exception()), ): source_course = CourseFactory.create() message_too_long = "traceback".rjust(CourseRerunState.MAX_MESSAGE_LENGTH * 2, '-') with mock.patch('traceback.format_exc', return_value=message_too_long): destination_course_key = self.post_rerun_request(source_course.id) rerun_state = CourseRerunState.objects.find_first(course_key=destination_course_key) self.assertEquals(rerun_state.state, CourseRerunUIStateManager.State.FAILED) self.assertTrue(rerun_state.message.endswith("traceback")) self.assertEqual(len(rerun_state.message), CourseRerunState.MAX_MESSAGE_LENGTH) def test_rerun_course_wiki_slug(self): """ Test that unique wiki_slug is assigned to rerun course. 
""" course_data = { 'org': 'edX', 'number': '123', 'display_name': 'Rerun Course', 'run': '2013' } source_wiki_slug = '{0}.{1}.{2}'.format(course_data['org'], course_data['number'], course_data['run']) source_course_key = _get_course_id(self.store, course_data) _create_course(self, source_course_key, course_data) source_course = self.store.get_course(source_course_key) # Verify created course's wiki_slug. self.assertEquals(source_course.wiki_slug, source_wiki_slug) destination_course_data = course_data destination_course_data['run'] = '2013_Rerun' destination_course_key = self.post_rerun_request( source_course.id, destination_course_data=destination_course_data ) self.verify_rerun_course(source_course.id, destination_course_key, destination_course_data['display_name']) destination_course = self.store.get_course(destination_course_key) destination_wiki_slug = '{0}.{1}.{2}'.format( destination_course.id.org, destination_course.id.course, destination_course.id.run ) # Verify rerun course's wiki_slug. 
self.assertEquals(destination_course.wiki_slug, destination_wiki_slug) class ContentLicenseTest(ContentStoreTestCase): """ Tests around content licenses """ def test_course_license_export(self): content_store = contentstore() root_dir = path(mkdtemp_clean()) self.course.license = "creative-commons: BY SA" self.store.update_item(self.course, None) export_course_to_xml(self.store, content_store, self.course.id, root_dir, u'test_license') fname = "{block}.xml".format(block=self.course.scope_ids.usage_id.block_id) run_file_path = root_dir / "test_license" / "course" / fname run_xml = etree.parse(run_file_path.open()) self.assertEqual(run_xml.getroot().get("license"), "creative-commons: BY SA") def test_video_license_export(self): content_store = contentstore() root_dir = path(mkdtemp_clean()) video_descriptor = ItemFactory.create( parent_location=self.course.location, category='video', license="all-rights-reserved" ) export_course_to_xml(self.store, content_store, self.course.id, root_dir, u'test_license') fname = "{block}.xml".format(block=video_descriptor.scope_ids.usage_id.block_id) video_file_path = root_dir / "test_license" / "video" / fname video_xml = etree.parse(video_file_path.open()) self.assertEqual(video_xml.getroot().get("license"), "all-rights-reserved") def test_license_import(self): course_items = import_course_from_xml( self.store, self.user.id, TEST_DATA_DIR, ['toy'], create_if_not_present=True ) course = course_items[0] self.assertEqual(course.license, "creative-commons: BY") videos = self.store.get_items(course.id, qualifiers={'category': 'video'}) self.assertEqual(videos[0].license, "all-rights-reserved") class EntryPageTestCase(TestCase): """ Tests entry pages that aren't specific to a course. 
""" def setUp(self): super(EntryPageTestCase, self).setUp() self.client = AjaxEnabledTestClient() def _test_page(self, page, status_code=200): resp = self.client.get_html(page) self.assertEqual(resp.status_code, status_code) def test_how_it_works(self): self._test_page("/howitworks") def test_signup(self): self._test_page("/signup") def test_login(self): self._test_page("/signin") def test_logout(self): # Logout redirects. self._test_page("/logout", 302) @override_switch( '{}.{}'.format(waffle.WAFFLE_NAMESPACE, waffle.ENABLE_ACCESSIBILITY_POLICY_PAGE), active=True) def test_accessibility(self): self._test_page('/accessibility') class SigninPageTestCase(TestCase): """ Tests that the CSRF token is directly included in the signin form. This is important to make sure that the script is functional independently of any other script. """ def test_csrf_token_is_present_in_form(self): # Expected html: # <form> # ... # <fieldset> # ... # <input name="csrfmiddlewaretoken" value="..."> # ... # </fieldset> # ... # </form> response = self.client.get("/signin") csrf_token = response.cookies.get("csrftoken") form = lxml.html.fromstring(response.content).get_element_by_id("login_form") csrf_input_field = form.find(".//input[@name='csrfmiddlewaretoken']") self.assertIsNotNone(csrf_token) self.assertIsNotNone(csrf_token.value) self.assertIsNotNone(csrf_input_field) # TODO: Remove Django 1.11 upgrade shim # SHIM: _compare_salted_tokens was introduced in 1.10. Move the import and use only that branch post-upgrade. if django.VERSION < (1, 10): self.assertEqual(csrf_token.value, csrf_input_field.attrib["value"]) else: from django.middleware.csrf import _compare_salted_tokens self.assertTrue(_compare_salted_tokens(csrf_token.value, csrf_input_field.attrib["value"])) def _create_course(test, course_key, course_data): """ Creates a course via an AJAX request and verifies the URL returned in the response. 
""" course_url = get_url('course_handler', course_key, 'course_key_string') response = test.client.ajax_post(course_url, course_data) test.assertEqual(response.status_code, 200) data = parse_json(response) test.assertNotIn('ErrMsg', data) test.assertEqual(data['url'], course_url) def _get_course_id(store, course_data): """Returns the course ID.""" return store.make_course_key(course_data['org'], course_data['number'], course_data['run'])
unknown
codeparrot/codeparrot-clean
'''
Context
=======

.. versionadded:: 1.8.0

.. warning::

    This is experimental and subject to change as long as this warning
    notice is present.

Kivy has a few "global" instances that are used directly by many pieces
of the framework: `Cache`, `Builder`, `Clock`.

TODO: document this module.
'''

__all__ = ('Context', 'ProxyContext', 'register_context',
           'get_current_context')

# Registry of all registered contexts: name -> {cls, args, kwargs, proxy}.
_contexts = {}
# The fallback Context used when the stack is empty; created at module bottom.
_default_context = None
# Stack of pushed Context instances; the top is the "current" context.
_context_stack = []


class ProxyContext(object):
    '''Transparent proxy that forwards every attribute access to an
    underlying object stored in ``_obj``.

    Swapping ``_obj`` (via ``object.__setattr__``) retargets the proxy
    in place, so module-level singletons handed out by
    :func:`register_context` can be redirected when a context is
    pushed or popped without callers noticing.
    '''

    # Only the wrapped object is stored; no instance __dict__.
    __slots__ = ['_obj']

    def __init__(self, obj):
        object.__init__(self)
        # Bypass our own __setattr__, which would otherwise forward the
        # assignment to the (not yet set) wrapped object.
        object.__setattr__(self, '_obj', obj)

    def __getattribute__(self, name):
        # Forward every lookup to the wrapped object; use
        # object.__getattribute__ to reach _obj without recursing.
        return getattr(object.__getattribute__(self, '_obj'), name)

    def __delattr__(self, name):
        delattr(object.__getattribute__(self, '_obj'), name)

    def __setattr__(self, name, value):
        setattr(object.__getattribute__(self, '_obj'), name, value)

    def __bool__(self):
        return bool(object.__getattribute__(self, '_obj'))

    def __str__(self):
        return str(object.__getattribute__(self, '_obj'))

    def __repr__(self):
        return repr(object.__getattribute__(self, '_obj'))


class Context(dict):
    '''A mapping of registered context names to fresh instances of their
    registered classes.

    With ``init=True`` a new instance of every registered context class
    is created; with ``init=False`` the mapping starts empty.
    '''

    def __init__(self, init=False):
        dict.__init__(self)
        self.sandbox = None
        if not init:
            return
        # Instantiate every registered context class with the args/kwargs
        # captured at registration time.
        for name in _contexts:
            context = _contexts[name]
            instance = context['cls'](*context['args'], **context['kwargs'])
            self[name] = instance

    def push(self):
        # Make this context current and retarget each registered proxy at
        # the instance this context holds.
        _context_stack.append(self)
        for name, instance in self.items():
            object.__setattr__(_contexts[name]['proxy'], '_obj', instance)

    def pop(self):
        # After popping this context from the stack, retarget each proxy's
        # _obj at the instances of whatever context is now current.
        _context_stack.pop(-1)
        for name, instance in get_current_context().items():
            object.__setattr__(_contexts[name]['proxy'], '_obj', instance)


def register_context(name, cls, *args, **kwargs):
    '''Register a new context.

    Creates an initial instance of *cls* (with the given args/kwargs),
    records the registration in the module registry, stores the instance
    in the default context, and returns a :class:`ProxyContext` wrapping
    it. The proxy is the object callers should hold on to.
    '''
    instance = cls(*args, **kwargs)
    proxy = ProxyContext(instance)
    _contexts[name] = {
        'cls': cls,
        'args': args,
        'kwargs': kwargs,
        'proxy': proxy}
    _default_context[name] = instance
    return proxy


def get_current_context():
    '''Return the current context.

    The top of the context stack if any context has been pushed,
    otherwise the module's default context.
    '''
    if not _context_stack:
        return _default_context
    return _context_stack[-1]


# Created empty (init=False): entries are added lazily by register_context.
_default_context = Context(init=False)
unknown
codeparrot/codeparrot-clean
"""Pytest suite for the ``search_journalctl`` module.

Exercises ``get_log_matches`` (regexp matching plus error reporting over
journalctl JSON output) and ``find_matches`` (log-count and timestamp
cut-off behavior) using canned, in-memory log lines.
"""
import pytest

import search_journalctl


def canned_search_journalctl(get_log_output=None):
    """Create a search_journalctl object with canned get_log_output method"""
    # NOTE(review): this returns the imported module itself and, when a
    # replacement is given, monkeypatches module-level get_log_output
    # globally without restoring it afterwards — the stub leaks into any
    # later test in the same session. Consider pytest's monkeypatch
    # fixture instead; confirm no other test depends on the real function.
    module = search_journalctl
    if get_log_output:
        module.get_log_output = get_log_output
    return module


# Fixed base timestamp (seconds) so test data is deterministic.
DEFAULT_TIMESTAMP = 1496341364


def get_timestamp(modifier=0):
    # Base timestamp in seconds, shifted by `modifier` seconds.
    return DEFAULT_TIMESTAMP + modifier


def get_timestamp_microseconds(modifier=0):
    # journalctl's __REALTIME_TIMESTAMP field is in microseconds.
    return get_timestamp(modifier) * 1000000


def create_test_log_object(stamp, msg):
    # One journalctl JSON log line; {{ / }} are literal braces in .format().
    return '{{"__REALTIME_TIMESTAMP": "{}", "MESSAGE": "{}"}}'.format(stamp, msg)


@pytest.mark.parametrize('name,matchers,log_input,expected_matches,expected_errors', [
    (
        # Well-formed logs: the message matching `regexp` is reported.
        'test with valid params',
        [
            {
                "start_regexp": r"Sample Logs Beginning",
                "regexp": r"test log message",
                "unit": "test",
            },
        ],
        [
            create_test_log_object(get_timestamp_microseconds(), "test log message"),
            create_test_log_object(get_timestamp_microseconds(), "Sample Logs Beginning"),
        ],
        ["test log message"],
        [],
    ),
    (
        # Malformed JSON log line: no matches, one "invalid json" error
        # that names the unit and the offending text.
        'test with invalid json in log input',
        [
            {
                "start_regexp": r"Sample Logs Beginning",
                "regexp": r"test log message",
                "unit": "test-unit",
            },
        ],
        [
            '{__REALTIME_TIMESTAMP: ' + str(get_timestamp_microseconds()) + ', "MESSAGE": "test log message"}',
        ],
        [],
        [
            ["invalid json", "test-unit", "test log message"],
        ],
    ),
    (
        # Broken regular expression ("[" unclosed): no matches, one
        # "invalid regular expression" error.
        'test with invalid regexp',
        [
            {
                "start_regexp": r"Sample Logs Beginning",
                "regexp": r"test [ log message",
                "unit": "test",
            },
        ],
        [
            create_test_log_object(get_timestamp_microseconds(), "test log message"),
            create_test_log_object(get_timestamp_microseconds(), "sample log message"),
            create_test_log_object(get_timestamp_microseconds(), "fake log message"),
            create_test_log_object(get_timestamp_microseconds(), "dummy log message"),
            create_test_log_object(get_timestamp_microseconds(), "Sample Logs Beginning"),
        ],
        [],
        [
            ["invalid regular expression"],
        ],
    ),
], ids=lambda argval: argval[0])
def test_get_log_matches(name, matchers, log_input, expected_matches, expected_errors):
    # Canned replacement for get_log_output: ignore the matcher and hand
    # back the parametrized log lines.
    def get_log_output(matcher):
        return log_input

    module = canned_search_journalctl(get_log_output)
    matched_regexp, errors = module.get_log_matches(matchers, 500, 60 * 60)

    assert set(matched_regexp) == set(expected_matches)
    assert len(expected_errors) == len(errors)

    # Each expected error is a set of substrings that must all appear in
    # the corresponding reported error message.
    for idx, partial_err_set in enumerate(expected_errors):
        for partial_err_msg in partial_err_set:
            assert partial_err_msg in errors[idx]


@pytest.mark.parametrize('name,matcher,log_count_lim,stamp_lim_seconds,log_input,expected_match', [
    (
        # The matching line sits beyond the log-count limit, so it is skipped.
        'test with matching log message, but out of bounds of log_count_lim',
        {
            "start_regexp": r"Sample Logs Beginning",
            "regexp": r"dummy log message",
            "unit": "test",
        },
        3,
        get_timestamp(-100 * 60 * 60),
        [
            create_test_log_object(get_timestamp_microseconds(), "test log message"),
            create_test_log_object(get_timestamp_microseconds(), "sample log message"),
            create_test_log_object(get_timestamp_microseconds(), "fake log message"),
            create_test_log_object(get_timestamp_microseconds(), "dummy log message"),
            create_test_log_object(get_timestamp_microseconds(), "Sample Logs Beginning"),
        ],
        None,
    ),
    (
        # The matching line's timestamp is older than the cut-off, so it
        # is skipped.
        'test with matching log message, but with timestamp too old',
        {
            "start_regexp": r"Sample Logs Beginning",
            "regexp": r"dummy log message",
            "unit": "test",
        },
        100,
        get_timestamp(-10),
        [
            create_test_log_object(get_timestamp_microseconds(), "test log message"),
            create_test_log_object(get_timestamp_microseconds(), "sample log message"),
            create_test_log_object(get_timestamp_microseconds(), "fake log message"),
            create_test_log_object(get_timestamp_microseconds(-1000), "dummy log message"),
            create_test_log_object(get_timestamp_microseconds(-1000), "Sample Logs Beginning"),
        ],
        None,
    ),
    (
        # Timestamp within the limit and inside the count window: the
        # matching log object itself is returned.
        'test with matching log message, and timestamp within time limit',
        {
            "start_regexp": r"Sample Logs Beginning",
            "regexp": r"dummy log message",
            "unit": "test",
        },
        100,
        get_timestamp(-1010),
        [
            create_test_log_object(get_timestamp_microseconds(), "test log message"),
            create_test_log_object(get_timestamp_microseconds(), "sample log message"),
            create_test_log_object(get_timestamp_microseconds(), "fake log message"),
            create_test_log_object(get_timestamp_microseconds(-1000), "dummy log message"),
            create_test_log_object(get_timestamp_microseconds(-1000), "Sample Logs Beginning"),
        ],
        create_test_log_object(get_timestamp_microseconds(-1000), "dummy log message"),
    ),
], ids=lambda argval: argval[0])
def test_find_matches_skips_logs(name, matcher, log_count_lim, stamp_lim_seconds, log_input, expected_match):
    match = search_journalctl.find_matches(log_input, matcher, log_count_lim, stamp_lim_seconds)
    assert match == expected_match
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # (c) 2013, Jesse Keating <jesse.keating@rackspace.com, # Paul Durivage <paul.durivage@rackspace.com>, # Matt Martz <matt@sivel.net> # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ Rackspace Cloud Inventory Authors: Jesse Keating <jesse.keating@rackspace.com, Paul Durivage <paul.durivage@rackspace.com>, Matt Martz <matt@sivel.net> Description: Generates inventory that Ansible can understand by making API request to Rackspace Public Cloud API When run against a specific host, this script returns variables similar to: rax_os-ext-sts_task_state rax_addresses rax_links rax_image rax_os-ext-sts_vm_state rax_flavor rax_id rax_rax-bandwidth_bandwidth rax_user_id rax_os-dcf_diskconfig rax_accessipv4 rax_accessipv6 rax_progress rax_os-ext-sts_power_state rax_metadata rax_status rax_updated rax_hostid rax_name rax_created rax_tenant_id rax_loaded Configuration: rax.py can be configured using a rax.ini file or via environment variables. The rax.ini file should live in the same directory along side this script. The section header for configuration values related to this inventory plugin is [rax] [rax] creds_file = ~/.rackspace_cloud_credentials regions = IAD,ORD,DFW env = prod meta_prefix = meta access_network = public access_ip_version = 4 Each of these configurations also has a corresponding environment variable. 
An environment variable will override a configuration file value. creds_file: Environment Variable: RAX_CREDS_FILE An optional configuration that points to a pyrax-compatible credentials file. If not supplied, rax.py will look for a credentials file at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, and therefore requires a file formatted per the SDK's specifications. https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md regions: Environment Variable: RAX_REGION An optional environment variable to narrow inventory search scope. If used, needs a value like ORD, DFW, SYD (a Rackspace datacenter) and optionally accepts a comma-separated list. environment: Environment Variable: RAX_ENV A configuration that will use an environment as configured in ~/.pyrax.cfg, see https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md meta_prefix: Environment Variable: RAX_META_PREFIX Default: meta A configuration that changes the prefix used for meta key/value groups. For compatibility with ec2.py set to "tag" access_network: Environment Variable: RAX_ACCESS_NETWORK Default: public A configuration that will tell the inventory script to use a specific server network to determine the ansible_ssh_host value. If no address is found, ansible_ssh_host will not be set. Accepts a comma-separated list of network names, the first found wins. access_ip_version: Environment Variable: RAX_ACCESS_IP_VERSION Default: 4 A configuration related to "access_network" that will attempt to determine the ansible_ssh_host value for either IPv4 or IPv6. If no address is found, ansible_ssh_host will not be set. Acceptable values are: 4 or 6. Values other than 4 or 6 will be ignored, and 4 will be used. Accepts a comma-separated list, the first found wins. 
Examples: List server instances $ RAX_CREDS_FILE=~/.raxpub rax.py --list List servers in ORD datacenter only $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list List servers in ORD and DFW datacenters $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list Get server details for server named "server.example.com" $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com Use the instance private IP to connect (instead of public IP) $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list """ import os import re import sys import argparse import warnings import collections import ConfigParser from six import iteritems try: import json except ImportError: import simplejson as json try: import pyrax from pyrax.utils import slugify except ImportError: sys.exit('pyrax is required for this module') from time import time from ansible.constants import get_config, mk_boolean NON_CALLABLES = (basestring, bool, dict, int, list, type(None)) def load_config_file(): p = ConfigParser.ConfigParser() config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rax.ini') try: p.read(config_file) except ConfigParser.Error: return None else: return p p = load_config_file() def rax_slugify(value): return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_')) def to_dict(obj): instance = {} for key in dir(obj): value = getattr(obj, key) if isinstance(value, NON_CALLABLES) and not key.startswith('_'): key = rax_slugify(key) instance[key] = value return instance def host(regions, hostname): hostvars = {} for region in regions: # Connect to the region cs = pyrax.connect_to_cloudservers(region=region) for server in cs.servers.list(): if server.name == hostname: for key, value in to_dict(server).items(): hostvars[key] = value # And finally, add an IP address hostvars['ansible_ssh_host'] = server.accessIPv4 print(json.dumps(hostvars, sort_keys=True, indent=4)) def _list_into_cache(regions): groups = collections.defaultdict(list) hostvars = 
collections.defaultdict(dict) images = {} cbs_attachments = collections.defaultdict(dict) prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') try: # Ansible 2.3+ networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', 'public', value_type='list') except TypeError: # Ansible 2.2.x and below networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', 'public', islist=True) try: try: ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', 'RAX_ACCESS_IP_VERSION', 4, value_type='list')) except TypeError: ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', 'RAX_ACCESS_IP_VERSION', 4, islist=True)) except: ip_versions = [4] else: ip_versions = [v for v in ip_versions if v in [4, 6]] if not ip_versions: ip_versions = [4] # Go through all the regions looking for servers for region in regions: # Connect to the region cs = pyrax.connect_to_cloudservers(region=region) if cs is None: warnings.warn( 'Connecting to Rackspace region "%s" has caused Pyrax to ' 'return None. Is this a valid region?' 
% region, RuntimeWarning) continue for server in cs.servers.list(): # Create a group on region groups[region].append(server.name) # Check if group metadata key in servers' metadata group = server.metadata.get('group') if group: groups[group].append(server.name) for extra_group in server.metadata.get('groups', '').split(','): if extra_group: groups[extra_group].append(server.name) # Add host metadata for key, value in to_dict(server).items(): hostvars[server.name][key] = value hostvars[server.name]['rax_region'] = region for key, value in iteritems(server.metadata): groups['%s_%s_%s' % (prefix, key, value)].append(server.name) groups['instance-%s' % server.id].append(server.name) groups['flavor-%s' % server.flavor['id']].append(server.name) # Handle boot from volume if not server.image: if not cbs_attachments[region]: cbs = pyrax.connect_to_cloud_blockstorage(region) for vol in cbs.list(): if mk_boolean(vol.bootable): for attachment in vol.attachments: metadata = vol.volume_image_metadata server_id = attachment['server_id'] cbs_attachments[region][server_id] = { 'id': metadata['image_id'], 'name': slugify(metadata['image_name']) } image = cbs_attachments[region].get(server.id) if image: server.image = {'id': image['id']} hostvars[server.name]['rax_image'] = server.image hostvars[server.name]['rax_boot_source'] = 'volume' images[image['id']] = image['name'] else: hostvars[server.name]['rax_boot_source'] = 'local' try: imagegroup = 'image-%s' % images[server.image['id']] groups[imagegroup].append(server.name) groups['image-%s' % server.image['id']].append(server.name) except KeyError: try: image = cs.images.get(server.image['id']) except cs.exceptions.NotFound: groups['image-%s' % server.image['id']].append(server.name) else: images[image.id] = image.human_id groups['image-%s' % image.human_id].append(server.name) groups['image-%s' % server.image['id']].append(server.name) # And finally, add an IP address ansible_ssh_host = None # use accessIPv[46] instead of looping 
address for 'public' for network_name in networks: if ansible_ssh_host: break if network_name == 'public': for version_name in ip_versions: if ansible_ssh_host: break if version_name == 6 and server.accessIPv6: ansible_ssh_host = server.accessIPv6 elif server.accessIPv4: ansible_ssh_host = server.accessIPv4 if not ansible_ssh_host: addresses = server.addresses.get(network_name, []) for address in addresses: for version_name in ip_versions: if ansible_ssh_host: break if address.get('version') == version_name: ansible_ssh_host = address.get('addr') break if ansible_ssh_host: hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host if hostvars: groups['_meta'] = {'hostvars': hostvars} with open(get_cache_file_path(regions), 'w') as cache_file: json.dump(groups, cache_file) def get_cache_file_path(regions): regions_str = '.'.join([reg.strip().lower() for reg in regions]) ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp') if not os.path.exists(ansible_tmp_path): os.makedirs(ansible_tmp_path) return os.path.join(ansible_tmp_path, 'ansible-rax-%s-%s.cache' % ( pyrax.identity.username, regions_str)) def _list(regions, refresh_cache=True): cache_max_age = int(get_config(p, 'rax', 'cache_max_age', 'RAX_CACHE_MAX_AGE', 600)) if (not os.path.exists(get_cache_file_path(regions)) or refresh_cache or (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age): # Cache file doesn't exist or older than 10m or refresh cache requested _list_into_cache(regions) with open(get_cache_file_path(regions), 'r') as cache_file: groups = json.load(cache_file) print(json.dumps(groups, sort_keys=True, indent=4)) def parse_args(): parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud ' 'inventory module') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', help='List active servers') group.add_argument('--host', help='List details about the specific host') 
parser.add_argument('--refresh-cache', action='store_true', default=False, help=('Force refresh of cache, making API requests to' 'RackSpace (default: False - use cache files)')) return parser.parse_args() def setup(): default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials') env = get_config(p, 'rax', 'environment', 'RAX_ENV', None) if env: pyrax.set_environment(env) keyring_username = pyrax.get_setting('keyring_username') # Attempt to grab credentials from environment first creds_file = get_config(p, 'rax', 'creds_file', 'RAX_CREDS_FILE', None) if creds_file is not None: creds_file = os.path.expanduser(creds_file) else: # But if that fails, use the default location of # ~/.rackspace_cloud_credentials if os.path.isfile(default_creds_file): creds_file = default_creds_file elif not keyring_username: sys.exit('No value in environment variable %s and/or no ' 'credentials file at %s' % ('RAX_CREDS_FILE', default_creds_file)) identity_type = pyrax.get_setting('identity_type') pyrax.set_setting('identity_type', identity_type or 'rackspace') region = pyrax.get_setting('region') try: if keyring_username: pyrax.keyring_auth(keyring_username, region=region) else: pyrax.set_credential_file(creds_file, region=region) except Exception as e: sys.exit("%s: %s" % (e, e.message)) regions = [] if region: regions.append(region) else: try: # Ansible 2.3+ region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', value_type='list') except TypeError: # Ansible 2.2.x and below region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', islist=True) for region in region_list: region = region.strip().upper() if region == 'ALL': regions = pyrax.regions break elif region not in pyrax.regions: sys.exit('Unsupported region %s' % region) elif region not in regions: regions.append(region) return regions def main(): args = parse_args() regions = setup() if args.list: _list(regions, refresh_cache=args.refresh_cache) elif args.host: host(regions, args.host) 
sys.exit(0) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# # Lines of Descendency Report - a plugin for Gramps, the GTK+/GNOME based # genealogy program. # # This program is released under the MIT License. # Cf. http://www.opensource.org/licenses/mit-license.php. # # Copyright (c) 2010, 2012 lcc <lcc.mailaddress@gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# from gramps.gen.const import GRAMPS_LOCALE as glocale try: _trans = glocale.get_addon_translator(__file__) except ValueError: _trans = glocale.translation _ = _trans.gettext from gramps.gen.plug.docgen import FontStyle, ParagraphStyle, FONT_SANS_SERIF, \ PARA_ALIGN_CENTER from gramps.gen.plug.menu import PersonOption from gramps.gen.lib import FamilyRelType from gramps.gen.display.name import displayer as _nd from gramps.gen.plug.report import Report from gramps.gen.plug.report import CATEGORY_TEXT from gramps.gen.plug.report import MenuReportOptions import gramps.gen.plug.report.utils as ReportUtils class LODOptions(MenuReportOptions): def __init__(self, name, dbase): MenuReportOptions.__init__(self, name, dbase) def add_menu_options(self, menu): category = _('People') option = PersonOption(_('Ancestor')) option.set_help(_('The ancestor from which to start the line')) menu.add_option(category, 'ancestor', option) pid = PersonOption(_('Descendent')) pid.set_help(_('The descendent to which to build the line')) menu.add_option(category, 'pid', pid) def make_default_style(self, default_style): font = FontStyle() font.set(face=FONT_SANS_SERIF, size=16, bold=1) para = ParagraphStyle() para.set_font(font) para.set_header_level(1) para.set_top_margin(0.25) para.set_bottom_margin(0.25) para.set_alignment(PARA_ALIGN_CENTER) para.set_description(_('The style used for the title of the page.')) default_style.add_paragraph_style("LOD-Title", para) font = FontStyle() font.set(face=FONT_SANS_SERIF, size=15, bold=1) para = ParagraphStyle() para.set_font(font) para.set(lmargin=1.5) para.set_top_margin(0.25) para.set_bottom_margin(0.25) para.set_description(_('The style used for the title of a line.')) default_style.add_paragraph_style("LOD-Line", para) para = ParagraphStyle() para.set(lmargin=1.5) para.set_top_margin(0.25) para.set_bottom_margin(0.25) para.set_description(_('The basic style used for the text display.')) default_style.add_paragraph_style("LOD-Entry", para) class 
LinesOfDescendency(Report): def __init__(self, database, options, user): Report.__init__(self, database, options, user) menu = options.menu pid = menu.get_option_by_name('pid').get_value() self.descendent = database.get_person_from_gramps_id(pid) self.descendent_handle = self.descendent.get_handle() ancestor = menu.get_option_by_name('ancestor').get_value() self.ancestor = database.get_person_from_gramps_id(ancestor) if (self.descendent == None) : raise ReportError(_("Person %s is not in the Database") % pid ) def write_path(self, path): gen = 1 handle = path[0] next_person = self.database.get_person_from_handle(path[0]) self.doc.start_paragraph('LOD-Line') self.doc.write_text(_('%(line)s. line:') % { 'line': self.line }) self.doc.end_paragraph() self.line +=1 for next_handle in path[1:]: person = next_person next_person = self.database.get_person_from_handle(next_handle) name = _nd.display(person) family_handle = next_person.get_main_parents_family_handle() family = self.database.get_family_from_handle(family_handle) mother = family.get_mother_handle() spouse_handle = \ mother if mother != handle \ else family.get_father_handle() handle = next_handle spouse = self.database.get_person_from_handle(spouse_handle) if spouse: spouse_name = _nd.display(spouse) else: spouse_name = 'N.N.' if family.get_relationship() == FamilyRelType.MARRIED: abbrev = 'm.' else: abbrev = 'rw.' self.doc.start_paragraph("LOD-Entry") self.doc.write_text("%(gen)s. %(person)s %(abbrev)s %(spouse)s" % { 'gen' : gen, 'person' : name, 'abbrev' : abbrev, 'spouse' : spouse_name }) self.doc.end_paragraph() gen += 1 self.doc.start_paragraph("LOD-Entry") self.doc.write_text("%(gen)s. 
%(person)s" % { 'gen' : gen, 'person' : _nd.display(next_person) }) self.doc.end_paragraph() def traverse(self, person_handle, person_path=[], cur_gen=1): if (not person_handle): return next_path = list(person_path) next_path.append(person_handle) if person_handle == self.descendent_handle: self.write_path(next_path) return person = self.database.get_person_from_handle(person_handle) index = 0 for family_handle in person.get_family_handle_list(): family = self.database.get_family_from_handle(family_handle) for child_ref in family.get_child_ref_list(): self.traverse(child_ref.ref, next_path, cur_gen+1) def write_report(self): self.doc.start_paragraph("LOD-Title") self.doc.write_text(_("Lines of Descendency from %(ancestor)s to" " %(descendent)s") % { 'ancestor' : _nd.display(self.ancestor), 'descendent' : _nd.display(self.descendent) }) self.doc.end_paragraph() self.line = 1 self.traverse(self.ancestor.get_handle())
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2008 Princeton University # Copyright (c) 2009 Advanced Micro Devices, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # Authors: Steve Reinhardt # Brad Beckmann from m5.params import * from m5.proxy import * from BasicRouter import BasicRouter class GarnetRouter(BasicRouter): type = 'GarnetRouter' cxx_class = 'Router' cxx_header = "mem/ruby/network/garnet/flexible-pipeline/Router.hh" vcs_per_vnet = Param.Int(Parent.vcs_per_vnet, "virtual channels per virtual network") virt_nets = Param.Int(Parent.number_of_virtual_networks, "number of virtual networks")
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Database\Eloquent; class SoftDeletingScope implements Scope { /** * All of the extensions to be added to the builder. * * @var string[] */ protected $extensions = ['Restore', 'RestoreOrCreate', 'CreateOrRestore', 'WithTrashed', 'WithoutTrashed', 'OnlyTrashed']; /** * Apply the scope to a given Eloquent query builder. * * @template TModel of \Illuminate\Database\Eloquent\Model * * @param \Illuminate\Database\Eloquent\Builder<TModel> $builder * @param TModel $model * @return void */ public function apply(Builder $builder, Model $model) { $builder->whereNull($model->getQualifiedDeletedAtColumn()); } /** * Extend the query builder with the needed functions. * * @param \Illuminate\Database\Eloquent\Builder<*> $builder * @return void */ public function extend(Builder $builder) { foreach ($this->extensions as $extension) { $this->{"add{$extension}"}($builder); } $builder->onDelete(function (Builder $builder) { $column = $this->getDeletedAtColumn($builder); return $builder->update([ $column => $builder->getModel()->freshTimestampString(), ]); }); } /** * Get the "deleted at" column for the builder. * * @param \Illuminate\Database\Eloquent\Builder<*> $builder * @return string */ protected function getDeletedAtColumn(Builder $builder) { if (count((array) $builder->getQuery()->joins) > 0) { return $builder->getModel()->getQualifiedDeletedAtColumn(); } return $builder->getModel()->getDeletedAtColumn(); } /** * Add the restore extension to the builder. * * @param \Illuminate\Database\Eloquent\Builder<*> $builder * @return void */ protected function addRestore(Builder $builder) { $builder->macro('restore', function (Builder $builder) { $builder->withTrashed(); return $builder->update([$builder->getModel()->getDeletedAtColumn() => null]); }); } /** * Add the restore-or-create extension to the builder. 
* * @param \Illuminate\Database\Eloquent\Builder<*> $builder * @return void */ protected function addRestoreOrCreate(Builder $builder) { $builder->macro('restoreOrCreate', function (Builder $builder, array $attributes = [], array $values = []) { $builder->withTrashed(); return tap($builder->firstOrCreate($attributes, $values), function ($instance) { $instance->restore(); }); }); } /** * Add the create-or-restore extension to the builder. * * @param \Illuminate\Database\Eloquent\Builder<*> $builder * @return void */ protected function addCreateOrRestore(Builder $builder) { $builder->macro('createOrRestore', function (Builder $builder, array $attributes = [], array $values = []) { $builder->withTrashed(); return tap($builder->createOrFirst($attributes, $values), function ($instance) { $instance->restore(); }); }); } /** * Add the with-trashed extension to the builder. * * @param \Illuminate\Database\Eloquent\Builder<*> $builder * @return void */ protected function addWithTrashed(Builder $builder) { $builder->macro('withTrashed', function (Builder $builder, $withTrashed = true) { if (! $withTrashed) { return $builder->withoutTrashed(); } return $builder->withoutGlobalScope($this); }); } /** * Add the without-trashed extension to the builder. * * @param \Illuminate\Database\Eloquent\Builder<*> $builder * @return void */ protected function addWithoutTrashed(Builder $builder) { $builder->macro('withoutTrashed', function (Builder $builder) { $model = $builder->getModel(); $builder->withoutGlobalScope($this)->whereNull( $model->getQualifiedDeletedAtColumn() ); return $builder; }); } /** * Add the only-trashed extension to the builder. * * @param \Illuminate\Database\Eloquent\Builder<*> $builder * @return void */ protected function addOnlyTrashed(Builder $builder) { $builder->macro('onlyTrashed', function (Builder $builder) { $model = $builder->getModel(); $builder->withoutGlobalScope($this)->whereNotNull( $model->getQualifiedDeletedAtColumn() ); return $builder; }); } }
php
github
https://github.com/laravel/framework
src/Illuminate/Database/Eloquent/SoftDeletingScope.php
# # The Python Imaging Library. # $Id$ # # EXIF tags # # Copyright (c) 2003 by Secret Labs AB # # See the README file for information on usage and redistribution. # ## # This module provides constants and clear-text names for various # well-known EXIF tags. ## ## # Maps EXIF tags to tag names. TAGS = { # possibly incomplete 0x00fe: "NewSubfileType", 0x00ff: "SubfileType", 0x0100: "ImageWidth", 0x0101: "ImageLength", 0x0102: "BitsPerSample", 0x0103: "Compression", 0x0106: "PhotometricInterpretation", 0x0107: "Threshholding", 0x0108: "CellWidth", 0x0109: "CellLenght", 0x010a: "FillOrder", 0x010d: "DocumentName", 0x011d: "PageName", 0x010e: "ImageDescription", 0x010f: "Make", 0x0110: "Model", 0x0111: "StripOffsets", 0x0112: "Orientation", 0x0115: "SamplesPerPixel", 0x0116: "RowsPerStrip", 0x0117: "StripByteConunts", 0x0118: "MinSampleValue", 0x0119: "MaxSampleValue", 0x011a: "XResolution", 0x011b: "YResolution", 0x011c: "PlanarConfiguration", 0x0120: "FreeOffsets", 0x0121: "FreeByteCounts", 0x0122: "GrayResponseUnit", 0x0123: "GrayResponseCurve", 0x0128: "ResolutionUnit", 0x012d: "TransferFunction", 0x0131: "Software", 0x0132: "DateTime", 0x013b: "Artist", 0x013c: "HostComputer", 0x013e: "WhitePoint", 0x013f: "PrimaryChromaticities", 0x0140: "ColorMap", 0x0152: "ExtraSamples", 0x0201: "JpegIFOffset", 0x0202: "JpegIFByteCount", 0x0211: "YCbCrCoefficients", 0x0212: "YCbCrSubSampling", 0x0213: "YCbCrPositioning", 0x0214: "ReferenceBlackWhite", 0x1000: "RelatedImageFileFormat", 0x1001: "RelatedImageWidth", 0x1002: "RelatedImageLength", 0x828d: "CFARepeatPatternDim", 0x828e: "CFAPattern", 0x828f: "BatteryLevel", 0x8298: "Copyright", 0x829a: "ExposureTime", 0x829d: "FNumber", 0x8769: "ExifOffset", 0x8773: "InterColorProfile", 0x8822: "ExposureProgram", 0x8824: "SpectralSensitivity", 0x8825: "GPSInfo", 0x8827: "ISOSpeedRatings", 0x8828: "OECF", 0x8829: "Interlace", 0x882a: "TimeZoneOffset", 0x882b: "SelfTimerMode", 0x9000: "ExifVersion", 0x9003: "DateTimeOriginal", 0x9004: 
"DateTimeDigitized", 0x9101: "ComponentsConfiguration", 0x9102: "CompressedBitsPerPixel", 0x9201: "ShutterSpeedValue", 0x9202: "ApertureValue", 0x9203: "BrightnessValue", 0x9204: "ExposureBiasValue", 0x9205: "MaxApertureValue", 0x9206: "SubjectDistance", 0x9207: "MeteringMode", 0x9208: "LightSource", 0x9209: "Flash", 0x920a: "FocalLength", 0x920b: "FlashEnergy", 0x920c: "SpatialFrequencyResponse", 0x920d: "Noise", 0x9211: "ImageNumber", 0x9212: "SecurityClassification", 0x9213: "ImageHistory", 0x9214: "SubjectLocation", 0x9215: "ExposureIndex", 0x9216: "TIFF/EPStandardID", 0x927c: "MakerNote", 0x9286: "UserComment", 0x9290: "SubsecTime", 0x9291: "SubsecTimeOriginal", 0x9292: "SubsecTimeDigitized", 0xa000: "FlashPixVersion", 0xa001: "ColorSpace", 0xa002: "ExifImageWidth", 0xa003: "ExifImageHeight", 0xa004: "RelatedSoundFile", 0xa005: "ExifInteroperabilityOffset", 0xa20b: "FlashEnergy", 0xa20c: "SpatialFrequencyResponse", 0xa20e: "FocalPlaneXResolution", 0xa20f: "FocalPlaneYResolution", 0xa210: "FocalPlaneResolutionUnit", 0xa214: "SubjectLocation", 0xa215: "ExposureIndex", 0xa217: "SensingMethod", 0xa300: "FileSource", 0xa301: "SceneType", 0xa302: "CFAPattern", 0xa401: "CustomRendered", 0xa402: "ExposureMode", 0xa403: "WhiteBalance", 0xa404: "DigitalZoomRatio", 0xa405: "FocalLengthIn35mmFilm", 0xa406: "SceneCaptureType", 0xa407: "GainControl", 0xa408: "Contrast", 0xa409: "Saturation", 0xa40a: "Sharpness", 0xa40b: "DeviceSettingDescription", 0xa40c: "SubjectDistanceRange", 0xa420: "ImageUniqueID", 0xa430: "CameraOwnerName", 0xa431: "BodySerialNumber", 0xa432: "LensSpecification", 0xa433: "LensMake", 0xa434: "LensModel", 0xa435: "LensSerialNumber", 0xa500: "Gamma", } ## # Maps EXIF GPS tags to tag names. 
GPSTAGS = { 0: "GPSVersionID", 1: "GPSLatitudeRef", 2: "GPSLatitude", 3: "GPSLongitudeRef", 4: "GPSLongitude", 5: "GPSAltitudeRef", 6: "GPSAltitude", 7: "GPSTimeStamp", 8: "GPSSatellites", 9: "GPSStatus", 10: "GPSMeasureMode", 11: "GPSDOP", 12: "GPSSpeedRef", 13: "GPSSpeed", 14: "GPSTrackRef", 15: "GPSTrack", 16: "GPSImgDirectionRef", 17: "GPSImgDirection", 18: "GPSMapDatum", 19: "GPSDestLatitudeRef", 20: "GPSDestLatitude", 21: "GPSDestLongitudeRef", 22: "GPSDestLongitude", 23: "GPSDestBearingRef", 24: "GPSDestBearing", 25: "GPSDestDistanceRef", 26: "GPSDestDistance", 27: "GPSProcessingMethod", 28: "GPSAreaInformation", 29: "GPSDateStamp", 30: "GPSDifferential", 31: "GPSHPositioningError", }
unknown
codeparrot/codeparrot-clean
''' Created on Jan 18, 2013 @author: brian ''' import openid from openid.fetchers import HTTPFetcher, HTTPResponse from urlparse import parse_qs from django.conf import settings from django.test import TestCase, LiveServerTestCase from django.test.utils import override_settings # from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.test.client import RequestFactory from unittest import skipUnless class MyFetcher(HTTPFetcher): """A fetcher that uses server-internal calls for performing HTTP requests. """ def __init__(self, client): """@param client: A test client object""" super(MyFetcher, self).__init__() self.client = client def fetch(self, url, body=None, headers=None): """Perform an HTTP request @raises Exception: Any exception that can be raised by Django @see: C{L{HTTPFetcher.fetch}} """ if body: # method = 'POST' # undo the URL encoding of the POST arguments data = parse_qs(body) response = self.client.post(url, data) else: # method = 'GET' data = {} if headers and 'Accept' in headers: data['CONTENT_TYPE'] = headers['Accept'] response = self.client.get(url, data) # Translate the test client response to the fetcher's HTTP response abstraction content = response.content final_url = url response_headers = {} if 'Content-Type' in response: response_headers['content-type'] = response['Content-Type'] if 'X-XRDS-Location' in response: response_headers['x-xrds-location'] = response['X-XRDS-Location'] status = response.status_code return HTTPResponse( body=content, final_url=final_url, headers=response_headers, status=status, ) class OpenIdProviderTest(TestCase): """ Tests of the OpenId login """ @skipUnless(settings.MITX_FEATURES.get('AUTH_USE_OPENID') or settings.MITX_FEATURES.get('AUTH_USE_OPENID_PROVIDER'), True) def test_begin_login_with_xrds_url(self): # the provider URL must be converted to an absolute URL in order to be # used as an openid provider. 
provider_url = reverse('openid-provider-xrds') factory = RequestFactory() request = factory.request() abs_provider_url = request.build_absolute_uri(location=provider_url) # In order for this absolute URL to work (i.e. to get xrds, then authentication) # in the test environment, we either need a live server that works with the default # fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher. # Here we do the latter: fetcher = MyFetcher(self.client) openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False) # now we can begin the login process by invoking a local openid client, # with a pointer to the (also-local) openid provider: with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url): url = reverse('openid-login') resp = self.client.post(url) code = 200 self.assertEqual(resp.status_code, code, "got code {0} for url '{1}'. Expected code {2}" .format(resp.status_code, url, code)) @skipUnless(settings.MITX_FEATURES.get('AUTH_USE_OPENID') or settings.MITX_FEATURES.get('AUTH_USE_OPENID_PROVIDER'), True) def test_begin_login_with_login_url(self): # the provider URL must be converted to an absolute URL in order to be # used as an openid provider. provider_url = reverse('openid-provider-login') factory = RequestFactory() request = factory.request() abs_provider_url = request.build_absolute_uri(location=provider_url) # In order for this absolute URL to work (i.e. to get xrds, then authentication) # in the test environment, we either need a live server that works with the default # fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher. 
# Here we do the latter: fetcher = MyFetcher(self.client) openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False) # now we can begin the login process by invoking a local openid client, # with a pointer to the (also-local) openid provider: with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url): url = reverse('openid-login') resp = self.client.post(url) code = 200 self.assertEqual(resp.status_code, code, "got code {0} for url '{1}'. Expected code {2}" .format(resp.status_code, url, code)) self.assertContains(resp, '<input name="openid.mode" type="hidden" value="checkid_setup" />', html=True) self.assertContains(resp, '<input name="openid.ns" type="hidden" value="http://specs.openid.net/auth/2.0" />', html=True) self.assertContains(resp, '<input name="openid.identity" type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />', html=True) self.assertContains(resp, '<input name="openid.claimed_id" type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />', html=True) self.assertContains(resp, '<input name="openid.ns.ax" type="hidden" value="http://openid.net/srv/ax/1.0" />', html=True) self.assertContains(resp, '<input name="openid.ax.mode" type="hidden" value="fetch_request" />', html=True) self.assertContains(resp, '<input name="openid.ax.required" type="hidden" value="email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.fullname" type="hidden" value="http://axschema.org/namePerson" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.lastname" type="hidden" value="http://axschema.org/namePerson/last" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.firstname" type="hidden" value="http://axschema.org/namePerson/first" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.nickname" type="hidden" value="http://axschema.org/namePerson/friendly" />', html=True) 
self.assertContains(resp, '<input name="openid.ax.type.email" type="hidden" value="http://axschema.org/contact/email" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.old_email" type="hidden" value="http://schema.openid.net/contact/email" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.old_nickname" type="hidden" value="http://schema.openid.net/namePerson/friendly" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.old_fullname" type="hidden" value="http://schema.openid.net/namePerson" />', html=True) self.assertContains(resp, '<input type="submit" value="Continue" />', html=True) # this should work on the server: self.assertContains(resp, '<input name="openid.realm" type="hidden" value="http://testserver/" />', html=True) # not included here are elements that will vary from run to run: # <input name="openid.return_to" type="hidden" value="http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H" /> # <input name="openid.assoc_handle" type="hidden" value="{HMAC-SHA1}{50ff8120}{rh87+Q==}" /> def attempt_login(self, expected_code, **kwargs): """ Attempt to log in through the open id provider login """ url = reverse('openid-provider-login') post_args = { "openid.mode": "checkid_setup", "openid.return_to": "http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H", "openid.assoc_handle": "{HMAC-SHA1}{50ff8120}{rh87+Q==}", "openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select", "openid.ns": "http://specs.openid.net/auth/2.0", "openid.realm": "http://testserver/", "openid.identity": "http://specs.openid.net/auth/2.0/identifier_select", "openid.ns.ax": "http://openid.net/srv/ax/1.0", "openid.ax.mode": "fetch_request", "openid.ax.required": "email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname", "openid.ax.type.fullname": "http://axschema.org/namePerson", "openid.ax.type.lastname": "http://axschema.org/namePerson/last", 
"openid.ax.type.firstname": "http://axschema.org/namePerson/first", "openid.ax.type.nickname": "http://axschema.org/namePerson/friendly", "openid.ax.type.email": "http://axschema.org/contact/email", "openid.ax.type.old_email": "http://schema.openid.net/contact/email", "openid.ax.type.old_nickname": "http://schema.openid.net/namePerson/friendly", "openid.ax.type.old_fullname": "http://schema.openid.net/namePerson", } # override the default args with any given arguments for key in kwargs: post_args["openid." + key] = kwargs[key] resp = self.client.post(url, post_args) code = expected_code self.assertEqual(resp.status_code, code, "got code {0} for url '{1}'. Expected code {2}" .format(resp.status_code, url, code)) @skipUnless(settings.MITX_FEATURES.get('AUTH_USE_OPENID') or settings.MITX_FEATURES.get('AUTH_USE_OPENID_PROVIDER'), True) def test_open_id_setup(self): """ Attempt a standard successful login """ self.attempt_login(200) @skipUnless(settings.MITX_FEATURES.get('AUTH_USE_OPENID') or settings.MITX_FEATURES.get('AUTH_USE_OPENID_PROVIDER'), True) def test_invalid_namespace(self): """ Test for 403 error code when the namespace of the request is invalid""" self.attempt_login(403, ns="http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0") @override_settings(OPENID_PROVIDER_TRUSTED_ROOTS=['http://apps.cs50.edx.org']) @skipUnless(settings.MITX_FEATURES.get('AUTH_USE_OPENID') or settings.MITX_FEATURES.get('AUTH_USE_OPENID_PROVIDER'), True) def test_invalid_return_url(self): """ Test for 403 error code when the url""" self.attempt_login(403, return_to="http://apps.cs50.edx.or") class OpenIdProviderLiveServerTest(LiveServerTestCase): """ In order for this absolute URL to work (i.e. to get xrds, then authentication) in the test environment, we either need a live server that works with the default fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher. Here we do the former. 
""" @skipUnless(settings.MITX_FEATURES.get('AUTH_USE_OPENID') or settings.MITX_FEATURES.get('AUTH_USE_OPENID_PROVIDER'), True) def test_begin_login(self): # the provider URL must be converted to an absolute URL in order to be # used as an openid provider. provider_url = reverse('openid-provider-xrds') factory = RequestFactory() request = factory.request() abs_provider_url = request.build_absolute_uri(location=provider_url) # now we can begin the login process by invoking a local openid client, # with a pointer to the (also-local) openid provider: with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url): url = reverse('openid-login') resp = self.client.post(url) code = 200 self.assertEqual(resp.status_code, code, "got code {0} for url '{1}'. Expected code {2}" .format(resp.status_code, url, code))
unknown
codeparrot/codeparrot-clean
{ "extends": "../../tsconfig.json", "compilerOptions": { "outDir": "../../out-tsc/spec", "types": [ "jasmine", "node" ] }, "files": [ "src/test.ts" ], "include": [ "**/*.spec.ts", "**/*.d.ts" ] }
json
github
https://github.com/angular/angular
adev/src/content/examples/schematics-for-libraries/projects/my-lib/tsconfig.spec.json
# frozen_string_literal: true # :markup: markdown module ActionDispatch module Routing # # Action Dispatch Routing PolymorphicRoutes # # Polymorphic URL helpers are methods for smart resolution to a named route call # when given an Active Record model instance. They are to be used in combination # with ActionController::Resources. # # These methods are useful when you want to generate the correct URL or path to # a RESTful resource without having to know the exact type of the record in # question. # # Nested resources and/or namespaces are also supported, as illustrated in the # example: # # polymorphic_url([:admin, @article, @comment]) # # results in: # # admin_article_comment_url(@article, @comment) # # ## Usage within the framework # # Polymorphic URL helpers are used in a number of places throughout the Rails # framework: # # * `url_for`, so you can use it with a record as the argument, e.g. # `url_for(@article)`; # * ActionView::Helpers::FormHelper uses `polymorphic_path`, so you can write # `form_with(model: @article)` without having to specify `:url` parameter for the # form action; # * `redirect_to` (which, in fact, uses `url_for`) so you can write # `redirect_to(post)` in your controllers; # * ActionView::Helpers::AtomFeedHelper, so you don't have to explicitly # specify URLs for feed entries. # # # ## Prefixed polymorphic helpers # # In addition to `polymorphic_url` and `polymorphic_path` methods, a number of # prefixed helpers are available as a shorthand to `action: "..."` in options. # Those are: # # * `edit_polymorphic_url`, `edit_polymorphic_path` # * `new_polymorphic_url`, `new_polymorphic_path` # # # Example usage: # # edit_polymorphic_path(@post) # => "/posts/1/edit" # polymorphic_path(@post, format: :pdf) # => "/posts/1.pdf" # # ## Usage with mounted engines # # If you are using a mounted engine and you need to use a polymorphic_url # pointing at the engine's routes, pass in the engine's route proxy as the first # argument to the method. 
For example: # # polymorphic_url([blog, @post]) # calls blog.post_path(@post) # form_with(model: [blog, @post]) # => "/blog/posts/1" # module PolymorphicRoutes # Constructs a call to a named RESTful route for the given record and returns # the resulting URL string. For example: # # # calls post_url(post) # polymorphic_url(post) # => "http://example.com/posts/1" # polymorphic_url([blog, post]) # => "http://example.com/blogs/1/posts/1" # polymorphic_url([:admin, blog, post]) # => "http://example.com/admin/blogs/1/posts/1" # polymorphic_url([user, :blog, post]) # => "http://example.com/users/1/blog/posts/1" # polymorphic_url(Comment) # => "http://example.com/comments" # # #### Options # # * `:action` - Specifies the action prefix for the named route: `:new` or # `:edit`. Default is no prefix. # * `:routing_type` - Allowed values are `:path` or `:url`. Default is `:url`. # # # Also includes all the options from `url_for`. These include such things as # `:anchor` or `:trailing_slash`. Example usage is given below: # # polymorphic_url([blog, post], anchor: 'my_anchor') # # => "http://example.com/blogs/1/posts/1#my_anchor" # polymorphic_url([blog, post], anchor: 'my_anchor', script_name: "/my_app") # # => "http://example.com/my_app/blogs/1/posts/1#my_anchor" # # For all of these options, see the documentation for # [url_for](rdoc-ref:ActionDispatch::Routing::UrlFor). 
# # #### Functionality # # # an Article record # polymorphic_url(record) # same as article_url(record) # # # a Comment record # polymorphic_url(record) # same as comment_url(record) # # # it recognizes new records and maps to the collection # record = Comment.new # polymorphic_url(record) # same as comments_url() # # # the class of a record will also map to the collection # polymorphic_url(Comment) # same as comments_url() # def polymorphic_url(record_or_hash_or_array, options = {}) if Hash === record_or_hash_or_array options = record_or_hash_or_array.merge(options) record = options.delete :id return polymorphic_url record, options end if mapping = polymorphic_mapping(record_or_hash_or_array) return mapping.call(self, [record_or_hash_or_array, options], false) end opts = options.dup action = opts.delete :action type = opts.delete(:routing_type) || :url HelperMethodBuilder.polymorphic_method self, record_or_hash_or_array, action, type, opts end # Returns the path component of a URL for the given record. 
def polymorphic_path(record_or_hash_or_array, options = {}) if Hash === record_or_hash_or_array options = record_or_hash_or_array.merge(options) record = options.delete :id return polymorphic_path record, options end if mapping = polymorphic_mapping(record_or_hash_or_array) return mapping.call(self, [record_or_hash_or_array, options], true) end opts = options.dup action = opts.delete :action type = :path HelperMethodBuilder.polymorphic_method self, record_or_hash_or_array, action, type, opts end %w(edit new).each do |action| module_eval <<-EOT, __FILE__, __LINE__ + 1 # frozen_string_literal: true def #{action}_polymorphic_url(record_or_hash, options = {}) polymorphic_url_for_action("#{action}", record_or_hash, options) end def #{action}_polymorphic_path(record_or_hash, options = {}) polymorphic_path_for_action("#{action}", record_or_hash, options) end EOT end private def polymorphic_url_for_action(action, record_or_hash, options) polymorphic_url(record_or_hash, options.merge(action: action)) end def polymorphic_path_for_action(action, record_or_hash, options) polymorphic_path(record_or_hash, options.merge(action: action)) end def polymorphic_mapping(record) if record.respond_to?(:to_model) _routes.polymorphic_mappings[record.to_model.model_name.name] else _routes.polymorphic_mappings[record.class.name] end end class HelperMethodBuilder # :nodoc: CACHE = { path: {}, url: {} } def self.get(action, type) type = type.to_sym CACHE[type].fetch(action) { build action, type } end def self.url; CACHE[:url][nil]; end def self.path; CACHE[:path][nil]; end def self.build(action, type) prefix = action ? 
"#{action}_" : "" suffix = type if action.to_s == "new" HelperMethodBuilder.singular prefix, suffix else HelperMethodBuilder.plural prefix, suffix end end def self.singular(prefix, suffix) new(->(name) { name.singular_route_key }, prefix, suffix) end def self.plural(prefix, suffix) new(->(name) { name.route_key }, prefix, suffix) end def self.polymorphic_method(recipient, record_or_hash_or_array, action, type, options) builder = get action, type case record_or_hash_or_array when Array record_or_hash_or_array = record_or_hash_or_array.compact if record_or_hash_or_array.empty? raise ArgumentError, "Nil location provided. Can't build URI." end if record_or_hash_or_array.first.is_a?(ActionDispatch::Routing::RoutesProxy) recipient = record_or_hash_or_array.shift end method, args = builder.handle_list record_or_hash_or_array when String, Symbol method, args = builder.handle_string record_or_hash_or_array when Class method, args = builder.handle_class record_or_hash_or_array when nil raise ArgumentError, "Nil location provided. Can't build URI." else method, args = builder.handle_model record_or_hash_or_array end if options.empty? recipient.public_send(method, *args) else recipient.public_send(method, *args, options) end end attr_reader :suffix, :prefix def initialize(key_strategy, prefix, suffix) @key_strategy = key_strategy @prefix = prefix @suffix = suffix end def handle_string(record) [get_method_for_string(record), []] end def handle_string_call(target, str) target.public_send get_method_for_string str end def handle_class(klass) [get_method_for_class(klass), []] end def handle_class_call(target, klass) target.public_send get_method_for_class klass end def handle_model(record) args = [] model = record.to_model named_route = if model.persisted? 
args << model get_method_for_string model.model_name.singular_route_key else get_method_for_class model end [named_route, args] end def handle_model_call(target, record) if mapping = polymorphic_mapping(target, record) mapping.call(target, [record], suffix == "path") else method, args = handle_model(record) target.public_send(method, *args) end end def handle_list(list) record_list = list.dup record = record_list.pop args = [] route = record_list.map do |parent| case parent when Symbol parent.to_s when String raise(ArgumentError, "Please use symbols for polymorphic route arguments.") when Class args << parent parent.model_name.singular_route_key else args << parent.to_model parent.to_model.model_name.singular_route_key end end route << case record when Symbol record.to_s when String raise(ArgumentError, "Please use symbols for polymorphic route arguments.") when Class @key_strategy.call record.model_name else model = record.to_model if model.persisted? args << model model.model_name.singular_route_key else @key_strategy.call model.model_name end end route << suffix named_route = prefix + route.join("_") [named_route, args] end private def polymorphic_mapping(target, record) if record.respond_to?(:to_model) target._routes.polymorphic_mappings[record.to_model.model_name.name] else target._routes.polymorphic_mappings[record.class.name] end end def get_method_for_class(klass) name = @key_strategy.call klass.model_name get_method_for_string name end def get_method_for_string(str) "#{prefix}#{str}_#{suffix}" end [nil, "new", "edit"].each do |action| CACHE[:url][action] = build action, "url" CACHE[:path][action] = build action, "path" end end end end end
ruby
github
https://github.com/rails/rails
actionpack/lib/action_dispatch/routing/polymorphic_routes.rb
# -*- coding: utf-8 -*- """ CSS plugins """ from hyde.plugin import CLTransformer, Plugin from hyde.exceptions import HydeException import os import re import subprocess import sys from fswrap import File # # Less CSS # class LessCSSPlugin(CLTransformer): """ The plugin class for less css """ def __init__(self, site): super(LessCSSPlugin, self).__init__(site) self.import_finder = \ re.compile('^\\s*@import\s+(?:\'|\")([^\'\"]*)(?:\'|\")\s*\;\s*$', re.MULTILINE) @property def executable_name(self): return "lessc" def _should_parse_resource(self, resource): """ Check user defined """ return resource.source_file.kind == 'less' and \ getattr(resource, 'meta', {}).get('parse', True) def _should_replace_imports(self, resource): return getattr(resource, 'meta', {}).get('uses_template', True) def begin_site(self): """ Find all the less css files and set their relative deploy path. """ for resource in self.site.content.walk_resources(): if self._should_parse_resource(resource): new_name = resource.source_file.name_without_extension + ".css" target_folder = File(resource.relative_deploy_path).parent resource.relative_deploy_path = target_folder.child(new_name) def begin_text_resource(self, resource, text): """ Replace @import statements with {% include %} statements. """ if not self._should_parse_resource(resource) or \ not self._should_replace_imports(resource): return text def import_to_include(match): if not match.lastindex: return '' path = match.groups(1)[0] afile = File(resource.source_file.parent.child(path)) if len(afile.kind.strip()) == 0: afile = File(afile.path + '.less') ref = self.site.content.resource_from_path(afile.path) if not ref: raise HydeException( "Cannot import from path [%s]" % afile.path) ref.is_processable = False return self.template.get_include_statement(ref.relative_path) text = self.import_finder.sub(import_to_include, text) return text @property def plugin_name(self): """ The name of the plugin. 
""" return "less" def text_resource_complete(self, resource, text): """ Save the file to a temporary place and run less compiler. Read the generated file and return the text as output. Set the target path to have a css extension. """ if not self._should_parse_resource(resource): return supported = [ "verbose", ("silent", "s"), ("compress", "x"), "O0", "O1", "O2", "include-path=" ] less = self.app source = File.make_temp(text) target = File.make_temp('') args = [unicode(less)] args.extend(self.process_args(supported)) args.extend([unicode(source), unicode(target)]) try: self.call_app(args) except subprocess.CalledProcessError: HydeException.reraise( "Cannot process %s. Error occurred when " "processing [%s]" % (self.app.name, resource.source_file), sys.exc_info()) return target.read_all() # # Stylus CSS # class StylusPlugin(CLTransformer): """ The plugin class for stylus css """ def __init__(self, site): super(StylusPlugin, self).__init__(site) self.import_finder = \ re.compile('^\\s*@import\s+(?:\'|\")([^\'\"]*)(?:\'|\")\s*\;?\s*$', re.MULTILINE) def begin_site(self): """ Find all the styl files and set their relative deploy path. """ for resource in self.site.content.walk_resources(): if resource.source_file.kind == 'styl': new_name = resource.source_file.name_without_extension + ".css" target_folder = File(resource.relative_deploy_path).parent resource.relative_deploy_path = target_folder.child(new_name) def begin_text_resource(self, resource, text): """ Replace @import statements with {% include %} statements. """ if not resource.source_file.kind == 'styl': return def import_to_include(match): """ Converts a css import statement to include statement. 
""" if not match.lastindex: return '' path = match.groups(1)[0] first_child = resource.source_file.parent.child(path) afile = File(File(first_child).fully_expanded_path) if len(afile.kind.strip()) == 0: afile = File(afile.path + '.styl') ref = self.site.content.resource_from_path(afile.path) if not ref: try: include = self.settings.args.include except AttributeError: include = False if not include: raise HydeException( "Cannot import from path [%s]" % afile.path) else: ref.is_processable = False return "\n" + \ self.template.get_include_statement(ref.relative_path) + \ "\n" return '@import "' + path + '"\n' text = self.import_finder.sub(import_to_include, text) return text @property def defaults(self): """ Returns `compress` if not in development mode. """ try: mode = self.site.config.mode except AttributeError: mode = "production" defaults = {"compress": ""} if mode.startswith('dev'): defaults = {} return defaults @property def plugin_name(self): """ The name of the plugin. """ return "stylus" def text_resource_complete(self, resource, text): """ Save the file to a temporary place and run stylus compiler. Read the generated file and return the text as output. Set the target path to have a css extension. """ if not resource.source_file.kind == 'styl': return stylus = self.app source = File.make_temp(text.strip()) supported = [("compress", "c"), ("include", "I")] args = [unicode(stylus)] args.extend(self.process_args(supported)) args.append(unicode(source)) try: self.call_app(args) except subprocess.CalledProcessError: HydeException.reraise( "Cannot process %s. 
Error occurred when " "processing [%s]" % (stylus.name, resource.source_file), sys.exc_info()) target = File(source.path + '.css') return target.read_all() # # Clever CSS # class CleverCSSPlugin(Plugin): """ The plugin class for CleverCSS """ def __init__(self, site): super(CleverCSSPlugin, self).__init__(site) try: import clevercss except ImportError, e: raise HydeException('Unable to import CleverCSS: ' + e.message) else: self.clevercss = clevercss def _should_parse_resource(self, resource): """ Check user defined """ return resource.source_file.kind == 'ccss' and \ getattr(resource, 'meta', {}).get('parse', True) def _should_replace_imports(self, resource): return getattr(resource, 'meta', {}).get('uses_template', True) def begin_site(self): """ Find all the clevercss files and set their relative deploy path. """ for resource in self.site.content.walk_resources(): if self._should_parse_resource(resource): new_name = resource.source_file.name_without_extension + ".css" target_folder = File(resource.relative_deploy_path).parent resource.relative_deploy_path = target_folder.child(new_name) def begin_text_resource(self, resource, text): """ Replace @import statements with {% include %} statements. 
""" if not self._should_parse_resource(resource) or \ not self._should_replace_imports(resource): return text import_finder = re.compile( '^\\s*@import\s+(?:\'|\")([^\'\"]*)(?:\'|\")\s*\;\s*$', re.MULTILINE) def import_to_include(match): if not match.lastindex: return '' path = match.groups(1)[0] afile = File(resource.source_file.parent.child(path)) if len(afile.kind.strip()) == 0: afile = File(afile.path + '.ccss') ref = self.site.content.resource_from_path(afile.path) if not ref: raise HydeException( "Cannot import from path [%s]" % afile.path) ref.is_processable = False return self.template.get_include_statement(ref.relative_path) text = import_finder.sub(import_to_include, text) return text def text_resource_complete(self, resource, text): """ Run clevercss compiler on text. """ if not self._should_parse_resource(resource): return return self.clevercss.convert(text, self.settings) # # Sassy CSS # class SassyCSSPlugin(Plugin): """ The plugin class for SassyCSS """ def __init__(self, site): super(SassyCSSPlugin, self).__init__(site) try: import scss except ImportError, e: raise HydeException('Unable to import pyScss: ' + e.message) else: self.scss = scss def _should_parse_resource(self, resource): """ Check user defined """ return resource.source_file.kind == 'scss' and \ getattr(resource, 'meta', {}).get('parse', True) @property def options(self): """ Returns options depending on development mode """ try: mode = self.site.config.mode except AttributeError: mode = "production" debug = mode.startswith('dev') opts = {'compress': not debug, 'debug_info': debug} site_opts = self.settings.get('options', {}) opts.update(site_opts) return opts @property def vars(self): """ Returns scss variables. """ return self.settings.get('vars', {}) @property def includes(self): """ Returns scss load paths. """ return self.settings.get('includes', []) def begin_site(self): """ Find all the sassycss files and set their relative deploy path. 
""" self.scss.STATIC_URL = self.site.content_url('/') self.scss.STATIC_ROOT = self.site.config.content_root_path.path self.scss.ASSETS_URL = self.site.media_url('/') self.scss.ASSETS_ROOT = self.site.config.deploy_root_path.child( self.site.config.media_root) for resource in self.site.content.walk_resources(): if self._should_parse_resource(resource): new_name = resource.source_file.name_without_extension + ".css" target_folder = File(resource.relative_deploy_path).parent resource.relative_deploy_path = target_folder.child(new_name) def text_resource_complete(self, resource, text): """ Run sassycss compiler on text. """ if not self._should_parse_resource(resource): return includes = [resource.node.path] + self.includes includes = [path.rstrip(os.sep) + os.sep for path in includes] options = self.options if 'load_paths' not in options: options['load_paths'] = [] options['load_paths'].extend(includes) scss = self.scss.Scss(scss_opts=options, scss_vars=self.vars) return scss.compile(text)
unknown
codeparrot/codeparrot-clean
from langchain_classic.schema.runnable.retry import __all__ EXPECTED_ALL = ["RunnableRetry", "U"] def test_all_imports() -> None: assert set(__all__) == set(EXPECTED_ALL)
python
github
https://github.com/langchain-ai/langchain
libs/langchain/tests/unit_tests/schema/runnable/test_retry.py
prelude: | def bp_yield yield end def bp_pass &b bp_yield &b end benchmark: vm_blockparam_pass: | bp_pass{} loop_count: 30000000
unknown
github
https://github.com/ruby/ruby
benchmark/vm_blockparam_pass.yml
# frozen_string_literal: true require_relative "worker" require_relative "installer/parallel_installer" require_relative "installer/standalone" require_relative "installer/gem_installer" module Bundler class Installer attr_reader :post_install_messages, :definition # Begins the installation process for Bundler. # For more information see the #run method on this class. def self.install(root, definition, options = {}) installer = new(root, definition) Plugin.hook(Plugin::Events::GEM_BEFORE_INSTALL_ALL, definition.dependencies) installer.run(options) Plugin.hook(Plugin::Events::GEM_AFTER_INSTALL_ALL, definition.dependencies) installer end def initialize(root, definition) @root = root @definition = definition @post_install_messages = {} end # Runs the install procedures for a specific Gemfile. # # Firstly, this method will check to see if `Bundler.bundle_path` exists # and if not then Bundler will create the directory. This is usually the same # location as RubyGems which typically is the `~/.gem` directory # unless other specified. # # Secondly, it checks if Bundler has been configured to be "frozen". # Frozen ensures that the Gemfile and the Gemfile.lock file are matching. # This stops a situation where a developer may update the Gemfile but may not run # `bundle install`, which leads to the Gemfile.lock file not being correctly updated. # If this file is not correctly updated then any other developer running # `bundle install` will potentially not install the correct gems. # # Thirdly, Bundler checks if there are any dependencies specified in the Gemfile. # If there are no dependencies specified then Bundler returns a warning message stating # so and this method returns. # # Fourthly, Bundler checks if the Gemfile.lock exists, and if so # then proceeds to set up a definition based on the Gemfile and the Gemfile.lock. # During this step Bundler will also download information about any new gems # that are not in the Gemfile.lock and resolve any dependencies if needed. 
# # Fifthly, Bundler resolves the dependencies either through a cache of gems or by remote. # This then leads into the gems being installed, along with stubs for their executables, # but only if the --binstubs option has been passed or Bundler.options[:bin] has been set # earlier. # # Sixthly, a new Gemfile.lock is created from the installed gems to ensure that the next time # that a user runs `bundle install` they will receive any updates from this process. # # Finally, if the user has specified the standalone flag, Bundler will generate the needed # require paths and save them in a `setup.rb` file. See `bundle standalone --help` for more # information. def run(options) Bundler.create_bundle_path ProcessLock.lock do @definition.ensure_equivalent_gemfile_and_lockfile(options[:deployment]) if @definition.dependencies.empty? Bundler.ui.warn "The Gemfile specifies no dependencies" lock return end if @definition.setup_domain!(options) ensure_specs_are_compatible! load_plugins end install(options) Gem::Specification.reset # invalidate gem specification cache so that installed gems are immediately available lock Standalone.new(options[:standalone], @definition).generate if options[:standalone] end end def generate_bundler_executable_stubs(spec, options = {}) if spec.name == "bundler" Bundler.ui.warn "Bundler itself does not use binstubs because its version is selected by RubyGems" return end if options[:binstubs_cmd] && spec.executables.empty? options = {} spec.runtime_dependencies.each do |dep| bins = @definition.specs[dep].first.executables options[dep.name] = bins unless bins.empty? end if options.any? Bundler.ui.warn "#{spec.name} has no executables, but you may want " \ "one from a gem it depends on." options.each {|name, bins| Bundler.ui.warn " #{name} has: #{bins.join(", ")}" } else Bundler.ui.warn "There are no executables for the gem #{spec.name}." 
end return end # double-assignment to avoid warnings about variables that will be used by ERB bin_path = Bundler.bin_path bin_path = bin_path relative_gemfile_path = Bundler.default_gemfile.relative_path_from(bin_path) relative_gemfile_path = relative_gemfile_path ruby_command = Thor::Util.ruby_command ruby_command = ruby_command template_path = File.expand_path("templates/Executable", __dir__) template = File.read(template_path) exists = [] spec.executables.each do |executable| binstub_path = "#{bin_path}/#{executable}" if File.exist?(binstub_path) && !options[:force] exists << executable next end mode = Gem.win_platform? ? "wb:UTF-8" : "w" require "erb" content = ERB.new(template, trim_mode: "-").result(binding) File.write(binstub_path, content, mode: mode, perm: 0o777 & ~File.umask) if Gem.win_platform? || options[:all_platforms] prefix = "@ruby -x \"%~f0\" %*\n@exit /b %ERRORLEVEL%\n\n" File.write("#{binstub_path}.cmd", prefix + content, mode: mode) end end if options[:binstubs_cmd] && exists.any? case exists.size when 1 Bundler.ui.warn "Skipped #{exists[0]} since it already exists." when 2 Bundler.ui.warn "Skipped #{exists.join(" and ")} since they already exist." else items = exists[0...-1].empty? ? nil : exists[0...-1].join(", ") skipped = [items, exists[-1]].compact.join(" and ") Bundler.ui.warn "Skipped #{skipped} since they already exist." end Bundler.ui.warn "If you want to overwrite skipped stubs, use --force." 
end end def generate_standalone_bundler_executable_stubs(spec, options = {}) # double-assignment to avoid warnings about variables that will be used by ERB bin_path = Bundler.bin_path unless path = Bundler.settings[:path] raise "Can't standalone without an explicit path set" end standalone_path = Bundler.root.join(path).relative_path_from(bin_path) standalone_path = standalone_path template = File.read(File.expand_path("templates/Executable.standalone", __dir__)) ruby_command = Thor::Util.ruby_command ruby_command = ruby_command spec.executables.each do |executable| next if executable == "bundle" executable_path = Pathname(spec.full_gem_path).join(spec.bindir, executable).relative_path_from(bin_path) executable_path = executable_path mode = Gem.win_platform? ? "wb:UTF-8" : "w" require "erb" content = ERB.new(template, trim_mode: "-").result(binding) File.write("#{bin_path}/#{executable}", content, mode: mode, perm: 0o755) if Gem.win_platform? || options[:all_platforms] prefix = "@ruby -x \"%~f0\" %*\n@exit /b %ERRORLEVEL%\n\n" File.write("#{bin_path}/#{executable}.cmd", prefix + content, mode: mode) end end end private # the order that the resolver provides is significant, since # dependencies might affect the installation of a gem. # that said, it's a rare situation (other than rake), and parallel # installation is SO MUCH FASTER. so we let people opt in. def install(options) standalone = options[:standalone] force = options[:force] local = options[:local] || options[:"prefer-local"] jobs = installation_parallelization spec_installations = ParallelInstaller.call(self, @definition.specs, jobs, standalone, force, local: local) spec_installations.each do |installation| post_install_messages[installation.name] = installation.post_install_message if installation.has_post_install_message? 
end end def installation_parallelization if jobs = Bundler.settings[:jobs] return jobs end Bundler.settings.processor_count end def load_plugins Gem.load_plugins requested_path_gems = @definition.specs.select {|s| s.source.is_a?(Source::Path) } path_plugin_files = requested_path_gems.flat_map do |spec| spec.matches_for_glob("rubygems_plugin#{Bundler.rubygems.suffix_pattern}") rescue TypeError error_message = "#{spec.name} #{spec.version} has an invalid gemspec" raise Gem::InvalidSpecificationException, error_message end Gem.load_plugin_files(path_plugin_files) Gem.load_env_plugins end def ensure_specs_are_compatible! @definition.specs.each do |spec| unless spec.matches_current_ruby? raise InstallError, "#{spec.full_name} requires ruby version #{spec.required_ruby_version}, " \ "which is incompatible with the current version, #{Gem.ruby_version}" end unless spec.matches_current_rubygems? raise InstallError, "#{spec.full_name} requires rubygems version #{spec.required_rubygems_version}, " \ "which is incompatible with the current version, #{Gem.rubygems_version}" end end end def lock @definition.lock end end end
ruby
github
https://github.com/ruby/ruby
lib/bundler/installer.rb
#[cfg_attr(target_os = "emscripten", ignore = "disabled on Emscripten")] #[rustversion::attr(not(nightly), ignore = "requires nightly")] #[cfg_attr(miri, ignore = "incompatible with miri")] #[allow(unused_attributes)] #[test] fn ui() { let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/**/*.rs"); }
rust
github
https://github.com/serde-rs/serde
test_suite/tests/compiletest.rs
#!/usr/bin/env python import json import struct import sys import zlib try: xrange # Python 2 PY2 = True except NameError: PY2 = False xrange = range # Python 3 if __name__ == '__main__': with open(sys.argv[1]) as fp: obj = json.load(fp) text = json.dumps(obj, separators=(',', ':')).encode('utf-8') data = zlib.compress(text, zlib.Z_BEST_COMPRESSION) # To make decompression a little easier, we prepend the compressed data # with the size of the uncompressed data as a 24 bits BE unsigned integer. assert len(text) < 1 << 24, 'Uncompressed JSON must be < 16 MiB.' data = struct.pack('>I', len(text))[1:4] + data step = 20 slices = (data[i:i+step] for i in xrange(0, len(data), step)) slices = [','.join(str(ord(c) if PY2 else c) for c in s) for s in slices] text = ',\n'.join(slices) with open(sys.argv[2], 'w') as fp: fp.write(text)
python
github
https://github.com/nodejs/node
tools/compress_json.py
""" This module contains tests for the pulp_node.resources module. """ import unittest from pulp.common import config import mock from pulp_node import resources class TestParentBindings(unittest.TestCase): """ This class contains tests for the parent_bindings() function. """ @mock.patch('pulp_node.resources.read_config') def test_verify_ssl_false(self, read_config): """ Make sure that verify_ssl is passed correctly when it is false. """ ca_path = '/some/path.crt' node_config = {'parent_oauth': {'key': 'some_key', 'secret': 'ssssh!', 'user_id': 'bgates'}, 'main': {'verify_ssl': 'fAlsE', 'ca_path': ca_path}} node_config = config.Config(node_config).graph() read_config.return_value = node_config bindings = resources.parent_bindings('host') self.assertEqual(bindings.bindings.server.ca_path, ca_path) self.assertEqual(bindings.bindings.server.verify_ssl, False) @mock.patch('pulp_node.resources.read_config') def test_verify_ssl_true(self, read_config): """ Make sure that verify_ssl is passed correctly when it is true. """ ca_path = '/some/path' node_config = {'parent_oauth': {'key': 'some_key', 'secret': 'ssssh!', 'user_id': 'bgates'}, 'main': {'verify_ssl': 'tRue', 'ca_path': ca_path}} node_config = config.Config(node_config).graph() read_config.return_value = node_config bindings = resources.parent_bindings('host') self.assertEqual(bindings.bindings.server.ca_path, ca_path) self.assertEqual(bindings.bindings.server.verify_ssl, True) class TestPulpBindings(unittest.TestCase): """ This class contains tests for the pulp_bindings() function. """ @mock.patch('pulp_node.resources.read_config') def test_verify_ssl_false(self, read_config): """ Make sure that verify_ssl is passed correctly when it is false. 
""" ca_path = '/some/path.crt' node_config = {'parent_oauth': {'key': 'some_key', 'secret': 'ssssh!', 'user_id': 'bgates'}, 'main': {'verify_ssl': 'fAlsE', 'ca_path': ca_path}} node_config = config.Config(node_config).graph() read_config.return_value = node_config bindings = resources.pulp_bindings() self.assertEqual(bindings.bindings.server.ca_path, ca_path) self.assertEqual(bindings.bindings.server.verify_ssl, False) @mock.patch('pulp_node.resources.read_config') def test_verify_ssl_true(self, read_config): """ Make sure that verify_ssl is passed correctly when it is true. """ ca_path = '/some/path' node_config = {'parent_oauth': {'key': 'some_key', 'secret': 'ssssh!', 'user_id': 'bgates'}, 'main': {'verify_ssl': 'True', 'ca_path': ca_path}} node_config = config.Config(node_config).graph() read_config.return_value = node_config bindings = resources.pulp_bindings() self.assertEqual(bindings.bindings.server.ca_path, ca_path) self.assertEqual(bindings.bindings.server.verify_ssl, True)
unknown
codeparrot/codeparrot-clean
"""engine.SCons.Variables.ListVariable This file defines the option type for SCons implementing 'lists'. A 'list' option may either be 'all', 'none' or a list of names separated by comma. After the option has been processed, the option value holds either the named list elements, all list elemens or no list elements at all. Usage example: list_of_libs = Split('x11 gl qt ical') opts = Variables() opts.Add(ListVariable('shared', 'libraries to build as shared libraries', 'all', elems = list_of_libs)) ... for lib in list_of_libs: if lib in env['shared']: env.SharedObject(...) else: env.Object(...) """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Variables/ListVariable.py 3897 2009/01/13 06:45:54 scons" # Know Bug: This should behave like a Set-Type, but does not really, # since elements can occur twice. 
__all__ = ['ListVariable',] import string import UserList import SCons.Util class _ListVariable(UserList.UserList): def __init__(self, initlist=[], allowedElems=[]): UserList.UserList.__init__(self, filter(None, initlist)) self.allowedElems = allowedElems[:] self.allowedElems.sort() def __cmp__(self, other): raise NotImplementedError def __eq__(self, other): raise NotImplementedError def __ge__(self, other): raise NotImplementedError def __gt__(self, other): raise NotImplementedError def __le__(self, other): raise NotImplementedError def __lt__(self, other): raise NotImplementedError def __str__(self): if len(self) == 0: return 'none' self.data.sort() if self.data == self.allowedElems: return 'all' else: return string.join(self, ',') def prepare_to_store(self): return self.__str__() def _converter(val, allowedElems, mapdict): """ """ if val == 'none': val = [] elif val == 'all': val = allowedElems else: val = filter(None, string.split(val, ',')) val = map(lambda v, m=mapdict: m.get(v, v), val) notAllowed = filter(lambda v, aE=allowedElems: not v in aE, val) if notAllowed: raise ValueError("Invalid value(s) for option: %s" % string.join(notAllowed, ',')) return _ListVariable(val, allowedElems) ## def _validator(key, val, env): ## """ ## """ ## # todo: write validater for pgk list ## return 1 def ListVariable(key, help, default, names, map={}): """ The input parameters describe a 'package list' option, thus they are returned with the correct converter and validater appended. The result is usable for input to opts.Add() . A 'package list' option may either be 'all', 'none' or a list of package names (separated by space). """ names_str = 'allowed names: %s' % string.join(names, ' ') if SCons.Util.is_List(default): default = string.join(default, ',') help = string.join( (help, '(all|none|comma-separated list of names)', names_str), '\n ') return (key, help, default, None, #_validator, lambda val, elems=names, m=map: _converter(val, elems, m))
unknown
codeparrot/codeparrot-clean
"""Pure-Python AES implementation.""" from cryptomath import * from AES import * from rijndael import rijndael def new(key, mode, IV): return Python_AES(key, mode, IV) class Python_AES(AES): def __init__(self, key, mode, IV): AES.__init__(self, key, mode, IV, "python") self.rijndael = rijndael(key, 16) self.IV = IV def encrypt(self, plaintext): AES.encrypt(self, plaintext) plaintextBytes = stringToBytes(plaintext) chainBytes = stringToBytes(self.IV) #CBC Mode: For each block... for x in range(len(plaintextBytes)/16): #XOR with the chaining block blockBytes = plaintextBytes[x*16 : (x*16)+16] for y in range(16): blockBytes[y] ^= chainBytes[y] blockString = bytesToString(blockBytes) #Encrypt it encryptedBytes = stringToBytes(self.rijndael.encrypt(blockString)) #Overwrite the input with the output for y in range(16): plaintextBytes[(x*16)+y] = encryptedBytes[y] #Set the next chaining block chainBytes = encryptedBytes self.IV = bytesToString(chainBytes) return bytesToString(plaintextBytes) def decrypt(self, ciphertext): AES.decrypt(self, ciphertext) ciphertextBytes = stringToBytes(ciphertext) chainBytes = stringToBytes(self.IV) #CBC Mode: For each block... for x in range(len(ciphertextBytes)/16): #Decrypt it blockBytes = ciphertextBytes[x*16 : (x*16)+16] blockString = bytesToString(blockBytes) decryptedBytes = stringToBytes(self.rijndael.decrypt(blockString)) #XOR with the chaining block and overwrite the input with output for y in range(16): decryptedBytes[y] ^= chainBytes[y] ciphertextBytes[(x*16)+y] = decryptedBytes[y] #Set the next chaining block chainBytes = blockBytes self.IV = bytesToString(chainBytes) return bytesToString(ciphertextBytes)
unknown
codeparrot/codeparrot-clean
use super::prelude::*; pub(crate) struct PathParser; impl<S: Stage> SingleAttributeParser<S> for PathParser { const PATH: &[Symbol] = &[sym::path]; const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost; const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError; const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowListWarnRest(&[Allow(Target::Mod), Error(Target::Crate)]); const TEMPLATE: AttributeTemplate = template!( NameValueStr: "file", "https://doc.rust-lang.org/reference/items/modules.html#the-path-attribute" ); fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option<AttributeKind> { let Some(nv) = args.name_value() else { cx.expected_name_value(cx.attr_span, None); return None; }; let Some(path) = nv.value_as_str() else { cx.expected_string_literal(nv.value_span, Some(nv.value_as_lit())); return None; }; Some(AttributeKind::Path(path, cx.attr_span)) } }
rust
github
https://github.com/rust-lang/rust
compiler/rustc_attr_parsing/src/attributes/path.rs
# Copyright 2009-2010 Gregory P. Ward # Copyright 2009-2010 Intelerad Medical Systems Incorporated # Copyright 2010-2011 Fog Creek Software # Copyright 2010-2011 Unity Technologies # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. '''store class for local filesystem''' from mercurial.i18n import _ import lfutil import basestore class localstore(basestore.basestore): '''localstore first attempts to grab files out of the store in the remote Mercurial repository. Failing that, it attempts to grab the files from the user cache.''' def __init__(self, ui, repo, remote): self.remote = remote.local() super(localstore, self).__init__(ui, repo, self.remote.url()) def put(self, source, hash): if lfutil.instore(self.remote, hash): return lfutil.link(source, lfutil.storepath(self.remote, hash)) def exists(self, hashes): retval = {} for hash in hashes: retval[hash] = lfutil.instore(self.remote, hash) return retval def _getfile(self, tmpfile, filename, hash): path = lfutil.findfile(self.remote, hash) if not path: raise basestore.StoreError(filename, hash, self.url, _("can't get file locally")) fd = open(path, 'rb') try: return lfutil.copyandhash(fd, tmpfile) finally: fd.close() def _verifyfile(self, cctx, cset, contents, standin, verified): filename = lfutil.splitstandin(standin) if not filename: return False fctx = cctx[standin] key = (filename, fctx.filenode()) if key in verified: return False expecthash = fctx.data()[0:40] storepath = lfutil.storepath(self.remote, expecthash) verified.add(key) if not lfutil.instore(self.remote, expecthash): self.ui.warn( _('changeset %s: %s references missing %s\n') % (cset, filename, storepath)) return True # failed if contents: actualhash = lfutil.hashfile(storepath) if actualhash != expecthash: self.ui.warn( _('changeset %s: %s references corrupted %s\n') % (cset, filename, storepath)) return True # failed return False
unknown
codeparrot/codeparrot-clean
#### Note: this error code is no longer emitted by the compiler. `async` non-`move` closures with parameters are currently not supported. Erroneous code example: ```edition2018 fn main() { let add_one = async |num: u8| { num + 1 }; } ``` `async` with non-move is currently not supported with the current version, you can use successfully by using move: ```edition2018 fn main() { let add_one = async move |num: u8| { // ok! num + 1 }; } ```
unknown
github
https://github.com/rust-lang/rust
compiler/rustc_error_codes/src/error_codes/E0708.md
from setuptools import setup, find_packages setup(name='restosaur', version='0.6.8', description='Damn simple RESTful library', classifiers=[ "Development Status :: 3 - Alpha", "Environment :: Web Environment", "Framework :: Django", "Framework :: Django :: 1.6", "Framework :: Django :: 1.7", "Framework :: Django :: 1.8", "Framework :: Django :: 1.9", #"Framework :: Django :: 1.10", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development :: Libraries :: Python Modules", "Intended Audience :: Developers", ], author='Marcin Nowak', author_email='marcin.j.nowak@gmail.com', url='https://github.com/marcinn/restosaur', install_requires = ['mimeparse', 'times>=0.7'], keywords='web rest python django', packages=find_packages('.'), include_package_data=True, test_suite='nose.collector', zip_safe=True, )
unknown
codeparrot/codeparrot-clean
/*------------------------------------------------------------------------- * * hba.c * Routines to handle host based authentication (that's the scheme * wherein you authenticate a user by seeing what IP address the system * says he comes from and choosing authentication method based on it). * * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/libpq/hba.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include <ctype.h> #include <pwd.h> #include <fcntl.h> #include <sys/param.h> #include <sys/socket.h> #include <netdb.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #include "catalog/pg_collation.h" #include "common/ip.h" #include "common/string.h" #include "libpq/hba.h" #include "libpq/ifaddr.h" #include "libpq/libpq-be.h" #include "libpq/oauth.h" #include "postmaster/postmaster.h" #include "regex/regex.h" #include "replication/walsender.h" #include "storage/fd.h" #include "utils/acl.h" #include "utils/conffiles.h" #include "utils/guc.h" #include "utils/memutils.h" #include "utils/varlena.h" #ifdef USE_LDAP #ifdef WIN32 #include <winldap.h> #else #include <ldap.h> #endif #endif /* callback data for check_network_callback */ typedef struct check_network_data { IPCompareMethod method; /* test method */ SockAddr *raddr; /* client's actual address */ bool result; /* set to true if match */ } check_network_data; typedef struct { const char *filename; int linenum; } tokenize_error_callback_arg; #define token_has_regexp(t) (t->regex != NULL) #define token_is_member_check(t) (!t->quoted && t->string[0] == '+') #define token_is_keyword(t, k) (!t->quoted && strcmp(t->string, k) == 0) #define token_matches(t, k) (strcmp(t->string, k) == 0) #define token_matches_insensitive(t,k) (pg_strcasecmp(t->string, k) == 0) /* * Memory context holding the list of TokenizedAuthLines when 
parsing * HBA or ident configuration files. This is created when opening the first * file (depth of CONF_FILE_START_DEPTH). */ static MemoryContext tokenize_context = NULL; /* * pre-parsed content of HBA config file: list of HbaLine structs. * parsed_hba_context is the memory context where it lives. */ static List *parsed_hba_lines = NIL; static MemoryContext parsed_hba_context = NULL; /* * pre-parsed content of ident mapping file: list of IdentLine structs. * parsed_ident_context is the memory context where it lives. */ static List *parsed_ident_lines = NIL; static MemoryContext parsed_ident_context = NULL; /* * The following character array represents the names of the authentication * methods that are supported by PostgreSQL. * * Note: keep this in sync with the UserAuth enum in hba.h. */ static const char *const UserAuthName[] = { "reject", "implicit reject", /* Not a user-visible option */ "trust", "ident", "password", "md5", "scram-sha-256", "gss", "sspi", "pam", "bsd", "ldap", "cert", "radius", "peer", "oauth", }; /* * Make sure UserAuthName[] tracks additions to the UserAuth enum */ StaticAssertDecl(lengthof(UserAuthName) == USER_AUTH_LAST + 1, "UserAuthName[] must match the UserAuth enum"); static List *tokenize_expand_file(List *tokens, const char *outer_filename, const char *inc_filename, int elevel, int depth, char **err_msg); static bool parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int elevel, char **err_msg); static int regcomp_auth_token(AuthToken *token, char *filename, int line_num, char **err_msg, int elevel); static int regexec_auth_token(const char *match, AuthToken *token, size_t nmatch, regmatch_t pmatch[]); static void tokenize_error_callback(void *arg); static bool pg_isblank(const char c) { /* don't accept non-ASCII data */ return (!IS_HIGHBIT_SET(c) && isblank(c)); } /* * Grab one token out of the string pointed to by *lineptr. 
* * Tokens are strings of non-blank characters bounded by blank characters, * commas, beginning of line, and end of line. Blank means space or tab. * * Tokens can be delimited by double quotes (this allows the inclusion of * commas, blanks, and '#', but not newlines). As in SQL, write two * double-quotes to represent a double quote. * * Comments (started by an unquoted '#') are skipped, i.e. the remainder * of the line is ignored. * * (Note that line continuation processing happens before tokenization. * Thus, if a continuation occurs within quoted text or a comment, the * quoted text or comment is considered to continue to the next line.) * * The token, if any, is returned into buf (replacing any previous * contents), and *lineptr is advanced past the token. * * Also, we set *initial_quote to indicate whether there was quoting before * the first character. (We use that to prevent "@x" from being treated * as a file inclusion request. Note that @"x" should be so treated; * we want to allow that to support embedded spaces in file paths.) * * We set *terminating_comma to indicate whether the token is terminated by a * comma (which is not returned, nor advanced over). * * The only possible error condition is lack of terminating quote, but we * currently do not detect that, but just return the rest of the line. * * If successful: store dequoted token in buf and return true. * If no more tokens on line: set buf to empty and return false. */ static bool next_token(char **lineptr, StringInfo buf, bool *initial_quote, bool *terminating_comma) { int c; bool in_quote = false; bool was_quote = false; bool saw_quote = false; /* Initialize output parameters */ resetStringInfo(buf); *initial_quote = false; *terminating_comma = false; /* Move over any whitespace and commas preceding the next token */ while ((c = (*(*lineptr)++)) != '\0' && (pg_isblank(c) || c == ',')) ; /* * Build a token in buf of next characters up to EOL, unquoted comma, or * unquoted whitespace. 
*/ while (c != '\0' && (!pg_isblank(c) || in_quote)) { /* skip comments to EOL */ if (c == '#' && !in_quote) { while ((c = (*(*lineptr)++)) != '\0') ; break; } /* we do not pass back a terminating comma in the token */ if (c == ',' && !in_quote) { *terminating_comma = true; break; } if (c != '"' || was_quote) appendStringInfoChar(buf, c); /* Literal double-quote is two double-quotes */ if (in_quote && c == '"') was_quote = !was_quote; else was_quote = false; if (c == '"') { in_quote = !in_quote; saw_quote = true; if (buf->len == 0) *initial_quote = true; } c = *(*lineptr)++; } /* * Un-eat the char right after the token (critical in case it is '\0', * else next call will read past end of string). */ (*lineptr)--; return (saw_quote || buf->len > 0); } /* * Construct a palloc'd AuthToken struct, copying the given string. */ static AuthToken * make_auth_token(const char *token, bool quoted) { AuthToken *authtoken; int toklen; toklen = strlen(token); /* we copy string into same palloc block as the struct */ authtoken = (AuthToken *) palloc0(sizeof(AuthToken) + toklen + 1); authtoken->string = (char *) authtoken + sizeof(AuthToken); authtoken->quoted = quoted; authtoken->regex = NULL; memcpy(authtoken->string, token, toklen + 1); return authtoken; } /* * Free an AuthToken, that may include a regular expression that needs * to be cleaned up explicitly. */ static void free_auth_token(AuthToken *token) { if (token_has_regexp(token)) pg_regfree(token->regex); } /* * Copy a AuthToken struct into freshly palloc'd memory. */ static AuthToken * copy_auth_token(AuthToken *in) { AuthToken *out = make_auth_token(in->string, in->quoted); return out; } /* * Compile the regular expression and store it in the AuthToken given in * input. Returns the result of pg_regcomp(). On error, the details are * stored in "err_msg". 
*/ static int regcomp_auth_token(AuthToken *token, char *filename, int line_num, char **err_msg, int elevel) { pg_wchar *wstr; int wlen; int rc; Assert(token->regex == NULL); if (token->string[0] != '/') return 0; /* nothing to compile */ token->regex = palloc0_object(regex_t); wstr = palloc((strlen(token->string + 1) + 1) * sizeof(pg_wchar)); wlen = pg_mb2wchar_with_len(token->string + 1, wstr, strlen(token->string + 1)); rc = pg_regcomp(token->regex, wstr, wlen, REG_ADVANCED, C_COLLATION_OID); if (rc) { char errstr[100]; pg_regerror(rc, token->regex, errstr, sizeof(errstr)); ereport(elevel, (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION), errmsg("invalid regular expression \"%s\": %s", token->string + 1, errstr), errcontext("line %d of configuration file \"%s\"", line_num, filename))); *err_msg = psprintf("invalid regular expression \"%s\": %s", token->string + 1, errstr); } pfree(wstr); return rc; } /* * Execute a regular expression computed in an AuthToken, checking for a match * with the string specified in "match". The caller may optionally give an * array to store the matches. Returns the result of pg_regexec(). */ static int regexec_auth_token(const char *match, AuthToken *token, size_t nmatch, regmatch_t pmatch[]) { pg_wchar *wmatchstr; int wmatchlen; int r; Assert(token->string[0] == '/' && token->regex); wmatchstr = palloc((strlen(match) + 1) * sizeof(pg_wchar)); wmatchlen = pg_mb2wchar_with_len(match, wmatchstr, strlen(match)); r = pg_regexec(token->regex, wmatchstr, wmatchlen, 0, NULL, nmatch, pmatch, 0); pfree(wmatchstr); return r; } /* * Tokenize one HBA field from a line, handling file inclusion and comma lists. * * filename: current file's pathname (needed to resolve relative pathnames) * *lineptr: current line pointer, which will be advanced past field * * In event of an error, log a message at ereport level elevel, and also * set *err_msg to a string describing the error. 
Note that the result * may be non-NIL anyway, so *err_msg must be tested to determine whether * there was an error. * * The result is a List of AuthToken structs, one for each token in the field, * or NIL if we reached EOL. */ static List * next_field_expand(const char *filename, char **lineptr, int elevel, int depth, char **err_msg) { StringInfoData buf; bool trailing_comma; bool initial_quote; List *tokens = NIL; initStringInfo(&buf); do { if (!next_token(lineptr, &buf, &initial_quote, &trailing_comma)) break; /* Is this referencing a file? */ if (!initial_quote && buf.len > 1 && buf.data[0] == '@') tokens = tokenize_expand_file(tokens, filename, buf.data + 1, elevel, depth + 1, err_msg); else { MemoryContext oldcxt; /* * lappend() may do its own allocations, so move to the context * for the list of tokens. */ oldcxt = MemoryContextSwitchTo(tokenize_context); tokens = lappend(tokens, make_auth_token(buf.data, initial_quote)); MemoryContextSwitchTo(oldcxt); } } while (trailing_comma && (*err_msg == NULL)); pfree(buf.data); return tokens; } /* * tokenize_include_file * Include a file from another file into an hba "field". * * Opens and tokenises a file included from another authentication file * with one of the include records ("include", "include_if_exists" or * "include_dir"), and assign all values found to an existing list of * list of AuthTokens. * * All new tokens are allocated in the memory context dedicated to the * tokenization, aka tokenize_context. * * If missing_ok is true, ignore a missing file. * * In event of an error, log a message at ereport level elevel, and also * set *err_msg to a string describing the error. Note that the result * may be non-NIL anyway, so *err_msg must be tested to determine whether * there was an error. 
*/ static void tokenize_include_file(const char *outer_filename, const char *inc_filename, List **tok_lines, int elevel, int depth, bool missing_ok, char **err_msg) { char *inc_fullname; FILE *inc_file; inc_fullname = AbsoluteConfigLocation(inc_filename, outer_filename); inc_file = open_auth_file(inc_fullname, elevel, depth, err_msg); if (!inc_file) { if (errno == ENOENT && missing_ok) { ereport(elevel, (errmsg("skipping missing authentication file \"%s\"", inc_fullname))); *err_msg = NULL; pfree(inc_fullname); return; } /* error in err_msg, so leave and report */ pfree(inc_fullname); Assert(err_msg); return; } tokenize_auth_file(inc_fullname, inc_file, tok_lines, elevel, depth); free_auth_file(inc_file, depth); pfree(inc_fullname); } /* * tokenize_expand_file * Expand a file included from another file into an hba "field" * * Opens and tokenises a file included from another HBA config file with @, * and returns all values found therein as a flat list of AuthTokens. If a * @-token or include record is found, recursively expand it. The newly * read tokens are appended to "tokens" (so that foo,bar,@baz does what you * expect). All new tokens are allocated in the memory context dedicated * to the list of TokenizedAuthLines, aka tokenize_context. * * In event of an error, log a message at ereport level elevel, and also * set *err_msg to a string describing the error. Note that the result * may be non-NIL anyway, so *err_msg must be tested to determine whether * there was an error. 
*/ static List * tokenize_expand_file(List *tokens, const char *outer_filename, const char *inc_filename, int elevel, int depth, char **err_msg) { char *inc_fullname; FILE *inc_file; List *inc_lines = NIL; ListCell *inc_line; inc_fullname = AbsoluteConfigLocation(inc_filename, outer_filename); inc_file = open_auth_file(inc_fullname, elevel, depth, err_msg); if (inc_file == NULL) { /* error already logged */ pfree(inc_fullname); return tokens; } /* * There is possible recursion here if the file contains @ or an include * record. */ tokenize_auth_file(inc_fullname, inc_file, &inc_lines, elevel, depth); pfree(inc_fullname); /* * Move all the tokens found in the file to the tokens list. These are * already saved in tokenize_context. */ foreach(inc_line, inc_lines) { TokenizedAuthLine *tok_line = (TokenizedAuthLine *) lfirst(inc_line); ListCell *inc_field; /* If any line has an error, propagate that up to caller */ if (tok_line->err_msg) { *err_msg = pstrdup(tok_line->err_msg); break; } foreach(inc_field, tok_line->fields) { List *inc_tokens = lfirst(inc_field); ListCell *inc_token; foreach(inc_token, inc_tokens) { AuthToken *token = lfirst(inc_token); MemoryContext oldcxt; /* * lappend() may do its own allocations, so move to the * context for the list of tokens. */ oldcxt = MemoryContextSwitchTo(tokenize_context); tokens = lappend(tokens, token); MemoryContextSwitchTo(oldcxt); } } } free_auth_file(inc_file, depth); return tokens; } /* * free_auth_file * Free a file opened by open_auth_file(). */ void free_auth_file(FILE *file, int depth) { FreeFile(file); /* If this is the last cleanup, remove the tokenization context */ if (depth == CONF_FILE_START_DEPTH) { MemoryContextDelete(tokenize_context); tokenize_context = NULL; } } /* * open_auth_file * Open the given file. * * filename: the absolute path to the target file * elevel: message logging level * depth: recursion level when opening the file * err_msg: details about the error * * Return value is the opened file. 
On error, returns NULL with details * about the error stored in "err_msg". */ FILE * open_auth_file(const char *filename, int elevel, int depth, char **err_msg) { FILE *file; /* * Reject too-deep include nesting depth. This is just a safety check to * avoid dumping core due to stack overflow if an include file loops back * to itself. The maximum nesting depth is pretty arbitrary. */ if (depth > CONF_FILE_MAX_DEPTH) { ereport(elevel, (errcode_for_file_access(), errmsg("could not open file \"%s\": maximum nesting depth exceeded", filename))); if (err_msg) *err_msg = psprintf("could not open file \"%s\": maximum nesting depth exceeded", filename); return NULL; } file = AllocateFile(filename, "r"); if (file == NULL) { int save_errno = errno; ereport(elevel, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", filename))); if (err_msg) { errno = save_errno; *err_msg = psprintf("could not open file \"%s\": %m", filename); } /* the caller may care about some specific errno */ errno = save_errno; return NULL; } /* * When opening the top-level file, create the memory context used for the * tokenization. This will be closed with this file when coming back to * this level of cleanup. */ if (depth == CONF_FILE_START_DEPTH) { /* * A context may be present, but assume that it has been eliminated * already. */ tokenize_context = AllocSetContextCreate(CurrentMemoryContext, "tokenize_context", ALLOCSET_START_SMALL_SIZES); } return file; } /* * error context callback for tokenize_auth_file() */ static void tokenize_error_callback(void *arg) { tokenize_error_callback_arg *callback_arg = (tokenize_error_callback_arg *) arg; errcontext("line %d of configuration file \"%s\"", callback_arg->linenum, callback_arg->filename); } /* * tokenize_auth_file * Tokenize the given file. * * The output is a list of TokenizedAuthLine structs; see the struct definition * in libpq/hba.h. This is the central piece in charge of parsing the * authentication files. 
All the operations of this function happen in its own * local memory context, easing the cleanup of anything allocated here. This * matters a lot when reloading authentication files in the postmaster. * * filename: the absolute path to the target file * file: the already-opened target file * tok_lines: receives output list, saved into tokenize_context * elevel: message logging level * depth: level of recursion when tokenizing the target file * * Errors are reported by logging messages at ereport level elevel and by * adding TokenizedAuthLine structs containing non-null err_msg fields to the * output list. */ void tokenize_auth_file(const char *filename, FILE *file, List **tok_lines, int elevel, int depth) { int line_number = 1; StringInfoData buf; MemoryContext linecxt; MemoryContext funccxt; /* context of this function's caller */ ErrorContextCallback tokenerrcontext; tokenize_error_callback_arg callback_arg; Assert(tokenize_context); callback_arg.filename = filename; callback_arg.linenum = line_number; tokenerrcontext.callback = tokenize_error_callback; tokenerrcontext.arg = &callback_arg; tokenerrcontext.previous = error_context_stack; error_context_stack = &tokenerrcontext; /* * Do all the local tokenization in its own context, to ease the cleanup * of any memory allocated while tokenizing. 
*/ linecxt = AllocSetContextCreate(CurrentMemoryContext, "tokenize_auth_file", ALLOCSET_SMALL_SIZES); funccxt = MemoryContextSwitchTo(linecxt); initStringInfo(&buf); if (depth == CONF_FILE_START_DEPTH) *tok_lines = NIL; while (!feof(file) && !ferror(file)) { TokenizedAuthLine *tok_line; MemoryContext oldcxt; char *lineptr; List *current_line = NIL; char *err_msg = NULL; int last_backslash_buflen = 0; int continuations = 0; /* Collect the next input line, handling backslash continuations */ resetStringInfo(&buf); while (pg_get_line_append(file, &buf, NULL)) { /* Strip trailing newline, including \r in case we're on Windows */ buf.len = pg_strip_crlf(buf.data); /* * Check for backslash continuation. The backslash must be after * the last place we found a continuation, else two backslashes * followed by two \n's would behave surprisingly. */ if (buf.len > last_backslash_buflen && buf.data[buf.len - 1] == '\\') { /* Continuation, so strip it and keep reading */ buf.data[--buf.len] = '\0'; last_backslash_buflen = buf.len; continuations++; continue; } /* Nope, so we have the whole line */ break; } if (ferror(file)) { /* I/O error! */ int save_errno = errno; ereport(elevel, (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", filename))); errno = save_errno; err_msg = psprintf("could not read file \"%s\": %m", filename); break; } /* Parse fields */ lineptr = buf.data; while (*lineptr && err_msg == NULL) { List *current_field; current_field = next_field_expand(filename, &lineptr, elevel, depth, &err_msg); /* add field to line, unless we are at EOL or comment start */ if (current_field != NIL) { /* * lappend() may do its own allocations, so move to the * context for the list of tokens. */ oldcxt = MemoryContextSwitchTo(tokenize_context); current_line = lappend(current_line, current_field); MemoryContextSwitchTo(oldcxt); } } /* * Reached EOL; no need to emit line to TokenizedAuthLine list if it's * boring. 
*/ if (current_line == NIL && err_msg == NULL) goto next_line; /* If the line is valid, check if that's an include directive */ if (err_msg == NULL && list_length(current_line) == 2) { AuthToken *first, *second; first = linitial(linitial_node(List, current_line)); second = linitial(lsecond_node(List, current_line)); if (strcmp(first->string, "include") == 0) { tokenize_include_file(filename, second->string, tok_lines, elevel, depth + 1, false, &err_msg); if (err_msg) goto process_line; /* * tokenize_auth_file() has taken care of creating the * TokenizedAuthLines. */ goto next_line; } else if (strcmp(first->string, "include_dir") == 0) { char **filenames; char *dir_name = second->string; int num_filenames; StringInfoData err_buf; filenames = GetConfFilesInDir(dir_name, filename, elevel, &num_filenames, &err_msg); if (!filenames) { /* the error is in err_msg, so create an entry */ goto process_line; } initStringInfo(&err_buf); for (int i = 0; i < num_filenames; i++) { tokenize_include_file(filename, filenames[i], tok_lines, elevel, depth + 1, false, &err_msg); /* cumulate errors if any */ if (err_msg) { if (err_buf.len > 0) appendStringInfoChar(&err_buf, '\n'); appendStringInfoString(&err_buf, err_msg); } } /* clean up things */ for (int i = 0; i < num_filenames; i++) pfree(filenames[i]); pfree(filenames); /* * If there were no errors, the line is fully processed, * bypass the general TokenizedAuthLine processing. */ if (err_buf.len == 0) goto next_line; /* Otherwise, process the cumulated errors, if any. */ err_msg = err_buf.data; goto process_line; } else if (strcmp(first->string, "include_if_exists") == 0) { tokenize_include_file(filename, second->string, tok_lines, elevel, depth + 1, true, &err_msg); if (err_msg) goto process_line; /* * tokenize_auth_file() has taken care of creating the * TokenizedAuthLines. */ goto next_line; } } process_line: /* * General processing: report the error if any and emit line to the * TokenizedAuthLine. 
This is saved in the memory context dedicated * to this list. */ oldcxt = MemoryContextSwitchTo(tokenize_context); tok_line = palloc0_object(TokenizedAuthLine); tok_line->fields = current_line; tok_line->file_name = pstrdup(filename); tok_line->line_num = line_number; tok_line->raw_line = pstrdup(buf.data); tok_line->err_msg = err_msg ? pstrdup(err_msg) : NULL; *tok_lines = lappend(*tok_lines, tok_line); MemoryContextSwitchTo(oldcxt); next_line: line_number += continuations + 1; callback_arg.linenum = line_number; } MemoryContextSwitchTo(funccxt); MemoryContextDelete(linecxt); error_context_stack = tokenerrcontext.previous; } /* * Does user belong to role? * * userid is the OID of the role given as the attempted login identifier. * We check to see if it is a member of the specified role name. */ static bool is_member(Oid userid, const char *role) { Oid roleid; if (!OidIsValid(userid)) return false; /* if user not exist, say "no" */ roleid = get_role_oid(role, true); if (!OidIsValid(roleid)) return false; /* if target role not exist, say "no" */ /* * See if user is directly or indirectly a member of role. For this * purpose, a superuser is not considered to be automatically a member of * the role, so group auth only applies to explicit membership. */ return is_member_of_role_nosuper(userid, roleid); } /* * Check AuthToken list for a match to role, allowing group names. * * Each AuthToken listed is checked one-by-one. Keywords are processed * first (these cannot have regular expressions), followed by regular * expressions (if any), the case-insensitive match (if requested) and * the exact match. 
 */
static bool
check_role(const char *role, Oid roleid, List *tokens, bool case_insensitive)
{
	ListCell   *cell;
	AuthToken  *tok;

	foreach(cell, tokens)
	{
		tok = lfirst(cell);
		if (token_is_member_check(tok))
		{
			/* "+group" syntax: skip the '+' and test role membership */
			if (is_member(roleid, tok->string + 1))
				return true;
		}
		else if (token_is_keyword(tok, "all"))
			return true;
		else if (token_has_regexp(tok))
		{
			if (regexec_auth_token(role, tok, 0, NULL) == REG_OKAY)
				return true;
		}
		else if (case_insensitive)
		{
			if (token_matches_insensitive(tok, role))
				return true;
		}
		else if (token_matches(tok, role))
			return true;
	}
	/* no token matched */
	return false;
}

/*
 * Check to see if db/role combination matches AuthToken list.
 *
 * Each AuthToken listed is checked one-by-one.  Keywords are checked
 * first (these cannot have regular expressions), followed by regular
 * expressions (if any) and the exact match.
 */
static bool
check_db(const char *dbname, const char *role, Oid roleid, List *tokens)
{
	ListCell   *cell;
	AuthToken  *tok;

	foreach(cell, tokens)
	{
		tok = lfirst(cell);
		if (am_walsender && !am_db_walsender)
		{
			/*
			 * physical replication walsender connections can only match
			 * replication keyword
			 */
			if (token_is_keyword(tok, "replication"))
				return true;
		}
		else if (token_is_keyword(tok, "all"))
			return true;
		else if (token_is_keyword(tok, "sameuser"))
		{
			if (strcmp(dbname, role) == 0)
				return true;
		}
		else if (token_is_keyword(tok, "samegroup") ||
				 token_is_keyword(tok, "samerole"))
		{
			/* match if the role is a member of a role named like the db */
			if (is_member(roleid, dbname))
				return true;
		}
		else if (token_is_keyword(tok, "replication"))
			continue;			/* never match this if not walsender */
		else if (token_has_regexp(tok))
		{
			if (regexec_auth_token(dbname, tok, 0, NULL) == REG_OKAY)
				return true;
		}
		else if (token_matches(tok, dbname))
			return true;
	}
	return false;
}

/* Compare two IPv4 socket addresses for address equality (port ignored) */
static bool
ipv4eq(struct sockaddr_in *a, struct sockaddr_in *b)
{
	return (a->sin_addr.s_addr == b->sin_addr.s_addr);
}

/* Compare two IPv6 socket addresses byte-by-byte (port/scope ignored) */
static bool
ipv6eq(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
{
	int			i;

	for (i = 0; i < 16; i++)
		if (a->sin6_addr.s6_addr[i] != b->sin6_addr.s6_addr[i])
			return false;

	return true;
}

/*
 * Check whether host name matches pattern.
 *
 * A pattern beginning with '.' matches any host name with that suffix;
 * otherwise an exact (case-insensitive) comparison is done.
 */
static bool
hostname_match(const char *pattern, const char *actual_hostname)
{
	if (pattern[0] == '.')		/* suffix match */
	{
		size_t		plen = strlen(pattern);
		size_t		hlen = strlen(actual_hostname);

		if (hlen < plen)
			return false;

		return (pg_strcasecmp(pattern, actual_hostname + (hlen - plen)) == 0);
	}
	else
		return (pg_strcasecmp(pattern, actual_hostname) == 0);
}

/*
 * Check to see if a connecting IP matches a given host name.
 *
 * Does a reverse lookup of the client address (cached in the Port), matches
 * the name against the pg_hba pattern, then confirms with a forward lookup
 * that the name really resolves back to the client's address.  Results of
 * both lookups are cached in the Port so repeated pg_hba lines are cheap.
 */
static bool
check_hostname(Port *port, const char *hostname)
{
	struct addrinfo *gai_result,
			   *gai;
	int			ret;
	bool		found;

	/* Quick out if remote host name already known bad */
	if (port->remote_hostname_resolv < 0)
		return false;

	/* Lookup remote host name if not already done */
	if (!port->remote_hostname)
	{
		char		remote_hostname[NI_MAXHOST];

		ret = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
								 remote_hostname, sizeof(remote_hostname),
								 NULL, 0,
								 NI_NAMEREQD);
		if (ret != 0)
		{
			/* remember failure; don't complain in the postmaster log yet */
			port->remote_hostname_resolv = -2;
			port->remote_hostname_errcode = ret;
			return false;
		}

		port->remote_hostname = pstrdup(remote_hostname);
	}

	/* Now see if remote host name matches this pg_hba line */
	if (!hostname_match(hostname, port->remote_hostname))
		return false;

	/* If we already verified the forward lookup, we're done */
	if (port->remote_hostname_resolv == +1)
		return true;

	/* Lookup IP from host name and check against original IP */
	ret = getaddrinfo(port->remote_hostname, NULL, NULL, &gai_result);
	if (ret != 0)
	{
		/* remember failure; don't complain in the postmaster log yet */
		port->remote_hostname_resolv = -2;
		port->remote_hostname_errcode = ret;
		return false;
	}

	found = false;
	for (gai = gai_result; gai; gai = gai->ai_next)
	{
		if (gai->ai_addr->sa_family == port->raddr.addr.ss_family)
		{
			if (gai->ai_addr->sa_family == AF_INET)
			{
				if (ipv4eq((struct sockaddr_in *) gai->ai_addr,
						   (struct sockaddr_in *) &port->raddr.addr))
				{
					found = true;
					break;
				}
			}
			else if (gai->ai_addr->sa_family == AF_INET6)
			{
				if (ipv6eq((struct sockaddr_in6 *) gai->ai_addr,
						   (struct sockaddr_in6 *) &port->raddr.addr))
				{
					found = true;
					break;
				}
			}
		}
	}

	if (gai_result)
		freeaddrinfo(gai_result);

	if (!found)
		elog(DEBUG2, "pg_hba.conf host name \"%s\" rejected because address resolution did not return a match with IP address of client",
			 hostname);

	/* Cache the verdict: +1 = verified match, -1 = verified non-match */
	port->remote_hostname_resolv = found ? +1 : -1;

	return found;
}

/*
 * Check to see if a connecting IP matches the given address and netmask.
 */
static bool
check_ip(SockAddr *raddr, struct sockaddr *addr, struct sockaddr *mask)
{
	if (raddr->addr.ss_family == addr->sa_family &&
		pg_range_sockaddr(&raddr->addr,
						  (struct sockaddr_storage *) addr,
						  (struct sockaddr_storage *) mask))
		return true;
	return false;
}

/*
 * pg_foreach_ifaddr callback: does client addr match this machine interface?
 */
static void
check_network_callback(struct sockaddr *addr, struct sockaddr *netmask,
					   void *cb_data)
{
	check_network_data *cn = (check_network_data *) cb_data;
	struct sockaddr_storage mask;

	/* Already found a match? */
	if (cn->result)
		return;

	if (cn->method == ipCmpSameHost)
	{
		/* Make an all-ones netmask of appropriate length for family */
		pg_sockaddr_cidr_mask(&mask, NULL, addr->sa_family);
		cn->result = check_ip(cn->raddr, addr, (struct sockaddr *) &mask);
	}
	else
	{
		/* Use the netmask of the interface itself */
		cn->result = check_ip(cn->raddr, addr, netmask);
	}
}

/*
 * Use pg_foreach_ifaddr to check a samehost or samenet match
 */
static bool
check_same_host_or_net(SockAddr *raddr, IPCompareMethod method)
{
	check_network_data cn;

	cn.method = method;
	cn.raddr = raddr;
	cn.result = false;

	errno = 0;
	if (pg_foreach_ifaddr(check_network_callback, &cn) < 0)
	{
		ereport(LOG,
				(errmsg("error enumerating network interfaces: %m")));
		return false;
	}

	return cn.result;
}

/*
 * Macros used to check and report on invalid configuration options.
 * On error: log a message at level elevel, set *err_msg, and exit the
 * function.
 * These macros are not as general-purpose as they look, because they know
 * what the calling function's error-exit value is.
 *
 * INVALID_AUTH_OPTION = reports when an option is specified for a method
 * where it's not supported.
 * REQUIRE_AUTH_OPTION = same as INVALID_AUTH_OPTION, except it also checks
 * if the method is actually the one specified.  Used as a shortcut when
 * the option is only valid for one authentication method.
 * MANDATORY_AUTH_ARG = check if a required option is set for an
 * authentication method, reporting error if it's not.
 */

/* NB: returns false out of the *calling* function (parse_hba_auth_opt) */
#define INVALID_AUTH_OPTION(optname, validmethods) \
do { \
	ereport(elevel, \
			(errcode(ERRCODE_CONFIG_FILE_ERROR), \
	/* translator: the second %s is a list of auth methods */ \
			 errmsg("authentication option \"%s\" is only valid for authentication methods %s", \
					optname, _(validmethods)), \
			 errcontext("line %d of configuration file \"%s\"", \
						line_num, file_name))); \
	*err_msg = psprintf("authentication option \"%s\" is only valid for authentication methods %s", \
						optname, validmethods); \
	return false; \
} while (0)

#define REQUIRE_AUTH_OPTION(methodval, optname, validmethods) \
do { \
	if (hbaline->auth_method != methodval) \
		INVALID_AUTH_OPTION(optname, validmethods); \
} while (0)

/* NB: returns NULL out of the *calling* function (parse_hba_line) */
#define MANDATORY_AUTH_ARG(argvar, argname, authname) \
do { \
	if (argvar == NULL) { \
		ereport(elevel, \
				(errcode(ERRCODE_CONFIG_FILE_ERROR), \
				 errmsg("authentication method \"%s\" requires argument \"%s\" to be set", \
						authname, argname), \
				 errcontext("line %d of configuration file \"%s\"", \
							line_num, file_name))); \
		*err_msg = psprintf("authentication method \"%s\" requires argument \"%s\" to be set", \
							authname, argname); \
		return NULL; \
	} \
} while (0)

/*
 * Macros for handling pg_ident problems, similar as above.
 *
 * IDENT_FIELD_ABSENT:
 * Reports when the given ident field ListCell is not populated.
 *
 * IDENT_MULTI_VALUE:
 * Reports when the given ident token List has more than one element.
 */
#define IDENT_FIELD_ABSENT(field) \
do { \
	if (!field) { \
		ereport(elevel, \
				(errcode(ERRCODE_CONFIG_FILE_ERROR), \
				 errmsg("missing entry at end of line"), \
				 errcontext("line %d of configuration file \"%s\"", \
							line_num, file_name))); \
		*err_msg = pstrdup("missing entry at end of line"); \
		return NULL; \
	} \
} while (0)

#define IDENT_MULTI_VALUE(tokens) \
do { \
	if (tokens->length > 1) { \
		ereport(elevel, \
				(errcode(ERRCODE_CONFIG_FILE_ERROR), \
				 errmsg("multiple values in ident field"), \
				 errcontext("line %d of configuration file \"%s\"", \
							line_num, file_name))); \
		*err_msg = pstrdup("multiple values in ident field"); \
		return NULL; \
	} \
} while (0)

/*
 * Parse one tokenised line from the hba config file and store the result in
 * a HbaLine structure.
 *
 * If parsing fails, log a message at ereport level elevel, store an error
 * string in tok_line->err_msg, and return NULL.  (Some non-error conditions
 * can also result in such messages.)
 *
 * Note: this function leaks memory when an error occurs.  Caller is expected
 * to have set a memory context that will be reset if this function returns
 * NULL.
 */
HbaLine *
parse_hba_line(TokenizedAuthLine *tok_line, int elevel)
{
	int			line_num = tok_line->line_num;
	char	   *file_name = tok_line->file_name;
	char	  **err_msg = &tok_line->err_msg;
	char	   *str;
	struct addrinfo *gai_result;
	struct addrinfo hints;
	int			ret;
	char	   *cidr_slash;
	char	   *unsupauth;
	ListCell   *field;
	List	   *tokens;
	ListCell   *tokencell;
	AuthToken  *token;
	HbaLine    *parsedline;

	parsedline = palloc0_object(HbaLine);
	parsedline->sourcefile = pstrdup(file_name);
	parsedline->linenumber = line_num;
	parsedline->rawline = pstrdup(tok_line->raw_line);

	/* Check the record type. */
	Assert(tok_line->fields != NIL);
	field = list_head(tok_line->fields);
	tokens = lfirst(field);
	if (tokens->length > 1)
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("multiple values specified for connection type"),
				 errhint("Specify exactly one connection type per line."),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = "multiple values specified for connection type";
		return NULL;
	}
	token = linitial(tokens);
	if (strcmp(token->string, "local") == 0)
	{
		parsedline->conntype = ctLocal;
	}
	else if (strcmp(token->string, "host") == 0 ||
			 strcmp(token->string, "hostssl") == 0 ||
			 strcmp(token->string, "hostnossl") == 0 ||
			 strcmp(token->string, "hostgssenc") == 0 ||
			 strcmp(token->string, "hostnogssenc") == 0)
	{
		/*
		 * Distinguish the "host*" variants by the character(s) after
		 * "host": 's' -> hostssl, 'g' -> hostgssenc, 'n'+... -> the two
		 * "no" forms, anything else -> plain "host".
		 */
		if (token->string[4] == 's')	/* "hostssl" */
		{
			parsedline->conntype = ctHostSSL;
			/* Log a warning if SSL support is not active */
#ifdef USE_SSL
			if (!EnableSSL)
			{
				ereport(elevel,
						(errcode(ERRCODE_CONFIG_FILE_ERROR),
						 errmsg("hostssl record cannot match because SSL is disabled"),
						 errhint("Set \"ssl = on\" in postgresql.conf."),
						 errcontext("line %d of configuration file \"%s\"",
									line_num, file_name)));
				*err_msg = "hostssl record cannot match because SSL is disabled";
			}
#else
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("hostssl record cannot match because SSL is not supported by this build"),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = "hostssl record cannot match because SSL is not supported by this build";
#endif
		}
		else if (token->string[4] == 'g')	/* "hostgssenc" */
		{
			parsedline->conntype = ctHostGSS;
#ifndef ENABLE_GSS
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("hostgssenc record cannot match because GSSAPI is not supported by this build"),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = "hostgssenc record cannot match because GSSAPI is not supported by this build";
#endif
		}
		else if (token->string[4] == 'n' && token->string[6] == 's')
			parsedline->conntype = ctHostNoSSL;
		else if (token->string[4] == 'n' && token->string[6] == 'g')
			parsedline->conntype = ctHostNoGSS;
		else
		{
			/* "host" */
			parsedline->conntype = ctHost;
		}
	}							/* record type */
	else
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("invalid connection type \"%s\"", token->string),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = psprintf("invalid connection type \"%s\"", token->string);
		return NULL;
	}

	/* Get the databases. */
	field = lnext(tok_line->fields, field);
	if (!field)
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("end-of-line before database specification"),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = "end-of-line before database specification";
		return NULL;
	}
	parsedline->databases = NIL;
	tokens = lfirst(field);
	foreach(tokencell, tokens)
	{
		AuthToken  *tok = copy_auth_token(lfirst(tokencell));

		/* Compile a regexp for the database token, if necessary */
		if (regcomp_auth_token(tok, file_name, line_num, err_msg, elevel))
			return NULL;

		parsedline->databases = lappend(parsedline->databases, tok);
	}

	/* Get the roles. */
	field = lnext(tok_line->fields, field);
	if (!field)
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("end-of-line before role specification"),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = "end-of-line before role specification";
		return NULL;
	}
	parsedline->roles = NIL;
	tokens = lfirst(field);
	foreach(tokencell, tokens)
	{
		AuthToken  *tok = copy_auth_token(lfirst(tokencell));

		/* Compile a regexp from the role token, if necessary */
		if (regcomp_auth_token(tok, file_name, line_num, err_msg, elevel))
			return NULL;

		parsedline->roles = lappend(parsedline->roles, tok);
	}

	if (parsedline->conntype != ctLocal)
	{
		/* Read the IP address field.  (with or without CIDR netmask) */
		field = lnext(tok_line->fields, field);
		if (!field)
		{
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("end-of-line before IP address specification"),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = "end-of-line before IP address specification";
			return NULL;
		}
		tokens = lfirst(field);
		if (tokens->length > 1)
		{
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("multiple values specified for host address"),
					 errhint("Specify one address range per line."),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = "multiple values specified for host address";
			return NULL;
		}
		token = linitial(tokens);

		if (token_is_keyword(token, "all"))
		{
			parsedline->ip_cmp_method = ipCmpAll;
		}
		else if (token_is_keyword(token, "samehost"))
		{
			/* Any IP on this host is allowed to connect */
			parsedline->ip_cmp_method = ipCmpSameHost;
		}
		else if (token_is_keyword(token, "samenet"))
		{
			/* Any IP on the host's subnets is allowed to connect */
			parsedline->ip_cmp_method = ipCmpSameNet;
		}
		else
		{
			/* IP and netmask are specified */
			parsedline->ip_cmp_method = ipCmpMask;

			/* need a modifiable copy of token */
			str = pstrdup(token->string);

			/* Check if it has a CIDR suffix and if so isolate it */
			cidr_slash = strchr(str, '/');
			if (cidr_slash)
				*cidr_slash = '\0';

			/* Get the IP address either way */
			hints.ai_flags = AI_NUMERICHOST;
			hints.ai_family = AF_UNSPEC;
			hints.ai_socktype = 0;
			hints.ai_protocol = 0;
			hints.ai_addrlen = 0;
			hints.ai_canonname = NULL;
			hints.ai_addr = NULL;
			hints.ai_next = NULL;

			ret = pg_getaddrinfo_all(str, NULL, &hints, &gai_result);
			if (ret == 0 && gai_result)
			{
				memcpy(&parsedline->addr, gai_result->ai_addr,
					   gai_result->ai_addrlen);
				parsedline->addrlen = gai_result->ai_addrlen;
			}
			else if (ret == EAI_NONAME)
				/* Not a numeric address: treat the token as a host name */
				parsedline->hostname = str;
			else
			{
				ereport(elevel,
						(errcode(ERRCODE_CONFIG_FILE_ERROR),
						 errmsg("invalid IP address \"%s\": %s",
								str, gai_strerror(ret)),
						 errcontext("line %d of configuration file \"%s\"",
									line_num, file_name)));
				*err_msg = psprintf("invalid IP address \"%s\": %s",
									str, gai_strerror(ret));
				if (gai_result)
					pg_freeaddrinfo_all(hints.ai_family, gai_result);
				return NULL;
			}

			pg_freeaddrinfo_all(hints.ai_family, gai_result);

			/* Get the netmask */
			if (cidr_slash)
			{
				/* A CIDR mask makes sense only with a numeric address */
				if (parsedline->hostname)
				{
					ereport(elevel,
							(errcode(ERRCODE_CONFIG_FILE_ERROR),
							 errmsg("specifying both host name and CIDR mask is invalid: \"%s\"",
									token->string),
							 errcontext("line %d of configuration file \"%s\"",
										line_num, file_name)));
					*err_msg = psprintf("specifying both host name and CIDR mask is invalid: \"%s\"",
										token->string);
					return NULL;
				}

				if (pg_sockaddr_cidr_mask(&parsedline->mask, cidr_slash + 1,
										  parsedline->addr.ss_family) < 0)
				{
					ereport(elevel,
							(errcode(ERRCODE_CONFIG_FILE_ERROR),
							 errmsg("invalid CIDR mask in address \"%s\"",
									token->string),
							 errcontext("line %d of configuration file \"%s\"",
										line_num, file_name)));
					*err_msg = psprintf("invalid CIDR mask in address \"%s\"",
										token->string);
					return NULL;
				}
				parsedline->masklen = parsedline->addrlen;
				pfree(str);
			}
			else if (!parsedline->hostname)
			{
				/* Read the mask field. */
				pfree(str);
				field = lnext(tok_line->fields, field);
				if (!field)
				{
					ereport(elevel,
							(errcode(ERRCODE_CONFIG_FILE_ERROR),
							 errmsg("end-of-line before netmask specification"),
							 errhint("Specify an address range in CIDR notation, or provide a separate netmask."),
							 errcontext("line %d of configuration file \"%s\"",
										line_num, file_name)));
					*err_msg = "end-of-line before netmask specification";
					return NULL;
				}
				tokens = lfirst(field);
				if (tokens->length > 1)
				{
					ereport(elevel,
							(errcode(ERRCODE_CONFIG_FILE_ERROR),
							 errmsg("multiple values specified for netmask"),
							 errcontext("line %d of configuration file \"%s\"",
										line_num, file_name)));
					*err_msg = "multiple values specified for netmask";
					return NULL;
				}
				token = linitial(tokens);

				ret = pg_getaddrinfo_all(token->string, NULL, &hints,
										 &gai_result);
				if (ret || !gai_result)
				{
					ereport(elevel,
							(errcode(ERRCODE_CONFIG_FILE_ERROR),
							 errmsg("invalid IP mask \"%s\": %s",
									token->string, gai_strerror(ret)),
							 errcontext("line %d of configuration file \"%s\"",
										line_num, file_name)));
					*err_msg = psprintf("invalid IP mask \"%s\": %s",
										token->string, gai_strerror(ret));
					if (gai_result)
						pg_freeaddrinfo_all(hints.ai_family, gai_result);
					return NULL;
				}

				memcpy(&parsedline->mask, gai_result->ai_addr,
					   gai_result->ai_addrlen);
				parsedline->masklen = gai_result->ai_addrlen;
				pg_freeaddrinfo_all(hints.ai_family, gai_result);

				if (parsedline->addr.ss_family != parsedline->mask.ss_family)
				{
					ereport(elevel,
							(errcode(ERRCODE_CONFIG_FILE_ERROR),
							 errmsg("IP address and mask do not match"),
							 errcontext("line %d of configuration file \"%s\"",
										line_num, file_name)));
					*err_msg = "IP address and mask do not match";
					return NULL;
				}
			}
		}
	}							/* != ctLocal */

	/* Get the authentication method */
	field = lnext(tok_line->fields, field);
	if (!field)
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("end-of-line before authentication method"),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = "end-of-line before authentication method";
		return NULL;
	}
	tokens = lfirst(field);
	if (tokens->length > 1)
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("multiple values specified for authentication type"),
				 errhint("Specify exactly one authentication type per line."),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = "multiple values specified for authentication type";
		return NULL;
	}
	token = linitial(tokens);

	/*
	 * Map the method keyword to a UserAuth value.  Methods that depend on
	 * optional build features set unsupauth instead when the feature is
	 * compiled out, producing a uniform error below.
	 */
	unsupauth = NULL;
	if (strcmp(token->string, "trust") == 0)
		parsedline->auth_method = uaTrust;
	else if (strcmp(token->string, "ident") == 0)
		parsedline->auth_method = uaIdent;
	else if (strcmp(token->string, "peer") == 0)
		parsedline->auth_method = uaPeer;
	else if (strcmp(token->string, "password") == 0)
		parsedline->auth_method = uaPassword;
	else if (strcmp(token->string, "gss") == 0)
#ifdef ENABLE_GSS
		parsedline->auth_method = uaGSS;
#else
		unsupauth = "gss";
#endif
	else if (strcmp(token->string, "sspi") == 0)
#ifdef ENABLE_SSPI
		parsedline->auth_method = uaSSPI;
#else
		unsupauth = "sspi";
#endif
	else if (strcmp(token->string, "reject") == 0)
		parsedline->auth_method = uaReject;
	else if (strcmp(token->string, "md5") == 0)
		parsedline->auth_method = uaMD5;
	else if (strcmp(token->string, "scram-sha-256") == 0)
		parsedline->auth_method = uaSCRAM;
	else if (strcmp(token->string, "pam") == 0)
#ifdef USE_PAM
		parsedline->auth_method = uaPAM;
#else
		unsupauth = "pam";
#endif
	else if (strcmp(token->string, "bsd") == 0)
#ifdef USE_BSD_AUTH
		parsedline->auth_method = uaBSD;
#else
		unsupauth = "bsd";
#endif
	else if (strcmp(token->string, "ldap") == 0)
#ifdef USE_LDAP
		parsedline->auth_method = uaLDAP;
#else
		unsupauth = "ldap";
#endif
	else if (strcmp(token->string, "cert") == 0)
#ifdef USE_SSL
		parsedline->auth_method = uaCert;
#else
		unsupauth = "cert";
#endif
	else if (strcmp(token->string, "radius") == 0)
		parsedline->auth_method = uaRADIUS;
	else if (strcmp(token->string, "oauth") == 0)
		parsedline->auth_method = uaOAuth;
	else
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("invalid authentication method \"%s\"",
						token->string),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = psprintf("invalid authentication method \"%s\"",
							token->string);
		return NULL;
	}

	if (unsupauth)
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("invalid authentication method \"%s\": not supported by this build",
						token->string),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = psprintf("invalid authentication method \"%s\": not supported by this build",
							token->string);
		return NULL;
	}

	/*
	 * XXX: When using ident on local connections, change it to peer, for
	 * backwards compatibility.
	 */
	if (parsedline->conntype == ctLocal &&
		parsedline->auth_method == uaIdent)
		parsedline->auth_method = uaPeer;

	/* Invalid authentication combinations */
	if (parsedline->conntype == ctLocal &&
		parsedline->auth_method == uaGSS)
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("gssapi authentication is not supported on local sockets"),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = "gssapi authentication is not supported on local sockets";
		return NULL;
	}

	if (parsedline->conntype != ctLocal &&
		parsedline->auth_method == uaPeer)
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("peer authentication is only supported on local sockets"),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = "peer authentication is only supported on local sockets";
		return NULL;
	}

	/*
	 * SSPI authentication can never be enabled on ctLocal connections,
	 * because it's only supported on Windows, where ctLocal isn't supported.
	 */

	if (parsedline->conntype != ctHostSSL &&
		parsedline->auth_method == uaCert)
	{
		ereport(elevel,
				(errcode(ERRCODE_CONFIG_FILE_ERROR),
				 errmsg("cert authentication is only supported on hostssl connections"),
				 errcontext("line %d of configuration file \"%s\"",
							line_num, file_name)));
		*err_msg = "cert authentication is only supported on hostssl connections";
		return NULL;
	}

	/*
	 * For GSS and SSPI, set the default value of include_realm to true.
	 * Having include_realm set to false is dangerous in multi-realm
	 * situations and is generally considered bad practice.  We keep the
	 * capability around for backwards compatibility, but we might want to
	 * remove it at some point in the future.  Users who still need to strip
	 * the realm off would be better served by using an appropriate regex in
	 * a pg_ident.conf mapping.
	 */
	if (parsedline->auth_method == uaGSS ||
		parsedline->auth_method == uaSSPI)
		parsedline->include_realm = true;

	/*
	 * For SSPI, include_realm defaults to the SAM-compatible domain (aka
	 * NetBIOS name) and user names instead of the Kerberos principal name
	 * for compatibility.
	 */
	if (parsedline->auth_method == uaSSPI)
	{
		parsedline->compat_realm = true;
		parsedline->upn_username = false;
	}

	/* Parse remaining arguments */
	while ((field = lnext(tok_line->fields, field)) != NULL)
	{
		tokens = lfirst(field);
		foreach(tokencell, tokens)
		{
			char	   *val;

			token = lfirst(tokencell);

			str = pstrdup(token->string);
			val = strchr(str, '=');
			if (val == NULL)
			{
				/*
				 * Got something that's not a name=value pair.
				 */
				ereport(elevel,
						(errcode(ERRCODE_CONFIG_FILE_ERROR),
						 errmsg("authentication option not in name=value format: %s",
								token->string),
						 errcontext("line %d of configuration file \"%s\"",
									line_num, file_name)));
				*err_msg = psprintf("authentication option not in name=value format: %s",
									token->string);
				return NULL;
			}

			*val++ = '\0';		/* str now holds "name", val holds "value" */
			if (!parse_hba_auth_opt(str, val, parsedline, elevel, err_msg))
				/* parse_hba_auth_opt already logged the error message */
				return NULL;
			pfree(str);
		}
	}

	/*
	 * Check if the selected authentication method has any mandatory
	 * arguments that are not set.
	 */
	if (parsedline->auth_method == uaLDAP)
	{
#ifndef HAVE_LDAP_INITIALIZE
		/* Not mandatory for OpenLDAP, because it can use DNS SRV records */
		MANDATORY_AUTH_ARG(parsedline->ldapserver, "ldapserver", "ldap");
#endif

		/*
		 * LDAP can operate in two modes: either with a direct bind, using
		 * ldapprefix and ldapsuffix, or using a search+bind, using
		 * ldapbasedn, ldapbinddn, ldapbindpasswd and one of
		 * ldapsearchattribute or ldapsearchfilter.  Disallow mixing these
		 * parameters.
		 */
		if (parsedline->ldapprefix || parsedline->ldapsuffix)
		{
			if (parsedline->ldapbasedn ||
				parsedline->ldapbinddn ||
				parsedline->ldapbindpasswd ||
				parsedline->ldapsearchattribute ||
				parsedline->ldapsearchfilter)
			{
				ereport(elevel,
						(errcode(ERRCODE_CONFIG_FILE_ERROR),
						 errmsg("cannot mix options for simple bind and search+bind modes"),
						 errcontext("line %d of configuration file \"%s\"",
									line_num, file_name)));
				*err_msg = "cannot mix options for simple bind and search+bind modes";
				return NULL;
			}
		}
		else if (!parsedline->ldapbasedn)
		{
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("authentication method \"ldap\" requires argument \"ldapbasedn\", \"ldapprefix\", or \"ldapsuffix\" to be set"),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = "authentication method \"ldap\" requires argument \"ldapbasedn\", \"ldapprefix\", or \"ldapsuffix\" to be set";
			return NULL;
		}

		/*
		 * When using search+bind, you can either use a simple attribute
		 * (defaulting to "uid") or a fully custom search filter.  You can't
		 * do both.
		 */
		if (parsedline->ldapsearchattribute && parsedline->ldapsearchfilter)
		{
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("cannot use ldapsearchattribute together with ldapsearchfilter"),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = "cannot use ldapsearchattribute together with ldapsearchfilter";
			return NULL;
		}
	}

	if (parsedline->auth_method == uaRADIUS)
	{
		MANDATORY_AUTH_ARG(parsedline->radiusservers, "radiusservers", "radius");
		MANDATORY_AUTH_ARG(parsedline->radiussecrets, "radiussecrets", "radius");

		if (parsedline->radiusservers == NIL)
		{
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("list of RADIUS servers cannot be empty"),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = "list of RADIUS servers cannot be empty";
			return NULL;
		}

		if (parsedline->radiussecrets == NIL)
		{
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("list of RADIUS secrets cannot be empty"),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = "list of RADIUS secrets cannot be empty";
			return NULL;
		}

		/*
		 * Verify length of option lists - each can be 0 (except for secrets,
		 * but that's already checked above), 1 (use the same value
		 * everywhere) or the same as the number of servers.
		 */
		if (!(list_length(parsedline->radiussecrets) == 1 ||
			  list_length(parsedline->radiussecrets) == list_length(parsedline->radiusservers)))
		{
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("the number of RADIUS secrets (%d) must be 1 or the same as the number of RADIUS servers (%d)",
							list_length(parsedline->radiussecrets),
							list_length(parsedline->radiusservers)),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = psprintf("the number of RADIUS secrets (%d) must be 1 or the same as the number of RADIUS servers (%d)",
								list_length(parsedline->radiussecrets),
								list_length(parsedline->radiusservers));
			return NULL;
		}
		if (!(list_length(parsedline->radiusports) == 0 ||
			  list_length(parsedline->radiusports) == 1 ||
			  list_length(parsedline->radiusports) == list_length(parsedline->radiusservers)))
		{
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("the number of RADIUS ports (%d) must be 1 or the same as the number of RADIUS servers (%d)",
							list_length(parsedline->radiusports),
							list_length(parsedline->radiusservers)),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = psprintf("the number of RADIUS ports (%d) must be 1 or the same as the number of RADIUS servers (%d)",
								list_length(parsedline->radiusports),
								list_length(parsedline->radiusservers));
			return NULL;
		}
		if (!(list_length(parsedline->radiusidentifiers) == 0 ||
			  list_length(parsedline->radiusidentifiers) == 1 ||
			  list_length(parsedline->radiusidentifiers) == list_length(parsedline->radiusservers)))
		{
			ereport(elevel,
					(errcode(ERRCODE_CONFIG_FILE_ERROR),
					 errmsg("the number of RADIUS identifiers (%d) must be 1 or the same as the number of RADIUS servers (%d)",
							list_length(parsedline->radiusidentifiers),
							list_length(parsedline->radiusservers)),
					 errcontext("line %d of configuration file \"%s\"",
								line_num, file_name)));
			*err_msg = psprintf("the number of RADIUS identifiers (%d) must be 1 or the same as the number of RADIUS servers (%d)",
								list_length(parsedline->radiusidentifiers),
								list_length(parsedline->radiusservers));
			return NULL;
		}
	}

	/*
	 * Enforce any parameters implied by other settings.
	 */
	if (parsedline->auth_method == uaCert)
	{
		/*
		 * For auth method cert, client certificate validation is mandatory,
		 * and it implies the level of verify-full.
		 */
		parsedline->clientcert = clientCertFull;
	}

	/*
	 * Enforce proper configuration of OAuth authentication.
	 */
	if (parsedline->auth_method == uaOAuth)
	{
		MANDATORY_AUTH_ARG(parsedline->oauth_scope, "scope", "oauth");
		MANDATORY_AUTH_ARG(parsedline->oauth_issuer, "issuer", "oauth");

		/* Ensure a validator library is set and permitted by the config. */
		if (!check_oauth_validator(parsedline, elevel, err_msg))
			return NULL;

		/*
		 * Supplying a usermap combined with the option to skip usermapping
		 * is nonsensical and indicates a configuration error.
		 */
		if (parsedline->oauth_skip_usermap && parsedline->usermap != NULL)
		{
			ereport(elevel,
					errcode(ERRCODE_CONFIG_FILE_ERROR),
			/* translator: strings are replaced with hba options */
					errmsg("%s cannot be used in combination with %s",
						   "map", "delegate_ident_mapping"),
					errcontext("line %d of configuration file \"%s\"",
							   line_num, file_name));
			*err_msg = "map cannot be used in combination with delegate_ident_mapping";
			return NULL;
		}
	}

	return parsedline;
}

/*
 * Parse one name-value pair as an authentication option into the given
 * HbaLine.  Return true if we successfully parse the option, false if we
 * encounter an error.  In the event of an error, also log a message at
 * ereport level elevel, and store a message string into *err_msg.
*/ static bool parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int elevel, char **err_msg) { int line_num = hbaline->linenumber; char *file_name = hbaline->sourcefile; #ifdef USE_LDAP hbaline->ldapscope = LDAP_SCOPE_SUBTREE; #endif if (strcmp(name, "map") == 0) { if (hbaline->auth_method != uaIdent && hbaline->auth_method != uaPeer && hbaline->auth_method != uaGSS && hbaline->auth_method != uaSSPI && hbaline->auth_method != uaCert && hbaline->auth_method != uaOAuth) INVALID_AUTH_OPTION("map", gettext_noop("ident, peer, gssapi, sspi, cert, and oauth")); hbaline->usermap = pstrdup(val); } else if (strcmp(name, "clientcert") == 0) { if (hbaline->conntype != ctHostSSL) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("clientcert can only be configured for \"hostssl\" rows"), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); *err_msg = "clientcert can only be configured for \"hostssl\" rows"; return false; } if (strcmp(val, "verify-full") == 0) { hbaline->clientcert = clientCertFull; } else if (strcmp(val, "verify-ca") == 0) { if (hbaline->auth_method == uaCert) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("clientcert only accepts \"verify-full\" when using \"cert\" authentication"), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); *err_msg = "clientcert can only be set to \"verify-full\" when using \"cert\" authentication"; return false; } hbaline->clientcert = clientCertCA; } else { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("invalid value for clientcert: \"%s\"", val), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); return false; } } else if (strcmp(name, "clientname") == 0) { if (hbaline->conntype != ctHostSSL) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("clientname can only be configured for \"hostssl\" rows"), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); *err_msg = "clientname can 
only be configured for \"hostssl\" rows"; return false; } if (strcmp(val, "CN") == 0) { hbaline->clientcertname = clientCertCN; } else if (strcmp(val, "DN") == 0) { hbaline->clientcertname = clientCertDN; } else { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("invalid value for clientname: \"%s\"", val), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); return false; } } else if (strcmp(name, "pamservice") == 0) { REQUIRE_AUTH_OPTION(uaPAM, "pamservice", "pam"); hbaline->pamservice = pstrdup(val); } else if (strcmp(name, "pam_use_hostname") == 0) { REQUIRE_AUTH_OPTION(uaPAM, "pam_use_hostname", "pam"); if (strcmp(val, "1") == 0) hbaline->pam_use_hostname = true; else hbaline->pam_use_hostname = false; } else if (strcmp(name, "ldapurl") == 0) { #ifdef LDAP_API_FEATURE_X_OPENLDAP LDAPURLDesc *urldata; int rc; #endif REQUIRE_AUTH_OPTION(uaLDAP, "ldapurl", "ldap"); #ifdef LDAP_API_FEATURE_X_OPENLDAP rc = ldap_url_parse(val, &urldata); if (rc != LDAP_SUCCESS) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not parse LDAP URL \"%s\": %s", val, ldap_err2string(rc)))); *err_msg = psprintf("could not parse LDAP URL \"%s\": %s", val, ldap_err2string(rc)); return false; } if (strcmp(urldata->lud_scheme, "ldap") != 0 && strcmp(urldata->lud_scheme, "ldaps") != 0) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("unsupported LDAP URL scheme: %s", urldata->lud_scheme))); *err_msg = psprintf("unsupported LDAP URL scheme: %s", urldata->lud_scheme); ldap_free_urldesc(urldata); return false; } if (urldata->lud_scheme) hbaline->ldapscheme = pstrdup(urldata->lud_scheme); if (urldata->lud_host) hbaline->ldapserver = pstrdup(urldata->lud_host); hbaline->ldapport = urldata->lud_port; if (urldata->lud_dn) hbaline->ldapbasedn = pstrdup(urldata->lud_dn); if (urldata->lud_attrs) hbaline->ldapsearchattribute = pstrdup(urldata->lud_attrs[0]); /* only use first one */ hbaline->ldapscope = urldata->lud_scope; if 
(urldata->lud_filter) hbaline->ldapsearchfilter = pstrdup(urldata->lud_filter); ldap_free_urldesc(urldata); #else /* not OpenLDAP */ ereport(elevel, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("LDAP URLs not supported on this platform"))); *err_msg = "LDAP URLs not supported on this platform"; #endif /* not OpenLDAP */ } else if (strcmp(name, "ldaptls") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldaptls", "ldap"); if (strcmp(val, "1") == 0) hbaline->ldaptls = true; else hbaline->ldaptls = false; } else if (strcmp(name, "ldapscheme") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapscheme", "ldap"); if (strcmp(val, "ldap") != 0 && strcmp(val, "ldaps") != 0) ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("invalid ldapscheme value: \"%s\"", val), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); hbaline->ldapscheme = pstrdup(val); } else if (strcmp(name, "ldapserver") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapserver", "ldap"); hbaline->ldapserver = pstrdup(val); } else if (strcmp(name, "ldapport") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapport", "ldap"); hbaline->ldapport = atoi(val); if (hbaline->ldapport == 0) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("invalid LDAP port number: \"%s\"", val), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); *err_msg = psprintf("invalid LDAP port number: \"%s\"", val); return false; } } else if (strcmp(name, "ldapbinddn") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapbinddn", "ldap"); hbaline->ldapbinddn = pstrdup(val); } else if (strcmp(name, "ldapbindpasswd") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapbindpasswd", "ldap"); hbaline->ldapbindpasswd = pstrdup(val); } else if (strcmp(name, "ldapsearchattribute") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapsearchattribute", "ldap"); hbaline->ldapsearchattribute = pstrdup(val); } else if (strcmp(name, "ldapsearchfilter") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapsearchfilter", "ldap"); hbaline->ldapsearchfilter = pstrdup(val); 
} else if (strcmp(name, "ldapbasedn") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapbasedn", "ldap"); hbaline->ldapbasedn = pstrdup(val); } else if (strcmp(name, "ldapprefix") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapprefix", "ldap"); hbaline->ldapprefix = pstrdup(val); } else if (strcmp(name, "ldapsuffix") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapsuffix", "ldap"); hbaline->ldapsuffix = pstrdup(val); } else if (strcmp(name, "krb_realm") == 0) { if (hbaline->auth_method != uaGSS && hbaline->auth_method != uaSSPI) INVALID_AUTH_OPTION("krb_realm", gettext_noop("gssapi and sspi")); hbaline->krb_realm = pstrdup(val); } else if (strcmp(name, "include_realm") == 0) { if (hbaline->auth_method != uaGSS && hbaline->auth_method != uaSSPI) INVALID_AUTH_OPTION("include_realm", gettext_noop("gssapi and sspi")); if (strcmp(val, "1") == 0) hbaline->include_realm = true; else hbaline->include_realm = false; } else if (strcmp(name, "compat_realm") == 0) { if (hbaline->auth_method != uaSSPI) INVALID_AUTH_OPTION("compat_realm", gettext_noop("sspi")); if (strcmp(val, "1") == 0) hbaline->compat_realm = true; else hbaline->compat_realm = false; } else if (strcmp(name, "upn_username") == 0) { if (hbaline->auth_method != uaSSPI) INVALID_AUTH_OPTION("upn_username", gettext_noop("sspi")); if (strcmp(val, "1") == 0) hbaline->upn_username = true; else hbaline->upn_username = false; } else if (strcmp(name, "radiusservers") == 0) { struct addrinfo *gai_result; struct addrinfo hints; int ret; List *parsed_servers; ListCell *l; char *dupval = pstrdup(val); REQUIRE_AUTH_OPTION(uaRADIUS, "radiusservers", "radius"); if (!SplitGUCList(dupval, ',', &parsed_servers)) { /* syntax error in list */ ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not parse RADIUS server list \"%s\"", val), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); return false; } /* For each entry in the list, translate it */ foreach(l, parsed_servers) { MemSet(&hints, 0, sizeof(hints)); 
hints.ai_socktype = SOCK_DGRAM; hints.ai_family = AF_UNSPEC; ret = pg_getaddrinfo_all((char *) lfirst(l), NULL, &hints, &gai_result); if (ret || !gai_result) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not translate RADIUS server name \"%s\" to address: %s", (char *) lfirst(l), gai_strerror(ret)), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); if (gai_result) pg_freeaddrinfo_all(hints.ai_family, gai_result); list_free(parsed_servers); return false; } pg_freeaddrinfo_all(hints.ai_family, gai_result); } /* All entries are OK, so store them */ hbaline->radiusservers = parsed_servers; hbaline->radiusservers_s = pstrdup(val); } else if (strcmp(name, "radiusports") == 0) { List *parsed_ports; ListCell *l; char *dupval = pstrdup(val); REQUIRE_AUTH_OPTION(uaRADIUS, "radiusports", "radius"); if (!SplitGUCList(dupval, ',', &parsed_ports)) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not parse RADIUS port list \"%s\"", val), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); *err_msg = psprintf("invalid RADIUS port number: \"%s\"", val); return false; } foreach(l, parsed_ports) { if (atoi(lfirst(l)) == 0) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("invalid RADIUS port number: \"%s\"", val), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); return false; } } hbaline->radiusports = parsed_ports; hbaline->radiusports_s = pstrdup(val); } else if (strcmp(name, "radiussecrets") == 0) { List *parsed_secrets; char *dupval = pstrdup(val); REQUIRE_AUTH_OPTION(uaRADIUS, "radiussecrets", "radius"); if (!SplitGUCList(dupval, ',', &parsed_secrets)) { /* syntax error in list */ ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not parse RADIUS secret list \"%s\"", val), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); return false; } hbaline->radiussecrets = parsed_secrets; hbaline->radiussecrets_s = 
pstrdup(val); } else if (strcmp(name, "radiusidentifiers") == 0) { List *parsed_identifiers; char *dupval = pstrdup(val); REQUIRE_AUTH_OPTION(uaRADIUS, "radiusidentifiers", "radius"); if (!SplitGUCList(dupval, ',', &parsed_identifiers)) { /* syntax error in list */ ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not parse RADIUS identifiers list \"%s\"", val), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); return false; } hbaline->radiusidentifiers = parsed_identifiers; hbaline->radiusidentifiers_s = pstrdup(val); } else if (strcmp(name, "issuer") == 0) { REQUIRE_AUTH_OPTION(uaOAuth, "issuer", "oauth"); hbaline->oauth_issuer = pstrdup(val); } else if (strcmp(name, "scope") == 0) { REQUIRE_AUTH_OPTION(uaOAuth, "scope", "oauth"); hbaline->oauth_scope = pstrdup(val); } else if (strcmp(name, "validator") == 0) { REQUIRE_AUTH_OPTION(uaOAuth, "validator", "oauth"); hbaline->oauth_validator = pstrdup(val); } else if (strcmp(name, "delegate_ident_mapping") == 0) { REQUIRE_AUTH_OPTION(uaOAuth, "delegate_ident_mapping", "oauth"); if (strcmp(val, "1") == 0) hbaline->oauth_skip_usermap = true; else hbaline->oauth_skip_usermap = false; } else { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("unrecognized authentication option name: \"%s\"", name), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); *err_msg = psprintf("unrecognized authentication option name: \"%s\"", name); return false; } return true; } /* * Scan the pre-parsed hba file, looking for a match to the port's connection * request. */ static void check_hba(Port *port) { Oid roleid; ListCell *line; HbaLine *hba; /* Get the target role's OID. Note we do not error out for bad role. 
*/ roleid = get_role_oid(port->user_name, true); foreach(line, parsed_hba_lines) { hba = (HbaLine *) lfirst(line); /* Check connection type */ if (hba->conntype == ctLocal) { if (port->raddr.addr.ss_family != AF_UNIX) continue; } else { if (port->raddr.addr.ss_family == AF_UNIX) continue; /* Check SSL state */ if (port->ssl_in_use) { /* Connection is SSL, match both "host" and "hostssl" */ if (hba->conntype == ctHostNoSSL) continue; } else { /* Connection is not SSL, match both "host" and "hostnossl" */ if (hba->conntype == ctHostSSL) continue; } /* Check GSSAPI state */ #ifdef ENABLE_GSS if (port->gss && port->gss->enc && hba->conntype == ctHostNoGSS) continue; else if (!(port->gss && port->gss->enc) && hba->conntype == ctHostGSS) continue; #else if (hba->conntype == ctHostGSS) continue; #endif /* Check IP address */ switch (hba->ip_cmp_method) { case ipCmpMask: if (hba->hostname) { if (!check_hostname(port, hba->hostname)) continue; } else { if (!check_ip(&port->raddr, (struct sockaddr *) &hba->addr, (struct sockaddr *) &hba->mask)) continue; } break; case ipCmpAll: break; case ipCmpSameHost: case ipCmpSameNet: if (!check_same_host_or_net(&port->raddr, hba->ip_cmp_method)) continue; break; default: /* shouldn't get here, but deem it no-match if so */ continue; } } /* != ctLocal */ /* Check database and role */ if (!check_db(port->database_name, port->user_name, roleid, hba->databases)) continue; if (!check_role(port->user_name, roleid, hba->roles, false)) continue; /* Found a record that matched! */ port->hba = hba; return; } /* If no matching entry was found, then implicitly reject. */ hba = palloc0_object(HbaLine); hba->auth_method = uaImplicitReject; port->hba = hba; } /* * Read the config file and create a List of HbaLine records for the contents. * * The configuration is read into a temporary list, and if any parse error * occurs the old list is kept in place and false is returned. 
Only if the * whole file parses OK is the list replaced, and the function returns true. * * On a false result, caller will take care of reporting a FATAL error in case * this is the initial startup. If it happens on reload, we just keep running * with the old data. */ bool load_hba(void) { FILE *file; List *hba_lines = NIL; ListCell *line; List *new_parsed_lines = NIL; bool ok = true; MemoryContext oldcxt; MemoryContext hbacxt; file = open_auth_file(HbaFileName, LOG, 0, NULL); if (file == NULL) { /* error already logged */ return false; } tokenize_auth_file(HbaFileName, file, &hba_lines, LOG, 0); /* Now parse all the lines */ Assert(PostmasterContext); hbacxt = AllocSetContextCreate(PostmasterContext, "hba parser context", ALLOCSET_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(hbacxt); foreach(line, hba_lines) { TokenizedAuthLine *tok_line = (TokenizedAuthLine *) lfirst(line); HbaLine *newline; /* don't parse lines that already have errors */ if (tok_line->err_msg != NULL) { ok = false; continue; } if ((newline = parse_hba_line(tok_line, LOG)) == NULL) { /* Parse error; remember there's trouble */ ok = false; /* * Keep parsing the rest of the file so we can report errors on * more than the first line. Error has already been logged, no * need for more chatter here. */ continue; } new_parsed_lines = lappend(new_parsed_lines, newline); } /* * A valid HBA file must have at least one entry; else there's no way to * connect to the postmaster. But only complain about this if we didn't * already have parsing errors. */ if (ok && new_parsed_lines == NIL) { ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("configuration file \"%s\" contains no entries", HbaFileName))); ok = false; } /* Free tokenizer memory */ free_auth_file(file, 0); MemoryContextSwitchTo(oldcxt); if (!ok) { /* * File contained one or more errors, so bail out. MemoryContextDelete * is enough to clean up everything, including regexes. 
*/ MemoryContextDelete(hbacxt); return false; } /* Loaded new file successfully, replace the one we use */ if (parsed_hba_context != NULL) MemoryContextDelete(parsed_hba_context); parsed_hba_context = hbacxt; parsed_hba_lines = new_parsed_lines; return true; } /* * Parse one tokenised line from the ident config file and store the result in * an IdentLine structure. * * If parsing fails, log a message at ereport level elevel, store an error * string in tok_line->err_msg and return NULL. * * If ident_user is a regular expression (ie. begins with a slash), it is * compiled and stored in IdentLine structure. * * Note: this function leaks memory when an error occurs. Caller is expected * to have set a memory context that will be reset if this function returns * NULL. */ IdentLine * parse_ident_line(TokenizedAuthLine *tok_line, int elevel) { int line_num = tok_line->line_num; char *file_name = tok_line->file_name; char **err_msg = &tok_line->err_msg; ListCell *field; List *tokens; AuthToken *token; IdentLine *parsedline; Assert(tok_line->fields != NIL); field = list_head(tok_line->fields); parsedline = palloc0_object(IdentLine); parsedline->linenumber = line_num; /* Get the map token (must exist) */ tokens = lfirst(field); IDENT_MULTI_VALUE(tokens); token = linitial(tokens); parsedline->usermap = pstrdup(token->string); /* Get the ident user token */ field = lnext(tok_line->fields, field); IDENT_FIELD_ABSENT(field); tokens = lfirst(field); IDENT_MULTI_VALUE(tokens); token = linitial(tokens); /* Copy the ident user token */ parsedline->system_user = copy_auth_token(token); /* Get the PG rolename token */ field = lnext(tok_line->fields, field); IDENT_FIELD_ABSENT(field); tokens = lfirst(field); IDENT_MULTI_VALUE(tokens); token = linitial(tokens); parsedline->pg_user = copy_auth_token(token); /* * Now that the field validation is done, compile a regex from the user * tokens, if necessary. 
*/ if (regcomp_auth_token(parsedline->system_user, file_name, line_num, err_msg, elevel)) { /* err_msg includes the error to report */ return NULL; } if (regcomp_auth_token(parsedline->pg_user, file_name, line_num, err_msg, elevel)) { /* err_msg includes the error to report */ return NULL; } return parsedline; } /* * Process one line from the parsed ident config lines. * * Compare input parsed ident line to the needed map, pg_user and system_user. * *found_p and *error_p are set according to our results. */ static void check_ident_usermap(IdentLine *identLine, const char *usermap_name, const char *pg_user, const char *system_user, bool case_insensitive, bool *found_p, bool *error_p) { Oid roleid; *found_p = false; *error_p = false; if (strcmp(identLine->usermap, usermap_name) != 0) /* Line does not match the map name we're looking for, so just abort */ return; /* Get the target role's OID. Note we do not error out for bad role. */ roleid = get_role_oid(pg_user, true); /* Match? */ if (token_has_regexp(identLine->system_user)) { /* * Process the system username as a regular expression that returns * exactly one match. This is replaced for \1 in the database username * string, if present. */ int r; regmatch_t matches[2]; char *ofs; AuthToken *expanded_pg_user_token; bool created_temporary_token = false; r = regexec_auth_token(system_user, identLine->system_user, 2, matches); if (r) { char errstr[100]; if (r != REG_NOMATCH) { /* REG_NOMATCH is not an error, everything else is */ pg_regerror(r, identLine->system_user->regex, errstr, sizeof(errstr)); ereport(LOG, (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION), errmsg("regular expression match for \"%s\" failed: %s", identLine->system_user->string + 1, errstr))); *error_p = true; } return; } /* * Replace \1 with the first captured group unless the field already * has some special meaning, like a group membership or a regexp-based * check. 
*/ if (!token_is_member_check(identLine->pg_user) && !token_has_regexp(identLine->pg_user) && (ofs = strstr(identLine->pg_user->string, "\\1")) != NULL) { const char *repl_str; size_t repl_len; char *old_pg_user; char *expanded_pg_user; size_t offset; /* substitution of the first argument requested */ if (matches[1].rm_so < 0) { ereport(LOG, (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION), errmsg("regular expression \"%s\" has no subexpressions as requested by backreference in \"%s\"", identLine->system_user->string + 1, identLine->pg_user->string))); *error_p = true; return; } repl_str = system_user + matches[1].rm_so; repl_len = matches[1].rm_eo - matches[1].rm_so; /* * It's allowed to have more than one \1 in the string, and we'll * replace them all. But that's pretty unusual so we optimize on * the assumption of only one occurrence, which motivates doing * repeated replacements instead of making two passes over the * string to determine the final length right away. */ old_pg_user = identLine->pg_user->string; do { /* * length: current length minus length of \1 plus length of * replacement plus null terminator */ expanded_pg_user = palloc(strlen(old_pg_user) - 2 + repl_len + 1); /* ofs points into the old_pg_user string at this point */ offset = ofs - old_pg_user; memcpy(expanded_pg_user, old_pg_user, offset); memcpy(expanded_pg_user + offset, repl_str, repl_len); strcpy(expanded_pg_user + offset + repl_len, ofs + 2); if (old_pg_user != identLine->pg_user->string) pfree(old_pg_user); old_pg_user = expanded_pg_user; } while ((ofs = strstr(old_pg_user + offset + repl_len, "\\1")) != NULL); /* * Mark the token as quoted, so it will only be compared literally * and not for some special meaning, such as "all" or a group * membership check. 
*/ expanded_pg_user_token = make_auth_token(expanded_pg_user, true); created_temporary_token = true; pfree(expanded_pg_user); } else { expanded_pg_user_token = identLine->pg_user; } /* check the Postgres user */ *found_p = check_role(pg_user, roleid, list_make1(expanded_pg_user_token), case_insensitive); if (created_temporary_token) free_auth_token(expanded_pg_user_token); return; } else { /* * Not a regular expression, so make a complete match. If the system * user does not match, just leave. */ if (case_insensitive) { if (!token_matches_insensitive(identLine->system_user, system_user)) return; } else { if (!token_matches(identLine->system_user, system_user)) return; } /* check the Postgres user */ *found_p = check_role(pg_user, roleid, list_make1(identLine->pg_user), case_insensitive); } } /* * Scan the (pre-parsed) ident usermap file line by line, looking for a match * * See if the system user with ident username "system_user" is allowed to act as * Postgres user "pg_user" according to usermap "usermap_name". * * Special case: Usermap NULL, equivalent to what was previously called * "sameuser" or "samerole", means don't look in the usermap file. * That's an implied map wherein "pg_user" must be identical to * "system_user" in order to be authorized. * * Iff authorized, return STATUS_OK, otherwise return STATUS_ERROR. 
*/ int check_usermap(const char *usermap_name, const char *pg_user, const char *system_user, bool case_insensitive) { bool found_entry = false, error = false; if (usermap_name == NULL || usermap_name[0] == '\0') { if (case_insensitive) { if (pg_strcasecmp(pg_user, system_user) == 0) return STATUS_OK; } else { if (strcmp(pg_user, system_user) == 0) return STATUS_OK; } ereport(LOG, (errmsg("provided user name (%s) and authenticated user name (%s) do not match", pg_user, system_user))); return STATUS_ERROR; } else { ListCell *line_cell; foreach(line_cell, parsed_ident_lines) { check_ident_usermap(lfirst(line_cell), usermap_name, pg_user, system_user, case_insensitive, &found_entry, &error); if (found_entry || error) break; } } if (!found_entry && !error) { ereport(LOG, (errmsg("no match in usermap \"%s\" for user \"%s\" authenticated as \"%s\"", usermap_name, pg_user, system_user))); } return found_entry ? STATUS_OK : STATUS_ERROR; } /* * Read the ident config file and create a List of IdentLine records for * the contents. * * This works the same as load_hba(), but for the user config file. */ bool load_ident(void) { FILE *file; List *ident_lines = NIL; ListCell *line_cell; List *new_parsed_lines = NIL; bool ok = true; MemoryContext oldcxt; MemoryContext ident_context; IdentLine *newline; /* not FATAL ... 
we just won't do any special ident maps */ file = open_auth_file(IdentFileName, LOG, 0, NULL); if (file == NULL) { /* error already logged */ return false; } tokenize_auth_file(IdentFileName, file, &ident_lines, LOG, 0); /* Now parse all the lines */ Assert(PostmasterContext); ident_context = AllocSetContextCreate(PostmasterContext, "ident parser context", ALLOCSET_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(ident_context); foreach(line_cell, ident_lines) { TokenizedAuthLine *tok_line = (TokenizedAuthLine *) lfirst(line_cell); /* don't parse lines that already have errors */ if (tok_line->err_msg != NULL) { ok = false; continue; } if ((newline = parse_ident_line(tok_line, LOG)) == NULL) { /* Parse error; remember there's trouble */ ok = false; /* * Keep parsing the rest of the file so we can report errors on * more than the first line. Error has already been logged, no * need for more chatter here. */ continue; } new_parsed_lines = lappend(new_parsed_lines, newline); } /* Free tokenizer memory */ free_auth_file(file, 0); MemoryContextSwitchTo(oldcxt); if (!ok) { /* * File contained one or more errors, so bail out. MemoryContextDelete * is enough to clean up everything, including regexes. */ MemoryContextDelete(ident_context); return false; } /* Loaded new file successfully, replace the one we use */ if (parsed_ident_context != NULL) MemoryContextDelete(parsed_ident_context); parsed_ident_context = ident_context; parsed_ident_lines = new_parsed_lines; return true; } /* * Determine what authentication method should be used when accessing database * "database" from frontend "raddr", user "user". Return the method and * an optional argument (stored in fields of *port), and STATUS_OK. * * If the file does not contain any entry matching the request, we return * method = uaImplicitReject. */ void hba_getauthmethod(Port *port) { check_hba(port); } /* * Return the name of the auth method in use ("gss", "md5", "trust", etc.). 
* * The return value is statically allocated (see the UserAuthName array) and * should not be freed. */ const char * hba_authname(UserAuth auth_method) { return UserAuthName[auth_method]; }
c
github
https://github.com/postgres/postgres
src/backend/libpq/hba.c
#!/usr/bin/env python3 """Convert a DECam crosstalk text table into LSST CrosstalkCalibs. """ import argparse import numpy as np import os.path import sys import lsst.ip.isr as ipIsr from lsst.daf.base import PropertyList from lsst.obs.decam import DecamMapper def makeDetectorCrosstalk(dataDict, force=False): """Generate and write CrosstalkCalib from dictionary. Parameters ---------- dataDict : `dict` Dictionary from ``readFile`` containing crosstalk definition. """ dataDict['coeffs'] = dataDict['coeffs'].transpose() decamCT = ipIsr.crosstalk.CrosstalkCalib.fromDict(dataDict) # Supply a date prior to all data, to ensure universal use. decamCT.updateMetadata(setDate=False, CALIBDATE='1970-01-01T00:00:00') detName = dataDict['DETECTOR_NAME'] outDir = os.path.join(DecamMapper.getCrosstalkDir(), detName.lower()) if os.path.exists(outDir): if not force: print("Output directory %r exists; use --force to replace" % (outDir, )) sys.exit(1) else: os.makedirs(outDir) decamCT.writeText(f"{outDir}/1970-01-01T00:00:00.yaml") def readFile(crosstalkInfile): """Construct crosstalk dictionary-of-dictionaries from crosstalkInfile. Parameters ---------- crosstalkInfile : `str` File containing crosstalk coefficient information. Results ------- outDict : `dict` [`str` : `dict`] Output dictionary, keyed on victim detector names, containing `lsst.ip.isr.CrosstalkCalib`'s expected dictionary format. Raises ------ RuntimeError : Raised if the detector is not known. 
""" ampIndexMap = {'A': 0, 'B': 1} detMap = {f"ccd{key:02d}": value for key, value in DecamMapper.detectorNames.items()} detMap['ccd61'] = 'N30' detSerialMap = {value: key for key, value in DecamMapper.detectorNames.items()} detSerialMap['N30'] = 61 outDict = dict() with open(crosstalkInfile) as f: for line in f: li = line.strip() if not li.startswith('#'): elem = li.split() victimDetAmp = elem[0] sourceDetAmp = elem[1] coeff = float(elem[2]) if 'A' in victimDetAmp: victimAmp = 'A' elif 'B' in victimDetAmp: victimAmp = 'B' else: raise RuntimeError(f"Unknown amp: {victimDetAmp}") if 'A' in sourceDetAmp: sourceAmp = 'A' elif 'B' in sourceDetAmp: sourceAmp = 'B' else: raise RuntimeError(f"Unknown amp: {sourceDetAmp}") victimDet = victimDetAmp.replace(victimAmp, "") sourceDet = sourceDetAmp.replace(sourceAmp, "") victimDet = detMap[victimDet] sourceDet = detMap[sourceDet] victimAmp = ampIndexMap[victimAmp] sourceAmp = ampIndexMap[sourceAmp] if victimDet not in outDict: outDict[victimDet] = dict() outDict[victimDet]['metadata'] = PropertyList() outDict[victimDet]['metadata']['OBSTYPE'] = 'CROSSTALK' outDict[victimDet]['metadata']['INSTRUME'] = 'DECam' outDict[victimDet]['interChip'] = dict() outDict[victimDet]['crosstalkShape'] = (2, 2) outDict[victimDet]['hasCrosstalk'] = True outDict[victimDet]['nAmp'] = 2 if 'coeffs' not in outDict[victimDet]: # shape=outDict[victimDet]['crosstalkShape']) outDict[victimDet]['coeffs'] = np.zeros_like([], shape=(2, 2)) if sourceDet == victimDet: outDict[victimDet]['metadata']['DETECTOR_NAME'] = victimDet outDict[victimDet]['metadata']['DETECTOR_SERIAL'] = detSerialMap[victimDet] outDict[victimDet]['metadata']['DETECTOR'] = detSerialMap[victimDet] outDict[victimDet]['DETECTOR_NAME'] = victimDet outDict[victimDet]['DETECTOR_SERIAL'] = detSerialMap[victimDet] outDict[victimDet]['DETECTOR'] = detSerialMap[victimDet] outDict[victimDet]['coeffs'][victimAmp][sourceAmp] = coeff else: if sourceDet not in outDict[victimDet]['interChip']: 
outDict[victimDet]['interChip'][sourceDet] = np.zeros_like([], shape=(2, 2)) outDict[victimDet]['interChip'][sourceDet][victimAmp][sourceAmp] = coeff return outDict if __name__ == "__main__": parser = argparse.ArgumentParser(description="Convert a DECam crosstalk file into LSST CrosstalkCalibs.") parser.add_argument(dest="crosstalkInfile", help="DECam crosstalk file.") parser.add_argument("-v", "--verbose", action="store_true", help="Print data about each detector.") parser.add_argument("-f", "--force", action="store_true", help="Overwrite existing CrosstalkCalibs.") cmd = parser.parse_args() outDict = readFile(crosstalkInfile=cmd.crosstalkInfile) crosstalkDir = DecamMapper.getCrosstalkDir() if os.path.exists(crosstalkDir): if not cmd.force: print("Output directory %r exists; use --force to replace" % (crosstalkDir, )) sys.exit(1) print("Replacing data in crosstalk directory %r" % (crosstalkDir, )) else: print("Creating crosstalk directory %r" % (crosstalkDir, )) os.makedirs(crosstalkDir) for detName in outDict: if cmd.verbose: print(f"{detName}: has crosstalk? {outDict[detName]['hasCrosstalk']}") print(f"COEFF:\n{outDict[detName]['coeffs']}") for source in outDict[detName]['interChip']: print(f"INTERCHIP {source}:\n{outDict[detName]['interChip'][source]}") makeDetectorCrosstalk(dataDict=outDict[detName], force=cmd.force)
unknown
codeparrot/codeparrot-clean
# A parallelized "find(1)" using the thread module. # This demonstrates the use of a work queue and worker threads. # It really does do more stats/sec when using multiple threads, # although the improvement is only about 20-30 percent. # (That was 8 years ago. In 2002, on Linux, I can't measure # a speedup. :-( ) # I'm too lazy to write a command line parser for the full find(1) # command line syntax, so the predicate it searches for is wired-in, # see function selector() below. (It currently searches for files with # world write permission.) # Usage: parfind.py [-w nworkers] [directory] ... # Default nworkers is 4 import sys import getopt import string import time import os from stat import * import thread # Work queue class. Usage: # wq = WorkQ() # wq.addwork(func, (arg1, arg2, ...)) # one or more calls # wq.run(nworkers) # The work is done when wq.run() completes. # The function calls executed by the workers may add more work. # Don't use keyboard interrupts! class WorkQ: # Invariants: # - busy and work are only modified when mutex is locked # - len(work) is the number of jobs ready to be taken # - busy is the number of jobs being done # - todo is locked iff there is no work and somebody is busy def __init__(self): self.mutex = thread.allocate() self.todo = thread.allocate() self.todo.acquire() self.work = [] self.busy = 0 def addwork(self, func, args): job = (func, args) self.mutex.acquire() self.work.append(job) self.mutex.release() if len(self.work) == 1: self.todo.release() def _getwork(self): self.todo.acquire() self.mutex.acquire() if self.busy == 0 and len(self.work) == 0: self.mutex.release() self.todo.release() return None job = self.work[0] del self.work[0] self.busy = self.busy + 1 self.mutex.release() if len(self.work) > 0: self.todo.release() return job def _donework(self): self.mutex.acquire() self.busy = self.busy - 1 if self.busy == 0 and len(self.work) == 0: self.todo.release() self.mutex.release() def _worker(self): time.sleep(0.00001) # Let 
other threads run while 1: job = self._getwork() if not job: break func, args = job apply(func, args) self._donework() def run(self, nworkers): if not self.work: return # Nothing to do for i in range(nworkers-1): thread.start_new(self._worker, ()) self._worker() self.todo.acquire() # Main program def main(): nworkers = 4 opts, args = getopt.getopt(sys.argv[1:], '-w:') for opt, arg in opts: if opt == '-w': nworkers = string.atoi(arg) if not args: args = [os.curdir] wq = WorkQ() for dir in args: wq.addwork(find, (dir, selector, wq)) t1 = time.time() wq.run(nworkers) t2 = time.time() sys.stderr.write('Total time %r sec.\n' % (t2-t1)) # The predicate -- defines what files we look for. # Feel free to change this to suit your purpose def selector(dir, name, fullname, stat): # Look for world writable files that are not symlinks return (stat[ST_MODE] & 0002) != 0 and not S_ISLNK(stat[ST_MODE]) # The find procedure -- calls wq.addwork() for subdirectories def find(dir, pred, wq): try: names = os.listdir(dir) except os.error, msg: print repr(dir), ':', msg return for name in names: if name not in (os.curdir, os.pardir): fullname = os.path.join(dir, name) try: stat = os.lstat(fullname) except os.error, msg: print repr(fullname), ':', msg continue if pred(dir, name, fullname, stat): print fullname if S_ISDIR(stat[ST_MODE]): if not os.path.ismount(fullname): wq.addwork(find, (fullname, pred, wq)) # Call the main program main()
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true class Possession < ActiveRecord::Base self.table_name = "having" end
ruby
github
https://github.com/rails/rails
activerecord/test/models/possession.rb
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: rax_dns_record short_description: Manage DNS records on Rackspace Cloud DNS description: - Manage DNS records on Rackspace Cloud DNS version_added: 1.5 options: comment: description: - Brief description of the domain. Maximum length of 160 characters data: description: - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for SRV/TXT required: True domain: description: - Domain name to create the record in. This is an invalid option when type=PTR loadbalancer: description: - Load Balancer ID to create a PTR record for. Only used with type=PTR version_added: 1.7 name: description: - FQDN record name to create required: True overwrite: description: - Add new records if data doesn't match, instead of updating existing record with matching name. If there are already multiple records with matching name and overwrite=true, this module will fail. default: true version_added: 2.1 priority: description: - Required for MX and SRV records, but forbidden for other record types. If specified, must be an integer from 0 to 65535. server: description: - Server ID to create a PTR record for. 
Only used with type=PTR version_added: 1.7 state: description: - Indicate desired state of the resource choices: - present - absent default: present ttl: description: - Time to live of record in seconds default: 3600 type: description: - DNS record type choices: - A - AAAA - CNAME - MX - NS - SRV - TXT - PTR required: true notes: - "It is recommended that plays utilizing this module be run with C(serial: 1) to avoid exceeding the API request limit imposed by the Rackspace CloudDNS API" - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be supplied - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. - C(PTR) record support was added in version 1.7 author: "Matt Martz (@sivel)" extends_documentation_fragment: - rackspace - rackspace.openstack ''' EXAMPLES = ''' - name: Create DNS Records hosts: all gather_facts: False tasks: - name: Create A record local_action: module: rax_dns_record credentials: ~/.raxpub domain: example.org name: www.example.org data: "{{ rax_accessipv4 }}" type: A register: a_record - name: Create PTR record local_action: module: rax_dns_record credentials: ~/.raxpub server: "{{ rax_id }}" name: "{{ inventory_hostname }}" region: DFW register: ptr_record ''' try: import pyrax HAS_PYRAX = True except ImportError: HAS_PYRAX = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.rax import (rax_argument_spec, rax_find_loadbalancer, rax_find_server, rax_required_together, rax_to_dict, setup_rax_module, ) def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, name=None, server=None, state='present', ttl=7200): changed = False results = [] dns = pyrax.cloud_dns if not dns: module.fail_json(msg='Failed to instantiate client. 
This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') if loadbalancer: item = rax_find_loadbalancer(module, pyrax, loadbalancer) elif server: item = rax_find_server(module, pyrax, server) if state == 'present': current = dns.list_ptr_records(item) for record in current: if record.data == data: if record.ttl != ttl or record.name != name: try: dns.update_ptr_record(item, record, name, data, ttl) changed = True except Exception as e: module.fail_json(msg='%s' % e.message) record.ttl = ttl record.name = name results.append(rax_to_dict(record)) break else: results.append(rax_to_dict(record)) break if not results: record = dict(name=name, type='PTR', data=data, ttl=ttl, comment=comment) try: results = dns.add_ptr_records(item, [record]) changed = True except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, records=results) elif state == 'absent': current = dns.list_ptr_records(item) for record in current: if record.data == data: results.append(rax_to_dict(record)) break if results: try: dns.delete_ptr_records(item, data) changed = True except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, records=results) def rax_dns_record(module, comment=None, data=None, domain=None, name=None, overwrite=True, priority=None, record_type='A', state='present', ttl=7200): """Function for manipulating record types other than PTR""" changed = False dns = pyrax.cloud_dns if not dns: module.fail_json(msg='Failed to instantiate client. 
This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') if state == 'present': if not priority and record_type in ['MX', 'SRV']: module.fail_json(msg='A "priority" attribute is required for ' 'creating a MX or SRV record') try: domain = dns.find(name=domain) except Exception as e: module.fail_json(msg='%s' % e.message) try: if overwrite: record = domain.find_record(record_type, name=name) else: record = domain.find_record(record_type, name=name, data=data) except pyrax.exceptions.DomainRecordNotUnique as e: module.fail_json(msg='overwrite=true and there are multiple matching records') except pyrax.exceptions.DomainRecordNotFound as e: try: record_data = { 'type': record_type, 'name': name, 'data': data, 'ttl': ttl } if comment: record_data.update(dict(comment=comment)) if priority and record_type.upper() in ['MX', 'SRV']: record_data.update(dict(priority=priority)) record = domain.add_records([record_data])[0] changed = True except Exception as e: module.fail_json(msg='%s' % e.message) update = {} if comment != getattr(record, 'comment', None): update['comment'] = comment if ttl != getattr(record, 'ttl', None): update['ttl'] = ttl if priority != getattr(record, 'priority', None): update['priority'] = priority if data != getattr(record, 'data', None): update['data'] = data if update: try: record.update(**update) changed = True record.get() except Exception as e: module.fail_json(msg='%s' % e.message) elif state == 'absent': try: domain = dns.find(name=domain) except Exception as e: module.fail_json(msg='%s' % e.message) try: record = domain.find_record(record_type, name=name, data=data) except pyrax.exceptions.DomainRecordNotFound as e: record = {} except pyrax.exceptions.DomainRecordNotUnique as e: module.fail_json(msg='%s' % e.message) if record: try: record.delete() changed = True except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, record=rax_to_dict(record)) def main(): 
argument_spec = rax_argument_spec() argument_spec.update( dict( comment=dict(), data=dict(required=True), domain=dict(), loadbalancer=dict(), name=dict(required=True), overwrite=dict(type='bool', default=True), priority=dict(type='int'), server=dict(), state=dict(default='present', choices=['present', 'absent']), ttl=dict(type='int', default=3600), type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT', 'PTR']) ) ) module = AnsibleModule( argument_spec=argument_spec, required_together=rax_required_together(), mutually_exclusive=[ ['server', 'loadbalancer', 'domain'], ], required_one_of=[ ['server', 'loadbalancer', 'domain'], ], ) if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') comment = module.params.get('comment') data = module.params.get('data') domain = module.params.get('domain') loadbalancer = module.params.get('loadbalancer') name = module.params.get('name') overwrite = module.params.get('overwrite') priority = module.params.get('priority') server = module.params.get('server') state = module.params.get('state') ttl = module.params.get('ttl') record_type = module.params.get('type') setup_rax_module(module, pyrax, False) if record_type.upper() == 'PTR': if not server and not loadbalancer: module.fail_json(msg='one of the following is required: ' 'server,loadbalancer') rax_dns_record_ptr(module, data=data, comment=comment, loadbalancer=loadbalancer, name=name, server=server, state=state, ttl=ttl) else: rax_dns_record(module, comment=comment, data=data, domain=domain, name=name, overwrite=overwrite, priority=priority, record_type=record_type, state=state, ttl=ttl) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
package distribution import ( "context" "errors" "strings" "syscall" "testing" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/client" ) var errUnexpected = errors.New("some totally unexpected error") var alwaysContinue = []error{ &client.UnexpectedHTTPResponseError{}, errcode.Errors{}, errUnexpected, // nested errcode.Errors{errUnexpected}, } var continueFromMirrorEndpoint = []error{ imageConfigPullError{}, errcode.Error{}, // nested errcode.Errors{errcode.Error{}}, } func TestContinueOnError_NonMirrorEndpoint(t *testing.T) { for _, err := range alwaysContinue { if !continueOnError(err, false) { t.Errorf("Should continue from non-mirror endpoint: %T: '%s'", err, err.Error()) } } for _, err := range continueFromMirrorEndpoint { if continueOnError(err, false) { t.Errorf("Should only continue from mirror endpoint: %T: '%s'", err, err.Error()) } } } func TestContinueOnError_MirrorEndpoint(t *testing.T) { var errs []error errs = append(errs, alwaysContinue...) errs = append(errs, continueFromMirrorEndpoint...) for _, err := range errs { if !continueOnError(err, true) { t.Errorf("Should continue from mirror endpoint: %T: '%s'", err, err.Error()) } } } func TestContinueOnError_NeverContinue(t *testing.T) { neverContinue := []error{ errors.New(strings.ToLower(syscall.ESRCH.Error())), // No such process context.Canceled, context.DeadlineExceeded, } for _, isMirrorEndpoint := range []bool{true, false} { for _, err := range neverContinue { if continueOnError(err, isMirrorEndpoint) { t.Errorf("Should never continue: %T: '%s'", err, err.Error()) } } } }
go
github
https://github.com/moby/moby
daemon/internal/distribution/errors_test.go
// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package agent import ( "context" "errors" "fmt" "log/slog" "math" "path/filepath" "sync" "time" "unicode/utf8" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/compression" "github.com/prometheus/prometheus/util/zeropool" ) const ( sampleMetricTypeFloat = "float" sampleMetricTypeHistogram = "histogram" ) var ErrUnsupported = errors.New("unsupported operation with WAL-only storage") // Default values for options. var ( DefaultTruncateFrequency = 2 * time.Hour DefaultMinWALTime = int64(5 * time.Minute / time.Millisecond) DefaultMaxWALTime = int64(4 * time.Hour / time.Millisecond) ) // Options of the WAL storage. type Options struct { // Segments (wal files) max size. // WALSegmentSize <= 0, segment size is default size. 
// WALSegmentSize > 0, segment size is WALSegmentSize. WALSegmentSize int // WALCompression configures the compression type to use on records in the WAL. WALCompression compression.Type // StripeSize is the size (power of 2) in entries of the series hash map. Reducing the size will save memory but impact performance. StripeSize int // TruncateFrequency determines how frequently to truncate data from the WAL. TruncateFrequency time.Duration // Shortest and longest amount of time data can exist in the WAL before being // deleted. MinWALTime, MaxWALTime int64 // NoLockfile disables creation and consideration of a lock file. NoLockfile bool // OutOfOrderTimeWindow specifies how much out of order is allowed, if any. OutOfOrderTimeWindow int64 // EnableSTAsZeroSample represents 'created-timestamp-zero-ingestion' feature flag. // If true, ST, if non-empty and earlier than sample timestamp, will be stored // as a zero sample before the actual sample. // // The zero sample is best-effort, only debug log on failure is emitted. // NOTE(bwplotka): This feature might be deprecated and removed once PROM-60 // is implemented. EnableSTAsZeroSample bool // EnableSTStorage determines whether agent DB should write a Start Timestamp (ST) // per sample to WAL. // TODO(bwplotka): Implement this option as per PROM-60, currently it's noop. EnableSTStorage bool } // DefaultOptions used for the WAL storage. They are reasonable for setups using // millisecond-precision timestamps. 
func DefaultOptions() *Options { return &Options{ WALSegmentSize: wlog.DefaultSegmentSize, WALCompression: compression.None, StripeSize: tsdb.DefaultStripeSize, TruncateFrequency: DefaultTruncateFrequency, MinWALTime: DefaultMinWALTime, MaxWALTime: DefaultMaxWALTime, NoLockfile: false, OutOfOrderTimeWindow: 0, } } type dbMetrics struct { r prometheus.Registerer numActiveSeries prometheus.Gauge numWALSeriesPendingDeletion prometheus.Gauge totalAppendedSamples *prometheus.CounterVec totalAppendedExemplars prometheus.Counter totalOutOfOrderSamples prometheus.Counter walTruncateDuration prometheus.Summary walCorruptionsTotal prometheus.Counter walTotalReplayDuration prometheus.Gauge checkpointDeleteFail prometheus.Counter checkpointDeleteTotal prometheus.Counter checkpointCreationFail prometheus.Counter checkpointCreationTotal prometheus.Counter } func newDBMetrics(r prometheus.Registerer) *dbMetrics { m := dbMetrics{r: r} m.numActiveSeries = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_agent_active_series", Help: "Number of active series being tracked by the WAL storage", }) m.numWALSeriesPendingDeletion = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_agent_deleted_series", Help: "Number of series pending deletion from the WAL", }) m.totalAppendedSamples = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "prometheus_agent_samples_appended_total", Help: "Total number of samples appended to the storage", }, []string{"type"}) m.totalAppendedExemplars = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_agent_exemplars_appended_total", Help: "Total number of exemplars appended to the storage", }) m.totalOutOfOrderSamples = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_agent_out_of_order_samples_total", Help: "Total number of out of order samples ingestion failed attempts.", }) m.walTruncateDuration = prometheus.NewSummary(prometheus.SummaryOpts{ Name: "prometheus_agent_truncate_duration_seconds", Help: 
"Duration of WAL truncation.", }) m.walCorruptionsTotal = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_agent_corruptions_total", Help: "Total number of WAL corruptions.", }) m.walTotalReplayDuration = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_agent_data_replay_duration_seconds", Help: "Time taken to replay the data on disk.", }) m.checkpointDeleteFail = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_agent_checkpoint_deletions_failed_total", Help: "Total number of checkpoint deletions that failed.", }) m.checkpointDeleteTotal = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_agent_checkpoint_deletions_total", Help: "Total number of checkpoint deletions attempted.", }) m.checkpointCreationFail = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_agent_checkpoint_creations_failed_total", Help: "Total number of checkpoint creations that failed.", }) m.checkpointCreationTotal = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_agent_checkpoint_creations_total", Help: "Total number of checkpoint creations attempted.", }) if r != nil { r.MustRegister( m.numActiveSeries, m.numWALSeriesPendingDeletion, m.totalAppendedSamples, m.totalAppendedExemplars, m.totalOutOfOrderSamples, m.walTruncateDuration, m.walCorruptionsTotal, m.walTotalReplayDuration, m.checkpointDeleteFail, m.checkpointDeleteTotal, m.checkpointCreationFail, m.checkpointCreationTotal, ) } return &m } func (m *dbMetrics) Unregister() { if m.r == nil { return } cs := []prometheus.Collector{ m.numActiveSeries, m.numWALSeriesPendingDeletion, m.totalAppendedSamples, m.totalAppendedExemplars, m.totalOutOfOrderSamples, m.walTruncateDuration, m.walCorruptionsTotal, m.walTotalReplayDuration, m.checkpointDeleteFail, m.checkpointDeleteTotal, m.checkpointCreationFail, m.checkpointCreationTotal, } for _, c := range cs { m.r.Unregister(c) } } // DB represents a WAL-only storage. It implements storage.DB. 
type DB struct { mtx sync.RWMutex logger *slog.Logger opts *Options rs *remote.Storage wal *wlog.WL locker *tsdbutil.DirLocker appenderPool sync.Pool appenderV2Pool sync.Pool bufPool sync.Pool // These pools are only used during WAL replay and are reset at the end. // NOTE: Adjust resetWALReplayResources() upon changes to the pools. walReplaySeriesPool zeropool.Pool[[]record.RefSeries] walReplaySamplesPool zeropool.Pool[[]record.RefSample] walReplayHistogramsPool zeropool.Pool[[]record.RefHistogramSample] walReplayFloatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] nextRef *atomic.Uint64 series *stripeSeries // deleted is a map of (ref IDs that should be deleted from WAL) to (the WAL segment they // must be kept around to). deleted map[chunks.HeadSeriesRef]int donec chan struct{} stopc chan struct{} writeNotified wlog.WriteNotified metrics *dbMetrics } // Open returns a new agent.DB in the given directory. func Open(l *slog.Logger, reg prometheus.Registerer, rs *remote.Storage, dir string, opts *Options) (*DB, error) { opts = validateOptions(opts) locker, err := tsdbutil.NewDirLocker(dir, "agent", l, reg) if err != nil { return nil, err } if !opts.NoLockfile { if err := locker.Lock(); err != nil { return nil, err } } // remote_write expects WAL to be stored in a "wal" subdirectory of the main storage. 
dir = filepath.Join(dir, "wal") w, err := wlog.NewSize(l, reg, dir, opts.WALSegmentSize, opts.WALCompression) if err != nil { return nil, fmt.Errorf("creating WAL: %w", err) } db := &DB{ logger: l, opts: opts, rs: rs, wal: w, locker: locker, nextRef: atomic.NewUint64(0), series: newStripeSeries(opts.StripeSize), deleted: make(map[chunks.HeadSeriesRef]int), donec: make(chan struct{}), stopc: make(chan struct{}), metrics: newDBMetrics(reg), } db.bufPool.New = func() any { return make([]byte, 0, 1024) } db.appenderPool.New = func() any { return &appender{ appenderBase: appenderBase{ DB: db, pendingSeries: make([]record.RefSeries, 0, 100), pendingSamples: make([]record.RefSample, 0, 100), pendingHistograms: make([]record.RefHistogramSample, 0, 100), pendingFloatHistograms: make([]record.RefFloatHistogramSample, 0, 100), pendingExamplars: make([]record.RefExemplar, 0, 10), }, } } db.appenderV2Pool.New = func() any { return &appenderV2{ appenderBase: appenderBase{ DB: db, pendingSeries: make([]record.RefSeries, 0, 100), pendingSamples: make([]record.RefSample, 0, 100), pendingHistograms: make([]record.RefHistogramSample, 0, 100), pendingFloatHistograms: make([]record.RefFloatHistogramSample, 0, 100), pendingExamplars: make([]record.RefExemplar, 0, 10), }, } } if err := db.replayWAL(); err != nil { db.logger.Warn("encountered WAL read error, attempting repair", "err", err) if err := w.Repair(err); err != nil { return nil, fmt.Errorf("repair corrupted WAL: %w", err) } db.logger.Info("successfully repaired WAL") } go db.run() return db, nil } // SetWriteNotified allows to set an instance to notify when a write happens. // It must be used during initialization. It is not safe to use it during execution. 
func (db *DB) SetWriteNotified(wn wlog.WriteNotified) { db.writeNotified = wn } func validateOptions(opts *Options) *Options { if opts == nil { opts = DefaultOptions() } if opts.WALSegmentSize <= 0 { opts.WALSegmentSize = wlog.DefaultSegmentSize } if opts.WALCompression == "" { opts.WALCompression = compression.None } // Revert StripeSize to DefaultStripeSize if StripeSize is either 0 or not a power of 2. if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) { opts.StripeSize = tsdb.DefaultStripeSize } if opts.TruncateFrequency <= 0 { opts.TruncateFrequency = DefaultTruncateFrequency } if opts.MinWALTime <= 0 { opts.MinWALTime = DefaultMinWALTime } if opts.MaxWALTime <= 0 { opts.MaxWALTime = DefaultMaxWALTime } if opts.MinWALTime > opts.MaxWALTime { opts.MaxWALTime = opts.MinWALTime } if t := int64(opts.TruncateFrequency / time.Millisecond); opts.MaxWALTime < t { opts.MaxWALTime = t } return opts } func (db *DB) replayWAL() error { db.logger.Info("replaying WAL, this may take a while", "dir", db.wal.Dir()) defer db.resetWALReplayResources() start := time.Now() dir, startFrom, err := wlog.LastCheckpoint(db.wal.Dir()) if err != nil && !errors.Is(err, record.ErrNotFound) { return fmt.Errorf("find last checkpoint: %w", err) } multiRef := map[chunks.HeadSeriesRef]chunks.HeadSeriesRef{} if err == nil { sr, err := wlog.NewSegmentsReader(dir) if err != nil { return fmt.Errorf("open checkpoint: %w", err) } defer func() { if err := sr.Close(); err != nil { db.logger.Warn("error while closing the wal segments reader", "err", err) } }() // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. if err := db.loadWAL(wlog.NewReader(sr), multiRef); err != nil { return fmt.Errorf("backfill checkpoint: %w", err) } startFrom++ db.logger.Info("WAL checkpoint loaded") } // Find the last segment. 
_, last, err := wlog.Segments(db.wal.Dir()) if err != nil { return fmt.Errorf("finding WAL segments: %w", err) } // Backfill segments from the most recent checkpoint onwards. for i := startFrom; i <= last; i++ { seg, err := wlog.OpenReadSegment(wlog.SegmentName(db.wal.Dir(), i)) if err != nil { return fmt.Errorf("open WAL segment: %d: %w", i, err) } sr := wlog.NewSegmentBufReader(seg) err = db.loadWAL(wlog.NewReader(sr), multiRef) if err := sr.Close(); err != nil { db.logger.Warn("error while closing the wal segments reader", "err", err) } if err != nil { return err } db.logger.Info("WAL segment loaded", "segment", i, "maxSegment", last) } walReplayDuration := time.Since(start) db.metrics.walTotalReplayDuration.Set(walReplayDuration.Seconds()) return nil } func (db *DB) resetWALReplayResources() { db.walReplaySeriesPool = zeropool.Pool[[]record.RefSeries]{} db.walReplaySamplesPool = zeropool.Pool[[]record.RefSample]{} db.walReplayHistogramsPool = zeropool.Pool[[]record.RefHistogramSample]{} db.walReplayFloatHistogramsPool = zeropool.Pool[[]record.RefFloatHistogramSample]{} } func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) { var ( syms = labels.NewSymbolTable() // One table for the whole WAL. 
dec = record.NewDecoder(syms, db.logger) lastRef = chunks.HeadSeriesRef(db.nextRef.Load()) decoded = make(chan any, 10) errCh = make(chan error, 1) ) go func() { defer close(decoded) var err error for r.Next() { rec := r.Record() switch dec.Type(rec) { case record.Series: series := db.walReplaySeriesPool.Get()[:0] series, err = dec.Series(rec, series) if err != nil { errCh <- &wlog.CorruptionErr{ Err: fmt.Errorf("decode series: %w", err), Segment: r.Segment(), Offset: r.Offset(), } return } decoded <- series case record.Samples: samples := db.walReplaySamplesPool.Get()[:0] samples, err = dec.Samples(rec, samples) if err != nil { errCh <- &wlog.CorruptionErr{ Err: fmt.Errorf("decode samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } return } decoded <- samples case record.HistogramSamples, record.CustomBucketsHistogramSamples: histograms := db.walReplayHistogramsPool.Get()[:0] histograms, err = dec.HistogramSamples(rec, histograms) if err != nil { errCh <- &wlog.CorruptionErr{ Err: fmt.Errorf("decode histogram samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } return } decoded <- histograms case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: floatHistograms := db.walReplayFloatHistogramsPool.Get()[:0] floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) if err != nil { errCh <- &wlog.CorruptionErr{ Err: fmt.Errorf("decode float histogram samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } return } decoded <- floatHistograms case record.Tombstones, record.Exemplars: // We don't care about tombstones or exemplars during replay. // TODO: If decide to decode exemplars, we should make sure to prepopulate // stripeSeries.exemplars in the next block by using setLatestExemplar. 
continue default: errCh <- &wlog.CorruptionErr{ Err: fmt.Errorf("invalid record type %v", dec.Type(rec)), Segment: r.Segment(), Offset: r.Offset(), } } } }() var nonExistentSeriesRefs atomic.Uint64 for d := range decoded { switch v := d.(type) { case []record.RefSeries: for _, entry := range v { // If this is a new series, create it in memory. If we never read in a // sample for this series, its timestamp will remain at 0 and it will // be deleted at the next GC. if db.series.GetByID(entry.Ref) == nil { series := &memSeries{ref: entry.Ref, lset: entry.Labels, lastTs: 0} db.series.Set(entry.Labels.Hash(), series) multiRef[entry.Ref] = series.ref db.metrics.numActiveSeries.Inc() if entry.Ref > lastRef { lastRef = entry.Ref } } } db.walReplaySeriesPool.Put(v) case []record.RefSample: for _, entry := range v { // Update the lastTs for the series based ref, ok := multiRef[entry.Ref] if !ok { nonExistentSeriesRefs.Inc() continue } series := db.series.GetByID(ref) if entry.T > series.lastTs { series.lastTs = entry.T } } db.walReplaySamplesPool.Put(v) case []record.RefHistogramSample: for _, entry := range v { // Update the lastTs for the series based ref, ok := multiRef[entry.Ref] if !ok { nonExistentSeriesRefs.Inc() continue } series := db.series.GetByID(ref) if entry.T > series.lastTs { series.lastTs = entry.T } } db.walReplayHistogramsPool.Put(v) case []record.RefFloatHistogramSample: for _, entry := range v { // Update the lastTs for the series based ref, ok := multiRef[entry.Ref] if !ok { nonExistentSeriesRefs.Inc() continue } series := db.series.GetByID(ref) if entry.T > series.lastTs { series.lastTs = entry.T } } db.walReplayFloatHistogramsPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) } } if v := nonExistentSeriesRefs.Load(); v > 0 { db.logger.Warn("found sample referencing non-existing series", "skipped_series", v) } db.nextRef.Store(uint64(lastRef)) select { case err := <-errCh: return err default: if r.Err() != nil { return 
fmt.Errorf("read records: %w", r.Err()) } return nil } } func (db *DB) run() { defer close(db.donec) Loop: for { select { case <-db.stopc: break Loop case <-time.After(db.opts.TruncateFrequency): // The timestamp ts is used to determine which series are not receiving // samples and may be deleted from the WAL. Their most recent append // timestamp is compared to ts, and if that timestamp is older then ts, // they are considered inactive and may be deleted. // // Subtracting a duration from ts will add a buffer for when series are // considered inactive and safe for deletion. ts := max(db.rs.LowestSentTimestamp()-db.opts.MinWALTime, 0) // Network issues can prevent the result of getRemoteWriteTimestamp from // changing. We don't want data in the WAL to grow forever, so we set a cap // on the maximum age data can be. If our ts is older than this cutoff point, // we'll shift it forward to start deleting very stale data. if maxTS := timestamp.FromTime(time.Now()) - db.opts.MaxWALTime; ts < maxTS { ts = maxTS } db.logger.Debug("truncating the WAL", "ts", ts) if err := db.truncate(ts); err != nil { db.logger.Warn("failed to truncate WAL", "err", err) } } } } // keepSeriesInWALCheckpointFn returns a function that is used to determine whether a series record should be kept in the checkpoint. // last is the last WAL segment that was considered for checkpointing. // NOTE: the agent implementation here is different from the Prometheus implementation, in that it uses WAL segment numbers instead of timestamps. func (db *DB) keepSeriesInWALCheckpointFn(last int) func(id chunks.HeadSeriesRef) bool { return func(id chunks.HeadSeriesRef) bool { // Keep the record if the series exists in the db. if db.series.GetByID(id) != nil { return true } // Keep the record if the series was recently deleted. 
seg, ok := db.deleted[id] return ok && seg > last } } func (db *DB) truncate(mint int64) error { db.logger.Info("series GC started") db.mtx.RLock() defer db.mtx.RUnlock() start := time.Now() db.gc(mint) db.logger.Info("series GC completed", "duration", time.Since(start)) first, last, err := wlog.Segments(db.wal.Dir()) if err != nil { return fmt.Errorf("get segment range: %w", err) } // Start a new segment so low ingestion volume instances don't have more WAL // than needed. if _, err := db.wal.NextSegment(); err != nil { return fmt.Errorf("next segment: %w", err) } last-- // Never consider most recent segment for checkpoint if last < 0 { return nil // no segments yet } // The lower two-thirds of segments should contain mostly obsolete samples. // If we have less than two segments, it's not worth checkpointing yet. last = first + (last-first)*2/3 if last <= first { return nil } db.metrics.checkpointCreationTotal.Inc() if _, err = wlog.Checkpoint(db.logger, db.wal, first, last, db.keepSeriesInWALCheckpointFn(last), mint); err != nil { db.metrics.checkpointCreationFail.Inc() var cerr *wlog.CorruptionErr if errors.As(err, &cerr) { db.metrics.walCorruptionsTotal.Inc() } return fmt.Errorf("create checkpoint: %w", err) } if err := db.wal.Truncate(last + 1); err != nil { // If truncating fails, we'll just try it again at the next checkpoint. // Leftover segments will still just be ignored in the future if there's a // checkpoint that supersedes them. db.logger.Error("truncating segments failed", "err", err) } // The checkpoint is written and segments before it are truncated, so we // no longer need to track deleted series that were being kept around. 
for ref, segment := range db.deleted { if segment <= last { delete(db.deleted, ref) } } db.metrics.checkpointDeleteTotal.Inc() db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted))) if err := wlog.DeleteCheckpoints(db.wal.Dir(), last); err != nil { // Leftover old checkpoints do not cause problems down the line beyond // occupying disk space. They will just be ignored since a newer checkpoint // exists. db.logger.Error("delete old checkpoints", "err", err) db.metrics.checkpointDeleteFail.Inc() } db.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) db.logger.Info("WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) return nil } // gc marks ref IDs that have not received a sample since mint as deleted in // s.deleted, along with the segment where they originally got deleted. func (db *DB) gc(mint int64) { deleted := db.series.GC(mint) db.metrics.numActiveSeries.Sub(float64(len(deleted))) _, last, _ := wlog.Segments(db.wal.Dir()) // We want to keep series records for any newly deleted series // until we've passed the last recorded segment. This prevents // the WAL having samples for series records that no longer exist. for ref := range deleted { db.deleted[ref] = last } db.metrics.numWALSeriesPendingDeletion.Set(float64(len(db.deleted))) } // StartTime implements the Storage interface. func (*DB) StartTime() (int64, error) { return int64(model.Latest), nil } // Querier implements the Storage interface. func (*DB) Querier(int64, int64) (storage.Querier, error) { return nil, ErrUnsupported } // ChunkQuerier implements the Storage interface. func (*DB) ChunkQuerier(int64, int64) (storage.ChunkQuerier, error) { return nil, ErrUnsupported } // ExemplarQuerier implements the Storage interface. func (*DB) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) { return nil, ErrUnsupported } // Appender implements storage.Storage. 
func (db *DB) Appender(context.Context) storage.Appender { return db.appenderPool.Get().(storage.Appender) } // Close implements the Storage interface. func (db *DB) Close() error { db.mtx.Lock() defer db.mtx.Unlock() close(db.stopc) <-db.donec db.metrics.Unregister() return errors.Join(db.locker.Release(), db.wal.Close()) } type appenderBase struct { *DB pendingSeries []record.RefSeries pendingSamples []record.RefSample pendingHistograms []record.RefHistogramSample pendingFloatHistograms []record.RefFloatHistogramSample pendingExamplars []record.RefExemplar // Pointers to the series referenced by each element of pendingSamples. // Series lock is not held on elements. sampleSeries []*memSeries // Pointers to the series referenced by each element of pendingHistograms. // Series lock is not held on elements. histogramSeries []*memSeries // Pointers to the series referenced by each element of pendingFloatHistograms. // Series lock is not held on elements. floatHistogramSeries []*memSeries } type appender struct { appenderBase hints *storage.AppendOptions } func (a *appender) SetOptions(opts *storage.AppendOptions) { a.hints = opts } func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { // series references and chunk references are identical for agent mode. headRef := chunks.HeadSeriesRef(ref) series := a.series.GetByID(headRef) if series == nil { var err error series, err = a.getOrCreate(l) if err != nil { return 0, err } } series.Lock() defer series.Unlock() if t <= a.minValidTime(series.lastTs) { a.metrics.totalOutOfOrderSamples.Inc() return 0, storage.ErrOutOfOrderSample } // NOTE: always modify pendingSamples and sampleSeries together. 
a.pendingSamples = append(a.pendingSamples, record.RefSample{ Ref: series.ref, T: t, V: v, }) a.sampleSeries = append(a.sampleSeries, series) a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeFloat).Inc() return storage.SeriesRef(series.ref), nil } func (a *appenderBase) getOrCreate(l labels.Labels) (series *memSeries, err error) { // Ensure no empty or duplicate labels have gotten through. This mirrors the // equivalent validation code in the TSDB's headAppender. l = l.WithoutEmpty() if l.IsEmpty() { return nil, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample) } if lbl, dup := l.HasDuplicateLabelNames(); dup { return nil, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample) } hash := l.Hash() series = a.series.GetByHash(hash, l) if series != nil { return series, nil } ref := chunks.HeadSeriesRef(a.nextRef.Inc()) series = &memSeries{ref: ref, lset: l, lastTs: math.MinInt64} a.series.Set(hash, series) a.pendingSeries = append(a.pendingSeries, record.RefSeries{ Ref: series.ref, Labels: l, }) a.metrics.numActiveSeries.Inc() return series, nil } func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { // Series references and chunk references are identical for agent mode. headRef := chunks.HeadSeriesRef(ref) s := a.series.GetByID(headRef) if s == nil { return 0, fmt.Errorf("unknown series ref when trying to add exemplar: %d", ref) } // Ensure no empty labels have gotten through. e.Labels = e.Labels.WithoutEmpty() if err := a.validateExemplar(s.ref, e); err != nil { if errors.Is(err, storage.ErrDuplicateExemplar) { // Duplicate, don't return an error but don't accept the exemplar. 
return 0, nil } return 0, err } a.series.SetLatestExemplar(s.ref, &e) a.pendingExamplars = append(a.pendingExamplars, record.RefExemplar{ Ref: s.ref, T: e.Ts, V: e.Value, Labels: e.Labels, }) a.metrics.totalAppendedExemplars.Inc() return storage.SeriesRef(s.ref), nil } func (a *appenderBase) validateExemplar(ref chunks.HeadSeriesRef, e exemplar.Exemplar) error { if lbl, dup := e.Labels.HasDuplicateLabelNames(); dup { return fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidExemplar) } // Exemplar label length does not include chars involved in text rendering such as quotes // equals sign, or commas. See definition of const ExemplarMaxLabelLength. labelSetLen := 0 if err := e.Labels.Validate(func(l labels.Label) error { labelSetLen += utf8.RuneCountInString(l.Name) labelSetLen += utf8.RuneCountInString(l.Value) if labelSetLen > exemplar.ExemplarMaxLabelSetLength { return storage.ErrExemplarLabelLength } return nil }); err != nil { return err } // Check for duplicate vs last stored exemplar for this series, and discard those. // Otherwise, record the current exemplar as the latest. // Prometheus' TSDB returns 0 when encountering duplicates, so we do the same here. prevExemplar := a.series.GetLatestExemplar(ref) if prevExemplar != nil && prevExemplar.Equals(e) { return storage.ErrDuplicateExemplar } return nil } func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { if err := h.Validate(); err != nil { return 0, err } } if fh != nil { if err := fh.Validate(); err != nil { return 0, err } } // series references and chunk references are identical for agent mode. 
headRef := chunks.HeadSeriesRef(ref) series := a.series.GetByID(headRef) if series == nil { var err error series, err = a.getOrCreate(l) if err != nil { return 0, err } } series.Lock() defer series.Unlock() if t <= a.minValidTime(series.lastTs) { a.metrics.totalOutOfOrderSamples.Inc() return 0, storage.ErrOutOfOrderSample } switch { case h != nil: // NOTE: always modify pendingHistograms and histogramSeries together a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{ Ref: series.ref, T: t, H: h, }) a.histogramSeries = append(a.histogramSeries, series) case fh != nil: // NOTE: always modify pendingFloatHistograms and floatHistogramSeries together a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{ Ref: series.ref, T: t, FH: fh, }) a.floatHistogramSeries = append(a.floatHistogramSeries, series) } a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() return storage.SeriesRef(series.ref), nil } func (*appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { // TODO: Wire metadata in the Agent's appender. return 0, nil } func (a *appender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.Labels, t, st int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { if err := h.Validate(); err != nil { return 0, err } } if fh != nil { if err := fh.Validate(); err != nil { return 0, err } } if st >= t { return 0, storage.ErrSTNewerThanSample } series := a.series.GetByID(chunks.HeadSeriesRef(ref)) if series == nil { var err error series, err = a.getOrCreate(l) if err != nil { return 0, err } } series.Lock() defer series.Unlock() if st <= a.minValidTime(series.lastTs) { return 0, storage.ErrOutOfOrderST } if st <= series.lastTs { // discard the sample if it's out of order. 
return 0, storage.ErrOutOfOrderST } // NOTE(bwplotka): This is a bug, as we "commit" pending sample TS as the WAL last TS. It was likely done // to satisfy incorrect TestDBStartTimestampSamplesIngestion test. We are leaving it as-is given the planned removal // of AppenderV1 as per https://github.com/prometheus/prometheus/issues/17632. series.lastTs = st switch { case h != nil: zeroHistogram := &histogram.Histogram{} a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{ Ref: series.ref, T: st, H: zeroHistogram, }) a.histogramSeries = append(a.histogramSeries, series) case fh != nil: a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{ Ref: series.ref, T: st, FH: &histogram.FloatHistogram{}, }) a.floatHistogramSeries = append(a.floatHistogramSeries, series) } a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() return storage.SeriesRef(series.ref), nil } func (a *appender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, t, st int64) (storage.SeriesRef, error) { if st >= t { return 0, storage.ErrSTNewerThanSample } series := a.series.GetByID(chunks.HeadSeriesRef(ref)) if series == nil { var err error series, err = a.getOrCreate(l) if err != nil { return 0, err } } series.Lock() defer series.Unlock() if t <= a.minValidTime(series.lastTs) { a.metrics.totalOutOfOrderSamples.Inc() return 0, storage.ErrOutOfOrderSample } if st <= series.lastTs { // discard the sample if it's out of order. return 0, storage.ErrOutOfOrderST } // NOTE(bwplotka): This is a bug, as we "commit" pending sample TS as the WAL last TS. It was likely done // to satisfy incorrect TestDBStartTimestampSamplesIngestion test. We are leaving it as-is given the planned removal // of AppenderV1 as per https://github.com/prometheus/prometheus/issues/17632. series.lastTs = st // NOTE: always modify pendingSamples and sampleSeries together. 
a.pendingSamples = append(a.pendingSamples, record.RefSample{ Ref: series.ref, T: st, V: 0, }) a.sampleSeries = append(a.sampleSeries, series) a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeFloat).Inc() return storage.SeriesRef(series.ref), nil } // Commit submits the collected samples and purges the batch. func (a *appender) Commit() error { defer a.appenderPool.Put(a) return a.commit() } func (a *appender) Rollback() error { defer a.appenderPool.Put(a) return a.rollback() } func (a *appenderBase) commit() error { if err := a.log(); err != nil { return err } a.clearData() if a.writeNotified != nil { a.writeNotified.Notify() } return nil } // log logs all pending data to the WAL. func (a *appenderBase) log() error { a.mtx.RLock() defer a.mtx.RUnlock() var encoder record.Encoder buf := a.bufPool.Get().([]byte) defer func() { a.bufPool.Put(buf) //nolint:staticcheck }() if len(a.pendingSeries) > 0 { buf = encoder.Series(a.pendingSeries, buf) if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] } if len(a.pendingSamples) > 0 { buf = encoder.Samples(a.pendingSamples, buf) if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] } if len(a.pendingHistograms) > 0 { var customBucketsHistograms []record.RefHistogramSample buf, customBucketsHistograms = encoder.HistogramSamples(a.pendingHistograms, buf) if len(buf) > 0 { if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] } if len(customBucketsHistograms) > 0 { buf = encoder.CustomBucketsHistogramSamples(customBucketsHistograms, nil) if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] } } if len(a.pendingFloatHistograms) > 0 { var customBucketsFloatHistograms []record.RefFloatHistogramSample buf, customBucketsFloatHistograms = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf) if len(buf) > 0 { if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] } if len(customBucketsFloatHistograms) > 0 { buf = 
encoder.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, nil) if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] } } if len(a.pendingExamplars) > 0 { buf = encoder.Exemplars(a.pendingExamplars, buf) if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] } var series *memSeries for i, s := range a.pendingSamples { series = a.sampleSeries[i] if !series.updateTimestamp(s.T) { a.metrics.totalOutOfOrderSamples.Inc() } } for i, s := range a.pendingHistograms { series = a.histogramSeries[i] if !series.updateTimestamp(s.T) { a.metrics.totalOutOfOrderSamples.Inc() } } for i, s := range a.pendingFloatHistograms { series = a.floatHistogramSeries[i] if !series.updateTimestamp(s.T) { a.metrics.totalOutOfOrderSamples.Inc() } } return nil } // clearData clears all pending data. func (a *appenderBase) clearData() { a.pendingSeries = a.pendingSeries[:0] a.pendingSamples = a.pendingSamples[:0] a.pendingHistograms = a.pendingHistograms[:0] a.pendingFloatHistograms = a.pendingFloatHistograms[:0] a.pendingExamplars = a.pendingExamplars[:0] a.sampleSeries = a.sampleSeries[:0] a.histogramSeries = a.histogramSeries[:0] a.floatHistogramSeries = a.floatHistogramSeries[:0] } func (a *appenderBase) rollback() error { // Series are created in-memory regardless of rollback. This means we must // log them to the WAL, otherwise subsequent commits may reference a series // which was never written to the WAL. if err := a.logSeries(); err != nil { return err } a.clearData() return nil } // logSeries logs only pending series records to the WAL. 
func (a *appenderBase) logSeries() error { a.mtx.RLock() defer a.mtx.RUnlock() if len(a.pendingSeries) > 0 { buf := a.bufPool.Get().([]byte) defer func() { a.bufPool.Put(buf) //nolint:staticcheck }() var encoder record.Encoder buf = encoder.Series(a.pendingSeries, buf) if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] } return nil } // minValidTime returns the minimum timestamp that a sample can have // and is needed for preventing underflow. func (a *appenderBase) minValidTime(lastTs int64) int64 { if lastTs < math.MinInt64+a.opts.OutOfOrderTimeWindow { return math.MinInt64 } return lastTs - a.opts.OutOfOrderTimeWindow }
go
github
https://github.com/prometheus/prometheus
tsdb/agent/db.go
//// [tests/cases/compiler/ambientExternalModuleWithoutInternalImportDeclaration.ts] //// //// [ambientExternalModuleWithoutInternalImportDeclaration_0.ts] declare module 'M' { namespace C { export var f: number; } class C { foo(): void; } export = C; } //// [ambientExternalModuleWithoutInternalImportDeclaration_1.ts] ///<reference path='ambientExternalModuleWithoutInternalImportDeclaration_0.ts'/> import A = require('M'); var c = new A(); //// [ambientExternalModuleWithoutInternalImportDeclaration_0.js] "use strict"; //// [ambientExternalModuleWithoutInternalImportDeclaration_1.js] define(["require", "exports", "M"], function (require, exports, A) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var c = new A(); });
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/ambientExternalModuleWithoutInternalImportDeclaration.js
# -*- coding: utf-8 -*- # Copyright (c) 2019 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import absolute_import, division, print_function __metaclass__ = type import re from ansible.module_utils.six import iteritems SIZE_RANGES = { 'Y': 1 << 80, 'Z': 1 << 70, 'E': 1 << 60, 'P': 1 << 50, 'T': 1 << 40, 'G': 1 << 30, 'M': 1 << 20, 'K': 1 << 10, 'B': 1, } def lenient_lowercase(lst): """Lowercase elements of a list. If an element is not a string, pass it through untouched. """ lowered = [] for value in lst: try: lowered.append(value.lower()) except AttributeError: lowered.append(value) return lowered def human_to_bytes(number, default_unit=None, isbits=False): """Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument. example: human_to_bytes('10M') <=> human_to_bytes(10, 'M'). When isbits is False (default), converts bytes from a human-readable format to integer. example: human_to_bytes('1MB') returns 1048576 (int). The function expects 'B' (uppercase) as a byte identifier passed as a part of 'name' param string or 'unit', e.g. 'MB'/'KB'/etc. (except when the identifier is single 'b', it is perceived as a byte identifier too). if 'Mb'/'Kb'/... is passed, the ValueError will be rased. When isbits is True, converts bits from a human-readable format to integer. example: human_to_bytes('1Mb', isbits=True) returns 1048576 (int) - string bits representation was passed and return as a number or bits. The function expects 'b' (lowercase) as a bit identifier, e.g. 'Mb'/'Kb'/etc. if 'MB'/'KB'/... is passed, the ValueError will be rased. 
""" m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE) if m is None: raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number)) try: num = float(m.group(1)) except Exception: raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number)) unit = m.group(2) if unit is None: unit = default_unit if unit is None: ''' No unit given, returning raw number ''' return int(round(num)) range_key = unit[0].upper() try: limit = SIZE_RANGES[range_key] except Exception: raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys()))) # default value unit_class = 'B' unit_class_name = 'byte' # handling bits case if isbits: unit_class = 'b' unit_class_name = 'bit' # check unit value if more than one character (KB, MB) if len(unit) > 1: expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key) if range_key == 'B': expect_message = 'expect %s or %s' % (unit_class, unit_class_name) if unit_class_name in unit.lower(): pass elif unit[1] != unit_class: raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message)) return int(round(num * limit)) def bytes_to_human(size, isbits=False, unit=None): base = 'Bytes' if isbits: base = 'bits' suffix = '' for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]): if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]: break if limit != 1: suffix += base[0] else: suffix = base return '%.2f %s' % (size / limit, suffix)
unknown
codeparrot/codeparrot-clean
/*
 * Copyright 2002-present the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.beans.factory;

/**
 * Exception thrown when a bean is not a factory, but a user tries to get
 * at the factory for the given bean name. Whether a bean is a factory is
 * determined by whether it implements the FactoryBean interface.
 *
 * @author Rod Johnson
 * @since 10.03.2003
 * @see org.springframework.beans.factory.FactoryBean
 */
@SuppressWarnings("serial")
public class BeanIsNotAFactoryException extends BeanNotOfRequiredTypeException {

	/**
	 * Create a new BeanIsNotAFactoryException.
	 * @param name the name of the bean requested
	 * @param actualType the actual type returned, which did not match
	 * the expected type ({@link FactoryBean} is always the required type here)
	 */
	public BeanIsNotAFactoryException(String name, Class<?> actualType) {
		super(name, FactoryBean.class, actualType);
	}

}
java
github
https://github.com/spring-projects/spring-framework
spring-beans/src/main/java/org/springframework/beans/factory/BeanIsNotAFactoryException.java
// Copyright IBM Corp. 2016, 2025 // SPDX-License-Identifier: MPL-2.0 package keysutil import ( "context" "encoding/base64" "errors" "math/big" paths "path" "sort" "strings" lru "github.com/hashicorp/golang-lru" "github.com/hashicorp/vault/sdk/logical" ) const ( // DefaultCacheSize is used if no cache size is specified for // NewEncryptedKeyStorage. This value is the number of cache entries to // store, not the size in bytes of the cache. DefaultCacheSize = 16 * 1024 // DefaultPrefix is used if no prefix is specified for // NewEncryptedKeyStorage. Prefix must be defined so we can provide context // for the base folder. DefaultPrefix = "encryptedkeys/" // EncryptedKeyPolicyVersionTpl is a template that can be used to minimize // the amount of data that's stored with the ciphertext. EncryptedKeyPolicyVersionTpl = "{{version}}:" ) var ( // ErrPolicyDerivedKeys is returned if the provided policy does not use // derived keys. This is a requirement for this storage implementation. ErrPolicyDerivedKeys = errors.New("key policy must use derived keys") // ErrPolicyConvergentEncryption is returned if the provided policy does not use // convergent encryption. This is a requirement for this storage implementation. ErrPolicyConvergentEncryption = errors.New("key policy must use convergent encryption") // ErrPolicyConvergentVersion is returned if the provided policy does not use // a new enough convergent version. This is a requirement for this storage // implementation. ErrPolicyConvergentVersion = errors.New("key policy must use convergent version > 2") // ErrNilStorage is returned if the provided storage is nil. ErrNilStorage = errors.New("nil storage provided") // ErrNilPolicy is returned if the provided policy is nil. ErrNilPolicy = errors.New("nil policy provided") ) // EncryptedKeyStorageConfig is used to configure an EncryptedKeyStorage object. type EncryptedKeyStorageConfig struct { // Policy is the key policy to use to encrypt the key paths. 
Policy *Policy // Prefix is the storage prefix for this instance of the EncryptedKeyStorage // object. This is stored in plaintext. If not set the DefaultPrefix will be // used. Prefix string // CacheSize is the number of elements to cache. If not set the // DetaultCacheSize will be used. CacheSize int } // NewEncryptedKeyStorageWrapper takes an EncryptedKeyStorageConfig and returns a new // EncryptedKeyStorage object. func NewEncryptedKeyStorageWrapper(config EncryptedKeyStorageConfig) (*EncryptedKeyStorageWrapper, error) { if config.Policy == nil { return nil, ErrNilPolicy } if !config.Policy.Derived { return nil, ErrPolicyDerivedKeys } if !config.Policy.ConvergentEncryption { return nil, ErrPolicyConvergentEncryption } if config.Prefix == "" { config.Prefix = DefaultPrefix } if !strings.HasSuffix(config.Prefix, "/") { config.Prefix += "/" } size := config.CacheSize if size <= 0 { size = DefaultCacheSize } cache, err := lru.New2Q(size) if err != nil { return nil, err } return &EncryptedKeyStorageWrapper{ policy: config.Policy, prefix: config.Prefix, lru: cache, }, nil } type EncryptedKeyStorageWrapper struct { policy *Policy lru *lru.TwoQueueCache prefix string } func (f *EncryptedKeyStorageWrapper) Wrap(s logical.Storage) logical.Storage { return &encryptedKeyStorage{ policy: f.policy, s: s, prefix: f.prefix, lru: f.lru, } } // EncryptedKeyStorage implements the logical.Storage interface and ensures the // storage paths are encrypted in the underlying storage. type encryptedKeyStorage struct { policy *Policy s logical.Storage lru *lru.TwoQueueCache prefix string } func ensureTailingSlash(path string) string { if !strings.HasSuffix(path, "/") { return path + "/" } return path } // List implements the logical.Storage List method, and decrypts all the items // in a path prefix. This can only operate on full folder structures so the // prefix should end in a "/". 
// List implements the logical.Storage List method, and decrypts all the items
// in a path prefix. This can only operate on full folder structures so the
// prefix should end in a "/".
func (s *encryptedKeyStorage) List(ctx context.Context, prefix string) ([]string, error) {
	// decoder is reused across iterations to turn the base-62 textual key
	// stored in the backend back into the raw ciphertext bytes.
	var decoder big.Int

	encPrefix, err := s.encryptPath(prefix)
	if err != nil {
		return nil, err
	}

	keys, err := s.s.List(ctx, ensureTailingSlash(encPrefix))
	if err != nil {
		return keys, err
	}

	decryptedKeys := make([]string, len(keys))

	// The context for the decryption operations will be the object's prefix
	// joined with the provided prefix. Join cleans the path ensuring there
	// isn't a trailing "/".
	context := []byte(paths.Join(s.prefix, prefix))

	for i, k := range keys {
		raw, ok := s.lru.Get(k)
		if ok {
			// Cache HIT: we can bail early and skip the decode & decrypt operations.
			decryptedKeys[i] = raw.(string)
			continue
		}

		// If a folder is included in the keys it will have a trailing "/".
		// We need to remove this before decoding/decrypting and add it back
		// later.
		appendSlash := strings.HasSuffix(k, "/")
		if appendSlash {
			k = strings.TrimSuffix(k, "/")
		}

		// Recover the ciphertext bytes from the base-62 path representation
		// (the inverse of what encryptPath produced).
		decoder.SetString(k, 62)
		decoded := decoder.Bytes()
		if len(decoded) == 0 {
			return nil, errors.New("could not decode key")
		}

		// Decrypt the data with the object's key policy.
		encodedPlaintext, err := s.policy.Decrypt(context, nil, string(decoded[:]))
		if err != nil {
			return nil, err
		}

		// The plaintext is still base64 encoded, decode it.
		decoded, err = base64.StdEncoding.DecodeString(encodedPlaintext)
		if err != nil {
			return nil, err
		}

		plaintext := string(decoded[:])

		// Add the slash back to the plaintext value
		if appendSlash {
			plaintext += "/"
			k += "/"
		}

		// We want to store the unencoded version of the key in the cache.
		// This will make it more performant when it's a HIT.
		s.lru.Add(k, plaintext)

		decryptedKeys[i] = plaintext
	}

	sort.Strings(decryptedKeys)

	return decryptedKeys, nil
}

// Get implements the logical.Storage Get method.
func (s *encryptedKeyStorage) Get(ctx context.Context, path string) (*logical.StorageEntry, error) {
	// Translate the plaintext path into its encrypted on-disk form before
	// delegating to the underlying storage.
	encPath, err := s.encryptPath(path)
	if err != nil {
		return nil, err
	}

	return s.s.Get(ctx, encPath)
}

// Put implements the logical.Storage Put method.
func (s *encryptedKeyStorage) Put(ctx context.Context, entry *logical.StorageEntry) error {
	encPath, err := s.encryptPath(entry.Key)
	if err != nil {
		return err
	}

	// Shallow-copy the entry so the caller's value is not mutated when we
	// swap in the encrypted key.
	e := &logical.StorageEntry{}
	*e = *entry
	e.Key = encPath

	return s.s.Put(ctx, e)
}

// Delete implements the logical.Storage Delete method.
func (s *encryptedKeyStorage) Delete(ctx context.Context, path string) error {
	encPath, err := s.encryptPath(path)
	if err != nil {
		return err
	}

	return s.s.Delete(ctx, encPath)
}

// encryptPath takes a plaintext path and encrypts each path section (separated
// by "/") with the object's key policy. The context for each encryption is the
// plaintext path prefix for the key.
func (s *encryptedKeyStorage) encryptPath(path string) (string, error) {
	// encoder renders each ciphertext as a base-62 string so the result is
	// safe to use as a storage path segment (no "/" or other path characters).
	var encoder big.Int

	if path == "" || path == "/" {
		return s.prefix, nil
	}

	path = paths.Clean(path)

	// Trim the prefix if it starts with a "/"
	path = strings.TrimPrefix(path, "/")

	parts := strings.Split(path, "/")

	encPath := s.prefix
	context := strings.TrimSuffix(s.prefix, "/")
	for _, p := range parts {
		// base64-encode the segment before encrypting. The policy is required
		// to use convergent encryption (enforced in the constructor), so the
		// same (context, plaintext) pair always produces the same ciphertext —
		// which is what makes deterministic path lookups possible.
		encoded := base64.StdEncoding.EncodeToString([]byte(p))
		ciphertext, err := s.policy.Encrypt(0, []byte(context), nil, encoded)
		if err != nil {
			return "", err
		}

		encoder.SetBytes([]byte(ciphertext))
		encPath = paths.Join(encPath, encoder.Text(62))

		// Each deeper segment uses the plaintext path of its parent as the
		// encryption context.
		context = paths.Join(context, p)
	}

	return encPath, nil
}
go
github
https://github.com/hashicorp/vault
sdk/helper/keysutil/encrypted_key_storage.go
# -*- coding: utf-8 -*- from distutils.version import LooseVersion import json import django from django.contrib.sites.models import Site from django.http import HttpResponse from django.shortcuts import render_to_response from django.template.context import RequestContext from django.utils.encoding import smart_str from cms.models import Page, GlobalPagePermission from cms.utils import get_language_from_request from cms.utils import get_language_list from cms.utils import get_cms_setting from cms.constants import PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DIRTY NOT_FOUND_RESPONSE = "NotFound" DJANGO_1_4 = LooseVersion(django.get_version()) < LooseVersion('1.5') def jsonify_request(response): """ Turn any response in a 200 response to let jQuery code handle it nicely. Response contains a json object with the following attributes: * status: original response status code * content: original response content """ content = {'status': response.status_code, 'content': smart_str(response.content, response._charset)} return HttpResponse(json.dumps(content), content_type="application/json") publisher_classes = { PUBLISHER_STATE_DIRTY: "publisher_dirty", PUBLISHER_STATE_PENDING: "publisher_pending", } def get_admin_menu_item_context(request, page, filtered=False, language=None): """ Used for rendering the page tree, inserts into context everything what we need for single item """ has_add_page_permission = page.has_add_permission(request) has_move_page_permission = page.has_move_page_permission(request) site = Site.objects.get_current() lang = get_language_from_request(request) #slug = page.get_slug(language=lang, fallback=True) # why was this here ?? 
metadata = "" if get_cms_setting('PERMISSION'): # jstree metadata generator md = [] #if not has_add_page_permission: if not has_move_page_permission: md.append(('valid_children', False)) md.append(('draggable', False)) if md: # just turn it into simple javascript object metadata = "{" + ", ".join(map(lambda e: "%s: %s" % (e[0], isinstance(e[1], bool) and str(e[1]) or e[1].lower() ), md)) + "}" has_add_on_same_level_permission = False opts = Page._meta if get_cms_setting('PERMISSION'): if hasattr(request.user, '_global_add_perm_cache'): global_add_perm = request.user._global_add_perm_cache else: global_add_perm = GlobalPagePermission.objects.user_has_add_permission( request.user, page.site_id).exists() request.user._global_add_perm_cache = global_add_perm if request.user.has_perm(opts.app_label + '.' + opts.get_add_permission()) and global_add_perm: has_add_on_same_level_permission = True from cms.utils import permissions if not has_add_on_same_level_permission and page.parent_id: has_add_on_same_level_permission = permissions.has_generic_permission(page.parent_id, request.user, "add", page.site_id) #has_add_on_same_level_permission = has_add_page_on_same_level_permission(request, page) context = { 'page': page, 'site': site, 'lang': lang, 'filtered': filtered, 'metadata': metadata, 'preview_language': language, 'has_change_permission': page.has_change_permission(request), 'has_publish_permission': page.has_publish_permission(request), 'has_delete_permission': page.has_delete_permission(request), 'has_move_page_permission': has_move_page_permission, 'has_add_page_permission': has_add_page_permission, 'has_add_on_same_level_permission': has_add_on_same_level_permission, 'CMS_PERMISSION': get_cms_setting('PERMISSION'), } return context def render_admin_menu_item(request, page, template=None, language=None): """ Renders requested page item for the tree. This is used in case when item must be reloaded over ajax. 
""" if not template: template = "admin/cms/page/tree/menu_fragment.html" if not page.pk: return HttpResponse(NOT_FOUND_RESPONSE) # Not found - tree will remove item # languages from cms.utils import permissions languages = get_language_list(page.site_id) context = RequestContext(request, { 'has_add_permission': permissions.has_page_add_permission(request), 'site_languages': languages, }) filtered = 'filtered' in request.REQUEST context.update(get_admin_menu_item_context(request, page, filtered, language)) # add mimetype to help out IE if DJANGO_1_4: return render_to_response(template, context, mimetype="text/html; charset=utf-8") else: return render_to_response(template, context, content_type="text/html; charset=utf-8")
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2008 Resolver Systems Ltd # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. from django.core import mail from django.test import TestCase from django.contrib.auth.models import User from sessionprofile.models import SessionProfile class TestSessionProfileMaintained(TestCase): def setUp(self): super(TestCase, self).setUp() self.user = User.objects.create_user( "harold", "harold@example.com", "p455w0rd" ) def assertLoggedIn(self): self.assertNotEqual(self.client.session.get("_auth_user_id"), None) def assertNotLoggedIn(self): self.assertEquals(self.client.session.get("_auth_user_id"), None) def testProfileCreatedAndMaintained(self): def GetSessionID(): return self.client.cookies["sessionid"].value # Harold is not logged in. page = self.client.get("/admin/") self.assertNotLoggedIn() # There is no username associated with his session in the django_session table. 
sessionProfile = SessionProfile.objects.get(session__session_key=GetSessionID()) self.assertEqual(sessionProfile.user, None) # He logs in. self.client.login(username="harold", password="p455w0rd") page = self.client.get("/admin/") # His username is now associated with his session. sessionProfile = SessionProfile.objects.get(session__session_key=GetSessionID()) self.assertEqual(sessionProfile.user, self.user) # He logs out self.client.logout() page = self.client.get("/admin/") # The session is disassociated. sessionProfile = SessionProfile.objects.get(session__session_key=GetSessionID()) self.assertEqual(sessionProfile.user, None)
unknown
codeparrot/codeparrot-clean
from django.template import TemplateSyntaxError from django.test import SimpleTestCase from ..utils import setup class WidthRatioTagTests(SimpleTestCase): libraries = {"custom": "template_tests.templatetags.custom"} @setup({"widthratio01": "{% widthratio a b 0 %}"}) def test_widthratio01(self): output = self.engine.render_to_string("widthratio01", {"a": 50, "b": 100}) self.assertEqual(output, "0") @setup({"widthratio02": "{% widthratio a b 100 %}"}) def test_widthratio02(self): output = self.engine.render_to_string("widthratio02", {"a": 0, "b": 0}) self.assertEqual(output, "0") @setup({"widthratio03": "{% widthratio a b 100 %}"}) def test_widthratio03(self): output = self.engine.render_to_string("widthratio03", {"a": 0, "b": 100}) self.assertEqual(output, "0") @setup({"widthratio04": "{% widthratio a b 100 %}"}) def test_widthratio04(self): output = self.engine.render_to_string("widthratio04", {"a": 50, "b": 100}) self.assertEqual(output, "50") @setup({"widthratio05": "{% widthratio a b 100 %}"}) def test_widthratio05(self): output = self.engine.render_to_string("widthratio05", {"a": 100, "b": 100}) self.assertEqual(output, "100") @setup({"widthratio06": "{% widthratio a b 100 %}"}) def test_widthratio06(self): """ 62.5 should round to 62 """ output = self.engine.render_to_string("widthratio06", {"a": 50, "b": 80}) self.assertEqual(output, "62") @setup({"widthratio07": "{% widthratio a b 100 %}"}) def test_widthratio07(self): """ 71.4 should round to 71 """ output = self.engine.render_to_string("widthratio07", {"a": 50, "b": 70}) self.assertEqual(output, "71") # Raise exception if we don't have 3 args, last one an integer @setup({"widthratio08": "{% widthratio %}"}) def test_widthratio08(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template("widthratio08") @setup({"widthratio09": "{% widthratio a b %}"}) def test_widthratio09(self): with self.assertRaises(TemplateSyntaxError): self.engine.render_to_string("widthratio09", {"a": 50, "b": 100}) 
@setup({"widthratio10": "{% widthratio a b 100.0 %}"}) def test_widthratio10(self): output = self.engine.render_to_string("widthratio10", {"a": 50, "b": 100}) self.assertEqual(output, "50") @setup({"widthratio11": "{% widthratio a b c %}"}) def test_widthratio11(self): """ #10043: widthratio should allow max_width to be a variable """ output = self.engine.render_to_string( "widthratio11", {"a": 50, "c": 100, "b": 100} ) self.assertEqual(output, "50") # #18739: widthratio should handle None args consistently with # non-numerics @setup({"widthratio12a": "{% widthratio a b c %}"}) def test_widthratio12a(self): output = self.engine.render_to_string( "widthratio12a", {"a": "a", "c": 100, "b": 100} ) self.assertEqual(output, "") @setup({"widthratio12b": "{% widthratio a b c %}"}) def test_widthratio12b(self): output = self.engine.render_to_string( "widthratio12b", {"a": None, "c": 100, "b": 100} ) self.assertEqual(output, "") @setup({"widthratio13a": "{% widthratio a b c %}"}) def test_widthratio13a(self): output = self.engine.render_to_string( "widthratio13a", {"a": 0, "c": 100, "b": "b"} ) self.assertEqual(output, "") @setup({"widthratio13b": "{% widthratio a b c %}"}) def test_widthratio13b(self): output = self.engine.render_to_string( "widthratio13b", {"a": 0, "c": 100, "b": None} ) self.assertEqual(output, "") @setup({"widthratio14a": "{% widthratio a b c %}"}) def test_widthratio14a(self): with self.assertRaises(TemplateSyntaxError): self.engine.render_to_string("widthratio14a", {"a": 0, "c": "c", "b": 100}) @setup({"widthratio14b": "{% widthratio a b c %}"}) def test_widthratio14b(self): with self.assertRaises(TemplateSyntaxError): self.engine.render_to_string("widthratio14b", {"a": 0, "c": None, "b": 100}) @setup({"widthratio15": '{% load custom %}{% widthratio a|noop:"x y" b 0 %}'}) def test_widthratio15(self): """ Test whitespace in filter argument """ output = self.engine.render_to_string("widthratio15", {"a": 50, "b": 100}) self.assertEqual(output, "0") # 
Widthratio with variable assignment @setup({"widthratio16": "{% widthratio a b 100 as variable %}-{{ variable }}-"}) def test_widthratio16(self): output = self.engine.render_to_string("widthratio16", {"a": 50, "b": 100}) self.assertEqual(output, "-50-") @setup({"widthratio17": "{% widthratio a b 100 as variable %}-{{ variable }}-"}) def test_widthratio17(self): output = self.engine.render_to_string("widthratio17", {"a": 100, "b": 100}) self.assertEqual(output, "-100-") @setup({"widthratio18": "{% widthratio a b 100 as %}"}) def test_widthratio18(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template("widthratio18") @setup({"widthratio19": "{% widthratio a b 100 not_as variable %}"}) def test_widthratio19(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template("widthratio19") @setup({"widthratio20": "{% widthratio a b 100 %}"}) def test_widthratio20(self): output = self.engine.render_to_string( "widthratio20", {"a": float("inf"), "b": float("inf")} ) self.assertEqual(output, "") @setup({"widthratio21": "{% widthratio a b 100 %}"}) def test_widthratio21(self): output = self.engine.render_to_string( "widthratio21", {"a": float("inf"), "b": 2} ) self.assertEqual(output, "") @setup({"t": "{% widthratio a b 100 as variable %}-{{ variable }}-"}) def test_zerodivisionerror_as_var(self): output = self.engine.render_to_string("t", {"a": 0, "b": 0}) self.assertEqual(output, "-0-") @setup({"t": "{% widthratio a b c as variable %}-{{ variable }}-"}) def test_typeerror_as_var(self): output = self.engine.render_to_string("t", {"a": "a", "c": 100, "b": 100}) self.assertEqual(output, "--")
python
github
https://github.com/django/django
tests/template_tests/syntax_tests/test_width_ratio.py
#!/usr/bin/env python # # Copyright (c) 2015 Intel Corporation. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of works must retain the original copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the original copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of Intel Corporation nor the names of its contributors # may be used to endorse or promote products derived from this work without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: # Li, Cici<cici.x.li@intel.com> import unittest import os, sys, commands import comm import time class TestSampleAppFunctions(unittest.TestCase): def test_stop(self): comm.setUp() app_name = "Memorygame" # Find whether the app have launched cmdacti = "adb -s " + comm.device + " shell dumpsys activity activities | grep org.xwalk.%s" % app_name.lower() launched = commands.getstatusoutput(cmdacti) if launched[0] != 0: print "Stop APK ---------------->%s App haven't launched, need to launch it!" 
% app_name cmdstart = "adb -s " + comm.device + " shell am start -n org.xwalk.%s/.%sActivity" % \ (app_name.lower(), app_name) comm.app_launch(cmdstart, self) time.sleep(1) cmdstop = "adb -s " + comm.device + " shell am force-stop org.xwalk.%s" % app_name.lower() comm.app_stop(cmdstop, self) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright 2017-present Iain Cambridge. # # Licensed under the MIT License(the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # https://opensource.org/licenses/MIT # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client from lxml import html from datetime import datetime import csv import os import time conn = http.client.HTTPSConnection("www.hockey-reference.com") def writeCsv(coach, years): if len(years) == 0: print("No years") return directory = "./stats/coach" filename = "%s/%s.csv" % (directory, coach) if not os.path.exists(directory): os.makedirs(directory) with open(filename, 'w', newline='') as csvfile: fieldnames = [] for fieldname in years[0]: fieldnames.append(fieldname) writer = csv.DictWriter(csvfile, fieldnames=fieldnames) writer.writeheader() for i in range(len(years)): writer.writerow(years[i]) def buildYears(tree): seasons = tree.xpath('//*[@id="coach"]/tbody/tr/th/text()') agesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[1]') teams = tree.xpath('//*[@id="coach"]/tbody/tr/td[2]/a/text()') leagues = tree.xpath('//*[@id="coach"]/tbody/tr/td[3]/a/text()') gamesPlayedPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[4]') winsPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[5]') losesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[6]') tiesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[7]') otLosesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[8]') pointsPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[9]') pointsPrecentagePre = tree.xpath('//*[@id="coach"]/tbody/tr/td[10]') finishPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[12]') playoffWinsPre = 
tree.xpath('//*[@id="coach"]/tbody/tr/td[13]') playoffLosesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[14]') playoffTiesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[15]') playoffWinLosePre = tree.xpath('//*[@id="coach"]/tbody/tr/td[16]') playoffNotesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[17]') ages = [x.text if x.text else 0 for x in agesPre] gamesPlayed = [x.text if x.text else 0 for x in gamesPlayedPre] wins = [x.text if x.text else 0 for x in winsPre] loses = [x.text if x.text else 0 for x in losesPre] ties = [x.text if x.text else 0 for x in tiesPre] otLoses = [x.text if x.text else 0 for x in otLosesPre] points = [x.text if x.text else 0 for x in pointsPre] pointsPercentages = [x.text if x.text else 0 for x in pointsPrecentagePre] finishes = [x.text if x.text else '' for x in finishPre] playoffWins = [x.text if x.text else 0 for x in playoffWinsPre] playoffLoses = [x.text if x.text else 0 for x in playoffLosesPre] playoffTies = [x.text if x.text else 0 for x in playoffTiesPre] playoffWinLose = [x.text if x.text else 0 for x in playoffWinLosePre] playoffNotes = [x.text if x.text else '' for x in playoffNotesPre] years = [] i = 0 while i < len(seasons): year = { "season": seasons[i], "leage": leagues[i], "team": teams[i], "games_played": gamesPlayed[i], "wins": wins[i], "loses": loses[i], "ties": ties[i], "overtime_loses": otLoses[i], "points": points[i], "points_percentage": pointsPercentages[i], "finishing_position": finishes[i], "playoff_note": playoffNotes[i], "playoff_wins": playoffWins[i], "playoff_loses": playoffLoses[i], "playoff_ties": playoffTies[i], "playoff_win_lose": playoffWinLose[i] } years.append(year) i = i + 1 return years def getHtml(url): time.sleep(2) conn.request("GET", url) response = conn.getresponse() print(url, response.status, response.reason) return response.read() def crawlCoach(url): content = getHtml(url) coachName = url.split("/")[2].split(".")[0] tree = html.fromstring(content) years = buildYears(tree) 
writeCsv(coachName, years) content = getHtml("/coaches/") tree = html.fromstring(content) unBoldCoachPages = tree.xpath('//*[@id="coaches"]/tbody/tr/th/a/@href') boldCoachPages = tree.xpath('//*[@id="coaches"]/tbody/tr/th//strong/a/@href') coachPages = unBoldCoachPages + boldCoachPages numberFound = len(coachPages) print("Found %s" % (numberFound)) for i in range(numberFound): crawlCoach(coachPages[i])
unknown
codeparrot/codeparrot-clean
import { AccessorDeclaration, canHaveDecorators, cast, ClassLikeDeclaration, concatenate, ConstructorDeclaration, DeclarationName, Diagnostics, factory, FileTextChanges, findAncestor, getDecorators, getEffectiveModifierFlags, getFirstConstructorWithBody, getLocaleSpecificMessage, getTokenAtPosition, getTypeAnnotationNode, getUniqueName, hasEffectiveReadonlyModifier, hasStaticModifier, Identifier, isClassLike, isElementAccessExpression, isFunctionLike, isIdentifier, isParameterPropertyDeclaration, isPropertyAccessExpression, isPropertyAssignment, isPropertyDeclaration, isSourceFileJS, isStringLiteral, isUnionTypeNode, isWriteAccess, ModifierFlags, ModifierLike, Mutable, Node, nodeOverlapsWithStartEnd, ObjectLiteralExpression, ParameterPropertyDeclaration, Program, PropertyAssignment, PropertyDeclaration, refactor, SourceFile, startsWithUnderscore, StringLiteral, suppressLeadingAndTrailingTrivia, SyntaxKind, textChanges, TypeNode, } from "../_namespaces/ts.js"; /** @internal */ export type AcceptedDeclaration = ParameterPropertyDeclaration | PropertyDeclaration | PropertyAssignment; /** @internal */ export type AcceptedNameType = Identifier | StringLiteral; /** @internal */ export type ContainerDeclaration = ClassLikeDeclaration | ObjectLiteralExpression; /** @internal */ export type AccessorOrRefactorErrorInfo = AccessorInfo | refactor.RefactorErrorInfo; /** @internal */ export interface AccessorInfo { readonly container: ContainerDeclaration; readonly isStatic: boolean; readonly isReadonly: boolean; readonly type: TypeNode | undefined; readonly declaration: AcceptedDeclaration; readonly fieldName: AcceptedNameType; readonly accessorName: AcceptedNameType; readonly originalName: string; readonly renameAccessor: boolean; } /** @internal */ export function generateAccessorFromProperty(file: SourceFile, program: Program, start: number, end: number, context: textChanges.TextChangesContext, _actionName: string): FileTextChanges[] | undefined { const fieldInfo = 
getAccessorConvertiblePropertyAtPosition(file, program, start, end); if (!fieldInfo || refactor.isRefactorErrorInfo(fieldInfo)) return undefined; const changeTracker = textChanges.ChangeTracker.fromContext(context); const { isStatic, isReadonly, fieldName, accessorName, originalName, type, container, declaration } = fieldInfo; suppressLeadingAndTrailingTrivia(fieldName); suppressLeadingAndTrailingTrivia(accessorName); suppressLeadingAndTrailingTrivia(declaration); suppressLeadingAndTrailingTrivia(container); let accessorModifiers: readonly ModifierLike[] | undefined; let fieldModifiers: readonly ModifierLike[] | undefined; if (isClassLike(container)) { const modifierFlags = getEffectiveModifierFlags(declaration); if (isSourceFileJS(file)) { const modifiers = factory.createModifiersFromModifierFlags(modifierFlags); accessorModifiers = modifiers; fieldModifiers = modifiers; } else { accessorModifiers = factory.createModifiersFromModifierFlags(prepareModifierFlagsForAccessor(modifierFlags)); fieldModifiers = factory.createModifiersFromModifierFlags(prepareModifierFlagsForField(modifierFlags)); } if (canHaveDecorators(declaration)) { fieldModifiers = concatenate(getDecorators(declaration), fieldModifiers); } } updateFieldDeclaration(changeTracker, file, declaration, type, fieldName, fieldModifiers); const getAccessor = generateGetAccessor(fieldName, accessorName, type, accessorModifiers, isStatic, container); suppressLeadingAndTrailingTrivia(getAccessor); insertAccessor(changeTracker, file, getAccessor, declaration, container); if (isReadonly) { // readonly modifier only existed in classLikeDeclaration const constructor = getFirstConstructorWithBody(container as ClassLikeDeclaration); if (constructor) { updateReadonlyPropertyInitializerStatementConstructor(changeTracker, file, constructor, fieldName.text, originalName); } } else { const setAccessor = generateSetAccessor(fieldName, accessorName, type, accessorModifiers, isStatic, container); 
suppressLeadingAndTrailingTrivia(setAccessor); insertAccessor(changeTracker, file, setAccessor, declaration, container); } return changeTracker.getChanges(); } function isConvertibleName(name: DeclarationName): name is AcceptedNameType { return isIdentifier(name) || isStringLiteral(name); } function isAcceptedDeclaration(node: Node): node is AcceptedDeclaration { return isParameterPropertyDeclaration(node, node.parent) || isPropertyDeclaration(node) || isPropertyAssignment(node); } function createPropertyName(name: string, originalName: AcceptedNameType) { return isIdentifier(originalName) ? factory.createIdentifier(name) : factory.createStringLiteral(name); } function createAccessorAccessExpression(fieldName: AcceptedNameType, isStatic: boolean, container: ContainerDeclaration) { const leftHead = isStatic ? (container as ClassLikeDeclaration).name! : factory.createThis(); // TODO: GH#18217 return isIdentifier(fieldName) ? factory.createPropertyAccessExpression(leftHead, fieldName) : factory.createElementAccessExpression(leftHead, factory.createStringLiteralFromNode(fieldName)); } function prepareModifierFlagsForAccessor(modifierFlags: ModifierFlags): ModifierFlags { modifierFlags &= ~ModifierFlags.Readonly; // avoid Readonly modifier because it will convert to get accessor modifierFlags &= ~ModifierFlags.Private; if (!(modifierFlags & ModifierFlags.Protected)) { modifierFlags |= ModifierFlags.Public; } return modifierFlags; } function prepareModifierFlagsForField(modifierFlags: ModifierFlags): ModifierFlags { modifierFlags &= ~ModifierFlags.Public; modifierFlags &= ~ModifierFlags.Protected; modifierFlags |= ModifierFlags.Private; return modifierFlags; } /** @internal */ export function getAccessorConvertiblePropertyAtPosition(file: SourceFile, program: Program, start: number, end: number, considerEmptySpans = true): AccessorOrRefactorErrorInfo | undefined { const node = getTokenAtPosition(file, start); const cursorRequest = start === end && considerEmptySpans; 
const declaration = findAncestor(node.parent, isAcceptedDeclaration); // make sure declaration have AccessibilityModifier or Static Modifier or Readonly Modifier const meaning = ModifierFlags.AccessibilityModifier | ModifierFlags.Static | ModifierFlags.Readonly; if (!declaration || (!(nodeOverlapsWithStartEnd(declaration.name, file, start, end) || cursorRequest))) { return { error: getLocaleSpecificMessage(Diagnostics.Could_not_find_property_for_which_to_generate_accessor), }; } if (!isConvertibleName(declaration.name)) { return { error: getLocaleSpecificMessage(Diagnostics.Name_is_not_valid), }; } if (((getEffectiveModifierFlags(declaration) & ModifierFlags.Modifier) | meaning) !== meaning) { return { error: getLocaleSpecificMessage(Diagnostics.Can_only_convert_property_with_modifier), }; } const name = declaration.name.text; const startWithUnderscore = startsWithUnderscore(name); const fieldName = createPropertyName(startWithUnderscore ? name : getUniqueName(`_${name}`, file), declaration.name); const accessorName = createPropertyName(startWithUnderscore ? getUniqueName(name.substring(1), file) : name, declaration.name); return { isStatic: hasStaticModifier(declaration), isReadonly: hasEffectiveReadonlyModifier(declaration), type: getDeclarationType(declaration, program), container: declaration.kind === SyntaxKind.Parameter ? 
declaration.parent.parent : declaration.parent, originalName: (declaration.name as AcceptedNameType).text, declaration, fieldName, accessorName, renameAccessor: startWithUnderscore, }; } function generateGetAccessor(fieldName: AcceptedNameType, accessorName: AcceptedNameType, type: TypeNode | undefined, modifiers: readonly ModifierLike[] | undefined, isStatic: boolean, container: ContainerDeclaration) { return factory.createGetAccessorDeclaration( modifiers, accessorName, [], type, factory.createBlock([ factory.createReturnStatement( createAccessorAccessExpression(fieldName, isStatic, container), ), ], /*multiLine*/ true), ); } function generateSetAccessor(fieldName: AcceptedNameType, accessorName: AcceptedNameType, type: TypeNode | undefined, modifiers: readonly ModifierLike[] | undefined, isStatic: boolean, container: ContainerDeclaration) { return factory.createSetAccessorDeclaration( modifiers, accessorName, [factory.createParameterDeclaration( /*modifiers*/ undefined, /*dotDotDotToken*/ undefined, factory.createIdentifier("value"), /*questionToken*/ undefined, type, )], factory.createBlock([ factory.createExpressionStatement( factory.createAssignment( createAccessorAccessExpression(fieldName, isStatic, container), factory.createIdentifier("value"), ), ), ], /*multiLine*/ true), ); } function updatePropertyDeclaration(changeTracker: textChanges.ChangeTracker, file: SourceFile, declaration: PropertyDeclaration, type: TypeNode | undefined, fieldName: AcceptedNameType, modifiers: readonly ModifierLike[] | undefined) { const property = factory.updatePropertyDeclaration( declaration, modifiers, fieldName, declaration.questionToken || declaration.exclamationToken, type, declaration.initializer, ); changeTracker.replaceNode(file, declaration, property); } function updatePropertyAssignmentDeclaration(changeTracker: textChanges.ChangeTracker, file: SourceFile, declaration: PropertyAssignment, fieldName: AcceptedNameType) { let assignment = 
factory.updatePropertyAssignment(declaration, fieldName, declaration.initializer); // Remove grammar errors from assignment if (assignment.modifiers || assignment.questionToken || assignment.exclamationToken) { if (assignment === declaration) assignment = factory.cloneNode(assignment); (assignment as Mutable<PropertyAssignment>).modifiers = undefined; (assignment as Mutable<PropertyAssignment>).questionToken = undefined; (assignment as Mutable<PropertyAssignment>).exclamationToken = undefined; } changeTracker.replacePropertyAssignment(file, declaration, assignment); } function updateFieldDeclaration(changeTracker: textChanges.ChangeTracker, file: SourceFile, declaration: AcceptedDeclaration, type: TypeNode | undefined, fieldName: AcceptedNameType, modifiers: readonly ModifierLike[] | undefined) { if (isPropertyDeclaration(declaration)) { updatePropertyDeclaration(changeTracker, file, declaration, type, fieldName, modifiers); } else if (isPropertyAssignment(declaration)) { updatePropertyAssignmentDeclaration(changeTracker, file, declaration, fieldName); } else { changeTracker.replaceNode(file, declaration, factory.updateParameterDeclaration(declaration, modifiers, declaration.dotDotDotToken, cast(fieldName, isIdentifier), declaration.questionToken, declaration.type, declaration.initializer)); } } function insertAccessor(changeTracker: textChanges.ChangeTracker, file: SourceFile, accessor: AccessorDeclaration, declaration: AcceptedDeclaration, container: ContainerDeclaration) { isParameterPropertyDeclaration(declaration, declaration.parent) ? changeTracker.insertMemberAtStart(file, container as ClassLikeDeclaration, accessor) : isPropertyAssignment(declaration) ? 
changeTracker.insertNodeAfterComma(file, declaration, accessor) : changeTracker.insertNodeAfter(file, declaration, accessor); } function updateReadonlyPropertyInitializerStatementConstructor(changeTracker: textChanges.ChangeTracker, file: SourceFile, constructor: ConstructorDeclaration, fieldName: string, originalName: string) { if (!constructor.body) return; constructor.body.forEachChild(function recur(node) { if ( isElementAccessExpression(node) && node.expression.kind === SyntaxKind.ThisKeyword && isStringLiteral(node.argumentExpression) && node.argumentExpression.text === originalName && isWriteAccess(node) ) { changeTracker.replaceNode(file, node.argumentExpression, factory.createStringLiteral(fieldName)); } if (isPropertyAccessExpression(node) && node.expression.kind === SyntaxKind.ThisKeyword && node.name.text === originalName && isWriteAccess(node)) { changeTracker.replaceNode(file, node.name, factory.createIdentifier(fieldName)); } if (!isFunctionLike(node) && !isClassLike(node)) { node.forEachChild(recur); } }); } function getDeclarationType(declaration: AcceptedDeclaration, program: Program): TypeNode | undefined { const typeNode = getTypeAnnotationNode(declaration); if (isPropertyDeclaration(declaration) && typeNode && declaration.questionToken) { const typeChecker = program.getTypeChecker(); const type = typeChecker.getTypeFromTypeNode(typeNode); if (!typeChecker.isTypeAssignableTo(typeChecker.getUndefinedType(), type)) { const types = isUnionTypeNode(typeNode) ? typeNode.types : [typeNode]; return factory.createUnionTypeNode([...types, factory.createKeywordTypeNode(SyntaxKind.UndefinedKeyword)]); } } return typeNode; }
typescript
github
https://github.com/microsoft/TypeScript
src/services/codefixes/generateAccessors.ts
use crate::spec::{Arch, StackProbeType, Target, TargetMetadata, base}; pub(crate) fn target() -> Target { let mut base = base::teeos::opts(); base.features = "+strict-align,+neon".into(); base.max_atomic_width = Some(128); base.stack_probes = StackProbeType::Inline; Target { llvm_target: "aarch64-unknown-none".into(), metadata: TargetMetadata { description: Some("ARM64 TEEOS".into()), tier: Some(3), host_tools: Some(false), std: None, // ? }, pointer_width: 64, data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32".into(), arch: Arch::AArch64, options: base, } }
rust
github
https://github.com/rust-lang/rust
compiler/rustc_target/src/spec/targets/aarch64_unknown_teeos.rs
//===--- OperatorNameLookup.cpp - Operator and Precedence Lookup *- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2020 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // // This file implements interfaces for performing operator and precedence group // declaration lookup. // //===----------------------------------------------------------------------===// #include "swift/AST/OperatorNameLookup.h" #include "swift/AST/ASTContext.h" #include "swift/AST/DiagnosticsSema.h" #include "swift/AST/ImportCache.h" #include "swift/AST/NameLookupRequests.h" #include "swift/AST/SourceFile.h" #include "swift/Basic/Assertions.h" #define DEBUG_TYPE "operator-name-lookup" using namespace swift; using namespace swift::namelookup; template <typename T> static TinyPtrVector<T *> lookupOperatorImpl( DeclContext *moduleDC, Identifier name, llvm::function_ref<void(OperatorLookupDescriptor, TinyPtrVector<T *> &)> lookupDirect) { assert(moduleDC->isModuleScopeContext()); auto &ctx = moduleDC->getASTContext(); // First try to use the new operator lookup logic. { TinyPtrVector<T *> results; for (auto &import : getAllImports(moduleDC)) { auto *mod = import.importedModule; lookupDirect(OperatorLookupDescriptor::forModule(mod, name), results); } // If there aren't multiple results, or the new logic is completely enabled, // perform shadowing checks and return. Otherwise fall through to the old // logic. if (results.size() <= 1 || ctx.LangOpts.EnableNewOperatorLookup) { removeShadowedDecls(results, moduleDC); return std::move(results); } } // There are three stages to the old operator lookup: // 1) Lookup directly in the file. 
// 2) Lookup in the file's direct imports (not looking through exports). // 3) Lookup in other files. // If any step yields results, we return them without performing the next // steps. Note that this means when we come to look in other files, we can // accumulate ambiguities across files, unlike when looking in the original // file, where we can bail early. TinyPtrVector<T *> results; SmallPtrSet<ModuleDecl *, 8> visitedModules; // Protect against source files that contrive to import their own modules. visitedModules.insert(moduleDC->getParentModule()); auto lookupInFileAndImports = [&](FileUnit *file, bool includePrivate) -> bool { // If we find something in the file itself, bail without checking imports. lookupDirect(OperatorLookupDescriptor::forFile(file, name), results); if (!results.empty()) return true; // Only look into SourceFile imports. auto *SF = dyn_cast<SourceFile>(file); if (!SF) return false; for (auto &import : SF->getImports()) { auto *mod = import.module.importedModule; if (!visitedModules.insert(mod).second) continue; bool isExported = import.options.contains(ImportFlags::Exported); if (!includePrivate && !isExported) continue; lookupDirect(OperatorLookupDescriptor::forModule(mod, name), results); } return !results.empty(); }; // If we have a SourceFile context, search it and its private imports. auto *SF = dyn_cast<SourceFile>(moduleDC); if (SF && lookupInFileAndImports(SF, /*includePrivate*/ true)) return std::move(results); // Search all the other files of the module, this time excluding private // imports. 
auto *mod = moduleDC->getParentModule(); for (auto *file : mod->getFiles()) { if (file != SF) lookupInFileAndImports(file, /*includePrivate*/ false); } return std::move(results); } static TinyPtrVector<OperatorDecl *> lookupOperator(DeclContext *moduleDC, Identifier name, OperatorFixity fixity) { auto &eval = moduleDC->getASTContext().evaluator; return lookupOperatorImpl<OperatorDecl>( moduleDC, name, [&](OperatorLookupDescriptor desc, TinyPtrVector<OperatorDecl *> &results) { auto ops = evaluateOrDefault( eval, DirectOperatorLookupRequest{desc, fixity}, {}); for (auto *op : ops) results.push_back(op); }); } void InfixOperatorLookupResult::diagnoseAmbiguity(SourceLoc loc) const { auto &ctx = ModuleDC->getASTContext(); ctx.Diags.diagnose(loc, diag::ambiguous_operator_decls); for (auto *op : *this) op->diagnose(diag::found_this_operator_decl); } void InfixOperatorLookupResult::diagnoseMissing(SourceLoc loc, bool forBuiltin) const { ModuleDC->getASTContext().Diags.diagnose(loc, diag::unknown_binop); } TinyPtrVector<InfixOperatorDecl *> LookupInfixOperatorRequest::evaluate(Evaluator &evaluator, OperatorLookupDescriptor desc) const { auto ops = lookupOperator(desc.getDC(), desc.name, OperatorFixity::Infix); // If we have a single result, return it directly. This avoids having to look // up its precedence group. if (ops.size() == 1) return {cast<InfixOperatorDecl>(ops[0])}; // Otherwise take the first infix operator we see with a particular precedence // group. This avoids an ambiguity if two different modules declare the same // operator with the same precedence. 
TinyPtrVector<InfixOperatorDecl *> results; SmallPtrSet<PrecedenceGroupDecl *, 2> groups; for (auto *op : ops) { auto *infix = cast<InfixOperatorDecl>(op); if (groups.insert(infix->getPrecedenceGroup()).second) results.push_back(infix); } return results; } InfixOperatorLookupResult DeclContext::lookupInfixOperator(Identifier name) const { auto desc = OperatorLookupDescriptor::forDC(this, name); auto ops = evaluateOrDefault(getASTContext().evaluator, LookupInfixOperatorRequest{desc}, {}); // Wrap the result in a InfixOperatorLookupResult. The request doesn't // return this directly to avoid unnecessarily caching the name and context. return InfixOperatorLookupResult(this, name, std::move(ops)); } PrefixOperatorDecl * LookupPrefixOperatorRequest::evaluate(Evaluator &evaluator, OperatorLookupDescriptor desc) const { auto ops = lookupOperator(desc.getDC(), desc.name, OperatorFixity::Prefix); if (ops.empty()) return nullptr; // We can return the first prefix operator. All prefix operators of the same // name are equivalent. return cast<PrefixOperatorDecl>(ops[0]); } PrefixOperatorDecl *DeclContext::lookupPrefixOperator(Identifier name) const { auto desc = OperatorLookupDescriptor::forDC(this, name); return evaluateOrDefault(getASTContext().evaluator, LookupPrefixOperatorRequest{desc}, nullptr); } PostfixOperatorDecl * LookupPostfixOperatorRequest::evaluate(Evaluator &evaluator, OperatorLookupDescriptor desc) const { auto ops = lookupOperator(desc.getDC(), desc.name, OperatorFixity::Postfix); if (ops.empty()) return nullptr; // We can return the first postfix operator. All postfix operators of the same // name are equivalent. 
return cast<PostfixOperatorDecl>(ops[0]); } PostfixOperatorDecl *DeclContext::lookupPostfixOperator(Identifier name) const { auto desc = OperatorLookupDescriptor::forDC(this, name); return evaluateOrDefault(getASTContext().evaluator, LookupPostfixOperatorRequest{desc}, nullptr); } void PrecedenceGroupLookupResult::diagnoseAmbiguity(SourceLoc loc) const { auto &ctx = ModuleDC->getASTContext(); ctx.Diags.diagnose(loc, diag::ambiguous_precedence_groups); for (auto *group : *this) group->diagnose(diag::found_this_precedence_group); } void PrecedenceGroupLookupResult::diagnoseMissing(SourceLoc loc, bool forBuiltin) const { auto &ctx = ModuleDC->getASTContext(); auto diagID = forBuiltin ? diag::missing_builtin_precedence_group : diag::unknown_precedence_group; ctx.Diags.diagnose(loc, diagID, Name); } TinyPtrVector<PrecedenceGroupDecl *> LookupPrecedenceGroupRequest::evaluate(Evaluator &evaluator, OperatorLookupDescriptor desc) const { return lookupOperatorImpl<PrecedenceGroupDecl>( desc.getDC(), desc.name, [&](OperatorLookupDescriptor desc, TinyPtrVector<PrecedenceGroupDecl *> &results) { auto groups = evaluateOrDefault( evaluator, DirectPrecedenceGroupLookupRequest{desc}, {}); for (auto *group : groups) results.push_back(group); }); } PrecedenceGroupLookupResult DeclContext::lookupPrecedenceGroup(Identifier name) const { auto desc = OperatorLookupDescriptor::forDC(this, name); auto groups = evaluateOrDefault(getASTContext().evaluator, LookupPrecedenceGroupRequest{desc}, {}); // Wrap the result in a PrecedenceGroupLookupResult. The request doesn't // return this directly to avoid unnecessarily caching the name and context. return PrecedenceGroupLookupResult(this, name, std::move(groups)); }
cpp
github
https://github.com/apple/swift
lib/AST/OperatorNameLookup.cpp
from tornado import gen from . import spotifyMix as spot from lib.database.auth import save_token from lib.basehandler import OAuthRequestHandler class SpotifyAuth(OAuthRequestHandler, spot.SpotifyOAuth2Mixin): scope = [ 'playlist-read-private', 'playlist-read-collaborative', 'user-follow-read', 'user-library-read', 'user-read-birthdate', 'user-read-email', ] def initialize(self): super(SpotifyAuth, self).setProvider("spotify") def startFlow(self): uri = '{0}/auth/spotify'.format(self.application.settings['base_url']) self.authorize_redirect( redirect_uri=uri, client_id=self.application.settings['spotify_oauth']['key'], response_type='code', scope=self.scope, ) @gen.coroutine def handleAuthCallBack(self, code, user_id): redir_uri = '{0}/auth/spotify'.format( self.application.settings['base_url']) access = yield self.get_authenticated_user( redirect_uri=redir_uri, code=code) # from here use spotipy - pass it over to a scraper context yield save_token( provider="spotify", user_id=user_id, token_data=access )
unknown
codeparrot/codeparrot-clean
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * * SPDX-License-Identifier: curl * ***************************************************************************/ /* <DESC> * FTP upload a file from memory * </DESC> */ #include <stdio.h> #include <string.h> #include <curl/curl.h> static const char data[] = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " "___ rhoncus odio id venenatis volutpat. Vestibulum dapibus " "bibendum ullamcorper. Maecenas finibus elit augue, vel " "condimentum odio maximus nec. In hac habitasse platea dictumst. " "Vestibulum vel dolor et turpis rutrum finibus ac at nulla. " "Vivamus nec neque ac elit blandit pretium vitae maximus ipsum. " "Quisque sodales magna vel erat auctor, sed pellentesque nisi " "rhoncus. Donec vehicula maximus pretium. 
Aliquam eu tincidunt " "lorem."; struct WriteThis { const char *readptr; size_t sizeleft; }; static size_t read_cb(char *ptr, size_t size, size_t nmemb, void *userp) { struct WriteThis *upload = (struct WriteThis *)userp; size_t max = size * nmemb; if(max < 1) return 0; if(upload->sizeleft) { size_t copylen = max; if(copylen > upload->sizeleft) copylen = upload->sizeleft; memcpy(ptr, upload->readptr, copylen); upload->readptr += copylen; upload->sizeleft -= copylen; return copylen; } return 0; /* no more data left to deliver */ } int main(void) { CURL *curl; CURLcode result; struct WriteThis upload; upload.readptr = data; upload.sizeleft = strlen(data); /* In Windows, this inits the Winsock stuff */ result = curl_global_init(CURL_GLOBAL_DEFAULT); /* Check for errors */ if(result != CURLE_OK) { fprintf(stderr, "curl_global_init() failed: %s\n", curl_easy_strerror(result)); return 1; } /* get a curl handle */ curl = curl_easy_init(); if(curl) { /* First set the URL, the target file */ curl_easy_setopt(curl, CURLOPT_URL, "ftp://example.com/path/to/upload/file"); /* User and password for the FTP login */ curl_easy_setopt(curl, CURLOPT_USERPWD, "login:secret"); /* Now specify we want to UPLOAD data */ curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L); /* we want to use our own read function */ curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_cb); /* pointer to pass to our read function */ curl_easy_setopt(curl, CURLOPT_READDATA, &upload); /* get verbose debug output please */ curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); /* Set the expected upload size. */ curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)upload.sizeleft); /* Perform the request, result gets the return code */ result = curl_easy_perform(curl); /* Check for errors */ if(result != CURLE_OK) fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(result)); /* always cleanup */ curl_easy_cleanup(curl); } curl_global_cleanup(); return (int)result; }
c
github
https://github.com/curl/curl
docs/examples/ftpuploadfrommem.c
""" routines for computing rates from one subset of a graph to another """ import itertools from collections import defaultdict import networkx as nx import numpy as np def kmcgraph_from_rates(rates): """create a graph for input to GraphReduction from a dictionary of rates Parameters ---------- rates : dict a dictionary of rates. the keys are tuples of nodes (u,v), the values are the rate constants from u to v. Returns ------- graph : networkx.DiGraph object A directed graph specifying the connectivity, the initial transition probabilities and the occupation times. The graph must have all the data in the correct format. Each node must have the following keys in their attributes dictionary:: "tau" : occupation time Each edge between nodes u and v must have the following keys in their attributes dictionary: "P" : transition probability from u to v """ graph = nx.DiGraph() sumk = defaultdict(lambda: 0.) # compute the sum of the outgoing rates for each node for edge, rate in rates.iteritems(): u, v = edge sumk[u] += rate # add nodes to the rate graph and assign waiting time and Puu for u, sumk_u in sumk.iteritems(): tau = 1. / sumk_u Puu = 0. graph.add_node(u, tau=tau) graph.add_edge(u, u, P=Puu) # add edges to rate graph and assign transition probabilities for edge, rate in rates.iteritems(): u, v = edge tau_u = graph.node[u]["tau"] Puv = rate * tau_u graph.add_edge(u, v, P=Puv) return graph class GraphReduction(object): """ class to apply the graph reduction method for finding transition rates between two groups of nodes Parameters ---------- rate_constants : dict a dictionary of rates. the keys are tuples of nodes (u,v), the values are the rate constants from u to v. A, B : iterables Groups of nodes specifying the reactant and product groups. The rates returned will be the rate from A to B and vice versa. weights : dict Dictionary with nodes as keys and weights as values. The weights are the equilibrium occupation probabilities of the nodes in A and B. 
They are used to do the weighted mean for the final average over inverse mean first passage times. Notes ----- This follows the new graph transformation procedure (NGT) described by David Wales, J. Chem. Phys., 2009 http://dx.doi.org/10.1063/1.3133782 The rate, rAB computed by this calculation (returned by self.get_final_rates) is the inverse mean first passage time averaged over the states in A """ def __init__(self, rate_constants, A, B, debug=False, weights=None): self.graph = kmcgraph_from_rates(rate_constants) self.A = set(A) self.B = set(B) if weights is None: # everything has weight 1 self.weights = defaultdict(lambda : 1.) else: self.weights = weights self._final_Pxx = dict() self._final_tau = dict() self.debug = debug self.initial_check_graph() self.check_graph() self._initial_tau = dict([(u,data["tau"]) for u, data in self.graph.nodes(data=True)]) def _remove_nodes(self, nodes): nodes = list(nodes) # The calculation is faster if we remove the nodes with the least edges first # TODO: should probably recalculate which nodes have fewest edges at each step nodes.sort(key=lambda x: self.graph.in_degree(x) + self.graph.out_degree(x)) for x in nodes: self._remove_node(x) def _phase_one_remove_intermediates(self): intermediates = set(self.graph.nodes()) intermediates.difference_update(self.A) intermediates.difference_update(self.B) self._remove_nodes(intermediates) def _get_final_rate(self, group): # should maybe be careful when Pxx is very close to 1. rate = sum(( (1.-self._final_Pxx[x]) / self._final_tau[x] * self.weights[x] for x in group)) norm = sum((self.weights[x] for x in group)) return rate / norm def get_committor_probabilityAB(self, x): """return the committor probability for node x x must be in A or in B. If x is in A return the the probability that a trajectory starting at x gets to B before returning to x. If x is in B return the the probability that a trajectory starting at x gets to A before returning to x. 
""" if len(self._final_Pxx) == 0: raise RuntimeError("you must call compute_rates before calling this function") try: return 1. - self._final_Pxx[x] except KeyError: if x not in self.A and x not in self.B: raise ValueError("x is not in A or in B. Use compute_committor_probability() if x is an intermediate") def get_rate_AB(self): """Return the transition rate from A to B This is the inverse mean first passage time averaged over the elements in A""" return self._get_final_rate(self.A) def get_rate_BA(self): """Return the transition rate from B to A This is the inverse mean first passage time averaged over the elements in B""" return self._get_final_rate(self.B) def compute_rates(self): """do the computation to compute the rates""" self._phase_one_remove_intermediates() self._phase_two() # self.rateAB, self.rateBA = self.get_final_rates() # return self.rateAB, self.rateBA def get_rate_AB_SS(self): rate = 0. for a in self.A: PaB = sum((data["P"] for x, b, data in self.graph.out_edges(a, data=True) if b in self.B )) rate += PaB * self.weights[a] / self._initial_tau[a] norm = sum((self.weights[x] for x in self.A)) return rate / norm def _reduce_all_iterator(self, nodes, restore_graph=True): """for each node in nodes remove all other nodes in nodes and yield the remaining node The simplest way to do this runs in (worst case) time order len(nodes)**4. This algorithm runs in (worst case) time order len(nodes)**3. 
""" if len(nodes) == 0: return if restore_graph: full_graph = self.graph.copy() nodes = list(nodes) nodes.sort(key=lambda x: self.graph.in_degree(x) + self.graph.out_degree(x)) while True: if len(nodes) == 1: yield nodes[0] break graph_copy = self.graph.copy() # remove all nodes except the one at index 0 u = nodes.pop(0) self._remove_nodes(nodes) yield u # restore the graph and remove the node at index 0 self.graph = graph_copy self._remove_node(u) if restore_graph: # restore the graph to it's original state self.graph = full_graph def _phase_two_group(self, full_graph, group): """ for each element a in the group, remove all other elements in the group then record the node attributes for later analysis. """ for a in self._reduce_all_iterator(group): if self.graph.out_degree(a) <= 1: raise Exception("node %s is not connected" % a) adata = self.graph.node[a] # in the paper, to avoid numerical errors DJW computes # 1-Pxx as sum_j Pxj if Pxx > .99 Paa = self._get_edge_data(a, a)["P"] if Paa > 0.999: print "warning, Pxx is very large (%s), numerical precision problems might be in your future" % Paa self._final_Pxx[a] = Paa self._final_tau[a] = adata["tau"] def _phase_two(self): """ in this second phase we deal with reactant and product sets (A and B) that have more than 1 element. This follows the text above equation 19 in Wales 2009. This is called after all the intermediates have been decimated. for each element a in set A, compute tauF_a and PF_aa by decimating all nodes in A except a. 
Then the final rate from A to B kAB = (1/p_eq_A) sum_a PF_aB / tauF_a * p_eq_a where in the above PF_aB = 1-PF_aa and p_eq_a is the equilibrium probability to be in state a and p_eq_A = sum_a p_eq_a The inverse rate is, symmetrically, kBA = (1/p_eq_B) sum_b PF_bA / tauF_b * p_eq_b """ full_graph = self.graph.copy() self._phase_two_group(full_graph, self.A) self._phase_two_group(full_graph, self.B) # restore the full graph self.graph = full_graph def _add_edge(self, u, v): """add an edge to the graph and initialize it with the appropriate data""" if self.debug: print " creating edge", u, v self.graph.add_edge(u, v, P=0.) return self._get_edge_data(u, v) def _get_edge_data(self, u, v): return self.graph[u][v] def _update_edge(self, u, v, x, Pxx): """ update the probabilities of transition between u and v upon removing node x Puv -> Puv + Pux * Pxv / (1-Pxx) u==v is fine, but u==x or v==x is not """ assert u != x assert v != x try: uxdata = self._get_edge_data(u, x) xvdata = self._get_edge_data(x, v) except KeyError: # if either Pux or Pxv does not exist then nothing need be done return Pux = uxdata["P"] Pxv = xvdata["P"] # if the edge u, v doesn't exist, create it. 
try: uvdata = self._get_edge_data(u, v) except KeyError: uvdata = self._add_edge(u, v) if self.debug: Puvold = uvdata["P"] # update transition probability uvdata["P"] += Pux * Pxv / (1.-Pxx) if self.debug: print " updating edge", u, "->", v, ":", Puvold, "->", uvdata["P"] def _update_node(self, u, x, tau_x, Pxx): """ update the waiting time and Puu for node u upon removing node x tauu -> tauu + Pux * taux / (1-Pxx) Puu -> Puu + Pux * Pxu / (1-Pxx) """ assert x != u udata = self.graph.node[u] Pux = self._get_edge_data(u, x)["P"] if self.debug: tauold = udata["tau"] # update the waiting time at u udata["tau"] += Pux * tau_x / (1.-Pxx) if self.debug: print " updating node data", u, "tau", tauold, "->", udata["tau"] def _remove_node(self, x): """ remove node x from the graph and update the neighbors of x """ neibs = set(self.graph.successors(x)).union(self.graph.predecessors(x)) neibs.remove(x) tau_x = self.graph.node[x]["tau"] # in the paper, to avoid numerical errors DJW computes # 1-Pxx as sum_j Pxj if Pxx > .99 Pxx = self._get_edge_data(x, x)["P"] if Pxx > 0.999: print "warning, Pxx is very large (%s), numerical precision problems might be in your future" % Pxx if self.debug: print "removing node", x, tau_x, Pxx # update node data for u in neibs: self._update_node(u, x, tau_x, Pxx) # update the edges between neighbors for u in neibs: for v in neibs: self._update_edge(u, v, x, Pxx) self.graph.remove_node(x) def _print_node_data(self, u): # pragma: no cover print "data from node x =", u udata = self.graph.node[u] # print "checking node", u print " taux", udata["tau"] total_prob = 0. for x, v, uvdata in self.graph.out_edges(u, data=True): Puv = uvdata["P"] print " Pxv", Puv, ": v =", v total_prob += Puv print " total prob", total_prob def _check_node(self, u, verbose=True): udata = self.graph.node[u] # print "checking node", u assert udata["tau"] >= 0 total_prob = 0. 
for x, v, uvdata in self.graph.out_edges(u, data=True): Puv = uvdata["P"] assert 1 >= Puv >= 0 total_prob += Puv assert np.abs(total_prob - 1.) < 1e-6, "%s: total_prob %g" % (str(u), total_prob) def _check_A_B_connected(self, connected_components): ccset = [set(c) for c in connected_components] ca_intersections = [c.intersection(self.A) for c in ccset] cb_intersections = [c.intersection(self.B) for c in ccset] # check to make sure all the nodes in A are connected sizes = [len(ca) for ca in ca_intersections if len(ca) > 0] if len(sizes) != 1: assert len(sizes) != 0 print "warning, the reactant set (A) is not fully connected" print " ", [c for c in ca_intersections if len(c) > 0] raise ValueError("the reactant set (A) is not fully connected") # check to make sure all the nodes in B are connected sizes = [len(cb) for cb in cb_intersections if len(cb) > 0] if len(sizes) != 1: assert len(sizes) != 0 print "warning, the product set (B) is not fully connected" print " ", [c for c in cb_intersections if len(c) > 0] raise ValueError("the product set (B) is not fully connected") AB_connected = False for ca, cb in itertools.izip(ca_intersections, cb_intersections): if len(ca) > 0 and len(cb) > 0: AB_connected = True break if not AB_connected: raise ValueError("product and reactant sets (A and B) are not connected") # remove the nodes that are not connected to A or to B unconnected_nodes = set() remaining_components = [] for c in ccset: if not self.A.intersection(c) and not self.B.intersection(c): # the nodes in c are not connected to A or to B. unconnected_nodes.update(c) else: remaining_components.append(c) if unconnected_nodes: print "removing", len(unconnected_nodes), "nodes from the graph because they're not connected to A or to B" self.graph.remove_nodes_from(unconnected_nodes) if len(remaining_components) > 1: print "warning, graph is not fully connected. 
There are", len(remaining_components), "components" return False def initial_check_graph(self): for a in self.A: if not self.graph.has_node(a): raise ValueError("an element in the reactant set (A) is not in the graph") for b in self.B: if not self.graph.has_node(b): raise ValueError("an element in the product set (B) is not in the graph") # add node self loops with zero probability if they don't already exist for u in self.graph.nodes(): if not self.graph.has_edge(u, u): self._add_edge(u, u) if len(self.A.intersection(self.B)) > 0: raise ValueError("A and B have at least one node in common") # check A and B are connected undirected_graph = self.graph.to_undirected() cc = list(nx.connected_components(undirected_graph)) if len(cc) != 1: # print "warning, graph is not fully connected. There are", len(cc), "components" self._check_A_B_connected(cc) # for a, b in itertools.product(self.A, self.B): # try: a, b def check_graph(self): for u in self.graph.nodes(): try: self._check_node(u, verbose=False) except: self._print_node_data(u) raise def _get_committor_probability(self, x): PxA = sum([data["P"] for (u, v, data) in self.graph.out_edges([x], data=True) if v in self.A ]) PxB = sum([data["P"] for (u, v, data) in self.graph.out_edges([x], data=True) if v in self.B ]) # These will not necessarily sum to 1 because of the self transition probabilities, sum_prob = PxA + PxB if sum_prob == 0.: print "x", x print PxA, PxB print self.graph.edges(x, data=True) raise Exception return 0. 
return PxB / (PxA + PxB) def compute_committor_probability(self, x): """compute the probability that trajectory starting from x reaches B before it reaches A Notes ----- Since compute_rates() modifies the original graph this must be called before copute_rates() """ PxB = self.compute_committor_probabilities([x]) return PxB[x] def compute_committor_probabilities(self, nodes): """ compute the committor probability for each node in nodes Notes ----- this is the probability that the trajectory starting from node x reaches B before it reaches A. Since compute_rates() modifies the original graph this must be called before copute_rates() Returns ------- a dictionary of the committor probabilies for each node in nodes """ nodes = set(nodes) not_in_graph = nodes.difference(self.graph.nodes()) if len(not_in_graph) > 0: raise ValueError("At least on of the nodes is not in the graph." + " This could be because you have already called compute_rates()." ) backup_graph = self.graph.copy() # first remove all nodes that are not in A, B or nodes to_be_removed = set(self.graph.nodes()) - nodes - self.A - self.B self._remove_nodes(to_be_removed) PxB = dict() # now compute the committor probabilities for the nodes that are not in A or in B intermediates = set(nodes) - self.A - self.B for x in self._reduce_all_iterator(intermediates, restore_graph=False): PxB[x] = self._get_committor_probability(x) # At this point there is at most one node in the graph that is not in A or in B intermediates = set(self.graph.nodes()) intermediates.difference_update(self.A) intermediates.difference_update(self.B) assert len(intermediates) <= 1 for x in intermediates: self._remove_node(x) # Now all the nodes except those in A or in B are removed. 
assert len(set(self.graph.nodes()).difference(self.A).difference(self.B)) == 0 final_nodes = nodes.intersection(self.A.union(self.B)) if len(final_nodes) > 0: for x in final_nodes: PxB[x] = self._get_committor_probability(x) # restore the graph self.graph = backup_graph return PxB
unknown
codeparrot/codeparrot-clean
#! /usr/bin/env python3 """ The Python Debugger Pdb ======================= To use the debugger in its simplest form: >>> import pdb >>> pdb.run('<a statement>') The debugger's prompt is '(Pdb) '. This will stop in the first function call in <a statement>. Alternatively, if a statement terminated with an unhandled exception, you can use pdb's post-mortem facility to inspect the contents of the traceback: >>> <a statement> <exception traceback> >>> import pdb >>> pdb.pm() The commands recognized by the debugger are listed in the next section. Most can be abbreviated as indicated; e.g., h(elp) means that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in square brackets. Alternatives in the command syntax are separated by a vertical bar (|). A blank line repeats the previous command literally, except for 'list', where it lists the next 11 lines. Commands that the debugger doesn't recognize are assumed to be Python statements and are executed in the context of the program being debugged. Python statements can also be prefixed with an exclamation point ('!'). This is a powerful way to inspect the program being debugged; it is even possible to change variables or call functions. When an exception occurs in such a statement, the exception name is printed but the debugger's state is not changed. The debugger supports aliases, which can save typing. And aliases can have parameters (see the alias help entry) which allows one a certain level of adaptability to the context under examination. Multiple commands may be entered on a single line, separated by the pair ';;'. No intelligence is applied to separating the commands; the input is split at the first ';;', even if it is in the middle of a quoted string. If a file ".pdbrc" exists in your home directory or in the current directory, it is read in and executed as if it had been typed at the debugger prompt. 
This is particularly useful for aliases. If both files exist, the one in the home directory is read first and aliases defined there can be overriden by the local file. Aside from aliases, the debugger is not directly programmable; but it is implemented as a class from which you can derive your own debugger class, which you can make as fancy as you like. Debugger commands ================= """ # NOTE: the actual command documentation is collected from docstrings of the # commands and is appended to __doc__ after the class has been defined. import os import re import sys import cmd import bdb import dis import code import pprint import signal import inspect import traceback import linecache class Restart(Exception): """Causes a debugger to be restarted for the debugged python program.""" pass __all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace", "post_mortem", "help"] def find_function(funcname, filename): cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname)) try: fp = open(filename) except IOError: return None # consumer of this info expects the first line to be 1 lineno = 1 answer = None while True: line = fp.readline() if line == '': break if cre.match(line): answer = funcname, filename, lineno break lineno += 1 fp.close() return answer def getsourcelines(obj): lines, lineno = inspect.findsource(obj) if inspect.isframe(obj) and obj.f_globals is obj.f_locals: # must be a module frame: do not try to cut a block out of it return lines, 1 elif inspect.ismodule(obj): return lines, 1 return inspect.getblock(lines[lineno:]), lineno+1 def lasti2lineno(code, lasti): linestarts = list(dis.findlinestarts(code)) linestarts.reverse() for i, lineno in linestarts: if lasti >= i: return lineno return 0 class _rstr(str): """String that doesn't quote its repr.""" def __repr__(self): return self # Interaction prompt line will separate file and call info from code # text using value of line_prefix string. A newline and arrow may # be to your liking. 
You can set it once pdb is imported using the # command "pdb.line_prefix = '\n% '". # line_prefix = ': ' # Use this to get the old situation back line_prefix = '\n-> ' # Probably a better default class Pdb(bdb.Bdb, cmd.Cmd): def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None, nosigint=False): bdb.Bdb.__init__(self, skip=skip) cmd.Cmd.__init__(self, completekey, stdin, stdout) if stdout: self.use_rawinput = 0 self.prompt = '(Pdb) ' self.aliases = {} self.displaying = {} self.mainpyfile = '' self._wait_for_mainpyfile = False self.tb_lineno = {} # Try to load readline if it exists try: import readline except ImportError: pass self.allow_kbdint = False self.nosigint = nosigint # Read $HOME/.pdbrc and ./.pdbrc self.rcLines = [] if 'HOME' in os.environ: envHome = os.environ['HOME'] try: with open(os.path.join(envHome, ".pdbrc")) as rcFile: self.rcLines.extend(rcFile) except IOError: pass try: with open(".pdbrc") as rcFile: self.rcLines.extend(rcFile) except IOError: pass self.commands = {} # associates a command list to breakpoint numbers self.commands_doprompt = {} # for each bp num, tells if the prompt # must be disp. after execing the cmd list self.commands_silent = {} # for each bp num, tells if the stack trace # must be disp. after execing the cmd list self.commands_defining = False # True while in the process of defining # a command list self.commands_bnum = None # The breakpoint number for which we are # defining a list def sigint_handler(self, signum, frame): if self.allow_kbdint: raise KeyboardInterrupt self.message("\nProgram interrupted. 
(Use 'cont' to resume).") self.set_step() self.set_trace(frame) # restore previous signal handler signal.signal(signal.SIGINT, self._previous_sigint_handler) def reset(self): bdb.Bdb.reset(self) self.forget() def forget(self): self.lineno = None self.stack = [] self.curindex = 0 self.curframe = None self.tb_lineno.clear() def setup(self, f, tb): self.forget() self.stack, self.curindex = self.get_stack(f, tb) while tb: # when setting up post-mortem debugging with a traceback, save all # the original line numbers to be displayed along the current line # numbers (which can be different, e.g. due to finally clauses) lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti) self.tb_lineno[tb.tb_frame] = lineno tb = tb.tb_next self.curframe = self.stack[self.curindex][0] # The f_locals dictionary is updated from the actual frame # locals whenever the .f_locals accessor is called, so we # cache it here to ensure that modifications are not overwritten. self.curframe_locals = self.curframe.f_locals return self.execRcLines() # Can be executed earlier than 'setup' if desired def execRcLines(self): if not self.rcLines: return # local copy because of recursion rcLines = self.rcLines rcLines.reverse() # execute every line only once self.rcLines = [] while rcLines: line = rcLines.pop().strip() if line and line[0] != '#': if self.onecmd(line): # if onecmd returns True, the command wants to exit # from the interaction, save leftover rc lines # to execute before next interaction self.rcLines += reversed(rcLines) return True # Override Bdb methods def user_call(self, frame, argument_list): """This method is called when there is the remote possibility that we ever need to stop in this function.""" if self._wait_for_mainpyfile: return if self.stop_here(frame): self.message('--Call--') self.interaction(frame, None) def user_line(self, frame): """This function is called when we stop or break at this line.""" if self._wait_for_mainpyfile: if (self.mainpyfile != 
self.canonic(frame.f_code.co_filename) or frame.f_lineno <= 0): return self._wait_for_mainpyfile = False if self.bp_commands(frame): self.interaction(frame, None) def bp_commands(self, frame): """Call every command that was set for the current active breakpoint (if there is one). Returns True if the normal interaction function must be called, False otherwise.""" # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit if getattr(self, "currentbp", False) and \ self.currentbp in self.commands: currentbp = self.currentbp self.currentbp = 0 lastcmd_back = self.lastcmd self.setup(frame, None) for line in self.commands[currentbp]: self.onecmd(line) self.lastcmd = lastcmd_back if not self.commands_silent[currentbp]: self.print_stack_entry(self.stack[self.curindex]) if self.commands_doprompt[currentbp]: self._cmdloop() self.forget() return return 1 def user_return(self, frame, return_value): """This function is called when a return trap is set here.""" if self._wait_for_mainpyfile: return frame.f_locals['__return__'] = return_value self.message('--Return--') self.interaction(frame, None) def user_exception(self, frame, exc_info): """This function is called if an exception occurs, but only if we are to stop at or just below this level.""" if self._wait_for_mainpyfile: return exc_type, exc_value, exc_traceback = exc_info frame.f_locals['__exception__'] = exc_type, exc_value self.message(traceback.format_exception_only(exc_type, exc_value)[-1].strip()) self.interaction(frame, exc_traceback) # General interaction function def _cmdloop(self): while True: try: # keyboard interrupts allow for an easy way to cancel # the current command, so allow them during interactive input self.allow_kbdint = True self.cmdloop() self.allow_kbdint = False break except KeyboardInterrupt: self.message('--KeyboardInterrupt--') # Called before loop, handles display expressions def preloop(self): displaying = self.displaying.get(self.curframe) if displaying: for expr, oldvalue in 
displaying.items(): newvalue = self._getval_except(expr) # check for identity first; this prevents custom __eq__ to # be called at every loop, and also prevents instances whose # fields are changed to be displayed if newvalue is not oldvalue and newvalue != oldvalue: displaying[expr] = newvalue self.message('display %s: %r [old: %r]' % (expr, newvalue, oldvalue)) def interaction(self, frame, traceback): if self.setup(frame, traceback): # no interaction desired at this time (happens if .pdbrc contains # a command like "continue") self.forget() return self.print_stack_entry(self.stack[self.curindex]) self._cmdloop() self.forget() def displayhook(self, obj): """Custom displayhook for the exec in default(), which prevents assignment of the _ variable in the builtins. """ # reproduce the behavior of the standard displayhook, not printing None if obj is not None: self.message(repr(obj)) def default(self, line): if line[:1] == '!': line = line[1:] locals = self.curframe_locals globals = self.curframe.f_globals try: code = compile(line + '\n', '<stdin>', 'single') save_stdout = sys.stdout save_stdin = sys.stdin save_displayhook = sys.displayhook try: sys.stdin = self.stdin sys.stdout = self.stdout sys.displayhook = self.displayhook exec(code, globals, locals) finally: sys.stdout = save_stdout sys.stdin = save_stdin sys.displayhook = save_displayhook except: exc_info = sys.exc_info()[:2] self.error(traceback.format_exception_only(*exc_info)[-1].strip()) def precmd(self, line): """Handle alias expansion and ';;' separator.""" if not line.strip(): return line args = line.split() while args[0] in self.aliases: line = self.aliases[args[0]] ii = 1 for tmpArg in args[1:]: line = line.replace("%" + str(ii), tmpArg) ii += 1 line = line.replace("%*", ' '.join(args[1:])) args = line.split() # split into ';;' separated commands # unless it's an alias command if args[0] != 'alias': marker = line.find(';;') if marker >= 0: # queue up everything after marker next = 
line[marker+2:].lstrip() self.cmdqueue.append(next) line = line[:marker].rstrip() return line def onecmd(self, line): """Interpret the argument as though it had been typed in response to the prompt. Checks whether this line is typed at the normal prompt or in a breakpoint command list definition. """ if not self.commands_defining: return cmd.Cmd.onecmd(self, line) else: return self.handle_command_def(line) def handle_command_def(self, line): """Handles one command line during command list definition.""" cmd, arg, line = self.parseline(line) if not cmd: return if cmd == 'silent': self.commands_silent[self.commands_bnum] = True return # continue to handle other cmd def in the cmd list elif cmd == 'end': self.cmdqueue = [] return 1 # end of cmd list cmdlist = self.commands[self.commands_bnum] if arg: cmdlist.append(cmd+' '+arg) else: cmdlist.append(cmd) # Determine if we must stop try: func = getattr(self, 'do_' + cmd) except AttributeError: func = self.default # one of the resuming commands if func.__name__ in self.commands_resuming: self.commands_doprompt[self.commands_bnum] = False self.cmdqueue = [] return 1 return # interface abstraction functions def message(self, msg): print(msg, file=self.stdout) def error(self, msg): print('***', msg, file=self.stdout) # Command definitions, called by cmdloop() # The argument is the remaining string on the command line # Return true to exit from the command loop def do_commands(self, arg): """commands [bpnumber] (com) ... (com) end (Pdb) Specify a list of commands for breakpoint number bpnumber. The commands themselves are entered on the following lines. Type a line containing just 'end' to terminate the commands. The commands are executed when the breakpoint is hit. To remove all commands from a breakpoint, type commands and follow it immediately with end; that is, give no commands. With no bpnumber argument, commands refers to the last breakpoint set. You can use breakpoint commands to start your program up again. 
Simply use the continue command, or step, or any other command that resumes execution. Specifying any command resuming execution (currently continue, step, next, return, jump, quit and their abbreviations) terminates the command list (as if that command was immediately followed by end). This is because any time you resume execution (even with a simple next or step), you may encounter another breakpoint -- which could have its own command list, leading to ambiguities about which list to execute. If you use the 'silent' command in the command list, the usual message about stopping at a breakpoint is not printed. This may be desirable for breakpoints that are to print a specific message and then continue. If none of the other commands print anything, you will see no sign that the breakpoint was reached. """ if not arg: bnum = len(bdb.Breakpoint.bpbynumber) - 1 else: try: bnum = int(arg) except: self.error("Usage: commands [bnum]\n ...\n end") return self.commands_bnum = bnum # Save old definitions for the case of a keyboard interrupt. if bnum in self.commands: old_command_defs = (self.commands[bnum], self.commands_doprompt[bnum], self.commands_silent[bnum]) else: old_command_defs = None self.commands[bnum] = [] self.commands_doprompt[bnum] = True self.commands_silent[bnum] = False prompt_back = self.prompt self.prompt = '(com) ' self.commands_defining = True try: self.cmdloop() except KeyboardInterrupt: # Restore old definitions. if old_command_defs: self.commands[bnum] = old_command_defs[0] self.commands_doprompt[bnum] = old_command_defs[1] self.commands_silent[bnum] = old_command_defs[2] else: del self.commands[bnum] del self.commands_doprompt[bnum] del self.commands_silent[bnum] self.error('command definition aborted, old commands restored') finally: self.commands_defining = False self.prompt = prompt_back def do_break(self, arg, temporary = 0): """b(reak) [ ([filename:]lineno | function) [, condition] ] Without argument, list all breaks. 
With a line number argument, set a break at this line in the current file. With a function name, set a break at the first executable line of that function. If a second argument is present, it is a string specifying an expression which must evaluate to true before the breakpoint is honored. The line number may be prefixed with a filename and a colon, to specify a breakpoint in another file (probably one that hasn't been loaded yet). The file is searched for on sys.path; the .py suffix may be omitted. """ if not arg: if self.breaks: # There's at least one self.message("Num Type Disp Enb Where") for bp in bdb.Breakpoint.bpbynumber: if bp: self.message(bp.bpformat()) return # parse arguments; comma has lowest precedence # and cannot occur in filename filename = None lineno = None cond = None comma = arg.find(',') if comma > 0: # parse stuff after comma: "condition" cond = arg[comma+1:].lstrip() arg = arg[:comma].rstrip() # parse stuff before comma: [filename:]lineno | function colon = arg.rfind(':') funcname = None if colon >= 0: filename = arg[:colon].rstrip() f = self.lookupmodule(filename) if not f: self.error('%r not found from sys.path' % filename) return else: filename = f arg = arg[colon+1:].lstrip() try: lineno = int(arg) except ValueError: self.error('Bad lineno: %s' % arg) return else: # no colon; can be lineno or function try: lineno = int(arg) except ValueError: try: func = eval(arg, self.curframe.f_globals, self.curframe_locals) except: func = arg try: if hasattr(func, '__func__'): func = func.__func__ code = func.__code__ #use co_name to identify the bkpt (function names #could be aliased, but co_name is invariant) funcname = code.co_name lineno = code.co_firstlineno filename = code.co_filename except: # last thing to try (ok, filename, ln) = self.lineinfo(arg) if not ok: self.error('The specified object %r is not a function ' 'or was not found along sys.path.' 
% arg) return funcname = ok # ok contains a function name lineno = int(ln) if not filename: filename = self.defaultFile() # Check for reasonable breakpoint line = self.checkline(filename, lineno) if line: # now set the break point err = self.set_break(filename, line, temporary, cond, funcname) if err: self.error(err, file=self.stdout) else: bp = self.get_breaks(filename, line)[-1] self.message("Breakpoint %d at %s:%d" % (bp.number, bp.file, bp.line)) # To be overridden in derived debuggers def defaultFile(self): """Produce a reasonable default.""" filename = self.curframe.f_code.co_filename if filename == '<string>' and self.mainpyfile: filename = self.mainpyfile return filename do_b = do_break def do_tbreak(self, arg): """tbreak [ ([filename:]lineno | function) [, condition] ] Same arguments as break, but sets a temporary breakpoint: it is automatically deleted when first hit. """ self.do_break(arg, 1) def lineinfo(self, identifier): failed = (None, None, None) # Input is identifier, may be in single quotes idstring = identifier.split("'") if len(idstring) == 1: # not in single quotes id = idstring[0].strip() elif len(idstring) == 3: # quoted id = idstring[1].strip() else: return failed if id == '': return failed parts = id.split('.') # Protection for derived debuggers if parts[0] == 'self': del parts[0] if len(parts) == 0: return failed # Best first guess at file to look at fname = self.defaultFile() if len(parts) == 1: item = parts[0] else: # More than one part. # First is module, second is method/class f = self.lookupmodule(parts[0]) if f: fname = f item = parts[1] answer = find_function(item, fname) return answer or failed def checkline(self, filename, lineno): """Check whether specified line seems to be executable. Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank line or EOF). Warning: testing is not comprehensive. 
""" # this method should be callable before starting debugging, so default # to "no globals" if there is no current frame globs = self.curframe.f_globals if hasattr(self, 'curframe') else None line = linecache.getline(filename, lineno, globs) if not line: self.message('End of file') return 0 line = line.strip() # Don't allow setting breakpoint at a blank line if (not line or (line[0] == '#') or (line[:3] == '"""') or line[:3] == "'''"): self.error('Blank or comment') return 0 return lineno def do_enable(self, arg): """enable bpnumber [bpnumber ...] Enables the breakpoints given as a space separated list of breakpoint numbers. """ args = arg.split() for i in args: try: bp = self.get_bpbynumber(i) except ValueError as err: self.error(err) else: bp.enable() self.message('Enabled %s' % bp) def do_disable(self, arg): """disable bpnumber [bpnumber ...] Disables the breakpoints given as a space separated list of breakpoint numbers. Disabling a breakpoint means it cannot cause the program to stop execution, but unlike clearing a breakpoint, it remains in the list of breakpoints and can be (re-)enabled. """ args = arg.split() for i in args: try: bp = self.get_bpbynumber(i) except ValueError as err: self.error(err) else: bp.disable() self.message('Disabled %s' % bp) def do_condition(self, arg): """condition bpnumber [condition] Set a new condition for the breakpoint, an expression which must evaluate to true before the breakpoint is honored. If condition is absent, any existing condition is removed; i.e., the breakpoint is made unconditional. """ args = arg.split(' ', 1) try: cond = args[1] except IndexError: cond = None try: bp = self.get_bpbynumber(args[0].strip()) except ValueError as err: self.error(err) else: bp.cond = cond if not cond: self.message('Breakpoint %d is now unconditional.' % bp.number) else: self.message('New condition set for breakpoint %d.' 
% bp.number) def do_ignore(self, arg): """ignore bpnumber [count] Set the ignore count for the given breakpoint number. If count is omitted, the ignore count is set to 0. A breakpoint becomes active when the ignore count is zero. When non-zero, the count is decremented each time the breakpoint is reached and the breakpoint is not disabled and any associated condition evaluates to true. """ args = arg.split() try: count = int(args[1].strip()) except: count = 0 try: bp = self.get_bpbynumber(args[0].strip()) except ValueError as err: self.error(err) else: bp.ignore = count if count > 0: if count > 1: countstr = '%d crossings' % count else: countstr = '1 crossing' self.message('Will ignore next %s of breakpoint %d.' % (countstr, bp.number)) else: self.message('Will stop next time breakpoint %d is reached.' % bp.number) def do_clear(self, arg): """cl(ear) filename:lineno\ncl(ear) [bpnumber [bpnumber...]] With a space separated list of breakpoint numbers, clear those breakpoints. Without argument, clear all breaks (but first ask confirmation). With a filename:lineno argument, clear all breaks at that line in that file. """ if not arg: try: reply = input('Clear all breaks? 
') except EOFError: reply = 'no' reply = reply.strip().lower() if reply in ('y', 'yes'): bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp] self.clear_all_breaks() for bp in bplist: self.message('Deleted %s' % bp) return if ':' in arg: # Make sure it works for "clear C:\foo\bar.py:12" i = arg.rfind(':') filename = arg[:i] arg = arg[i+1:] try: lineno = int(arg) except ValueError: err = "Invalid line number (%s)" % arg else: bplist = self.get_breaks(filename, lineno) err = self.clear_break(filename, lineno) if err: self.error(err) else: for bp in bplist: self.message('Deleted %s' % bp) return numberlist = arg.split() for i in numberlist: try: bp = self.get_bpbynumber(i) except ValueError as err: self.error(err) else: self.clear_bpbynumber(i) self.message('Deleted %s' % bp) do_cl = do_clear # 'c' is already an abbreviation for 'continue' def do_where(self, arg): """w(here) Print a stack trace, with the most recent frame at the bottom. An arrow indicates the "current frame", which determines the context of most commands. 'bt' is an alias for this command. """ self.print_stack_trace() do_w = do_where do_bt = do_where def _select_frame(self, number): assert 0 <= number < len(self.stack) self.curindex = number self.curframe = self.stack[self.curindex][0] self.curframe_locals = self.curframe.f_locals self.print_stack_entry(self.stack[self.curindex]) self.lineno = None def do_up(self, arg): """u(p) [count] Move the current frame count (default one) levels up in the stack trace (to an older frame). """ if self.curindex == 0: self.error('Oldest frame') return try: count = int(arg or 1) except ValueError: self.error('Invalid frame count (%s)' % arg) return if count < 0: newframe = 0 else: newframe = max(0, self.curindex - count) self._select_frame(newframe) do_u = do_up def do_down(self, arg): """d(own) [count] Move the current frame count (default one) levels down in the stack trace (to a newer frame). 
""" if self.curindex + 1 == len(self.stack): self.error('Newest frame') return try: count = int(arg or 1) except ValueError: self.error('Invalid frame count (%s)' % arg) return if count < 0: newframe = len(self.stack) - 1 else: newframe = min(len(self.stack) - 1, self.curindex + count) self._select_frame(newframe) do_d = do_down def do_until(self, arg): """unt(il) [lineno] Without argument, continue execution until the line with a number greater than the current one is reached. With a line number, continue execution until a line with a number greater or equal to that is reached. In both cases, also stop when the current frame returns. """ if arg: try: lineno = int(arg) except ValueError: self.error('Error in argument: %r' % arg) return if lineno <= self.curframe.f_lineno: self.error('"until" line number is smaller than current ' 'line number') return else: lineno = None self.set_until(self.curframe, lineno) return 1 do_unt = do_until def do_step(self, arg): """s(tep) Execute the current line, stop at the first possible occasion (either in a function that is called or in the current function). """ self.set_step() return 1 do_s = do_step def do_next(self, arg): """n(ext) Continue execution until the next line in the current function is reached or it returns. """ self.set_next(self.curframe) return 1 do_n = do_next def do_run(self, arg): """run [args...] Restart the debugged python program. If a string is supplied it is splitted with "shlex", and the result is used as the new sys.argv. History, breakpoints, actions and debugger options are preserved. "restart" is an alias for "run". """ if arg: import shlex argv0 = sys.argv[0:1] sys.argv = shlex.split(arg) sys.argv[:0] = argv0 # this is caught in the main debugger loop raise Restart do_restart = do_run def do_return(self, arg): """r(eturn) Continue execution until the current function returns. 
""" self.set_return(self.curframe) return 1 do_r = do_return def do_continue(self, arg): """c(ont(inue)) Continue execution, only stop when a breakpoint is encountered. """ if not self.nosigint: self._previous_sigint_handler = \ signal.signal(signal.SIGINT, self.sigint_handler) self.set_continue() return 1 do_c = do_cont = do_continue def do_jump(self, arg): """j(ump) lineno Set the next line that will be executed. Only available in the bottom-most frame. This lets you jump back and execute code again, or jump forward to skip code that you don't want to run. It should be noted that not all jumps are allowed -- for instance it is not possible to jump into the middle of a for loop or out of a finally clause. """ if self.curindex + 1 != len(self.stack): self.error('You can only jump within the bottom frame') return try: arg = int(arg) except ValueError: self.error("The 'jump' command requires a line number") else: try: # Do the jump, fix up our copy of the stack, and display the # new position self.curframe.f_lineno = arg self.stack[self.curindex] = self.stack[self.curindex][0], arg self.print_stack_entry(self.stack[self.curindex]) except ValueError as e: self.error('Jump failed: %s' % e) do_j = do_jump def do_debug(self, arg): """debug code Enter a recursive debugger that steps through the code argument (which is an arbitrary expression or statement to be executed in the current environment). """ sys.settrace(None) globals = self.curframe.f_globals locals = self.curframe_locals p = Pdb(self.completekey, self.stdin, self.stdout) p.prompt = "(%s) " % self.prompt.strip() self.message("ENTERING RECURSIVE DEBUGGER") sys.call_tracing(p.run, (arg, globals, locals)) self.message("LEAVING RECURSIVE DEBUGGER") sys.settrace(self.trace_dispatch) self.lastcmd = p.lastcmd def do_quit(self, arg): """q(uit)\nexit Quit from the debugger. The program being executed is aborted. 
""" self._user_requested_quit = True self.set_quit() return 1 do_q = do_quit do_exit = do_quit def do_EOF(self, arg): """EOF Handles the receipt of EOF as a command. """ self.message('') self._user_requested_quit = True self.set_quit() return 1 def do_args(self, arg): """a(rgs) Print the argument list of the current function. """ co = self.curframe.f_code dict = self.curframe_locals n = co.co_argcount if co.co_flags & 4: n = n+1 if co.co_flags & 8: n = n+1 for i in range(n): name = co.co_varnames[i] if name in dict: self.message('%s = %r' % (name, dict[name])) else: self.message('%s = *** undefined ***' % (name,)) do_a = do_args def do_retval(self, arg): """retval Print the return value for the last return of a function. """ if '__return__' in self.curframe_locals: self.message(repr(self.curframe_locals['__return__'])) else: self.error('Not yet returned!') do_rv = do_retval def _getval(self, arg): try: return eval(arg, self.curframe.f_globals, self.curframe_locals) except: exc_info = sys.exc_info()[:2] self.error(traceback.format_exception_only(*exc_info)[-1].strip()) raise def _getval_except(self, arg, frame=None): try: if frame is None: return eval(arg, self.curframe.f_globals, self.curframe_locals) else: return eval(arg, frame.f_globals, frame.f_locals) except: exc_info = sys.exc_info()[:2] err = traceback.format_exception_only(*exc_info)[-1].strip() return _rstr('** raised %s **' % err) def do_p(self, arg): """p(rint) expression Print the value of the expression. """ try: self.message(repr(self._getval(arg))) except: pass # make "print" an alias of "p" since print isn't a Python statement anymore do_print = do_p def do_pp(self, arg): """pp expression Pretty-print the value of the expression. """ try: self.message(pprint.pformat(self._getval(arg))) except: pass def do_list(self, arg): """l(ist) [first [,last] | .] List source code for the current file. Without arguments, list 11 lines around the current line or continue the previous listing. With . 
as argument, list 11 lines around the current line. With one argument, list 11 lines starting at that line. With two arguments, list the given range; if the second argument is less than the first, it is a count. The current line in the current frame is indicated by "->". If an exception is being debugged, the line where the exception was originally raised or propagated is indicated by ">>", if it differs from the current line. """ self.lastcmd = 'list' last = None if arg and arg != '.': try: if ',' in arg: first, last = arg.split(',') first = int(first.strip()) last = int(last.strip()) if last < first: # assume it's a count last = first + last else: first = int(arg.strip()) first = max(1, first - 5) except ValueError: self.error('Error in argument: %r' % arg) return elif self.lineno is None or arg == '.': first = max(1, self.curframe.f_lineno - 5) else: first = self.lineno + 1 if last is None: last = first + 10 filename = self.curframe.f_code.co_filename breaklist = self.get_file_breaks(filename) try: lines = linecache.getlines(filename, self.curframe.f_globals) self._print_lines(lines[first-1:last], first, breaklist, self.curframe) self.lineno = min(last, len(lines)) if len(lines) < last: self.message('[EOF]') except KeyboardInterrupt: pass do_l = do_list def do_longlist(self, arg): """longlist | ll List the whole source code for the current function or frame. """ filename = self.curframe.f_code.co_filename breaklist = self.get_file_breaks(filename) try: lines, lineno = getsourcelines(self.curframe) except IOError as err: self.error(err) return self._print_lines(lines, lineno, breaklist, self.curframe) do_ll = do_longlist def do_source(self, arg): """source expression Try to get source code for the given object and display it. 
""" try: obj = self._getval(arg) except: return try: lines, lineno = getsourcelines(obj) except (IOError, TypeError) as err: self.error(err) return self._print_lines(lines, lineno) def _print_lines(self, lines, start, breaks=(), frame=None): """Print a range of lines.""" if frame: current_lineno = frame.f_lineno exc_lineno = self.tb_lineno.get(frame, -1) else: current_lineno = exc_lineno = -1 for lineno, line in enumerate(lines, start): s = str(lineno).rjust(3) if len(s) < 4: s += ' ' if lineno in breaks: s += 'B' else: s += ' ' if lineno == current_lineno: s += '->' elif lineno == exc_lineno: s += '>>' self.message(s + '\t' + line.rstrip()) def do_whatis(self, arg): """whatis arg Print the type of the argument. """ try: value = self._getval(arg) except: # _getval() already printed the error return code = None # Is it a function? try: code = value.__code__ except Exception: pass if code: self.message('Function %s' % code.co_name) return # Is it an instance method? try: code = value.__func__.__code__ except Exception: pass if code: self.message('Method %s' % code.co_name) return # Is it a class? if value.__class__ is type: self.message('Class %s.%s' % (value.__module__, value.__name__)) return # None of the above... self.message(type(value)) def do_display(self, arg): """display [expression] Display the value of the expression if it changed, each time execution stops in the current frame. Without expression, list all display expressions for the current frame. """ if not arg: self.message('Currently displaying:') for item in self.displaying.get(self.curframe, {}).items(): self.message('%s: %r' % item) else: val = self._getval_except(arg) self.displaying.setdefault(self.curframe, {})[arg] = val self.message('display %s: %r' % (arg, val)) def do_undisplay(self, arg): """undisplay [expression] Do not display the expression any more in the current frame. Without expression, clear all display expressions for the current frame. 
""" if arg: try: del self.displaying.get(self.curframe, {})[arg] except KeyError: self.error('not displaying %s' % arg) else: self.displaying.pop(self.curframe, None) def do_interact(self, arg): """interact Start an interative interpreter whose global namespace contains all the (global and local) names found in the current scope. """ ns = self.curframe.f_globals.copy() ns.update(self.curframe_locals) code.interact("*interactive*", local=ns) def do_alias(self, arg): """alias [name [command [parameter parameter ...] ]] Create an alias called 'name' that executes 'command'. The command must *not* be enclosed in quotes. Replaceable parameters can be indicated by %1, %2, and so on, while %* is replaced by all the parameters. If no command is given, the current alias for name is shown. If no name is given, all aliases are listed. Aliases may be nested and can contain anything that can be legally typed at the pdb prompt. Note! You *can* override internal pdb commands with aliases! Those internal commands are then hidden until the alias is removed. Aliasing is recursively applied to the first word of the command line; all other words in the line are left alone. As an example, here are two useful aliases (especially when placed in the .pdbrc file): # Print instance variables (usage "pi classInst") alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k] # Print instance variables in self alias ps pi self """ args = arg.split() if len(args) == 0: keys = sorted(self.aliases.keys()) for alias in keys: self.message("%s = %s" % (alias, self.aliases[alias])) return if args[0] in self.aliases and len(args) == 1: self.message("%s = %s" % (args[0], self.aliases[args[0]])) else: self.aliases[args[0]] = ' '.join(args[1:]) def do_unalias(self, arg): """unalias name Delete the specified alias. """ args = arg.split() if len(args) == 0: return if args[0] in self.aliases: del self.aliases[args[0]] # List of all the commands making the program resume execution. 
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return', 'do_quit', 'do_jump'] # Print a traceback starting at the top stack frame. # The most recently entered frame is printed last; # this is different from dbx and gdb, but consistent with # the Python interpreter's stack trace. # It is also consistent with the up/down commands (which are # compatible with dbx and gdb: up moves towards 'main()' # and down moves towards the most recent stack frame). def print_stack_trace(self): try: for frame_lineno in self.stack: self.print_stack_entry(frame_lineno) except KeyboardInterrupt: pass def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix): frame, lineno = frame_lineno if frame is self.curframe: prefix = '> ' else: prefix = ' ' self.message(prefix + self.format_stack_entry(frame_lineno, prompt_prefix)) # Provide help def do_help(self, arg): """h(elp) Without argument, print the list of available commands. With a command name as argument, print help about that command. "help pdb" shows the full pdb documentation. "help exec" gives help on the ! command. """ if not arg: return cmd.Cmd.do_help(self, arg) try: try: topic = getattr(self, 'help_' + arg) return topic() except AttributeError: command = getattr(self, 'do_' + arg) except AttributeError: self.error('No help for %r' % arg) else: if sys.flags.optimize >= 2: self.error('No help for %r; please do not run Python with -OO ' 'if you need command help' % arg) return self.message(command.__doc__.rstrip()) do_h = do_help def help_exec(self): """(!) statement Execute the (one-line) statement in the context of the current stack frame. The exclamation point can be omitted unless the first word of the statement resembles a debugger command. 
To assign to a global variable you must always prefix the command with a 'global' command, e.g.: (Pdb) global list_options; list_options = ['-l'] (Pdb) """ self.message((self.help_exec.__doc__ or '').strip()) def help_pdb(self): help() # other helper functions def lookupmodule(self, filename): """Helper function for break/clear parsing -- may be overridden. lookupmodule() translates (possibly incomplete) file or module name into an absolute file name. """ if os.path.isabs(filename) and os.path.exists(filename): return filename f = os.path.join(sys.path[0], filename) if os.path.exists(f) and self.canonic(f) == self.mainpyfile: return f root, ext = os.path.splitext(filename) if ext == '': filename = filename + '.py' if os.path.isabs(filename): return filename for dirname in sys.path: while os.path.islink(dirname): dirname = os.readlink(dirname) fullname = os.path.join(dirname, filename) if os.path.exists(fullname): return fullname return None def _runscript(self, filename): # The script has to run in __main__ namespace (or imports from # __main__ will break). # # So we clear up the __main__ and set several special variables # (this gets rid of pdb's globals and cleans old variables on restarts). import __main__ __main__.__dict__.clear() __main__.__dict__.update({"__name__" : "__main__", "__file__" : filename, "__builtins__": __builtins__, }) # When bdb sets tracing, a number of call and line events happens # BEFORE debugger even reaches user's code (and the exact sequence of # events depends on python version). So we take special measures to # avoid stopping before we reach the main script (see user_line and # user_call for details). 
        self._wait_for_mainpyfile = True
        self.mainpyfile = self.canonic(filename)
        self._user_requested_quit = False
        # Read the script as bytes and re-exec it from source so that the
        # compiled code reports the script's own filename in tracebacks.
        with open(filename, "rb") as fp:
            statement = "exec(compile(%r, %r, 'exec'))" % \
                        (fp.read(), self.mainpyfile)
        self.run(statement)

# Collect all command help into docstring, if not run with -OO
if __doc__ is not None:
    # unfortunately we can't guess this order from the class definition
    _help_order = [
        'help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable',
        'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until',
        'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist',
        'args', 'print', 'pp', 'whatis', 'source', 'display', 'undisplay',
        'interact', 'alias', 'unalias', 'debug', 'quit',
    ]

    for _command in _help_order:
        __doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n'
    __doc__ += Pdb.help_exec.__doc__

    del _help_order, _command


# Simplified interface

def run(statement, globals=None, locals=None):
    """Execute the *statement* string under control of a fresh debugger."""
    Pdb().run(statement, globals, locals)

def runeval(expression, globals=None, locals=None):
    """Evaluate *expression* under debugger control and return its value."""
    return Pdb().runeval(expression, globals, locals)

def runctx(statement, globals, locals):
    """Deprecated spelling of run() with mandatory namespaces."""
    # B/W compatibility
    run(statement, globals, locals)

def runcall(*args, **kwds):
    """Call args[0](*args[1:], **kwds) under debugger control."""
    return Pdb().runcall(*args, **kwds)

def set_trace():
    """Enter the debugger at the caller's frame."""
    Pdb().set_trace(sys._getframe().f_back)

# Post-Mortem interface

def post_mortem(t=None):
    """Debug traceback *t*, defaulting to the one currently being handled."""
    # handling the default
    if t is None:
        # sys.exc_info() returns (type, value, traceback) if an exception is
        # being handled, otherwise it returns None
        t = sys.exc_info()[2]
    if t is None:
        raise ValueError("A valid traceback must be passed if no "
                         "exception is being handled")

    p = Pdb()
    p.reset()
    p.interaction(None, t)

def pm():
    """Post-mortem debug the last uncaught exception (sys.last_traceback)."""
    post_mortem(sys.last_traceback)


# Main program for testing

TESTCMD = 'import x; x.main()'

def test():
    run(TESTCMD)

# print help
def help():
    import pydoc
    pydoc.pager(__doc__)

_usage = """\
usage: pdb.py [-c command] ... pyfile [arg] ...

Debug the Python program given by pyfile.
Initial commands are read from .pdbrc files in your home directory and in the current directory, if they exist. Commands supplied with -c are executed after commands from .pdbrc files. To let the script run until an exception occurs, use "-c continue". To let the script run up to a given line X in the debugged file, use "-c 'until X'".""" def main(): import getopt opts, args = getopt.getopt(sys.argv[1:], 'hc:', ['--help', '--command=']) if not args: print(_usage) sys.exit(2) commands = [] for opt, optarg in opts: if opt in ['-h', '--help']: print(_usage) sys.exit() elif opt in ['-c', '--command']: commands.append(optarg) mainpyfile = args[0] # Get script filename if not os.path.exists(mainpyfile): print('Error:', mainpyfile, 'does not exist') sys.exit(1) sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list # Replace pdb's dir with script's dir in front of module search path. sys.path[0] = os.path.dirname(mainpyfile) # Note on saving/restoring sys.argv: it's a good idea when sys.argv was # modified by the script being debugged. It's a bad idea when it was # changed by the user from the command line. There is a "restart" command # which allows explicit specification of command line arguments. pdb = Pdb() pdb.rcLines.extend(commands) while True: try: pdb._runscript(mainpyfile) if pdb._user_requested_quit: break print("The program finished and will be restarted") except Restart: print("Restarting", mainpyfile, "with arguments:") print("\t" + " ".join(args)) except SystemExit: # In most cases SystemExit does not warrant a post-mortem session. print("The program exited via sys.exit(). Exit status:", end=' ') print(sys.exc_info()[1]) except: traceback.print_exc() print("Uncaught exception. Entering post mortem debugging") print("Running 'cont' or 'step' will restart the program") t = sys.exc_info()[2] pdb.interaction(None, t) print("Post mortem debugger finished. 
The " + mainpyfile + " will be restarted") # When invoked as main program, invoke the debugger on a script if __name__ == '__main__': import pdb pdb.main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python """ Michael DeHaan <mdehaan@fedoraproject.org>, 2008 This software may be freely redistributed under the terms of the GNU general public license. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. --- This script automates pushes from git checkouts into Fedora CVS. It is expected you already have Fedora CVS set up for a project and have the build system tools installed. After that, usage looks like: python pusher.py --proj=/cg/func --cvs=~/func Work in progress """ # if new releases come out or old ones go away, edit here #PROCESS_RELEASES = [ "devel", "F-9", "F-8", "EL-5", "EL-4" ] PROCESS_RELEASES = [ "devel" ] import optparse import os import sys import glob import subprocess def run(cmd,failok=False): """ Wrapper around subprocess """ print "running: %s" % cmd rc = subprocess.call(cmd, shell=True) print "rc: %s" % rc if not failok and not rc == 0: croak("aborting") def croak(msg): """ Print something and die. """ print msg sys.exit(1) # process options, as described at the top of this file p = optparse.OptionParser(usage="pusher [ARGS]") p.add_option("--cvs", dest="cvs", help="EX: ~/cvs/func") p.add_option("--proj", dest="proj", help="EX: /cg/func") (options,args) = p.parse_args() if options.cvs is None: croak("--cvs is required, PEBKAC") if options.proj is None: croak("--proj is required, PEBKAC") cvsdir = os.path.expanduser(options.cvs) projdir = os.path.expanduser(options.proj) print "----------------------------------------------" print "Running Michael's totally awesome code pusher script" print "----------------------------------------------" print "assuming you first ran something like..." print " ssh-agent bash" print " ssh-agent ~/.ssh/id_dsa" print "if not, expect pain and it's not my fault" print "----------------------------------------------" print " " print "ok, here we go..." 
print " " # find the RPM build directory rpmbuild = os.path.join(projdir, "rpm-build") if not os.path.exists(rpmbuild): croak("no directory: %s" % rpmbuild) print "found rpm-build directory" # find the tarballs tarsearch = "%s/*.tar.gz" % rpmbuild tars = glob.glob(tarsearch) if len(tars) != 1: croak("expected to find just one tar.gz in %s, no luck") % rpmbuild tarfile = tars[0] print "found tarball: %s" % tarfile # find a version file, if any versionfile = None #versearch = os.path.join(projdir,"version") #if os.path.exists(versearch): # print "found a version file: %s" % versearch # versionfile = versearch #print "found version file: %s" % versionfile # find a specfile specsearch = "%s/*.spec" % projdir specs = glob.glob(specsearch) if len(specs) != 1: croak("need one and only one specfile in %s" % projdir) specfile = specs[0] print "found specfile: %s" % specfile # verify cvsdir exists if not os.path.exists(cvsdir): croak("can't find cvs directory: %s" % cvsdir) # store current directory topdir = os.getcwd() # do cvs update os.chdir(cvsdir) run("cvs update -d") os.chdir(topdir) # copy specfile and version file into CVS # plus upload tarball # and then commit for x in PROCESS_RELEASES: releasedir = os.path.join(cvsdir, x) rc = run("cp %s %s" % (specfile, releasedir)) if versionfile: rc = run("cp %s %s" % (versionfile, releasedir)) print "cd into %s" % releasedir os.chdir(releasedir) rc = run("make upload FILES=%s" % tarfile) os.chdir(cvsdir) run("cvs commit") # go back through each CVS directory and build stuff for x in PROCESS_RELEASES: releasedir = os.path.join(cvsdir, x) print "cd into %s" % releasedir os.chdir(releasedir) rc = run("make tag") rc = run("BUILD_FLAGS=\"--nowait\" make build",failok=True) print "---------------------------------------------" print "all done, assuming you didn't see anything weird" print "don't forget to visit https://admin.fedoraproject.org/updates" print " "
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-29 04:15
from django.db import migrations, models

# Recruitment states for an alternate player.
_ALTERNATE_STATUS_CHOICES = [
    ('waiting', 'Waiting'),
    ('contacted', 'Contacted'),
    ('accepted', 'Accepted'),
    ('declined', 'Declined'),
    ('unresponsive', 'Unresponsive'),
]

# Progress of the alternates search for one availability record.
_SEARCH_STATUS_CHOICES = [
    ('search_started', 'Search started'),
    ('all_contacted', 'All alternates contacted'),
]

# Streams a league notification can target.
_NOTIFICATION_TYPE_CHOICES = [
    ('mod', 'Moderation stream'),
    ('captains', 'Captains stream'),
]


class Migration(migrations.Migration):
    """Add alternates-manager fields and widen the notification type choices."""

    dependencies = [
        ('tournament', '0112_auto_20161127_1814'),
    ]

    operations = [
        migrations.AddField(
            model_name='alternate',
            name='status',
            field=models.CharField(blank=True, choices=_ALTERNATE_STATUS_CHOICES, max_length=31),
        ),
        migrations.AddField(
            model_name='playeravailability',
            name='alternate_status',
            field=models.CharField(blank=True, choices=_SEARCH_STATUS_CHOICES, max_length=31),
        ),
        migrations.AddField(
            model_name='season',
            name='enable_alternates_manager',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='leaguenotification',
            name='type',
            field=models.CharField(choices=_NOTIFICATION_TYPE_CHOICES, max_length=255),
        ),
    ]
unknown
codeparrot/codeparrot-clean
from django.shortcuts import render
from django import forms
import models
from bigtree import BigTree
from db_cbs_kv import CBS
from operator import itemgetter
import operator


class FormBuildsSelector(forms.Form):
    """Form with two drop-downs for picking the baseline and active builds."""

    def __init__(self, *args, **kwargs):
        builds_choices = self.get_build_choices()
        baseline_build_selected = kwargs.pop('baseline_build_selected', None)
        active_build_selected = kwargs.pop('active_build_selected', None)
        super(FormBuildsSelector, self).__init__(*args, **kwargs)
        self.fields['Baseline Build'] = forms.TypedChoiceField(
            choices=builds_choices, initial=baseline_build_selected)
        self.fields['Active Build'] = forms.TypedChoiceField(
            choices=builds_choices, initial=active_build_selected)

    def get_build_choices(self):
        """Return (value, value) choice tuples for every known build.

        BUGFIX: always return a list.  The original fell through and
        returned None when CBS.connect() failed, which then crashed
        TypedChoiceField in __init__.
        """
        cbs = CBS()
        if not cbs.connect():
            return []
        return [(value, value) for value in cbs.get_all_builds()]


def homeView(request):
    """Dashboard: per-category pass/fail/incomplete summary for two builds."""
    home_model = models.HomeModel()
    cbs = CBS()
    if cbs.connect():
        home_model.builds = cbs.get_all_builds()
        if request.method == 'POST':
            home_model.active_build = request.POST.get('Active Build', '')
            home_model.baseline_build = request.POST.get('Baseline Build', '')
        else:
            if 'a' in request.GET:
                home_model.active_build = request.GET['a']
            else:
                # Default to the newest build.  BUGFIX: guard the empty case --
                # the original indexed sorted(builds)[len(builds)-1] and raised
                # IndexError when no builds existed; fall back to the default
                # baseline instead.
                if home_model.builds:
                    home_model.active_build = sorted(home_model.builds)[-1]
                else:
                    home_model.active_build = cbs.default_baseline_build
            if 'b' in request.GET:
                home_model.baseline_build = request.GET['b']
            else:
                home_model.baseline_build = cbs.default_baseline_build

        buildSelectorForm = FormBuildsSelector(
            baseline_build_selected=home_model.baseline_build,
            active_build_selected=home_model.active_build)

        bigtree = BigTree(cbs, home_model.active_build, home_model.baseline_build)
        for category_node in bigtree.root:
            # Tally test outcomes for this category.
            total, passed, failed, incomplete = 0, 0, 0, 0
            for test_node in category_node.child_tests:
                total += 1
                if test_node.status == BigTree.STATUS_INCOMPLETE:
                    incomplete += 1
                elif test_node.status == BigTree.STATUS_PASSED:
                    passed += 1
                elif test_node.status == BigTree.STATUS_FAILED:
                    failed += 1
            home_model.summary.append({'name': category_node.name,
                                       'status': category_node.status,
                                       'passed': passed,
                                       'failed': failed,
                                       'incomplete': incomplete,
                                       'total': total})
        # Consistency: use the already-imported itemgetter everywhere instead
        # of mixing it with operator.itemgetter.
        home_model.summary.sort(key=itemgetter('name'), reverse=True)
        return render(request, "dashboard.html",
                      {"model": home_model, "form_buildsSelector": buildSelectorForm})
    home_model.debug_message = "Error connecting CBS"
    return render(request, "dashboard.html", {"model": home_model})


def composedView(request):
    """Detail page for one category: every test with its per-metric results."""
    composed_model = models.ComposedModel()
    cbs = CBS()
    if cbs.connect():
        composed_model.builds = cbs.get_all_builds()
        composed_model.category_name = request.GET['category']
        composed_model.active_build = request.GET['a']
        composed_model.baseline_build = request.GET['b']
        buildSelectorForm = FormBuildsSelector(
            baseline_build_selected=composed_model.baseline_build,
            active_build_selected=composed_model.active_build)
        bigtree = BigTree(cbs, composed_model.active_build, composed_model.baseline_build)
        for category in bigtree.root:
            if category.name == composed_model.category_name:
                tests = []
                for test in category.child_tests:
                    metrics = [{'name': metric.name,
                                'description': metric.description,
                                'status': metric.status,
                                'baseline': metric.b_value,
                                'current': metric.a_value,
                                'threshold': metric.threshold}
                               for metric in test.child_metrics]
                    tests.append({'name': test.name,
                                  'active_datetime': test.active_datetime,
                                  'baseline_datetime': test.baseline_datetime,
                                  'title': test.title,
                                  'status': test.status,
                                  'active_snapshots': test.active_snapshots,
                                  'baseline_snapshots': test.baseline_snapshots,
                                  'metrics': metrics})
                composed_model.summary = tests
        # Single sorted() assignment replaces the original assign-then-resort
        # pair; sorting an empty/default summary is a no-op.
        composed_model.summary = sorted(composed_model.summary, key=itemgetter('title'))
        return render(request, "details.html",
                      {"model": composed_model, "form_buildsSelector": buildSelectorForm})
    composed_model.debug_message = "Error connecting CBS"
    return render(request, "dashboard.html", {"model": composed_model})


def historyView(request):
    """History page: past results of a single test within a category."""
    history_model = models.HistoryModel()
    cbs = CBS()
    if cbs.connect():
        history_model.builds = cbs.get_all_builds()
        history_model.category_name = request.GET['category']
        history_model.test_name = request.GET['test']
        history_model.active_build = request.GET['a']
        history_model.baseline_build = request.GET['b']
        history_model.summary = cbs.get_history_by_test(
            history_model.category_name, history_model.test_name)
        buildSelectorForm = FormBuildsSelector(
            baseline_build_selected=history_model.baseline_build,
            active_build_selected=history_model.active_build)
        return render(request, "history.html",
                      {"model": history_model, "form_buildsSelector": buildSelectorForm})
    history_model.debug_message = "Error connecting CBS"
    return render(request, "dashboard.html", {"model": history_model})
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
module: rax_clb_ssl
short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
description:
- Set up, reconfigure, or remove SSL termination for an existing load balancer.
version_added: "2.0"
options:
  loadbalancer:
    description:
    - Name or ID of the load balancer on which to manage SSL termination.
    required: true
  state:
    description:
    - If set to "present", SSL termination will be added to this load balancer.
    - If "absent", SSL termination will be removed instead.
    choices:
    - present
    - absent
    default: present
  enabled:
    description:
    - If set to "false", temporarily disable SSL termination without discarding
    - existing credentials.
    default: true
  private_key:
    description:
    - The private SSL key as a string in PEM format.
  certificate:
    description:
    - The public SSL certificates as a string in PEM format.
  intermediate_certificate:
    description:
    - One or more intermediate certificate authorities as a string in PEM
    - format, concatenated into a single string.
  secure_port:
    description:
    - The port to listen for secure traffic.
    default: 443
  secure_traffic_only:
    description:
    - If "true", the load balancer will *only* accept secure traffic.
    default: false
  https_redirect:
    description:
    - If "true", the load balancer will redirect HTTP traffic to HTTPS.
    - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
    - termination is also applied or removed.
  wait:
    description:
    - Wait for the balancer to be in state "running" before turning.
    default: false
  wait_timeout:
    description:
    - How long before "wait" gives up, in seconds.
    default: 300
author: Ash Wilson
extends_documentation_fragment: rackspace
'''

EXAMPLES = '''
- name: Enable SSL termination on a load balancer
  rax_clb_ssl:
    loadbalancer: the_loadbalancer
    state: present
    private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
    certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
    intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
    secure_traffic_only: true
    wait: true

- name: Disable SSL termination
  rax_clb_ssl:
    loadbalancer: "{{ registered_lb.balancer.id }}"
    state: absent
    wait: true
'''

try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False


def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
                            certificate, intermediate_certificate, secure_port,
                            secure_traffic_only, https_redirect,
                            wait, wait_timeout):
    """Apply, reconfigure, or remove SSL termination on one load balancer.

    Exits the module via module.exit_json/fail_json; never returns normally.
    """
    # Validate arguments.
    if state == 'present':
        if not private_key:
            module.fail_json(msg="private_key must be provided.")
        else:
            private_key = private_key.strip()

        if not certificate:
            module.fail_json(msg="certificate must be provided.")
        else:
            certificate = certificate.strip()

    # BUGFIX: use floor division so `attempts` stays an int under Python 3
    # ("wait_timeout / 5" is a float there); identical result under Python 2.
    attempts = wait_timeout // 5

    # Locate the load balancer.
    balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
    existing_ssl = balancer.get_ssl_termination()

    changed = False

    if state == 'present':
        # Apply or reconfigure SSL termination on the load balancer.
        ssl_attrs = dict(
            securePort=secure_port,
            privatekey=private_key,
            certificate=certificate,
            intermediateCertificate=intermediate_certificate,
            enabled=enabled,
            secureTrafficOnly=secure_traffic_only
        )

        needs_change = False

        if existing_ssl:
            for ssl_attr, value in ssl_attrs.items():
                if ssl_attr == 'privatekey':
                    # The private key is not included in get_ssl_termination's
                    # output (as it shouldn't be). Also, if you're changing the
                    # private key, you'll also be changing the certificate,
                    # so we don't lose anything by not checking it.
                    continue

                if value is not None and existing_ssl.get(ssl_attr) != value:
                    needs_change = True
        else:
            needs_change = True

        if needs_change:
            try:
                balancer.add_ssl_termination(**ssl_attrs)
            except pyrax.exceptions.PyraxException as e:
                module.fail_json(msg='%s' % e.message)
            changed = True
    elif state == 'absent':
        # Remove SSL termination if it's already configured.
        if existing_ssl:
            try:
                balancer.delete_ssl_termination()
            except pyrax.exceptions.PyraxException as e:
                module.fail_json(msg='%s' % e.message)
            changed = True

    if https_redirect is not None and balancer.httpsRedirect != https_redirect:
        if changed:
            # This wait is unavoidable because load balancers are immutable
            # while the SSL termination changes above are being applied.
            pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)

        try:
            balancer.update(httpsRedirect=https_redirect)
        except pyrax.exceptions.PyraxException as e:
            module.fail_json(msg='%s' % e.message)
        changed = True

    if changed and wait:
        pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)

    balancer.get()
    new_ssl_termination = balancer.get_ssl_termination()

    # Intentionally omit the private key from the module output, so you don't
    # accidentally echo it with `ansible-playbook -v` or `debug`, and the
    # certificate, which is just long. Convert other attributes to snake_case
    # and include https_redirect at the top-level.
    if new_ssl_termination:
        new_ssl = dict(
            enabled=new_ssl_termination['enabled'],
            secure_port=new_ssl_termination['securePort'],
            secure_traffic_only=new_ssl_termination['secureTrafficOnly']
        )
    else:
        new_ssl = None

    result = dict(
        changed=changed,
        https_redirect=balancer.httpsRedirect,
        ssl_termination=new_ssl,
        balancer=rax_to_dict(balancer, 'clb')
    )
    success = True

    if balancer.status == 'ERROR':
        result['msg'] = '%s failed to build' % balancer.id
        success = False
    elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
        result['msg'] = 'Timeout waiting on %s' % balancer.id
        success = False

    if success:
        module.exit_json(**result)
    else:
        module.fail_json(**result)


def main():
    """Ansible entry point: build the argument spec and dispatch."""
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(
        loadbalancer=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        enabled=dict(type='bool', default=True),
        private_key=dict(),
        certificate=dict(),
        intermediate_certificate=dict(),
        secure_port=dict(type='int', default=443),
        secure_traffic_only=dict(type='bool', default=False),
        https_redirect=dict(type='bool'),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300)
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module.')

    loadbalancer = module.params.get('loadbalancer')
    state = module.params.get('state')
    enabled = module.boolean(module.params.get('enabled'))
    private_key = module.params.get('private_key')
    certificate = module.params.get('certificate')
    intermediate_certificate = module.params.get('intermediate_certificate')
    secure_port = module.params.get('secure_port')
    secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
    https_redirect = module.boolean(module.params.get('https_redirect'))
    wait = module.boolean(module.params.get('wait'))
    wait_timeout = module.params.get('wait_timeout')

    setup_rax_module(module, pyrax)

    cloud_load_balancer_ssl(
        module, loadbalancer, state, enabled, private_key, certificate,
        intermediate_certificate, secure_port, secure_traffic_only,
        https_redirect, wait, wait_timeout
    )

from ansible.module_utils.basic import *
from ansible.module_utils.rax import *

if __name__ == '__main__':
    main()
unknown
codeparrot/codeparrot-clean
import traceback

from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification

log = CPLog(__name__)

autoload = 'Toasty'


class Toasty(Notification):
    """Push notifications through the Supertoasty service."""

    urls = {
        'api': 'http://api.supertoasty.com/notify/%s?%s'
    }

    def notify(self, message = '', data = None, listener = None):
        """Send `message` to the configured Supertoasty device.

        Returns True on success, False on any failure (which is logged).

        `data` and `listener` are part of the Notification interface but are
        not used by this provider: the payload is built from scratch.  The
        original body's "if not data: data = {}" was dead code -- `data` was
        unconditionally overwritten on the next line -- so it has been removed.
        """
        payload = {
            'title': self.default_title,
            'text': toUnicode(message),
            'sender': toUnicode("CouchPotato"),
            'image': 'https://raw.github.com/RuudBurger/CouchPotatoServer/master/couchpotato/static/images/homescreen.png',
        }

        try:
            self.urlopen(self.urls['api'] % (self.conf('api_key'), tryUrlencode(payload)),
                         show_error = False)
            return True
        except:
            # Best-effort: never let a notification failure propagate.
            log.error('Toasty failed: %s', traceback.format_exc())
            return False


config = [{
    'name': 'toasty',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'toasty',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'api_key',
                    'label': 'Device ID',
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true

require "cases/helper"
require "models/topic"
require "models/person"

# Exercises validates_inclusion_of with string/time/date/datetime ranges
# (including beginless and endless), arrays, :within, lambdas, symbols,
# custom messages and plain Ruby classes.
class InclusionValidationTest < ActiveModel::TestCase
  def teardown
    Topic.clear_validators!
  end

  def test_validates_inclusion_of_range
    Topic.validates_inclusion_of(:title, in: "aaa".."bbb")

    assert Topic.new("title" => "bbc", "content" => "abc").invalid?
    assert Topic.new("title" => "aa", "content" => "abc").invalid?
    assert Topic.new("title" => "aaab", "content" => "abc").invalid?
    assert Topic.new("title" => "aaa", "content" => "abc").valid?
    assert Topic.new("title" => "abc", "content" => "abc").valid?
    assert Topic.new("title" => "bbb", "content" => "abc").valid?
  end

  def test_validates_inclusion_of_time_range
    earliest = 1.year.ago
    latest = Time.now
    Topic.validates_inclusion_of(:created_at, in: earliest..latest)

    assert Topic.new(title: "aaa", created_at: 2.years.ago).invalid?
    assert Topic.new(title: "aaa", created_at: 3.months.ago).valid?
    assert Topic.new(title: "aaa", created_at: 37.weeks.from_now).invalid?
    assert Topic.new(title: "aaa", created_at: earliest).valid?
    assert Topic.new(title: "aaa", created_at: latest).valid?
  end

  def test_validates_inclusion_of_date_range
    earliest = 1.year.until(Date.today)
    latest = Date.today
    Topic.validates_inclusion_of(:created_at, in: earliest..latest)

    assert Topic.new(title: "aaa", created_at: 2.years.until(Date.today)).invalid?
    assert Topic.new(title: "aaa", created_at: 3.months.until(Date.today)).valid?
    assert Topic.new(title: "aaa", created_at: 37.weeks.since(Date.today)).invalid?
    assert Topic.new(title: "aaa", created_at: 1.year.until(Date.today)).valid?
    assert Topic.new(title: "aaa", created_at: Date.today).valid?
    assert Topic.new(title: "aaa", created_at: earliest).valid?
    assert Topic.new(title: "aaa", created_at: latest).valid?
  end

  def test_validates_inclusion_of_date_time_range
    earliest = 1.year.until(DateTime.current)
    latest = DateTime.current
    Topic.validates_inclusion_of(:created_at, in: earliest..latest)

    assert Topic.new(title: "aaa", created_at: 2.years.until(DateTime.current)).invalid?
    assert Topic.new(title: "aaa", created_at: 3.months.until(DateTime.current)).valid?
    assert Topic.new(title: "aaa", created_at: 37.weeks.since(DateTime.current)).invalid?
    assert Topic.new(title: "aaa", created_at: earliest).valid?
    assert Topic.new(title: "aaa", created_at: latest).valid?
  end

  def test_validates_inclusion_of_beginless_numeric_range
    latest = 1000
    Topic.validates_inclusion_of(:raw_price, in: ..latest)

    assert Topic.new(title: "aaa", price: -100).valid?
    assert Topic.new(title: "aaa", price: 0).valid?
    assert Topic.new(title: "aaa", price: 100).valid?
    assert Topic.new(title: "aaa", price: 2000).invalid?
    assert Topic.new(title: "aaa", price: latest).valid?
  end

  def test_validates_inclusion_of_endless_numeric_range
    earliest = 0
    Topic.validates_inclusion_of(:raw_price, in: earliest..)

    assert Topic.new(title: "aaa", price: -1).invalid?
    assert Topic.new(title: "aaa", price: -100).invalid?
    assert Topic.new(title: "aaa", price: 100).valid?
    assert Topic.new(title: "aaa", price: 2000).valid?
    assert Topic.new(title: "aaa", price: earliest).valid?
  end

  def test_validates_inclusion_of
    Topic.validates_inclusion_of(:title, in: %w( a b c d e f g ))

    assert Topic.new("title" => "a!", "content" => "abc").invalid?
    assert Topic.new("title" => "a b", "content" => "abc").invalid?
    assert Topic.new("title" => nil, "content" => "def").invalid?

    topic = Topic.new("title" => "a", "content" => "I know you are but what am I?")
    assert topic.valid?
    topic.title = "uhoh"
    assert topic.invalid?
    assert topic.errors[:title].any?
    assert_equal ["is not included in the list"], topic.errors[:title]

    assert_raise(ArgumentError) { Topic.validates_inclusion_of(:title, in: nil) }
    assert_raise(ArgumentError) { Topic.validates_inclusion_of(:title, in: 0) }

    assert_nothing_raised { Topic.validates_inclusion_of(:title, in: "hi!") }
    assert_nothing_raised { Topic.validates_inclusion_of(:title, in: {}) }
    assert_nothing_raised { Topic.validates_inclusion_of(:title, in: []) }
  end

  def test_validates_inclusion_of_with_allow_nil
    Topic.validates_inclusion_of(:title, in: %w( a b c d e f g ), allow_nil: true)

    assert Topic.new("title" => "a!", "content" => "abc").invalid?
    assert Topic.new("title" => "", "content" => "abc").invalid?
    assert Topic.new("title" => nil, "content" => "abc").valid?
  end

  def test_validates_inclusion_of_with_formatted_message
    Topic.validates_inclusion_of(:title, in: %w( a b c d e f g ), message: "option %{value} is not in the list")

    assert Topic.new("title" => "a", "content" => "abc").valid?

    topic = Topic.new("title" => "uhoh", "content" => "abc")
    assert topic.invalid?
    assert topic.errors[:title].any?
    assert_equal ["option uhoh is not in the list"], topic.errors[:title]
  end

  def test_validates_inclusion_of_with_within_option
    Topic.validates_inclusion_of(:title, within: %w( a b c d e f g ))

    assert Topic.new("title" => "a", "content" => "abc").valid?

    topic = Topic.new("title" => "uhoh", "content" => "abc")
    assert topic.invalid?
    assert topic.errors[:title].any?
  end

  def test_validates_inclusion_of_for_ruby_class
    Person.validates_inclusion_of :karma, in: %w( abe monkey )

    person = Person.new
    person.karma = "Lifo"
    assert person.invalid?
    assert_equal ["is not included in the list"], person.errors[:karma]

    person.karma = "monkey"
    assert person.valid?
  ensure
    Person.clear_validators!
  end

  def test_validates_inclusion_of_with_lambda
    Topic.validates_inclusion_of :title, in: lambda { |topic| topic.author_name == "sikachu" ? %w( monkey elephant ) : %w( abe wasabi ) }

    topic = Topic.new
    topic.title = "wasabi"
    topic.author_name = "sikachu"
    assert topic.invalid?

    topic.title = "elephant"
    assert topic.valid?
  end

  def test_validates_inclusion_of_with_lambda_without_arguments
    Topic.validates_inclusion_of :title, in: lambda { %w( monkey elephant ) }

    topic = Topic.new
    topic.title = "wasabi"
    assert topic.invalid?

    topic.title = "elephant"
    assert topic.valid?
  end

  def test_validates_inclusion_of_with_symbol
    Person.validates_inclusion_of :karma, in: :available_karmas

    person = Person.new
    person.karma = "Lifo"

    def person.available_karmas
      %w()
    end

    assert person.invalid?
    assert_equal ["is not included in the list"], person.errors[:karma]

    person = Person.new
    person.karma = "Lifo"

    def person.available_karmas
      %w(Lifo)
    end

    assert person.valid?
  ensure
    Person.clear_validators!
  end

  def test_validates_inclusion_of_with_array_value
    Person.validates_inclusion_of :karma, in: %w( abe monkey )

    person = Person.new
    person.karma = %w(Lifo monkey)
    assert person.invalid?
    assert_equal ["is not included in the list"], person.errors[:karma]

    person = Person.new
    person.karma = %w(abe monkey)
    assert person.valid?
  ensure
    Person.clear_validators!
  end
end
ruby
github
https://github.com/rails/rails
activemodel/test/cases/validations/inclusion_validation_test.rb
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cm import ( v1 "k8s.io/api/core/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" ) type InternalContainerLifecycle interface { PreCreateContainer(logger klog.Logger, pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error PreStartContainer(logger klog.Logger, pod *v1.Pod, container *v1.Container, containerID string) error PostStopContainer(logger klog.Logger, containerID string) error } // Implements InternalContainerLifecycle interface. type internalContainerLifecycleImpl struct { cpuManager cpumanager.Manager memoryManager memorymanager.Manager topologyManager topologymanager.Manager } func (i *internalContainerLifecycleImpl) PreStartContainer(logger klog.Logger, pod *v1.Pod, container *v1.Container, containerID string) error { if i.cpuManager != nil { i.cpuManager.AddContainer(logger, pod, container, containerID) } if i.memoryManager != nil { i.memoryManager.AddContainer(logger, pod, container, containerID) } i.topologyManager.AddContainer(pod, container, containerID) return nil } func (i *internalContainerLifecycleImpl) PostStopContainer(logger klog.Logger, containerID string) error { return i.topologyManager.RemoveContainer(containerID) }
go
github
https://github.com/kubernetes/kubernetes
pkg/kubelet/cm/internal_container_lifecycle.go
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type

# Module registration metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "network",
}

# DOCUMENTATION/EXAMPLES/RETURN below are parsed as YAML by ansible-doc.
# This file is a documentation-only stub: the actual copy logic for net_put
# is presumably implemented in the corresponding action plugin (not visible
# here -- confirm against the collection source).
DOCUMENTATION = """module: net_put
author: Deepak Agrawal (@dagrawal)
short_description: Copy a file from Ansible Controller to a network device
description:
- This module provides functionality to copy file from Ansible controller to network
  devices.
extends_documentation_fragment:
- ansible.netcommon.network_agnostic
options:
  src:
    description:
    - Specifies the source file. The path to the source file can either be the full
      path on the Ansible control host or a relative path from the playbook or role
      root directory.
    required: true
  protocol:
    description:
    - Protocol used to transfer file.
    default: scp
    choices:
    - scp
    - sftp
  dest:
    description:
    - Specifies the destination file. The path to destination file can either be the
      full path or relative path as supported by network_os.
    default:
    - Filename from src and at default directory of user shell on network_os.
    required: false
  mode:
    description:
    - Set the file transfer mode. If mode is set to I(text) then I(src) file will
      go through Jinja2 template engine to replace any vars if present in the src
      file. If mode is set to I(binary) then file will be copied as it is to destination
      device.
    default: binary
    choices:
    - binary
    - text
requirements:
- scp
notes:
- Some devices need specific configurations to be enabled before scp can work These
  configuration should be pre-configured before using this module e.g ios - C(ip scp
  server enable).
- User privilege to do scp on network device should be pre-configured e.g. ios - need
  user privilege 15 by default for allowing scp.
- Default destination of source file.
"""

# Usage examples rendered by ansible-doc.
EXAMPLES = """
- name: copy file from ansible controller to a network device
  net_put:
    src: running_cfg_ios1.txt

- name: copy file at root dir of flash in slot 3 of sw1(ios)
  net_put:
    src: running_cfg_sw1.txt
    protocol: sftp
    dest : flash3:/running_cfg_sw1.txt
"""

# No return values are documented for this module.
RETURN = """
"""
unknown
codeparrot/codeparrot-clean
"""Configuration classes.""" from __future__ import absolute_import, print_function import os import sys from lib.util import ( CommonConfig, is_shippable, docker_qualify_image, ) from lib.metadata import ( Metadata, ) class EnvironmentConfig(CommonConfig): """Configuration common to all commands which execute in an environment.""" def __init__(self, args, command): """ :type args: any """ super(EnvironmentConfig, self).__init__(args) self.command = command self.local = args.local is True if args.tox is True or args.tox is False or args.tox is None: self.tox = args.tox is True self.tox_args = 0 self.python = args.python if 'python' in args else None # type: str else: self.tox = True self.tox_args = 1 self.python = args.tox # type: str self.docker = docker_qualify_image(args.docker) # type: str self.remote = args.remote # type: str self.docker_privileged = args.docker_privileged if 'docker_privileged' in args else False # type: bool self.docker_util = docker_qualify_image(args.docker_util if 'docker_util' in args else '') # type: str self.docker_pull = args.docker_pull if 'docker_pull' in args else False # type: bool self.tox_sitepackages = args.tox_sitepackages # type: bool self.remote_stage = args.remote_stage # type: str self.remote_aws_region = args.remote_aws_region # type: str self.remote_terminate = args.remote_terminate # type: str self.requirements = args.requirements # type: bool if self.python == 'default': self.python = '.'.join(str(i) for i in sys.version_info[:2]) self.python_version = self.python or '.'.join(str(i) for i in sys.version_info[:2]) self.delegate = self.tox or self.docker or self.remote if self.delegate: self.requirements = True class TestConfig(EnvironmentConfig): """Configuration common to all test commands.""" def __init__(self, args, command): """ :type args: any :type command: str """ super(TestConfig, self).__init__(args, command) self.coverage = args.coverage # type: bool self.coverage_label = args.coverage_label # type: str 
self.include = args.include # type: list [str] self.exclude = args.exclude # type: list [str] self.require = args.require # type: list [str] self.changed = args.changed # type: bool self.tracked = args.tracked # type: bool self.untracked = args.untracked # type: bool self.committed = args.committed # type: bool self.staged = args.staged # type: bool self.unstaged = args.unstaged # type: bool self.changed_from = args.changed_from # type: str self.changed_path = args.changed_path # type: list [str] self.lint = args.lint if 'lint' in args else False # type: bool self.junit = args.junit if 'junit' in args else False # type: bool self.failure_ok = args.failure_ok if 'failure_ok' in args else False # type: bool self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata() self.metadata_path = None class ShellConfig(EnvironmentConfig): """Configuration for the shell command.""" def __init__(self, args): """ :type args: any """ super(ShellConfig, self).__init__(args, 'shell') class SanityConfig(TestConfig): """Configuration for the sanity command.""" def __init__(self, args): """ :type args: any """ super(SanityConfig, self).__init__(args, 'sanity') self.test = args.test # type: list [str] self.skip_test = args.skip_test # type: list [str] self.list_tests = args.list_tests # type: bool if args.base_branch: self.base_branch = args.base_branch # str elif is_shippable(): self.base_branch = os.environ.get('BASE_BRANCH', '') # str if self.base_branch: self.base_branch = 'origin/%s' % self.base_branch else: self.base_branch = '' class IntegrationConfig(TestConfig): """Configuration for the integration command.""" def __init__(self, args, command): """ :type args: any :type command: str """ super(IntegrationConfig, self).__init__(args, command) self.start_at = args.start_at # type: str self.start_at_task = args.start_at_task # type: str self.allow_destructive = args.allow_destructive if 'allow_destructive' in args else False # type: bool self.retry_on_error = 
args.retry_on_error # type: bool self.continue_on_error = args.continue_on_error # type: bool self.debug_strategy = args.debug_strategy # type: bool self.changed_all_target = args.changed_all_target # type: str self.list_targets = args.list_targets # type: bool self.tags = args.tags self.skip_tags = args.skip_tags self.diff = args.diff if self.list_targets: self.explain = True class PosixIntegrationConfig(IntegrationConfig): """Configuration for the posix integration command.""" def __init__(self, args): """ :type args: any """ super(PosixIntegrationConfig, self).__init__(args, 'integration') class WindowsIntegrationConfig(IntegrationConfig): """Configuration for the windows integration command.""" def __init__(self, args): """ :type args: any """ super(WindowsIntegrationConfig, self).__init__(args, 'windows-integration') self.windows = args.windows # type: list [str] if self.windows: self.allow_destructive = True class NetworkIntegrationConfig(IntegrationConfig): """Configuration for the network integration command.""" def __init__(self, args): """ :type args: any """ super(NetworkIntegrationConfig, self).__init__(args, 'network-integration') self.platform = args.platform # type: list [str] self.inventory = args.inventory # type: str class UnitsConfig(TestConfig): """Configuration for the units command.""" def __init__(self, args): """ :type args: any """ super(UnitsConfig, self).__init__(args, 'units') self.collect_only = args.collect_only # type: bool class CompileConfig(TestConfig): """Configuration for the compile command.""" def __init__(self, args): """ :type args: any """ super(CompileConfig, self).__init__(args, 'compile') class CoverageConfig(EnvironmentConfig): """Configuration for the coverage command.""" def __init__(self, args): """ :type args: any """ super(CoverageConfig, self).__init__(args, 'coverage') self.group_by = frozenset(args.group_by) if 'group_by' in args and args.group_by else set() # type: frozenset [str] self.all = args.all if 'all' in 
args else False # type: bool self.stub = args.stub if 'stub' in args else False # type: bool class CoverageReportConfig(CoverageConfig): """Configuration for the coverage report command.""" def __init__(self, args): """ :type args: any """ super(CoverageReportConfig, self).__init__(args) self.show_missing = args.show_missing # type: bool
unknown
codeparrot/codeparrot-clean
/* * Copyright (C) 2011 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package com.google.common.util.concurrent; import com.google.common.annotations.GwtCompatible; import java.util.concurrent.Future; import org.jspecify.annotations.Nullable; /** * Transforms a value, possibly asynchronously. For an example usage and more information, see * {@link Futures#transformAsync(ListenableFuture, AsyncFunction, Executor)}. * * @author Chris Povirk * @since 11.0 */ @GwtCompatible public interface AsyncFunction<I extends @Nullable Object, O extends @Nullable Object> { /** * Returns an output {@code Future} to use in place of the given {@code input}. The output {@code * Future} need not be {@linkplain Future#isDone done}, making {@code AsyncFunction} suitable for * asynchronous derivations. * * <p>Throwing an exception from this method is equivalent to returning a failing {@code Future}. */ ListenableFuture<O> apply(@ParametricNullness I input) throws Exception; }
java
github
https://github.com/google/guava
android/guava/src/com/google/common/util/concurrent/AsyncFunction.java
## Input ```javascript import {useNoAlias} from 'shared-runtime'; function useFoo(props: {value: {x: string; y: string} | null}) { const value = props.value; return useNoAlias(value?.x, value?.y) ?? {}; } export const FIXTURE_ENTRYPONT = { fn: useFoo, props: [{value: null}], }; ``` ## Code ```javascript import { useNoAlias } from "shared-runtime"; function useFoo(props) { const value = props.value; return useNoAlias(value?.x, value?.y) ?? {}; } export const FIXTURE_ENTRYPONT = { fn: useFoo, props: [{ value: null }], }; ``` ### Eval output (kind: exception) Fixture not implemented
unknown
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/optional-call-chain-in-logical-expr.expect.md
""" The potential fields of a triaxial ellipsoid. """ from __future__ import division import numpy as np from scipy import linalg from ..constants import SI2MGAL, G, CM, T2NT, SI2EOTVOS, PERM_FREE_SPACE from .. import utils import scipy.special def bx_c(xp,yp,zp,inten,inc,dec,ellipsoids): ''' Calculates the X component of the magnetic field generated by n-ellipsoid. Parameters: * xp,yp,zp: arrays Grid of observation points x, y, and z. * inten,inc,dec: floats Intensity, inclination and declination of the Earth's magnetic field. * ellipsoids: list of :class:`fatiando.mesher.Sphere` Ellipsoid model. Returns: * bx: array The X component of the magnetic field generated by n-ellipsoid. ''' if xp.shape != yp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same shape!") size = len(xp) res = np.zeros(size, dtype=np.float) ctemag = 100. for ellipsoid in ellipsoids: b1,b2,b3,V,N1,N2,N3,JRD_ang = ellipsoid_def (xp,yp,zp,inten,inc,dec,ellipsoid) bx = b1*V[0,0]+b2*V[0,1]+b3*V[0,2] res += bx res = res*ctemag return res def by_c(xp,yp,zp,inten,inc,dec,ellipsoids): ''' Calculates the Y component of the magnetic field generated by n-ellipsoid. Parameters: * xp,yp,zp: arrays Grid of observation points x, y, and z. * inten,inc,dec: floats Intensity, inclination and declination of the Earth's magnetic field. * ellipsoids: list of :class:`fatiando.mesher.Sphere` Ellipsoid model. Returns: * by: array The Y component of the magnetic field generated by n-ellipsoid. ''' if xp.shape != yp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same shape!") size = len(xp) res = np.zeros(size, dtype=np.float) ctemag = 100. for ellipsoid in ellipsoids: b1,b2,b3,V,N1,N2,N3,JRD_ang = ellipsoid_def (xp,yp,zp,inten,inc,dec,ellipsoid) by = b1*V[1,0]+b2*V[1,1]+b3*V[1,2] res += by res = res*ctemag return res def bz_c(xp,yp,zp,inten,inc,dec,ellipsoids): ''' Calculates the Z component of the magnetic field generated by n-ellipsoid. 
Parameters: * xp,yp,zp: arrays Grid of observation points x, y, and z. * inten,inc,dec: floats Intensity, inclination and declination of the Earth's magnetic field. * ellipsoids: list of :class:`fatiando.mesher.Sphere` Ellipsoid model. Returns: * bz: array The Z component of the magnetic field generated by n-ellipsoid. ''' if xp.shape != yp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same shape!") size = len(xp) res = np.zeros(size, dtype=np.float) ctemag = 100. for ellipsoid in ellipsoids: b1,b2,b3,V,N1,N2,N3,JRD_ang = ellipsoid_def (xp,yp,zp,inten,inc,dec,ellipsoid) bz = b1*V[2,0]+b2*V[2,1]+b3*V[2,2] res += bz res = res*ctemag return res def tf_c(xp,yp,zp,inten,inc,dec,ellipsoids): ''' Calculates the approximated total-field anomaly generated by n-ellipsoid. Parameters: * xp,yp,zp: arrays Grid of observation points x, y, and z. * inten,inc,dec: floats Intensity, inclination and declination of the Earth's magnetic field. * ellipsoids: list of :class:`fatiando.mesher.Sphere` Ellipsoid model. Returnss: * tf : array The total-field anomaly ''' if xp.shape != yp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same shape!") size = len(xp) res = np.zeros(size, dtype=np.float) ctemag = 100. for ellipsoid in ellipsoids: b1,b2,b3,V,N1,N2,N3,JRD_ang = ellipsoid_def (xp,yp,zp,inten,inc,dec,ellipsoid) bx = (b1*V[0,0]+b2*V[0,1]+b3*V[0,2])*ctemag by = (b1*V[1,0]+b2*V[1,1]+b3*V[1,2])*ctemag bz = (b1*V[2,0]+b2*V[2,1]+b3*V[2,2])*ctemag tf = bx*np.cos(np.deg2rad(inc))*np.cos(np.deg2rad(dec)) + by*np.cos\ (np.deg2rad(inc))*np.sin(np.deg2rad(dec)) + bz*np.sin(np.deg2rad(inc)) res += tf res = res return res,N1,N2,N3,JRD_ang def ellipsoid_def (xp,yp,zp,inten,inc,dec,ellipsoid): ''' Calculate the potential fields of a homogeneous ellipsoid. **Magnetic** Calculates the magnetic effect produced by a triaxial, a prolate or/and an oblate ellipsoid. The functions are based on Clark et al. (1986). 
''' axis = ellipsoid.axis center = ellipsoid.center alpha, delta, gamma = structural_angles(ellipsoid.strike, ellipsoid.dip, ellipsoid.rake) V = ellipsoid.V(angles = [alpha, delta, gamma]) # Remanence values intensity_rem = ellipsoid.props['remanence'][0] incli_rem = ellipsoid.props['remanence'][1] decli_rem = ellipsoid.props['remanence'][2] ln, mn, nn = utils.dircos(incli_rem, decli_rem) k_int = np.array([ellipsoid.props['k'][0],ellipsoid.props['k'][1],\ ellipsoid.props['k'][2]]) #isotropic case if k_int[0] == (k_int[1] and k_int[2]): km = np.diag(k_int) #anisotropic case else: U = ellipsoid.V(angles = [ellipsoid.props['k'][3],\ ellipsoid.props['k'][4],ellipsoid.props['k'][5]]) km = k_matrix(U,V,np.diag(k_int)) print km # Ellipsoid cartesian body coordinates x1,x2,x3 = x_e(xp,yp,zp,center,V) # Largest real root of the cubic equation (Lambda) lamb,teta,q,p,p2,p1,p0 = lamb_T(axis,x1,x2,x3) # Derivatves of lambda dlambx1,dlambx2,dlambx3 = dlambx_T(axis,x1,x2,x3,lamb) # Calculate the eliptical integral parameters F,E,F2,E2,k,theta_l = legendre_integrals(axis,lamb) N1,N2,N3 = N_desmagT(axis,F2,E2) # Integrals calculations A, B, C = potential_integrals(axis,k,theta_l,F,E) # Geometry for the magnetic field m11,m12,m13,m21,m22,m23,m31,m32,m33, cte, V1, V2, V3 = mx(axis,x1,x2,x3,\ dlambx1,dlambx2,dlambx3,A,B,C,lamb) # Earth's field and total body magnetization (including demagnetization) #in the body's coordinate JN = JN_e (intensity_rem,ln,mn,nn,V) lt,mt,nt = utils.dircos(inc, dec) Ft = F_e (inten,lt,mt,nt,V) JR = JR_e (km,JN,Ft) JRD = JRD_e (km,N1,N2,N3,JR) JRD_carte = (V).dot(JRD) JRD_ang = utils.vec2ang(JRD_carte) # Components of the magnetic field in the body coordinates B1 = B1_e (m11,m12,m13,JRD,axis[0],axis[1],axis[2]) B2 = B2_e (m21,m22,m23,JRD,axis[0],axis[1],axis[2]) B3 = B3_e (m31,m32,m33,JRD,axis[0],axis[1],axis[2]) return B1,B2,B3,V,N1,N2,N3,JRD_ang def structural_angles(strike, dip, rake): ''' Calculates the orientation angles alpha, gamma and delta (Clark 
et al., 1986) as functions of the geological angles strike, dip and rake (Clark et al., 1986; Allmendinger et al., 2012). The function implements the formulas presented by Clark et al. (1986). Parameters: *strike: float strike direction (in degrees). *dip: float true dip (in degrees). *rake: float angle between the strike and the semi-axis a of the body (in degrees). Returns: *alpha, gamma, delta: float, float, float orientation angles (in radians) defined according to Clark et al. (1986). References: Clark, D., Saul, S., and Emerson, D.: Magnetic and gravity anomalies of a triaxial ellipsoid, Exploration Geophysics, 17, 189-200, 1986. Allmendinger, R., Cardozo, N., and Fisher, D. M.: Structural geology algorithms : vectors and tensors, Cambridge University Press, 2012. ''' strike_r = np.deg2rad(strike) cos_dip = np.cos(np.deg2rad(dip)) sin_dip = np.sin(np.deg2rad(dip)) cos_rake = np.cos(np.deg2rad(rake)) sin_rake = np.sin(np.deg2rad(rake)) aux = sin_dip*sin_rake aux1 = cos_rake/np.sqrt(1 - aux*aux) aux2 = sin_dip*cos_rake if aux1 > 1.: aux1 = 1. if aux1 < -1.: aux1 = -1. alpha = strike_r - np.arccos(aux1) if aux2 != 0: gamma = -np.arctan(cos_dip/aux2) else: if cos_dip >= 0: gamma = np.pi/2 if cos_dip <= 0: gamma = -np.pi/2 delta = np.arcsin(aux) assert delta <= np.pi/2, 'delta must be lower than \ or equalt to 90 degrees' assert (gamma >= -np.pi/2) and (gamma <= np.pi/2), 'gamma must lie between \ -90 and 90 degrees.' return alpha, gamma, delta def x_e (xp,yp,zp,center,V): ''' Calculates the new coordinates with origin at the center of the ellipsoid. Parameters: * xp,yp,zp: arrays Grid of observation points x, y, and z. * center: float Origin of the center of the ellipsoid. * V: array Matrix of conversion. Returns: * x1, x2, x3: arrays The three grid points of the body's coordinates. 
''' x1 = (xp-center[0])*V[0,0]+(yp-center[1])*V[1,0]-(zp+center[2])*V[2,0] x2 = (xp-center[0])*V[0,1]+(yp-center[1])*V[1,1]-(zp+center[2])*V[2,1] x3 = (xp-center[0])*V[0,2]+(yp-center[1])*V[1,2]-(zp+center[2])*V[2,2] return x1, x2, x3 def JN_e (intensity_rem,ln,mn,nn,V): ''' Changes the remanent magnetization vector to the body coordinate. Parameters: * ln,nn,mn: Direction cosines of the remanent magnetization vector. * V: Matrix of conversion. Returns: * JN: Remanent magnetization vector in the body coordinate. ''' JN = intensity_rem*np.array([[(ln*V[0,0]+mn*V[1,0]+nn*V[2,0])], \ [(ln*V[0,1]+mn*V[1,1]+nn*V[2,1])], [(ln*V[0,2]+mn*V[1,2]+nn*V[2,2])]]) return JN def N_desmagT (axis,F2,E2): ''' Calculates the three demagnetization factor along major, intermediate and minor axis. Parameters: * a,b,c: float Major, intermediate and minor axis, respectively. * F2, E2: float Lagrange's normal eliptic integrals of first and second order. Returns: * N1, N2, N3: floats Major, intermediate and minor demagnetization factors, respectively. ''' N1 = ((axis[0]*axis[1]*axis[2])/((axis[0]**2-axis[1]**2)*\ (axis[0]**2-axis[2]**2)**0.5)) * (F2-E2) N2 = (((axis[0]*axis[1]*axis[2])*\ (axis[0]**2-axis[2]**2)**0.5)/((axis[0]**2-axis[1]**2)*\ (axis[1]**2-axis[2]**2))) * (E2-((axis[1]**2-axis[2]**2)\ /(axis[0]**2-axis[2]**2)) * F2-((axis[2]*(axis[0]**2-axis[1]**2))\ /(axis[0]*axis[1]*(axis[0]**2-axis[2]**2)**0.5))) N3 = ((axis[0]*axis[1]*axis[2])/((axis[1]**2-axis[2]**2)*\ (axis[0]**2-axis[2]**2)**0.5)) * (((axis[1]*(axis[0]**2-axis[2]**2)**0.5)\ /(axis[0]*axis[2]))-E2) return N1, N2, N3 def k_matrix (U,V,K): ''' Build the susceptibility tensor for the anisotropic case. Parameters: * U: array Direction cosines of the susceptibilities. * V: array Matrix of coordinates conversion. * K: array Diagonal matrix with k1,k2,k3 (intensity of the susceptibilities). Returns: * km: array Susceptibility tensors matrix. 
''' km = np.dot(np.dot(np.dot(V.T,U), K), np.dot(U.T,V)) return km def lamb_T (axis,x1,x2,x3): ''' Calculates the larger root of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0. Parameters: * a, b, c: floats Major, intermediate and minor axis, respectively. * x1, x2, x3: arrays Axis of the body coordinate system. Returns: * lamb: array Larger root of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0. * teta, q, p, p2, p1, p0: arrays Constants of the cubic equation. ''' p0 = (axis[0]*axis[1]*axis[2])**2-(axis[1]*axis[2]*x1)**2-\ (axis[2]*axis[0]*x2)**2-(axis[0]*axis[1]*x3)**2 p1 = (axis[0]*axis[1])**2+(axis[1]*axis[2])**2+(axis[2]*axis[0])**2-\ (axis[1]**2+axis[2]**2)*x1**2-(axis[2]**2+axis[0]**2)*x2**2-\ (axis[0]**2+axis[1]**2)*x3**2 p2 = axis[0]**2+axis[1]**2+axis[2]**2-x1**2-x2**2-x3**2 p = p1-(p2**2)/3. q = p0-((p1*p2)/3.)+2*(p2/3.)**3 p3 = (-q/(2*np.sqrt((-p/3.)**3))) for i in range (len(p3)): if p3[i] > 1.: p3[i] = 1. teta = np.arccos(p3) lamb = 2.*((-p/3.)**0.5)*np.cos(teta/3.)-(p2/3.) return lamb, teta, q, p, p2, p1, p0 def legendre_integrals(axis,lamb): ''' Calculates parameters and the Legendre's normal elliptic integrals of first and second order. Parameters: * a, b, c: floats Major, intermediate and minor axis, respectively. * lamb: array Larger root of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0. Returns: * F: array Legendre's normal elliptic integrals of first order. * E: array Legendre's normal elliptic integrals of second order. * F2: array Legendre's normal elliptic integrals of first order. * E2: array Legendre's normal elliptic integrals of second order. * k: array Legendre's normal elliptic integrals parameter. * theta_l: array Legendre's normal elliptic integrals parameter. 
''' k = np.zeros_like(lamb) k1 = ((axis[0]**2-axis[1]**2)/(axis[0]**2-axis[2]**2))**0.5 k.fill(k1) theta_l = np.arcsin(((axis[0]**2-axis[2]**2)/(axis[0]**2+lamb))**0.5) theta_l2 = np.arccos(axis[2]/axis[0]) F = scipy.special.ellipkinc(theta_l, k**2) E = scipy.special.ellipeinc(theta_l, k**2) F2 = scipy.special.ellipkinc(theta_l2, k1**2) E2 = scipy.special.ellipeinc(theta_l2, k1**2) return F,E,F2,E2,k,theta_l def dlambx_T (axis,x1,x2,x3,lamb): ''' Calculates the derivatives of the ellipsoid equation for each body coordinates in realation to lambda. Parameters: * a, b, c: floats Major, intermediate and minor axis, respectively. * x1, x2, x3: array Axis of the body coordinate system. * lamb: array Larger root of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0. Returns: * dlambx1,dlambx2,dlambx3: array Derivatives of the ellipsoid equation for each body coordinates in realation to x1,x2 and x3. ''' dlambx1 = (2*x1/(axis[0]**2+lamb))/((x1/(axis[0]**2+lamb))**2+\ (x2/(axis[1]**2+lamb))**2+((x3/(axis[2]**2+lamb))**2)) dlambx2 = (2*x2/(axis[1]**2+lamb))/((x1/(axis[0]**2+lamb))**2+\ (x2/(axis[1]**2+lamb))**2+((x3/(axis[2]**2+lamb))**2)) dlambx3 = (2*x3/(axis[2]**2+lamb))/((x1/(axis[0]**2+lamb))**2+\ (x2/(axis[1]**2+lamb))**2+((x3/(axis[2]**2+lamb))**2)) return dlambx1, dlambx2, dlambx3 def potential_integrals(axis,k,theta_l,F,E): ''' Calculates the integrals which is part of the solution of the potential field of an homogeneous ellipsoid (Dirichlet,1839). Parameters: * a, b, c: floats Major, intermediate and minor axis, respectively. * k: array Legendre's normal elliptic integrals parameter. * theta_l: array Legendre's normal elliptic integrals parameter. * F: array Legendre's normal elliptic integrals of first order. * E: array Legendre's normal elliptic integrals of second order. Returns: * A2,B2,C2: array Integrals of the potential field of an homogeneous ellipsoid. 
''' A2 = (2/((axis[0]**2-axis[1]**2)*(axis[0]**2-axis[2]**2)**0.5))*(F-E) B2 = ((2*(axis[0]**2-axis[2]**2)**0.5)/((axis[0]**2-axis[1]**2)*\ (axis[1]**2-axis[2]**2)))*(E-((axis[1]**2-axis[2]**2)\ /(axis[0]**2-axis[2]**2))*F-((k**2*np.sin(theta_l)*np.cos(theta_l))\ /(1-k**2*np.sin(theta_l)*np.sin(theta_l))**0.5)) C2 = (2/((axis[1]**2-axis[2]**2)*(axis[0]**2-axis[2]**2)**0.5))\ *(((np.sin(theta_l)*((1-k**2*np.sin(theta_l)*np.sin(theta_l))**0.5))\ /np.cos(theta_l))-E) return A2,B2,C2 def mx(axis,x1,x2,x3,dlambx1,dlambx2,dlambx3,A,B,C,lamb): ''' Additional calculations for the ellipsoid magnetic field. Parameters: * a, b, c: floats Major, intermediate and minor axis, respectively. * x1, x2, x3: array Axis of the body coordinate system. * A,B,C: array Integrals of the potential field of an homogeneous ellipsoid * lamb: array Larger root of the cubic equation: s^3 + p2*s^2 + p1*s + p0 = 0. Returns: * m11, m12, m13, m21, m22, m23, m31, m32, m33, cte, V1, V2, V3: array Calculus for the ellipsoid magnetic field. ''' cte = 1/np.sqrt((axis[0]**2+lamb)*(axis[1]**2+lamb)*(axis[2]**2+lamb)) V1 = x1/(axis[0]**2+lamb) V2 = x2/(axis[1]**2+lamb) V3 = x3/(axis[2]**2+lamb) m11 = (cte*dlambx1*V1)-A m12 = cte*dlambx1*V2 m13 = cte*dlambx1*V3 m21 = cte*dlambx2*V1 m22 = (cte*dlambx2*V2)-B m23 = cte*dlambx2*V3 m31 = cte*dlambx3*V1 m32 = cte*dlambx3*V2 m33 = (cte*dlambx3*V3)-C return m11, m12, m13, m21, m22, m23, m31, m32, m33, cte, V1, V2, V3 def jrd_cartesiano (inten,inc,dec,ellipsoids): ''' Calculates the intensity and direction of the resultant vector of magnetization. Parameters: * inten: Intensity of the Earth's magnetic field. * inc: Inclination of the Earth's magnetic field. * dec: Declination of the Earth's magnetic field. * ellipsoid: magnetic ellipsoid model. Returns: * JRD_ang: Vector with intensity and direction of the resultant vector of magnetization in the cartesian coordinates(degrees). 
''' inc = np.deg2rad(inc) dec = np.deg2rad(dec) lt,mt,nt = utils.dircos (dec, inc) Ft = [] JR = [] JRD = [] JRD_carte = [] JRD_ang = [] for ellipsoid in ellipsoids: Ft.append(F_e (inten,lt,mt,nt,ellipsoids[i].mcon[0,0],\ ellipsoids[i].mcon[1,0],ellipsoids[i].mcon[2,0],\ ellipsoids[i].mcon[0,1],ellipsoids[i].mcon[1,1],\ ellipsoids[i].mcon[2,1],ellipsoids[i].mcon[0,2],\ ellipsoids[i].mcon[1,2],ellipsoids[i].mcon[2,2])) JR.append(JR_e (ellipsoids[i].km,ellipsoids[i].JN,Ft[i])) JRD.append(JRD_e (ellipsoids[i].km,ellipsoids[i].N1,\ ellipsoids[i].N2,ellipsoids[i].N3,JR[i])) JRD_carte.append((ellipsoids[i].mconT).dot(JRD[i])) JRD_ang.append(utils.vec2ang(JRD_carte[i])) return JRD_ang def F_e (inten,lt,mt,nt,V): ''' Change the magnetization vetor of the Earth's field to the body coordinates. Parameters: * inten: float Intensity of the Earth's magnetic field. * lt,mt,nt: floats Direction cosines of the Earth's magnetic field. * V: array Matrix of body coordinates change. Returns: * Ft: array The magnetization vetor of the Earth's field to the body coordinates. ''' intT= inten/(4*np.pi*100) Ft = intT*np.array([[(lt*V[0,0]+mt*V[1,0]+nt*V[2,0])], \ [(lt*V[0,1]+mt*V[1,1]+nt*V[2,1])], [(lt*V[0,2]+mt*V[1,2]+nt*V[2,2])]]) return Ft def JR_e (km,JN,Ft): ''' Calculates the resultant magnetization vector without self-demagnetization correction. Parameters: * km: array matrix of susceptibilities tensor. * JN: array Remanent magnetization * Ft: array Magnetization vetor of the Earth's field in the body coordinates. Returns: * JR: array Resultant magnetization vector without self-demagnetization correction. ''' JR = km.dot(Ft) + JN return JR def JRD_e (km,N1,N2,N3,JR): ''' Calculates resultant magnetization vector with self-demagnetization correction. Parameters: * km: array matrix of susceptibilities tensor. * N1,N2,N3: floats Demagnetization factors in relation to a, b and c, respectively. * JR: array resultant magnetization vector without self-demagnetization correction. 
Returns: * JRD: array Resultant magnetization vector without self-demagnetization correction. ''' I = np.identity(3) kn0 = km[:,0]*N1 kn1 = km[:,1]*N2 kn2 = km[:,2]*N3 kn = (np.vstack((kn0,kn1,kn2))).T A = I + kn JRD = (linalg.inv(A)).dot(JR) return JRD def B1_e (m11,m12,m13,J,a,b,c): ''' Calculates the B1 component of the magnetic field generated by n-ellipsoids in the body coordinates. Parameters: * m21,m22,m23: array Calculus for the ellipsoid magnetic field. * J: array Resultant magnetization vector without self-demagnetization correction. * a,b,c: floats Major, intermediate and minor axis, respectively. Returns: * B1: array The B1 component of the magnetic field generated by n-ellipsoids in the body coordinates. ''' B1 = 2*np.pi*a*b*c*(m11*J[0]+m12*J[1]+m13*J[2]) return B1 def B2_e (m21,m22,m23,J,a,b,c): ''' Calculates the B2 component of the magnetic field generated by n-ellipsoids in the body coordinates. Parameters: * m21,m22,m23: array Calculus for the ellipsoid magnetic field. * J: array Resultant magnetization vector without self-demagnetization correction. * a,b,c: floats Major, intermediate and minor axis, respectively. Returns: * B2: array The B2 component of the magnetic field generated by n-ellipsoids in the body coordinates. ''' B2 = 2*np.pi*a*b*c*(m21*J[0]+m22*J[1]+m23*J[2]) return B2 def B3_e (m31,m32,m33,J,a,b,c): ''' Calculates the B3 component of the magnetic field generated by n-ellipsoids in the body coordinates. Parameters: * m31,m32,m33: array Calculus for the ellipsoid magnetic field. * J: array Resultant magnetization vector with self-demagnetization correction. * a,b,c: floats Major, intermediate and minor axis, respectively. Returns: * B3: array The B3 component of the magnetic field generated by n-ellipsoids in the body coordinates. ''' B3 = 2*np.pi*a*b*c*(m31*J[0]+m32*J[1]+m33*J[2]) return B3
unknown
codeparrot/codeparrot-clean
from __future__ import unicode_literals import functools import inspect import re from httpretty import HTTPretty from .responses import metadata_response from .utils import convert_regex_to_flask_path class MockAWS(object): nested_count = 0 def __init__(self, backends): self.backends = backends if self.__class__.nested_count == 0: HTTPretty.reset() def __call__(self, func, reset=True): if inspect.isclass(func): return self.decorate_class(func) return self.decorate_callable(func, reset) def __enter__(self): self.start() def __exit__(self, *args): self.stop() def start(self, reset=True): self.__class__.nested_count += 1 if reset: for backend in self.backends.values(): backend.reset() if not HTTPretty.is_enabled(): HTTPretty.enable() for method in HTTPretty.METHODS: backend = list(self.backends.values())[0] for key, value in backend.urls.items(): HTTPretty.register_uri( method=method, uri=re.compile(key), body=value, ) # Mock out localhost instance metadata HTTPretty.register_uri( method=method, uri=re.compile('http://169.254.169.254/latest/meta-data/.*'), body=metadata_response ) def stop(self): self.__class__.nested_count -= 1 if self.__class__.nested_count < 0: raise RuntimeError('Called stop() before start().') if self.__class__.nested_count == 0: HTTPretty.disable() HTTPretty.reset() def decorate_callable(self, func, reset): def wrapper(*args, **kwargs): self.start(reset=reset) try: result = func(*args, **kwargs) finally: self.stop() return result functools.update_wrapper(wrapper, func) wrapper.__wrapped__ = func return wrapper def decorate_class(self, klass): for attr in dir(klass): if attr.startswith("_"): continue attr_value = getattr(klass, attr) if not hasattr(attr_value, "__call__"): continue # Check if this is a classmethod. 
If so, skip patching if inspect.ismethod(attr_value) and attr_value.__self__ is klass: continue try: setattr(klass, attr, self(attr_value, reset=False)) except TypeError: # Sometimes we can't set this for built-in types continue return klass class Model(type): def __new__(self, clsname, bases, namespace): cls = super(Model, self).__new__(self, clsname, bases, namespace) cls.__models__ = {} for name, value in namespace.items(): model = getattr(value, "__returns_model__", False) if model is not False: cls.__models__[model] = name for base in bases: cls.__models__.update(getattr(base, "__models__", {})) return cls @staticmethod def prop(model_name): """ decorator to mark a class method as returning model values """ def dec(f): f.__returns_model__ = model_name return f return dec class BaseBackend(object): def reset(self): self.__dict__ = {} self.__init__() @property def _url_module(self): backend_module = self.__class__.__module__ backend_urls_module_name = backend_module.replace("models", "urls") backend_urls_module = __import__(backend_urls_module_name, fromlist=['url_bases', 'url_paths']) return backend_urls_module @property def urls(self): """ A dictionary of the urls to be mocked with this service and the handlers that should be called in their place """ url_bases = self._url_module.url_bases unformatted_paths = self._url_module.url_paths urls = {} for url_base in url_bases: for url_path, handler in unformatted_paths.items(): url = url_path.format(url_base) urls[url] = handler return urls @property def url_paths(self): """ A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place """ unformatted_paths = self._url_module.url_paths paths = {} for unformatted_path, handler in unformatted_paths.items(): path = unformatted_path.format("") paths[path] = handler return paths @property def url_bases(self): """ A list containing the url_bases extracted from urls.py """ return self._url_module.url_bases 
@property def flask_paths(self): """ The url paths that will be used for the flask server """ paths = {} for url_path, handler in self.url_paths.items(): url_path = convert_regex_to_flask_path(url_path) paths[url_path] = handler return paths def decorator(self, func=None): if func: return MockAWS({'global': self})(func) else: return MockAWS({'global': self})
unknown
codeparrot/codeparrot-clean
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package terraform

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/configs"
	"github.com/hashicorp/terraform/internal/deprecation"
	"github.com/hashicorp/terraform/internal/didyoumean"
	"github.com/hashicorp/terraform/internal/instances"
	"github.com/hashicorp/terraform/internal/lang"
	"github.com/hashicorp/terraform/internal/lang/marks"
	"github.com/hashicorp/terraform/internal/namedvals"
	"github.com/hashicorp/terraform/internal/plans"
	"github.com/hashicorp/terraform/internal/plans/deferring"
	"github.com/hashicorp/terraform/internal/providers"
	"github.com/hashicorp/terraform/internal/resources/ephemeral"
	"github.com/hashicorp/terraform/internal/states"
	"github.com/hashicorp/terraform/internal/tfdiags"
)

// Evaluator provides the necessary contextual data for evaluating expressions
// for a particular walk operation.
type Evaluator struct {
	// Operation defines what type of operation this evaluator is being used
	// for.
	Operation walkOperation

	// Meta is contextual metadata about the current operation.
	Meta *ContextMeta

	// Config is the root node in the configuration tree.
	Config *configs.Config

	// Instances tracks the dynamic instances that are associated with each
	// module call or resource. The graph walk gradually registers the
	// set of instances for each object within the graph nodes for those
	// objects, and so as long as the graph has been built correctly the
	// set of instances for an object should always be available by the time
	// we're evaluating expressions that refer to it.
	Instances *instances.Expander

	// NamedValues is where we keep the values of already-evaluated input
	// variables, local values, and output values.
	NamedValues *namedvals.State

	// EphemeralResources tracks the currently-open instances of any ephemeral
	// resources.
	EphemeralResources *ephemeral.Resources

	// Deferrals tracks resources and modules that have had either their
	// expansion or their specific planned actions deferred to a future
	// plan/apply round.
	Deferrals *deferring.Deferred

	// Plugins is the library of available plugin components (providers and
	// provisioners) that we have available to help us evaluate expressions
	// that interact with plugin-provided objects.
	//
	// From this we only access the schemas of the plugins, and don't otherwise
	// interact with plugin instances.
	Plugins *contextPlugins

	// State is the current state, embedded in a wrapper that ensures that
	// it can be safely accessed and modified concurrently.
	State *states.SyncState

	// Changes is the set of proposed changes, embedded in a wrapper that
	// ensures they can be safely accessed and modified concurrently.
	Changes *plans.ChangesSync

	// FunctionResults carries forward the global cache of function results to
	// be used when building out all the builtin functions returned in the
	// Scope.
	FunctionResults *lang.FunctionResults

	// PlanTimestamp is the fixed timestamp exposed to expressions via the
	// scopes created by Scope, so a single walk sees one consistent time.
	PlanTimestamp time.Time
}

// Scope creates an evaluation scope for the given module path and optional
// resource.
//
// If the "self" argument is nil then the "self" object is not available
// in evaluated expressions. Otherwise, it behaves as an alias for the given
// address.
func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable, source addrs.Referenceable, extFuncs lang.ExternalFuncs) *lang.Scope {
	return &lang.Scope{
		Data:      data,
		ParseRef:  addrs.ParseRef,
		SelfAddr:  self,
		SourceAddr: source,
		// Impure functions are permitted only during the apply, destroy,
		// and eval walks; every other walk gets a pure-only scope.
		PureOnly:        e.Operation != walkApply && e.Operation != walkDestroy && e.Operation != walkEval,
		BaseDir:         ".", // Always current working directory for now.
		PlanTimestamp:   e.PlanTimestamp,
		ExternalFuncs:   extFuncs,
		FunctionResults: e.FunctionResults,
	}
}

// evaluationStateData is an implementation of lang.Data that resolves
// references primarily (but not exclusively) using information from a State.
type evaluationStateData struct { *evaluationData // ModulePath is the path through the dynamic module tree to the module // that references will be resolved relative to. ModulePath addrs.ModuleInstance // InstanceKeyData describes the values, if any, that are accessible due // to repetition of a containing object using "count" or "for_each" // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, // since the user specifies in that case which variable name to locally // shadow.) InstanceKeyData InstanceKeyEvalData // Operation records the type of walk the evaluationStateData is being used // for. Operation walkOperation } // InstanceKeyEvalData is the old name for instances.RepetitionData, aliased // here for compatibility. In new code, use instances.RepetitionData instead. type InstanceKeyEvalData = instances.RepetitionData // EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for // evaluating in a context that has the given instance key. // // The forEachMap argument can be nil when preparing for evaluation // in a context where each.value is prohibited, such as a destroy-time // provisioner. In that case, the returned EachValue will always be // cty.NilVal. func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData { var evalData InstanceKeyEvalData if key == nil { return evalData } keyValue := key.Value() switch keyValue.Type() { case cty.String: evalData.EachKey = keyValue evalData.EachValue = forEachMap[keyValue.AsString()] case cty.Number: evalData.CountIndex = keyValue } return evalData } // EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance // key values at all, suitable for use in contexts where no keyed instance // is relevant. 
var EvalDataForNoInstanceKey = InstanceKeyEvalData{}

// evaluationStateData must implement lang.Data
var _ lang.Data = (*evaluationStateData)(nil)

// StaticValidateReferences calls [Evaluator.StaticValidateReferences] on
// the evaluator embedded in this data object, using this data object's
// static module path.
func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable, source addrs.Referenceable) tfdiags.Diagnostics {
	return d.Evaluator.StaticValidateReferences(refs, d.ModulePath.Module(), self, source)
}

// GetCountAttr resolves a reference to an attribute of the "count" object;
// only count.index is supported, and only when this evaluation context has
// an integer instance key.
func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	switch addr.Name {

	case "index":
		idxVal := d.InstanceKeyData.CountIndex
		if idxVal == cty.NilVal {
			// No CountIndex means we're not inside a counted block.
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  `Reference to "count" in non-counted context`,
				Detail:   `The "count" object can only be used in "module", "resource", and "data" blocks, and only when the "count" argument is set.`,
				Subject:  rng.ToHCL().Ptr(),
			})
			return cty.UnknownVal(cty.Number), diags
		}
		return idxVal, diags

	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Invalid "count" attribute`,
			Detail:   fmt.Sprintf(`The "count" object does not have an attribute named %q. The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}
}

// GetForEachAttr resolves a reference to each.key or each.value, which are
// only available when this evaluation context has a string instance key
// (i.e. inside a block using "for_each").
func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	var returnVal cty.Value
	switch addr.Name {

	case "key":
		returnVal = d.InstanceKeyData.EachKey
	case "value":
		returnVal = d.InstanceKeyData.EachValue

		if returnVal == cty.NilVal {
			// each.key is set but each.value is not: the value is
			// deliberately withheld in this context (e.g. destroy-time
			// provisioners).
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  `each.value cannot be used in this context`,
				Detail:   `A reference to "each.value" has been used in a context in which it is unavailable, such as when the configuration no longer contains the value in its "for_each" expression. Remove this reference to each.value in your configuration to work around this error.`,
				Subject:  rng.ToHCL().Ptr(),
			})
			return cty.UnknownVal(cty.DynamicPseudoType), diags
		}
	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Invalid "each" attribute`,
			Detail:   fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	if returnVal == cty.NilVal {
		// Neither key nor value is set: we're not in a for_each context
		// at all.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to "each" in context without for_each`,
			Detail:   `The "each" object can be used only in "module" or "resource" blocks, and only when the "for_each" argument is set.`,
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.UnknownVal(cty.DynamicPseudoType), diags
	}
	return returnVal, diags
}

// GetInputVariable resolves a reference to an input variable declared in the
// current module, applying sensitive/ephemeral marks from its declaration.
func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// First we'll make sure the requested value is declared in configuration,
	// so we can produce a nice message if not.
	moduleConfig := d.Evaluator.Config.DescendantForInstance(d.ModulePath)
	if moduleConfig == nil {
		// should never happen, since we can't be evaluating in a module
		// that wasn't mentioned in configuration.
		panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath))
	}

	config := moduleConfig.Module.Variables[addr.Name]
	if config == nil {
		var suggestions []string
		for k := range moduleConfig.Module.Variables {
			suggestions = append(suggestions, k)
		}
		suggestion := didyoumean.NameSuggestion(addr.Name, suggestions)
		if suggestion != "" {
			suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
		} else {
			suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name)
		}

		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared input variable`,
			Detail:   fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	// During the validate walk, input variables are always unknown so
	// that we are validating the configuration for all possible input values
	// rather than for a specific set. Checking against a specific set of
	// input values then happens during the plan walk.
	//
	// This is important because otherwise the validation walk will tend to be
	// overly strict, requiring expressions throughout the configuration to
	// be complicated to accommodate all possible inputs, whereas returning
	// unknown here allows for simpler patterns like using input values as
	// guards to broadly enable/disable resources, avoid processing things
	// that are disabled, etc. Terraform's static validation leans towards
	// being liberal in what it accepts because the subsequent plan walk has
	// more information available and so can be more conservative.
	if d.Operation == walkValidate {
		// We should still capture the statically-configured marks during
		// the validate walk.
		ret := cty.UnknownVal(config.Type)
		if config.Sensitive {
			ret = ret.Mark(marks.Sensitive)
		}
		if config.Ephemeral {
			ret = ret.Mark(marks.Ephemeral)
		}
		return ret, diags
	}

	var val cty.Value
	if target := d.ModulePath.InputVariable(addr.Name); !d.Evaluator.NamedValues.HasInputVariableValue(target) {
		val = cty.DynamicVal
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reference to uninitialized variable",
			Detail:   fmt.Sprintf("The variable %s was not processed by the most recent operation, this likely means the previous operation either failed or was incomplete due to targeting.", addr),
			Subject:  rng.ToHCL().Ptr(),
		})
	} else {
		val = d.Evaluator.NamedValues.GetInputVariableValue(target)
	}

	// Mark if sensitive and/or ephemeral
	if config.Sensitive {
		val = val.Mark(marks.Sensitive)
	}
	if config.Ephemeral {
		val = val.Mark(marks.Ephemeral)
	}

	return val, diags
}

// GetLocalValue resolves a reference to a local value declared in the
// current module, reading the already-evaluated result from NamedValues.
func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// First we'll make sure the requested value is declared in configuration,
	// so we can produce a nice message if not.
	moduleConfig := d.Evaluator.Config.DescendantForInstance(d.ModulePath)
	if moduleConfig == nil {
		// should never happen, since we can't be evaluating in a module
		// that wasn't mentioned in configuration.
		panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath))
	}

	config := moduleConfig.Module.Locals[addr.Name]
	if config == nil {
		var suggestions []string
		for k := range moduleConfig.Module.Locals {
			suggestions = append(suggestions, k)
		}
		suggestion := didyoumean.NameSuggestion(addr.Name, suggestions)
		if suggestion != "" {
			suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
		}

		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared local value`,
			Detail:   fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	target := addr.Absolute(d.ModulePath)
	if !d.Evaluator.NamedValues.HasLocalValue(target) {
		return cty.DynamicVal, diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Reference to uninitialized local value",
			Detail:   fmt.Sprintf("The local value %s was not processed by the most recent operation, this likely means the previous operation either failed or was incomplete due to targeting.", addr),
			Subject:  rng.ToHCL().Ptr(),
		})
	}
	return d.Evaluator.NamedValues.GetLocalValue(target), diags
}

// GetModule resolves a reference to a child module call, returning an
// object/tuple/map of the call's instances, each an object of its output
// values.
func (d *evaluationStateData) GetModule(addr addrs.ModuleCall, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Output results live in the module that declares them, which is one of
	// the child module instances of our current module path.
	moduleAddr := d.ModulePath.Module().Child(addr.Name)
	absAddr := addr.Absolute(d.ModulePath)

	parentCfg := d.Evaluator.Config.DescendantForInstance(d.ModulePath)
	callConfig, ok := parentCfg.Module.ModuleCalls[addr.Name]
	if !ok {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared module`,
			Detail:   fmt.Sprintf(`The configuration contains no %s.`, moduleAddr),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	// We'll consult the configuration to see what output names we are
	// expecting, so we can ensure the resulting object is of the expected
	// type even if our data is incomplete for some reason.
	moduleConfig := d.Evaluator.Config.Descendant(moduleAddr)
	if moduleConfig == nil {
		// should never happen, since we have a valid module call above, this
		// should be caught during static validation.
		panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr))
	}
	outputConfigs := moduleConfig.Module.Outputs

	// We don't do instance expansion during validation, and so we need to
	// return an unknown value. Technically we should always return
	// cty.DynamicVal here because the final value during plan will always
	// be an object or tuple type with unpredictable attributes/elements,
	// but because we never actually carry values forward from validation to
	// planning we lie a little here and return unknown list and map types,
	// just to give us more opportunities to catch author mistakes during
	// validation.
	//
	// This means that in practice any expression that refers to a module
	// call must be written to be valid for either a collection type or
	// structural type of similar kind, so that it can be considered as
	// valid during both the validate and plan walks.
	if d.Operation == walkValidate {
		// In case of non-expanded module calls we return a known object with unknown values
		// In case of an expanded module call we return unknown list/map
		// This means deprecation can be detected during validate only for
		// non-expanded modules, since we don't want false positives. The
		// plan walk will give definitive warnings.
		atys := make(map[string]cty.Type, len(outputConfigs))
		as := make(map[string]cty.Value, len(outputConfigs))
		for name, c := range outputConfigs {
			atys[name] = cty.DynamicPseudoType // output values are dynamically-typed
			val := cty.UnknownVal(cty.DynamicPseudoType)
			if c.DeprecatedSet {
				val = val.Mark(marks.NewDeprecation(c.Deprecated, absAddr.Output(name).ConfigOutputValue().String()))
			}
			as[name] = val
		}
		instTy := cty.Object(atys)
		switch {
		case callConfig.Count != nil:
			return cty.UnknownVal(cty.List(instTy)), diags
		case callConfig.ForEach != nil:
			return cty.UnknownVal(cty.Map(instTy)), diags
		default:
			val := cty.ObjectVal(as)
			return val, diags
		}
	}

	// For all other walk types, we proceed to dynamic evaluation of individual
	// instances, using the global instance expander. An earlier graph node
	// should always have registered the expansion of this module call before
	// we get here, unless there's a bug in the graph builders.
	allInstances := d.Evaluator.Instances

	instKeyType, instKeys, known := allInstances.ExpandAbsModuleCall(absAddr)
	if !known {
		// If we don't know which instances exist then we can't really predict
		// anything at all. We can't even predict the return type based on
		// instKeyType because output values are dynamically-typed and so
		// our final result will always be an object or tuple type whose
		// attribute/element count we cannot predict.
		return cty.DynamicVal, diags
	}

	instanceObjVal := func(instKey addrs.InstanceKey) (cty.Value, tfdiags.Diagnostics) {
		// This function must always return a valid value, even if it's
		// just a cty.DynamicVal placeholder accompanying error diagnostics.
		var diags tfdiags.Diagnostics
		namedVals := d.Evaluator.NamedValues
		moduleInstAddr := absAddr.Instance(instKey)
		attrs := make(map[string]cty.Value, len(outputConfigs))
		for name, cfg := range outputConfigs {
			outputAddr := moduleInstAddr.OutputValue(name)
			// Although we do typically expect the graph dependencies to
			// ensure that values get registered before they are needed,
			// we track dependencies with specific output values where
			// possible, instead of with entire module calls, and so
			// in this specific case it's valid for some of this call's
			// output values to not be known yet, with the graph builder
			// being responsible for making sure that no expression
			// in the configuration can actually observe that.
			if !namedVals.HasOutputValue(outputAddr) {
				attrs[name] = cty.DynamicVal
				continue
			}

			outputVal := namedVals.GetOutputValue(outputAddr)
			if cfg.Sensitive {
				outputVal = outputVal.Mark(marks.Sensitive)
			}
			if cfg.DeprecatedSet {
				outputVal = outputVal.Mark(marks.NewDeprecation(cfg.Deprecated, moduleInstAddr.OutputValue(name).ConfigOutputValue().String()))
			}

			attrs[name] = outputVal
		}
		return cty.ObjectVal(attrs), diags
	}

	switch instKeyType {
	case addrs.NoKeyType:
		// In this case we should always have exactly one instance that
		// is addrs.NoKey. If not then there's a bug in the [instances.Expander]
		// implementation.
		if len(instKeys) != 1 {
			panic(fmt.Sprintf("module call has no instance key type but has %d instances (should be 1)", len(instKeys)))
		}
		ret, moreDiags := instanceObjVal(instKeys[0])
		diags = diags.Append(moreDiags)
		return ret, diags
	case addrs.IntKeyType:
		// We can assume that the instance keys are in ascending numerical order
		// and are consecutive, per the contract of allInstances.ExpandModuleCall.
		elems := make([]cty.Value, 0, len(instKeys))
		for _, instKey := range instKeys {
			instVal, moreDiags := instanceObjVal(instKey)
			elems = append(elems, instVal)
			diags = diags.Append(moreDiags)
		}
		return cty.TupleVal(elems), diags
	case addrs.StringKeyType:
		attrs := make(map[string]cty.Value, len(instKeys))
		for _, instKey := range instKeys {
			instVal, moreDiags := instanceObjVal(instKey)
			attrs[string(instKey.(addrs.StringKey))] = instVal
			diags = diags.Append(moreDiags)
		}
		return cty.ObjectVal(attrs), diags
	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Unsupported instance key type`,
			Detail: fmt.Sprintf(
				`Module call %s has instance key type %#v, which is not supported by the expression evaluator. This is a bug in Terraform.`,
				absAddr, instKeyType,
			),
			Subject: rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}
}

func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	// First we'll consult the configuration to see if a resource of this
	// name is declared at all.
	moduleAddr := d.ModulePath
	moduleConfig := d.Evaluator.Config.DescendantForInstance(moduleAddr)
	if moduleConfig == nil {
		// should never happen, since we can't be evaluating in a module
		// that wasn't mentioned in configuration.
		panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr))
	}

	config := moduleConfig.Module.ResourceByAddr(addr)
	if config == nil {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared resource`,
			Detail:   fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Type, addr.Name, moduleDisplayAddr(moduleAddr)),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	// Much of this function was written before we had factored out the handling
	// of instance keys into the separate instance expander model, and so it
	// does a bunch of instance-related work itself below.
// // Currently, unknown instance keys are only possible when planning with // DeferralAllowed set to true in the PlanOpts, which should only be the // case in the stacks runtime (not the "normal terraform" modules runtime). // Thus, we have some amount of duplicated code remaining, to be more // certain that stacks-specific behaviors won't leak out into the standard // runtime. // // TODO: When deferred actions are more stable and robust in stacks, it // would be nice to rework this function to rely on the ResourceInstanceKeys // result for _all_ of its work, rather than continuing to duplicate a bunch // of the logic we've tried to encapsulate over ther already. if d.Operation == walkPlan || d.Operation == walkApply { if !d.Evaluator.Instances.ResourceInstanceExpanded(addr.Absolute(moduleAddr)) { // Then we've asked for a resource that hasn't been evaluated yet. // This means that either something has gone wrong in the graph or // the console or test command has an errored plan and is attempting // to load an invalid resource from it. 
unknownVal := cty.DynamicVal // If an ephemeral resource is deferred we need to mark the returned unknown value as ephemeral if addr.Mode == addrs.EphemeralResourceMode { unknownVal = unknownVal.Mark(marks.Ephemeral) } return unknownVal, diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Reference to uninitialized resource", Detail: fmt.Sprintf("The resource %s was not processed by the most recent operation, this likely means the previous operation either failed or was incomplete due to targeting.", addr), Subject: rng.ToHCL().Ptr(), }) } if _, _, hasUnknownKeys := d.Evaluator.Instances.ResourceInstanceKeys(addr.Absolute(moduleAddr)); hasUnknownKeys { // There really isn't anything interesting we can do in this situation, // because it means we have an unknown for_each/count, in which case // we can't even predict what the result type will be because it // would be either an object or tuple type decided based on the instance // keys. // (We can't get in here for a single-instance resource because in that // case we would know that there's only one key and it's addrs.NoKey, // so we'll fall through to the other logic below.) unknownVal := cty.DynamicVal // If an ephemeral resource is deferred we need to mark the returned unknown value as ephemeral if addr.Mode == addrs.EphemeralResourceMode { unknownVal = unknownVal.Mark(marks.Ephemeral) } return unknownVal, diags } } // Build the provider address from configuration, since we may not have // state available in all cases. // We need to build an abs provider address, but we can use a default // instance since we're only interested in the schema. schema := d.getResourceSchema(addr, config.Provider) if schema.Body == nil { // This shouldn't happen, since validation before we get here should've // taken care of it, but we'll show a reasonable error message anyway. 
diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: `Missing resource type schema`, Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, config.Provider), Subject: rng.ToHCL().Ptr(), }) return cty.DynamicVal, diags } ty := schema.Body.ImpliedType() switch addr.Mode { case addrs.EphemeralResourceMode: // FIXME: This does not yet work with deferrals, and it would be nice to // find some way to refactor this so that the following code is not so // tethered to the current implementation details. Instead we should // have an abstract idea of first determining what instances the // resource has (using d.Evaluator.Instances.ResourceInstanceKeys) and // then retrieving the value for each instance to assemble into the // result, using some per-resource-mode logic maintained elsewhere. val, epehemeralDiags := d.getEphemeralResource(addr, rng) diags = diags.Append(epehemeralDiags) return deprecation.MarkDeprecatedValues(val, schema.Body, addr.Absolute(d.ModulePath).String()), diags case addrs.ListResourceMode: val, listDiags := d.getListResource(config, rng) diags = diags.Append(listDiags) return deprecation.MarkDeprecatedValues(val, schema.Body, addr.Absolute(d.ModulePath).String()), diags default: // continue with the rest of the function } // Now, we're going to build up a value that represents the resource // or resources that are in the state. instances := map[addrs.InstanceKey]cty.Value{} // First, we're going to load any instances that we have written into the // deferrals system. A deferred resource overrides anything that might be // in the state for the resource, so we do this first. for key, value := range d.Evaluator.Deferrals.GetDeferredResourceInstances(addr.Absolute(d.ModulePath)) { instances[key] = value } // Proactively read out all the resource changes before iteration. 
Not only // does GetResourceInstanceChange have to iterate over all instances // internally causing an n^2 lookup, but Changes is also a major point of // lock contention. resChanges := d.Evaluator.Changes.GetChangesForConfigResource(addr.InModule(moduleConfig.Path)) instChanges := addrs.MakeMap[addrs.AbsResourceInstance, *plans.ResourceInstanceChange]() for _, ch := range resChanges { instChanges.Put(ch.Addr, ch) } rs := d.Evaluator.State.Resource(addr.Absolute(d.ModulePath)) // Decode all instances in the current state pendingDestroy := d.Operation == walkDestroy if rs != nil { for key, is := range rs.Instances { if _, ok := instances[key]; ok { // Then we've already loaded this instance from the deferrals so // we'll just ignore it being in state. continue } // Otherwise, we'll load the instance from state. if is == nil || is.Current == nil { // Assume we're dealing with an instance that hasn't been created yet. instances[key] = cty.UnknownVal(ty) continue } instAddr := addr.Instance(key).Absolute(d.ModulePath) change := instChanges.Get(instAddr) if change != nil { // Don't take any resources that are yet to be deleted into account. // If the referenced resource is CreateBeforeDestroy, then orphaned // instances will be in the state, as they are not destroyed until // after their dependants are updated. if change.Action == plans.Delete { if !pendingDestroy { continue } } } // Planned resources are temporarily stored in state with empty values, // and need to be replaced by the planned value here. if is.Current.Status == states.ObjectPlanned { if change == nil { // FIXME: This is usually an unfortunate case where we need to // lookup an individual instance referenced via "self" for // postconditions which we know exists, but because evaluation // must always get the resource in aggregate some instance // changes may not yet be registered. 
instances[key] = cty.DynamicVal // log the problem for debugging, since it may be a legitimate error we can't catch log.Printf("[WARN] instance %s is marked as having a change pending but that change is not recorded in the plan", instAddr) continue } instances[key] = change.After continue } ios, err := is.Current.Decode(schema) if err != nil { // This shouldn't happen, since by the time we get here we // should have upgraded the state data already. diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid resource instance data in state", Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), Subject: &config.DeclRange, }) continue } val := ios.Value instances[key] = val } } if len(instances) == 0 { switch d.Operation { case walkPlan, walkApply: // During plan and apply as we evaluate each removed instance they // are removed from the working state. Since we know there are no // instances, return an empty container of the expected type. switch { case config.Count != nil: return cty.EmptyTupleVal, diags case config.ForEach != nil: return cty.EmptyObjectVal, diags default: // While we can reference an expanded resource with 0 // instances, we cannot reference instances that do not exist. // Due to the fact that we may have direct references to // instances that may end up in a root output during destroy // (since a planned destroy cannot yet remove root outputs), we // need to return a dynamic value here to allow evaluation to // continue. log.Printf("[ERROR] unknown instance %q referenced during %s", addr.Absolute(d.ModulePath), d.Operation) return cty.DynamicVal, diags } case walkImport: // Import does not yet plan resource changes, so new resources from // config are not going to be found here. Once walkImport fully // plans resources, this case should not longer be needed. 
// In the single instance case, we can return a typed unknown value // for the instance to better satisfy other expressions using the // value. This of course will not help if statically known // attributes are expected to be known elsewhere, but reduces the // number of problematic configs for now. // Unlike in plan and apply above we can't be sure the count or // for_each instances are empty, so we return a DynamicVal. We // don't really have a good value to return otherwise -- empty // values will fail for direct index expressions, and unknown // Lists and Maps could fail in some type unifications. switch { case config.Count != nil: return cty.DynamicVal, diags case config.ForEach != nil: return cty.DynamicVal, diags default: return cty.UnknownVal(ty), diags } default: // We should only end up here during the validate walk (or // console/eval), since later walks should have at least partial // states populated for all resources in the configuration. switch { case config.Count != nil: return deprecation.MarkDeprecatedValues(cty.DynamicVal, schema.Body, addr.Absolute(d.ModulePath).String()), diags case config.ForEach != nil: return deprecation.MarkDeprecatedValues(cty.DynamicVal, schema.Body, addr.Absolute(d.ModulePath).String()), diags default: // We don't know the values of the single resource instance, but we know the general // shape these values will take. 
content := map[string]cty.Value{} for attr, attrType := range ty.AttributeTypes() { content[attr] = cty.UnknownVal(attrType) } return deprecation.MarkDeprecatedValues(cty.ObjectVal(content), schema.Body, addr.Absolute(d.ModulePath).String()), diags } } } // ret should be populated with a valid value in all cases below var ret cty.Value switch { case config.Count != nil: // figure out what the last index we have is length := -1 for key := range instances { intKey, ok := key.(addrs.IntKey) if !ok { continue } if int(intKey) >= length { length = int(intKey) + 1 } } if length > 0 { vals := make([]cty.Value, length) for key, instance := range instances { intKey, ok := key.(addrs.IntKey) if !ok { // old key from state, which isn't valid for evaluation continue } vals[int(intKey)] = deprecation.MarkDeprecatedValues(instance, schema.Body, addr.Absolute(d.ModulePath).Instance(key).String()) } // Insert unknown values where there are any missing instances for i, v := range vals { if v == cty.NilVal { vals[i] = cty.UnknownVal(ty) } } ret = cty.TupleVal(vals) } else { ret = cty.EmptyTupleVal } case config.ForEach != nil: vals := make(map[string]cty.Value) for key, instance := range instances { strKey, ok := key.(addrs.StringKey) if !ok { // old key that is being dropped and not used for evaluation continue } vals[string(strKey)] = deprecation.MarkDeprecatedValues(instance, schema.Body, addr.Absolute(d.ModulePath).Instance(key).String()) } if len(vals) > 0 { // We use an object rather than a map here because resource schemas // may include dynamically-typed attributes, which will then cause // each instance to potentially have a different runtime type even // though they all conform to the static schema. 
ret = cty.ObjectVal(vals) } else { ret = cty.EmptyObjectVal } default: val, ok := instances[addrs.NoKey] if !ok { // if the instance is missing, insert an unknown value val = cty.UnknownVal(ty) } ret = deprecation.MarkDeprecatedValues(val, schema.Body, addr.Absolute(d.ModulePath).String()) } return ret, diags } func (d *evaluationStateData) getListResource(config *configs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics switch d.Operation { case walkValidate: return cty.DynamicVal, diags case walkPlan: // continue default: return cty.DynamicVal, diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: `Unsupported operation`, Detail: fmt.Sprintf("List resources are not supported in %s operations.", d.Operation), Subject: rng.ToHCL().Ptr(), }) } lAddr := config.Addr() mAddr := addrs.Resource{ Mode: addrs.ManagedResourceMode, Type: lAddr.Type, Name: lAddr.Name, } resourceSchema := d.getResourceSchema(mAddr, config.Provider) if resourceSchema.Body == nil { // This shouldn't happen, since validation before we get here should've // taken care of it, but we'll show a reasonable error message anyway. diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: `Missing resource type schema`, Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", lAddr, config.Provider), Subject: rng.ToHCL().Ptr(), }) return cty.DynamicVal, diags } resourceType := resourceSchema.Body.ImpliedType() queries := d.Evaluator.Changes.GetQueryInstancesForAbsResource(lAddr.Absolute(d.ModulePath)) if len(queries) == 0 { // Since we know there are no instances, return an empty container of the expected type. 
		switch {
		case config.Count != nil:
			return cty.EmptyTupleVal, diags
		case config.ForEach != nil:
			return cty.EmptyObjectVal, diags
		default:
			return cty.DynamicVal, diags
		}
	}

	var ret cty.Value

	switch {
	case config.Count != nil:
		// figure out what the last index we have is
		length := -1
		for _, inst := range queries {
			if intKey, ok := inst.Addr.Resource.Key.(addrs.IntKey); ok {
				length = max(int(intKey)+1, length)
			}
		}

		if length > 0 {
			vals := make([]cty.Value, length)
			for _, inst := range queries {
				key := inst.Addr.Resource.Key
				if intKey, ok := key.(addrs.IntKey); ok {
					vals[int(intKey)] = inst.Results.Value
				}
			}

			// Insert unknown values where there are any missing instances
			for i, v := range vals {
				if v == cty.NilVal {
					vals[i] = cty.UnknownVal(resourceType)
				}
			}

			ret = cty.TupleVal(vals)
		} else {
			ret = cty.EmptyTupleVal
		}

	case config.ForEach != nil:
		vals := make(map[string]cty.Value)
		for _, inst := range queries {
			key := inst.Addr.Resource.Key
			if strKey, ok := key.(addrs.StringKey); ok {
				vals[string(strKey)] = inst.Results.Value
			}
		}

		if len(vals) > 0 {
			// We use an object rather than a map here because resource schemas
			// may include dynamically-typed attributes, which will then cause
			// each instance to potentially have a different runtime type even
			// though they all conform to the static schema.
			ret = cty.ObjectVal(vals)
		} else {
			ret = cty.EmptyObjectVal
		}

	default:
		if len(queries) <= 0 {
			// if the instance is missing, insert an empty tuple
			ret = cty.ObjectVal(map[string]cty.Value{
				"data": cty.EmptyTupleVal,
			})
		} else {
			ret = queries[0].Results.Value
		}
	}

	return ret, diags
}

// getEphemeralResource evaluates a reference to an ephemeral resource,
// returning a value carrying the Ephemeral mark so downstream expressions
// can be checked for invalid persistence of ephemeral data.
func (d *evaluationStateData) getEphemeralResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if d.Operation == walkValidate || d.Operation == walkEval {
		// Ephemeral instances are never live during the validate walk. Eval is
		// similarly offline, and since there is no value stored we can't return
		// anything other than dynamic.
return cty.DynamicVal.Mark(marks.Ephemeral), diags } // Now, we're going to build up a value that represents the resource // or resources that are in the state. instances := map[addrs.InstanceKey]cty.Value{} // First, we're going to load any instances that we have written into the // deferrals system. A deferred resource overrides anything that might be // in the state for the resource, so we do this first. for key, value := range d.Evaluator.Deferrals.GetDeferredResourceInstances(addr.Absolute(d.ModulePath)) { instances[key] = value } absAddr := addr.Absolute(d.ModulePath) keyType, keys, haveUnknownKeys := d.Evaluator.Instances.ResourceInstanceKeys(absAddr) if haveUnknownKeys { // We can probably do better than totally unknown at least for a // single-instance resource, but we'll just keep it simple for now. // Result must be marked as ephemeral so that we can still catch // attempts to use the results in non-ephemeral locations, so that // the operator doesn't end up trapped with an error on a subsequent // plan/apply round. return cty.DynamicVal.Mark(marks.Ephemeral), diags } ephems := d.Evaluator.EphemeralResources getInstValue := func(addr addrs.AbsResourceInstance) (cty.Value, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics // If we have a deferred instance with this key we don't need to check if it is live or not, // it has not been created so we can just return the deferred value. if v, ok := instances[addr.Resource.Key]; ok { return v, diags } val, isLive := ephems.InstanceValue(addr) if !isLive { // If the instance is no longer "live" by the time we're accessing // it then that suggests that it needed renewal and renewal has // failed, and so the object's value is no longer usable. We'll // still return the value in case it's somehow useful for diagnosis, // but we return an error to prevent further evaluation of whatever // other expression depended on the liveness of this object. 
// // This error message is written on the assumption that it will // always appear alongside the provider's renewal error, but that'll // be exposed only once the (now-zombied) ephemeral resource is // eventually closed, so that we can avoid returning the same error // multiple times. diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Ephemeral resource instance has expired", Detail: fmt.Sprintf( "The remote object for %s is no longer available due to a renewal error, so Terraform cannot evaluate this expression.", addr, ), Subject: rng.ToHCL().Ptr(), }) } if val == cty.NilVal { val = cty.DynamicVal.Mark(marks.Ephemeral) } return val, diags } switch keyType { case addrs.NoKeyType: // For "no key" we're returning just a single object representing // the single instance of this resource. instVal, moreDiags := getInstValue(absAddr.Instance(addrs.NoKey)) diags = diags.Append(moreDiags) return instVal, diags case addrs.IntKeyType: // For integer keys we're returning a tuple-typed value whose // indices are the keys. elems := make([]cty.Value, len(keys)) for _, key := range keys { idx := int(key.(addrs.IntKey)) instAddr := absAddr.Instance(key) instVal, moreDiags := getInstValue(instAddr) diags = diags.Append(moreDiags) elems[idx] = instVal } return cty.TupleVal(elems), diags case addrs.StringKeyType: // For string keys we're returning an object-typed value whose // attributes are the keys. 
		// One object attribute per string instance key; attribute names are
		// the for_each keys.
		attrs := make(map[string]cty.Value, len(keys))
		for _, key := range keys {
			attrName := string(key.(addrs.StringKey))
			instAddr := absAddr.Instance(key)
			instVal, moreDiags := getInstValue(instAddr)
			diags = diags.Append(moreDiags)
			attrs[attrName] = instVal
		}
		return cty.ObjectVal(attrs), diags
	default:
		// ResourceInstanceKeys returns only the known key types, so this
		// is unreachable unless a new key type is introduced.
		panic(fmt.Sprintf("unhandled instance key type %#v", keyType))
	}
}

// getResourceSchema returns the schema for the given resource type as
// declared by the given provider, or an empty schema if lookup fails.
func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.Provider) providers.Schema {
	schema, err := d.Evaluator.Plugins.ResourceTypeSchema(providerAddr, addr.Mode, addr.Type)
	if err != nil {
		// We have plenty of other codepaths that will detect and report
		// schema lookup errors before we'd reach this point, so we'll just
		// treat a failure here the same as having no schema.
		return providers.Schema{}
	}
	return schema
}

func (d *evaluationStateData) GetOutput(addr addrs.OutputValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// First we'll make sure the requested value is declared in configuration,
	// so we can produce a nice message if not.
	moduleConfig := d.Evaluator.Config.DescendantForInstance(d.ModulePath)
	if moduleConfig == nil {
		// should never happen, since we can't be evaluating in a module
		// that wasn't mentioned in configuration.
		panic(fmt.Sprintf("output value read from %s, which has no configuration", d.ModulePath))
	}

	config := moduleConfig.Module.Outputs[addr.Name]
	if config == nil {
		// Offer a did-you-mean suggestion drawn from the module's declared
		// output names.
		var suggestions []string
		for k := range moduleConfig.Module.Outputs {
			suggestions = append(suggestions, k)
		}
		suggestion := didyoumean.NameSuggestion(addr.Name, suggestions)
		if suggestion != "" {
			suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
		}
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared output value`,
			Detail:   fmt.Sprintf(`An output value with the name %q has not been declared.%s`, addr.Name, suggestion),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	// If no value has been registered for this output yet, treat it as
	// not-yet-known rather than an error.
	var value cty.Value
	if !d.Evaluator.NamedValues.HasOutputValue(addr.Absolute(d.ModulePath)) {
		value = cty.DynamicVal
	} else {
		value = d.Evaluator.NamedValues.GetOutputValue(addr.Absolute(d.ModulePath))
	}

	// Apply any marks implied by the output's declaration.
	if config.Sensitive {
		value = value.Mark(marks.Sensitive)
	}
	if config.Ephemeral {
		value = value.Mark(marks.Ephemeral)
	}
	if config.DeprecatedSet {
		value = value.Mark(marks.NewDeprecation(config.Deprecated, addr.InModule(d.Module).String()))
	}

	return value, diags
}

// moduleDisplayAddr returns a string describing the given module instance
// address that is appropriate for returning to users in situations where the
// root module is possible. Specifically, it returns "the root module" if the
// root module instance is given, or a string representation of the module
// address otherwise.
func moduleDisplayAddr(addr addrs.ModuleInstance) string {
	switch {
	case addr.IsRoot():
		return "the root module"
	default:
		return addr.String()
	}
}
go
github
https://github.com/hashicorp/terraform
internal/terraform/evaluate.go
/*------------------------------------------------------------------------- * * execExpr.c * Expression evaluation infrastructure. * * During executor startup, we compile each expression tree (which has * previously been processed by the parser and planner) into an ExprState, * using ExecInitExpr() et al. This converts the tree into a flat array * of ExprEvalSteps, which may be thought of as instructions in a program. * At runtime, we'll execute steps, starting with the first, until we reach * an EEOP_DONE_{RETURN|NO_RETURN} opcode. * * This file contains the "compilation" logic. It is independent of the * specific execution technology we use (switch statement, computed goto, * JIT compilation, etc). * * See src/backend/executor/README for some background, specifically the * "Expression Trees and ExprState nodes", "Expression Initialization", * and "Expression Evaluation" sections. * * * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/executor/execExpr.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/nbtree.h" #include "catalog/objectaccess.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "executor/execExpr.h" #include "executor/nodeSubplan.h" #include "funcapi.h" #include "jit/jit.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/subscripting.h" #include "optimizer/optimizer.h" #include "pgstat.h" #include "utils/acl.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/jsonfuncs.h" #include "utils/jsonpath.h" #include "utils/lsyscache.h" #include "utils/typcache.h" typedef struct ExprSetupInfo { /* * Highest attribute numbers fetched from inner/outer/scan/old/new tuple * slots: */ AttrNumber last_inner; AttrNumber last_outer; AttrNumber last_scan; AttrNumber last_old; AttrNumber 
last_new; /* MULTIEXPR SubPlan nodes appearing in the expression: */ List *multiexpr_subplans; } ExprSetupInfo; static void ExecReadyExpr(ExprState *state); static void ExecInitExprRec(Expr *node, ExprState *state, Datum *resv, bool *resnull); static void ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args, Oid funcid, Oid inputcollid, ExprState *state); static void ExecInitSubPlanExpr(SubPlan *subplan, ExprState *state, Datum *resv, bool *resnull); static void ExecCreateExprSetupSteps(ExprState *state, Node *node); static void ExecPushExprSetupSteps(ExprState *state, ExprSetupInfo *info); static bool expr_setup_walker(Node *node, ExprSetupInfo *info); static bool ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op); static void ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable, ExprState *state); static void ExecInitSubscriptingRef(ExprEvalStep *scratch, SubscriptingRef *sbsref, ExprState *state, Datum *resv, bool *resnull); static bool isAssignmentIndirectionExpr(Expr *expr); static void ExecInitCoerceToDomain(ExprEvalStep *scratch, CoerceToDomain *ctest, ExprState *state, Datum *resv, bool *resnull); static void ExecBuildAggTransCall(ExprState *state, AggState *aggstate, ExprEvalStep *scratch, FunctionCallInfo fcinfo, AggStatePerTrans pertrans, int transno, int setno, int setoff, bool ishash, bool nullcheck); static void ExecInitJsonExpr(JsonExpr *jsexpr, ExprState *state, Datum *resv, bool *resnull, ExprEvalStep *scratch); static void ExecInitJsonCoercion(ExprState *state, JsonReturning *returning, ErrorSaveContext *escontext, bool omit_quotes, bool exists_coerce, Datum *resv, bool *resnull); /* * ExecInitExpr: prepare an expression tree for execution * * This function builds and returns an ExprState implementing the given * Expr node tree. The return ExprState can then be handed to ExecEvalExpr * for execution. 
Because the Expr tree itself is read-only as far as * ExecInitExpr and ExecEvalExpr are concerned, several different executions * of the same plan tree can occur concurrently. (But note that an ExprState * does mutate at runtime, so it can't be re-used concurrently.) * * This must be called in a memory context that will last as long as repeated * executions of the expression are needed. Typically the context will be * the same as the per-query context of the associated ExprContext. * * Any Aggref, WindowFunc, or SubPlan nodes found in the tree are added to * the lists of such nodes held by the parent PlanState. * * Note: there is no ExecEndExpr function; we assume that any resource * cleanup needed will be handled by just releasing the memory context * in which the state tree is built. Functions that require additional * cleanup work can register a shutdown callback in the ExprContext. * * 'node' is the root of the expression tree to compile. * 'parent' is the PlanState node that owns the expression. * * 'parent' may be NULL if we are preparing an expression that is not * associated with a plan tree. (If so, it can't have aggs or subplans.) * Such cases should usually come through ExecPrepareExpr, not directly here. * * Also, if 'node' is NULL, we just return NULL. This is convenient for some * callers that may or may not have an expression that needs to be compiled. * Note that a NULL ExprState pointer *cannot* be handed to ExecEvalExpr, * although ExecQual and ExecCheck will accept one (and treat it as "true"). 
*/ ExprState * ExecInitExpr(Expr *node, PlanState *parent) { ExprState *state; ExprEvalStep scratch = {0}; /* Special case: NULL expression produces a NULL ExprState pointer */ if (node == NULL) return NULL; /* Initialize ExprState with empty step list */ state = makeNode(ExprState); state->expr = node; state->parent = parent; state->ext_params = NULL; /* Insert setup steps as needed */ ExecCreateExprSetupSteps(state, (Node *) node); /* Compile the expression proper */ ExecInitExprRec(node, state, &state->resvalue, &state->resnull); /* Finally, append a DONE step */ scratch.opcode = EEOP_DONE_RETURN; ExprEvalPushStep(state, &scratch); ExecReadyExpr(state); return state; } /* * ExecInitExprWithParams: prepare a standalone expression tree for execution * * This is the same as ExecInitExpr, except that there is no parent PlanState, * and instead we may have a ParamListInfo describing PARAM_EXTERN Params. */ ExprState * ExecInitExprWithParams(Expr *node, ParamListInfo ext_params) { ExprState *state; ExprEvalStep scratch = {0}; /* Special case: NULL expression produces a NULL ExprState pointer */ if (node == NULL) return NULL; /* Initialize ExprState with empty step list */ state = makeNode(ExprState); state->expr = node; state->parent = NULL; state->ext_params = ext_params; /* Insert setup steps as needed */ ExecCreateExprSetupSteps(state, (Node *) node); /* Compile the expression proper */ ExecInitExprRec(node, state, &state->resvalue, &state->resnull); /* Finally, append a DONE step */ scratch.opcode = EEOP_DONE_RETURN; ExprEvalPushStep(state, &scratch); ExecReadyExpr(state); return state; } /* * ExecInitQual: prepare a qual for execution by ExecQual * * Prepares for the evaluation of a conjunctive boolean expression (qual list * with implicit AND semantics) that returns true if none of the * subexpressions are false. * * We must return true if the list is empty. 
Since that's a very common case, * we optimize it a bit further by translating to a NULL ExprState pointer * rather than setting up an ExprState that computes constant TRUE. (Some * especially hot-spot callers of ExecQual detect this and avoid calling * ExecQual at all.) * * If any of the subexpressions yield NULL, then the result of the conjunction * is false. This makes ExecQual primarily useful for evaluating WHERE * clauses, since SQL specifies that tuples with null WHERE results do not * get selected. */ ExprState * ExecInitQual(List *qual, PlanState *parent) { ExprState *state; ExprEvalStep scratch = {0}; List *adjust_jumps = NIL; /* short-circuit (here and in ExecQual) for empty restriction list */ if (qual == NIL) return NULL; Assert(IsA(qual, List)); state = makeNode(ExprState); state->expr = (Expr *) qual; state->parent = parent; state->ext_params = NULL; /* mark expression as to be used with ExecQual() */ state->flags = EEO_FLAG_IS_QUAL; /* Insert setup steps as needed */ ExecCreateExprSetupSteps(state, (Node *) qual); /* * ExecQual() needs to return false for an expression returning NULL. That * allows us to short-circuit the evaluation the first time a NULL is * encountered. As qual evaluation is a hot-path this warrants using a * special opcode for qual evaluation that's simpler than BOOL_AND (which * has more complex NULL handling). */ scratch.opcode = EEOP_QUAL; /* * We can use ExprState's resvalue/resnull as target for each qual expr. 
	 */
	scratch.resvalue = &state->resvalue;
	scratch.resnull = &state->resnull;

	/* Compile each qual expr, then test its result with EEOP_QUAL */
	foreach_ptr(Expr, node, qual)
	{
		/* first evaluate expression */
		ExecInitExprRec(node, state, &state->resvalue, &state->resnull);

		/* then emit EEOP_QUAL to detect if it's false (or null) */
		scratch.d.qualexpr.jumpdone = -1;
		ExprEvalPushStep(state, &scratch);
		adjust_jumps = lappend_int(adjust_jumps, state->steps_len - 1);
	}

	/* adjust jump targets */
	foreach_int(jump, adjust_jumps)
	{
		ExprEvalStep *as = &state->steps[jump];

		Assert(as->opcode == EEOP_QUAL);
		Assert(as->d.qualexpr.jumpdone == -1);
		as->d.qualexpr.jumpdone = state->steps_len;
	}

	/*
	 * At the end, we don't need to do anything more. The last qual expr must
	 * have yielded TRUE, and since its result is stored in the desired output
	 * location, we're done.
	 */
	scratch.opcode = EEOP_DONE_RETURN;
	ExprEvalPushStep(state, &scratch);

	ExecReadyExpr(state);

	return state;
}

/*
 * ExecInitCheck: prepare a check constraint for execution by ExecCheck
 *
 * This is much like ExecInitQual/ExecQual, except that a null result from
 * the conjunction is treated as TRUE. This behavior is appropriate for
 * evaluating CHECK constraints, since SQL specifies that NULL constraint
 * conditions are not failures.
 *
 * Note that like ExecInitQual, this expects input in implicit-AND format.
 * Users of ExecCheck that have expressions in normal explicit-AND format
 * can just apply ExecInitExpr to produce suitable input for ExecCheck.
 */
ExprState *
ExecInitCheck(List *qual, PlanState *parent)
{
	/* short-circuit (here and in ExecCheck) for empty restriction list */
	if (qual == NIL)
		return NULL;

	Assert(IsA(qual, List));

	/*
	 * Just convert the implicit-AND list to an explicit AND (if there's more
	 * than one entry), and compile normally. Unlike ExecQual, we can't
	 * short-circuit on NULL results, so the regular AND behavior is needed.
	 */
	return ExecInitExpr(make_ands_explicit(qual), parent);
}

/*
 * Call ExecInitExpr() on a list of expressions, return a list of ExprStates.
*/ List * ExecInitExprList(List *nodes, PlanState *parent) { List *result = NIL; ListCell *lc; foreach(lc, nodes) { Expr *e = lfirst(lc); result = lappend(result, ExecInitExpr(e, parent)); } return result; } /* * ExecBuildProjectionInfo * * Build a ProjectionInfo node for evaluating the given tlist in the given * econtext, and storing the result into the tuple slot. (Caller must have * ensured that tuple slot has a descriptor matching the tlist!) * * inputDesc can be NULL, but if it is not, we check to see whether simple * Vars in the tlist match the descriptor. It is important to provide * inputDesc for relation-scan plan nodes, as a cross check that the relation * hasn't been changed since the plan was made. At higher levels of a plan, * there is no need to recheck. * * This is implemented by internally building an ExprState that performs the * whole projection in one go. * * Caution: before PG v10, the targetList was a list of ExprStates; now it * should be the planner-created targetlist, since we do the compilation here. */ ProjectionInfo * ExecBuildProjectionInfo(List *targetList, ExprContext *econtext, TupleTableSlot *slot, PlanState *parent, TupleDesc inputDesc) { ProjectionInfo *projInfo = makeNode(ProjectionInfo); ExprState *state; ExprEvalStep scratch = {0}; ListCell *lc; projInfo->pi_exprContext = econtext; /* We embed ExprState into ProjectionInfo instead of doing extra palloc */ projInfo->pi_state.type = T_ExprState; state = &projInfo->pi_state; state->expr = (Expr *) targetList; state->parent = parent; state->ext_params = NULL; state->resultslot = slot; /* Insert setup steps as needed */ ExecCreateExprSetupSteps(state, (Node *) targetList); /* Now compile each tlist column */ foreach(lc, targetList) { TargetEntry *tle = lfirst_node(TargetEntry, lc); Var *variable = NULL; AttrNumber attnum = 0; bool isSafeVar = false; /* * If tlist expression is a safe non-system Var, use the fast-path * ASSIGN_*_VAR opcodes. 
"Safe" means that we don't need to apply * CheckVarSlotCompatibility() during plan startup. If a source slot * was provided, we make the equivalent tests here; if a slot was not * provided, we assume that no check is needed because we're dealing * with a non-relation-scan-level expression. */ if (tle->expr != NULL && IsA(tle->expr, Var) && ((Var *) tle->expr)->varattno > 0) { /* Non-system Var, but how safe is it? */ variable = (Var *) tle->expr; attnum = variable->varattno; if (inputDesc == NULL) isSafeVar = true; /* can't check, just assume OK */ else if (attnum <= inputDesc->natts) { Form_pg_attribute attr = TupleDescAttr(inputDesc, attnum - 1); /* * If user attribute is dropped or has a type mismatch, don't * use ASSIGN_*_VAR. Instead let the normal expression * machinery handle it (which'll possibly error out). */ if (!attr->attisdropped && variable->vartype == attr->atttypid) { isSafeVar = true; } } } if (isSafeVar) { /* Fast-path: just generate an EEOP_ASSIGN_*_VAR step */ switch (variable->varno) { case INNER_VAR: /* get the tuple from the inner node */ scratch.opcode = EEOP_ASSIGN_INNER_VAR; break; case OUTER_VAR: /* get the tuple from the outer node */ scratch.opcode = EEOP_ASSIGN_OUTER_VAR; break; /* INDEX_VAR is handled by default case */ default: /* * Get the tuple from the relation being scanned, or the * old/new tuple slot, if old/new values were requested. */ switch (variable->varreturningtype) { case VAR_RETURNING_DEFAULT: scratch.opcode = EEOP_ASSIGN_SCAN_VAR; break; case VAR_RETURNING_OLD: scratch.opcode = EEOP_ASSIGN_OLD_VAR; state->flags |= EEO_FLAG_HAS_OLD; break; case VAR_RETURNING_NEW: scratch.opcode = EEOP_ASSIGN_NEW_VAR; state->flags |= EEO_FLAG_HAS_NEW; break; } break; } scratch.d.assign_var.attnum = attnum - 1; scratch.d.assign_var.resultnum = tle->resno - 1; ExprEvalPushStep(state, &scratch); } else { /* * Otherwise, compile the column expression normally. 
* * We can't tell the expression to evaluate directly into the * result slot, as the result slot (and the exprstate for that * matter) can change between executions. We instead evaluate * into the ExprState's resvalue/resnull and then move. */ ExecInitExprRec(tle->expr, state, &state->resvalue, &state->resnull); /* * Column might be referenced multiple times in upper nodes, so * force value to R/O - but only if it could be an expanded datum. */ if (get_typlen(exprType((Node *) tle->expr)) == -1) scratch.opcode = EEOP_ASSIGN_TMP_MAKE_RO; else scratch.opcode = EEOP_ASSIGN_TMP; scratch.d.assign_tmp.resultnum = tle->resno - 1; ExprEvalPushStep(state, &scratch); } } scratch.opcode = EEOP_DONE_NO_RETURN; ExprEvalPushStep(state, &scratch); ExecReadyExpr(state); return projInfo; } /* * ExecBuildUpdateProjection * * Build a ProjectionInfo node for constructing a new tuple during UPDATE. * The projection will be executed in the given econtext and the result will * be stored into the given tuple slot. (Caller must have ensured that tuple * slot has a descriptor matching the target rel!) * * When evalTargetList is false, targetList contains the UPDATE ... SET * expressions that have already been computed by a subplan node; the values * from this tlist are assumed to be available in the "outer" tuple slot. * When evalTargetList is true, targetList contains the UPDATE ... SET * expressions that must be computed (which could contain references to * the outer, inner, or scan tuple slots). * * In either case, targetColnos contains a list of the target column numbers * corresponding to the non-resjunk entries of targetList. The tlist values * are assigned into these columns of the result tuple slot. Target columns * not listed in targetColnos are filled from the UPDATE's old tuple, which * is assumed to be available in the "scan" tuple slot. * * targetList can also contain resjunk columns. These must be evaluated * if evalTargetList is true, but their values are discarded. 
 * relDesc must describe the relation we intend to update.
 *
 * This is basically a specialized variant of ExecBuildProjectionInfo.
 * However, it also performs sanity checks equivalent to ExecCheckPlanOutput.
 * Since we never make a normal tlist equivalent to the whole
 * tuple-to-be-assigned, there is no convenient way to apply
 * ExecCheckPlanOutput, so we must do our safety checks here.
 */
ProjectionInfo *
ExecBuildUpdateProjection(List *targetList,
						  bool evalTargetList,
						  List *targetColnos,
						  TupleDesc relDesc,
						  ExprContext *econtext,
						  TupleTableSlot *slot,
						  PlanState *parent)
{
	ProjectionInfo *projInfo = makeNode(ProjectionInfo);
	ExprState  *state;
	int			nAssignableCols;
	bool		sawJunk;
	Bitmapset  *assignedCols;
	ExprSetupInfo deform = {0, 0, 0, 0, 0, NIL};
	ExprEvalStep scratch = {0};
	int			outerattnum;
	ListCell   *lc,
			   *lc2;

	projInfo->pi_exprContext = econtext;
	/* We embed ExprState into ProjectionInfo instead of doing extra palloc */
	projInfo->pi_state.type = T_ExprState;
	state = &projInfo->pi_state;
	if (evalTargetList)
		state->expr = (Expr *) targetList;
	else
		state->expr = NULL;		/* not used */
	state->parent = parent;
	state->ext_params = NULL;

	state->resultslot = slot;

	/*
	 * Examine the targetList to see how many non-junk columns there are, and
	 * to verify that the non-junk columns come before the junk ones.
	 */
	nAssignableCols = 0;
	sawJunk = false;
	foreach(lc, targetList)
	{
		TargetEntry *tle = lfirst_node(TargetEntry, lc);

		if (tle->resjunk)
			sawJunk = true;
		else
		{
			if (sawJunk)
				elog(ERROR, "subplan target list is out of order");
			nAssignableCols++;
		}
	}

	/* We should have one targetColnos entry per non-junk column */
	if (nAssignableCols != list_length(targetColnos))
		elog(ERROR, "targetColnos does not match subplan target list");

	/*
	 * Build a bitmapset of the columns in targetColnos.  (We could just use
	 * list_member_int() tests, but that risks O(N^2) behavior with many
	 * columns.)
	 */
	assignedCols = NULL;
	foreach(lc, targetColnos)
	{
		AttrNumber	targetattnum = lfirst_int(lc);

		assignedCols = bms_add_member(assignedCols, targetattnum);
	}

	/*
	 * We need to insert EEOP_*_FETCHSOME steps to ensure the input tuples are
	 * sufficiently deconstructed.  The scan tuple must be deconstructed at
	 * least as far as the last old column we need.  (The loop below scans
	 * from the highest attribute number downward and stops at the first
	 * non-dropped column that is not being assigned to, i.e. the last old
	 * column that must be copied over from the scan tuple.)
	 */
	for (int attnum = relDesc->natts; attnum > 0; attnum--)
	{
		CompactAttribute *attr = TupleDescCompactAttr(relDesc, attnum - 1);

		if (attr->attisdropped)
			continue;
		if (bms_is_member(attnum, assignedCols))
			continue;
		deform.last_scan = attnum;
		break;
	}

	/*
	 * If we're actually evaluating the tlist, incorporate its input
	 * requirements too; otherwise, we'll just need to fetch the appropriate
	 * number of columns of the "outer" tuple.
	 */
	if (evalTargetList)
		expr_setup_walker((Node *) targetList, &deform);
	else
		deform.last_outer = nAssignableCols;

	ExecPushExprSetupSteps(state, &deform);

	/*
	 * Now generate code to evaluate the tlist's assignable expressions or
	 * fetch them from the outer tuple, incidentally validating that they'll
	 * be of the right data type.  The checks above ensure that the forboth()
	 * will iterate over exactly the non-junk columns.  Note that we don't
	 * bother evaluating any remaining resjunk columns.
	 */
	outerattnum = 0;
	forboth(lc, targetList, lc2, targetColnos)
	{
		TargetEntry *tle = lfirst_node(TargetEntry, lc);
		AttrNumber	targetattnum = lfirst_int(lc2);
		Form_pg_attribute attr;

		Assert(!tle->resjunk);

		/*
		 * Apply sanity checks comparable to ExecCheckPlanOutput().
		 */
		if (targetattnum <= 0 || targetattnum > relDesc->natts)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("table row type and query-specified row type do not match"),
					 errdetail("Query has too many columns.")));
		attr = TupleDescAttr(relDesc, targetattnum - 1);
		if (attr->attisdropped)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("table row type and query-specified row type do not match"),
					 errdetail("Query provides a value for a dropped column at ordinal position %d.",
							   targetattnum)));
		if (exprType((Node *) tle->expr) != attr->atttypid)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("table row type and query-specified row type do not match"),
					 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
							   format_type_be(attr->atttypid),
							   targetattnum,
							   format_type_be(exprType((Node *) tle->expr)))));

		/* OK, generate code to perform the assignment. */
		if (evalTargetList)
		{
			/*
			 * We must evaluate the TLE's expression and assign it.  We do not
			 * bother jumping through hoops for "safe" Vars like
			 * ExecBuildProjectionInfo does; this is a relatively less-used
			 * path and it doesn't seem worth expending code for that.
			 */
			ExecInitExprRec(tle->expr, state, &state->resvalue, &state->resnull);
			/* Needn't worry about read-only-ness here, either. */
			scratch.opcode = EEOP_ASSIGN_TMP;
			scratch.d.assign_tmp.resultnum = targetattnum - 1;
			ExprEvalPushStep(state, &scratch);
		}
		else
		{
			/* Just assign from the outer tuple. */
			scratch.opcode = EEOP_ASSIGN_OUTER_VAR;
			scratch.d.assign_var.attnum = outerattnum;
			scratch.d.assign_var.resultnum = targetattnum - 1;
			ExprEvalPushStep(state, &scratch);
		}
		outerattnum++;
	}

	/*
	 * Now generate code to copy over any old columns that were not assigned
	 * to, and to ensure that dropped columns are set to NULL.
	 */
	for (int attnum = 1; attnum <= relDesc->natts; attnum++)
	{
		CompactAttribute *attr = TupleDescCompactAttr(relDesc, attnum - 1);

		if (attr->attisdropped)
		{
			/* Put a null into the ExprState's resvalue/resnull ... */
			scratch.opcode = EEOP_CONST;
			scratch.resvalue = &state->resvalue;
			scratch.resnull = &state->resnull;
			scratch.d.constval.value = (Datum) 0;
			scratch.d.constval.isnull = true;
			ExprEvalPushStep(state, &scratch);
			/* ... then assign it to the result slot */
			scratch.opcode = EEOP_ASSIGN_TMP;
			scratch.d.assign_tmp.resultnum = attnum - 1;
			ExprEvalPushStep(state, &scratch);
		}
		else if (!bms_is_member(attnum, assignedCols))
		{
			/* Certainly the right type, so needn't check */
			scratch.opcode = EEOP_ASSIGN_SCAN_VAR;
			scratch.d.assign_var.attnum = attnum - 1;
			scratch.d.assign_var.resultnum = attnum - 1;
			ExprEvalPushStep(state, &scratch);
		}
	}

	/*
	 * All output columns were assigned into the result slot by the steps
	 * above, so the expression itself has nothing to return.
	 */
	scratch.opcode = EEOP_DONE_NO_RETURN;
	ExprEvalPushStep(state, &scratch);

	ExecReadyExpr(state);

	return projInfo;
}

/*
 * ExecPrepareExpr --- initialize for expression execution outside a normal
 * Plan tree context.
 *
 * This differs from ExecInitExpr in that we don't assume the caller is
 * already running in the EState's per-query context.  Also, we run the
 * passed expression tree through expression_planner() to prepare it for
 * execution.  (In ordinary Plan trees the regular planning process will have
 * made the appropriate transformations on expressions, but for standalone
 * expressions this won't have happened.)
 */
ExprState *
ExecPrepareExpr(Expr *node, EState *estate)
{
	ExprState  *result;
	MemoryContext oldcontext;

	/* Allocate everything in the per-query context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	node = expression_planner(node);

	result = ExecInitExpr(node, NULL);

	MemoryContextSwitchTo(oldcontext);

	return result;
}

/*
 * ExecPrepareQual --- initialize for qual execution outside a normal
 * Plan tree context.
 *
 * This differs from ExecInitQual in that we don't assume the caller is
 * already running in the EState's per-query context.
Also, we run the * passed expression tree through expression_planner() to prepare it for * execution. (In ordinary Plan trees the regular planning process will have * made the appropriate transformations on expressions, but for standalone * expressions this won't have happened.) */ ExprState * ExecPrepareQual(List *qual, EState *estate) { ExprState *result; MemoryContext oldcontext; oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); qual = (List *) expression_planner((Expr *) qual); result = ExecInitQual(qual, NULL); MemoryContextSwitchTo(oldcontext); return result; } /* * ExecPrepareCheck -- initialize check constraint for execution outside a * normal Plan tree context. * * See ExecPrepareExpr() and ExecInitCheck() for details. */ ExprState * ExecPrepareCheck(List *qual, EState *estate) { ExprState *result; MemoryContext oldcontext; oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); qual = (List *) expression_planner((Expr *) qual); result = ExecInitCheck(qual, NULL); MemoryContextSwitchTo(oldcontext); return result; } /* * Call ExecPrepareExpr() on each member of a list of Exprs, and return * a list of ExprStates. * * See ExecPrepareExpr() for details. */ List * ExecPrepareExprList(List *nodes, EState *estate) { List *result = NIL; MemoryContext oldcontext; ListCell *lc; /* Ensure that the list cell nodes are in the right context too */ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); foreach(lc, nodes) { Expr *e = (Expr *) lfirst(lc); result = lappend(result, ExecPrepareExpr(e, estate)); } MemoryContextSwitchTo(oldcontext); return result; } /* * ExecCheck - evaluate a check constraint * * For check constraints, a null result is taken as TRUE, ie the constraint * passes. * * The check constraint may have been prepared with ExecInitCheck * (possibly via ExecPrepareCheck) if the caller had it in implicit-AND * format, but a regular boolean expression prepared with ExecInitExpr or * ExecPrepareExpr works too. 
*/ bool ExecCheck(ExprState *state, ExprContext *econtext) { Datum ret; bool isnull; /* short-circuit (here and in ExecInitCheck) for empty restriction list */ if (state == NULL) return true; /* verify that expression was not compiled using ExecInitQual */ Assert(!(state->flags & EEO_FLAG_IS_QUAL)); ret = ExecEvalExprSwitchContext(state, econtext, &isnull); if (isnull) return true; return DatumGetBool(ret); } /* * Prepare a compiled expression for execution. This has to be called for * every ExprState before it can be executed. * * NB: While this currently only calls ExecReadyInterpretedExpr(), * this will likely get extended to further expression evaluation methods. * Therefore this should be used instead of directly calling * ExecReadyInterpretedExpr(). */ static void ExecReadyExpr(ExprState *state) { if (jit_compile_expr(state)) return; ExecReadyInterpretedExpr(state); } /* * Append the steps necessary for the evaluation of node to ExprState->steps, * possibly recursing into sub-expressions of node. 
* * node - expression to evaluate * state - ExprState to whose ->steps to append the necessary operations * resv / resnull - where to store the result of the node into */ static void ExecInitExprRec(Expr *node, ExprState *state, Datum *resv, bool *resnull) { ExprEvalStep scratch = {0}; /* Guard against stack overflow due to overly complex expressions */ check_stack_depth(); /* Step's output location is always what the caller gave us */ Assert(resv != NULL && resnull != NULL); scratch.resvalue = resv; scratch.resnull = resnull; /* cases should be ordered as they are in enum NodeTag */ switch (nodeTag(node)) { case T_Var: { Var *variable = (Var *) node; if (variable->varattno == InvalidAttrNumber) { /* whole-row Var */ ExecInitWholeRowVar(&scratch, variable, state); } else if (variable->varattno <= 0) { /* system column */ scratch.d.var.attnum = variable->varattno; scratch.d.var.vartype = variable->vartype; scratch.d.var.varreturningtype = variable->varreturningtype; switch (variable->varno) { case INNER_VAR: scratch.opcode = EEOP_INNER_SYSVAR; break; case OUTER_VAR: scratch.opcode = EEOP_OUTER_SYSVAR; break; /* INDEX_VAR is handled by default case */ default: switch (variable->varreturningtype) { case VAR_RETURNING_DEFAULT: scratch.opcode = EEOP_SCAN_SYSVAR; break; case VAR_RETURNING_OLD: scratch.opcode = EEOP_OLD_SYSVAR; state->flags |= EEO_FLAG_HAS_OLD; break; case VAR_RETURNING_NEW: scratch.opcode = EEOP_NEW_SYSVAR; state->flags |= EEO_FLAG_HAS_NEW; break; } break; } } else { /* regular user column */ scratch.d.var.attnum = variable->varattno - 1; scratch.d.var.vartype = variable->vartype; scratch.d.var.varreturningtype = variable->varreturningtype; switch (variable->varno) { case INNER_VAR: scratch.opcode = EEOP_INNER_VAR; break; case OUTER_VAR: scratch.opcode = EEOP_OUTER_VAR; break; /* INDEX_VAR is handled by default case */ default: switch (variable->varreturningtype) { case VAR_RETURNING_DEFAULT: scratch.opcode = EEOP_SCAN_VAR; break; case VAR_RETURNING_OLD: 
scratch.opcode = EEOP_OLD_VAR; state->flags |= EEO_FLAG_HAS_OLD; break; case VAR_RETURNING_NEW: scratch.opcode = EEOP_NEW_VAR; state->flags |= EEO_FLAG_HAS_NEW; break; } break; } } ExprEvalPushStep(state, &scratch); break; } case T_Const: { Const *con = (Const *) node; scratch.opcode = EEOP_CONST; scratch.d.constval.value = con->constvalue; scratch.d.constval.isnull = con->constisnull; ExprEvalPushStep(state, &scratch); break; } case T_Param: { Param *param = (Param *) node; ParamListInfo params; switch (param->paramkind) { case PARAM_EXEC: scratch.opcode = EEOP_PARAM_EXEC; scratch.d.param.paramid = param->paramid; scratch.d.param.paramtype = param->paramtype; ExprEvalPushStep(state, &scratch); break; case PARAM_EXTERN: /* * If we have a relevant ParamCompileHook, use it; * otherwise compile a standard EEOP_PARAM_EXTERN * step. ext_params, if supplied, takes precedence * over info from the parent node's EState (if any). */ if (state->ext_params) params = state->ext_params; else if (state->parent && state->parent->state) params = state->parent->state->es_param_list_info; else params = NULL; if (params && params->paramCompile) { params->paramCompile(params, param, state, resv, resnull); } else { scratch.opcode = EEOP_PARAM_EXTERN; scratch.d.param.paramid = param->paramid; scratch.d.param.paramtype = param->paramtype; ExprEvalPushStep(state, &scratch); } break; default: elog(ERROR, "unrecognized paramkind: %d", (int) param->paramkind); break; } break; } case T_Aggref: { Aggref *aggref = (Aggref *) node; scratch.opcode = EEOP_AGGREF; scratch.d.aggref.aggno = aggref->aggno; if (state->parent && IsA(state->parent, AggState)) { AggState *aggstate = (AggState *) state->parent; aggstate->aggs = lappend(aggstate->aggs, aggref); } else { /* planner messed up */ elog(ERROR, "Aggref found in non-Agg plan node"); } ExprEvalPushStep(state, &scratch); break; } case T_GroupingFunc: { GroupingFunc *grp_node = (GroupingFunc *) node; Agg *agg; if (!state->parent || !IsA(state->parent, 
AggState) || !IsA(state->parent->plan, Agg)) elog(ERROR, "GroupingFunc found in non-Agg plan node"); scratch.opcode = EEOP_GROUPING_FUNC; agg = (Agg *) (state->parent->plan); if (agg->groupingSets) scratch.d.grouping_func.clauses = grp_node->cols; else scratch.d.grouping_func.clauses = NIL; ExprEvalPushStep(state, &scratch); break; } case T_WindowFunc: { WindowFunc *wfunc = (WindowFunc *) node; WindowFuncExprState *wfstate = makeNode(WindowFuncExprState); wfstate->wfunc = wfunc; if (state->parent && IsA(state->parent, WindowAggState)) { WindowAggState *winstate = (WindowAggState *) state->parent; int nfuncs; winstate->funcs = lappend(winstate->funcs, wfstate); nfuncs = ++winstate->numfuncs; if (wfunc->winagg) winstate->numaggs++; /* for now initialize agg using old style expressions */ wfstate->args = ExecInitExprList(wfunc->args, state->parent); wfstate->aggfilter = ExecInitExpr(wfunc->aggfilter, state->parent); /* * Complain if the windowfunc's arguments contain any * windowfuncs; nested window functions are semantically * nonsensical. (This should have been caught earlier, * but we defend against it here anyway.) 
*/ if (nfuncs != winstate->numfuncs) ereport(ERROR, (errcode(ERRCODE_WINDOWING_ERROR), errmsg("window function calls cannot be nested"))); } else { /* planner messed up */ elog(ERROR, "WindowFunc found in non-WindowAgg plan node"); } scratch.opcode = EEOP_WINDOW_FUNC; scratch.d.window_func.wfstate = wfstate; ExprEvalPushStep(state, &scratch); break; } case T_MergeSupportFunc: { /* must be in a MERGE, else something messed up */ if (!state->parent || !IsA(state->parent, ModifyTableState) || ((ModifyTableState *) state->parent)->operation != CMD_MERGE) elog(ERROR, "MergeSupportFunc found in non-merge plan node"); scratch.opcode = EEOP_MERGE_SUPPORT_FUNC; ExprEvalPushStep(state, &scratch); break; } case T_SubscriptingRef: { SubscriptingRef *sbsref = (SubscriptingRef *) node; ExecInitSubscriptingRef(&scratch, sbsref, state, resv, resnull); break; } case T_FuncExpr: { FuncExpr *func = (FuncExpr *) node; ExecInitFunc(&scratch, node, func->args, func->funcid, func->inputcollid, state); ExprEvalPushStep(state, &scratch); break; } case T_OpExpr: { OpExpr *op = (OpExpr *) node; ExecInitFunc(&scratch, node, op->args, op->opfuncid, op->inputcollid, state); ExprEvalPushStep(state, &scratch); break; } case T_DistinctExpr: { DistinctExpr *op = (DistinctExpr *) node; ExecInitFunc(&scratch, node, op->args, op->opfuncid, op->inputcollid, state); /* * Change opcode of call instruction to EEOP_DISTINCT. * * XXX: historically we've not called the function usage * pgstat infrastructure - that seems inconsistent given that * we do so for normal function *and* operator evaluation. If * we decided to do that here, we'd probably want separate * opcodes for FUSAGE or not. 
*/ scratch.opcode = EEOP_DISTINCT; ExprEvalPushStep(state, &scratch); break; } case T_NullIfExpr: { NullIfExpr *op = (NullIfExpr *) node; ExecInitFunc(&scratch, node, op->args, op->opfuncid, op->inputcollid, state); /* * If first argument is of varlena type, we'll need to ensure * that the value passed to the comparison function is a * read-only pointer. */ scratch.d.func.make_ro = (get_typlen(exprType((Node *) linitial(op->args))) == -1); /* * Change opcode of call instruction to EEOP_NULLIF. * * XXX: historically we've not called the function usage * pgstat infrastructure - that seems inconsistent given that * we do so for normal function *and* operator evaluation. If * we decided to do that here, we'd probably want separate * opcodes for FUSAGE or not. */ scratch.opcode = EEOP_NULLIF; ExprEvalPushStep(state, &scratch); break; } case T_ScalarArrayOpExpr: { ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node; Expr *scalararg; Expr *arrayarg; FmgrInfo *finfo; FunctionCallInfo fcinfo; AclResult aclresult; Oid cmpfuncid; /* * Select the correct comparison function. When we do hashed * NOT IN clauses, the opfuncid will be the inequality * comparison function and negfuncid will be set to equality. * We need to use the equality function for hash probes. 
*/ if (OidIsValid(opexpr->negfuncid)) { Assert(OidIsValid(opexpr->hashfuncid)); cmpfuncid = opexpr->negfuncid; } else cmpfuncid = opexpr->opfuncid; Assert(list_length(opexpr->args) == 2); scalararg = (Expr *) linitial(opexpr->args); arrayarg = (Expr *) lsecond(opexpr->args); /* Check permission to call function */ aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(cmpfuncid)); InvokeFunctionExecuteHook(cmpfuncid); if (OidIsValid(opexpr->hashfuncid)) { aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(opexpr->hashfuncid)); InvokeFunctionExecuteHook(opexpr->hashfuncid); } /* Set up the primary fmgr lookup information */ finfo = palloc0_object(FmgrInfo); fcinfo = palloc0(SizeForFunctionCallInfo(2)); fmgr_info(cmpfuncid, finfo); fmgr_info_set_expr((Node *) node, finfo); InitFunctionCallInfoData(*fcinfo, finfo, 2, opexpr->inputcollid, NULL, NULL); /* * If hashfuncid is set, we create a EEOP_HASHED_SCALARARRAYOP * step instead of a EEOP_SCALARARRAYOP. This provides much * faster lookup performance than the normal linear search * when the number of items in the array is anything but very * small. */ if (OidIsValid(opexpr->hashfuncid)) { /* Evaluate scalar directly into left function argument */ ExecInitExprRec(scalararg, state, &fcinfo->args[0].value, &fcinfo->args[0].isnull); /* * Evaluate array argument into our return value. There's * no danger in that, because the return value is * guaranteed to be overwritten by * EEOP_HASHED_SCALARARRAYOP, and will not be passed to * any other expression. 
*/ ExecInitExprRec(arrayarg, state, resv, resnull); /* And perform the operation */ scratch.opcode = EEOP_HASHED_SCALARARRAYOP; scratch.d.hashedscalararrayop.inclause = opexpr->useOr; scratch.d.hashedscalararrayop.finfo = finfo; scratch.d.hashedscalararrayop.fcinfo_data = fcinfo; scratch.d.hashedscalararrayop.saop = opexpr; ExprEvalPushStep(state, &scratch); } else { /* Evaluate scalar directly into left function argument */ ExecInitExprRec(scalararg, state, &fcinfo->args[0].value, &fcinfo->args[0].isnull); /* * Evaluate array argument into our return value. There's * no danger in that, because the return value is * guaranteed to be overwritten by EEOP_SCALARARRAYOP, and * will not be passed to any other expression. */ ExecInitExprRec(arrayarg, state, resv, resnull); /* And perform the operation */ scratch.opcode = EEOP_SCALARARRAYOP; scratch.d.scalararrayop.element_type = InvalidOid; scratch.d.scalararrayop.useOr = opexpr->useOr; scratch.d.scalararrayop.finfo = finfo; scratch.d.scalararrayop.fcinfo_data = fcinfo; scratch.d.scalararrayop.fn_addr = finfo->fn_addr; ExprEvalPushStep(state, &scratch); } break; } case T_BoolExpr: { BoolExpr *boolexpr = (BoolExpr *) node; int nargs = list_length(boolexpr->args); List *adjust_jumps = NIL; int off; ListCell *lc; /* allocate scratch memory used by all steps of AND/OR */ if (boolexpr->boolop != NOT_EXPR) scratch.d.boolexpr.anynull = palloc_object(bool); /* * For each argument evaluate the argument itself, then * perform the bool operation's appropriate handling. * * We can evaluate each argument into our result area, since * the short-circuiting logic means we only need to remember * previous NULL values. * * AND/OR is split into separate STEP_FIRST (one) / STEP (zero * or more) / STEP_LAST (one) steps, as each of those has to * perform different work. The FIRST/LAST split is valid * because AND/OR have at least two arguments. 
*/ off = 0; foreach(lc, boolexpr->args) { Expr *arg = (Expr *) lfirst(lc); /* Evaluate argument into our output variable */ ExecInitExprRec(arg, state, resv, resnull); /* Perform the appropriate step type */ switch (boolexpr->boolop) { case AND_EXPR: Assert(nargs >= 2); if (off == 0) scratch.opcode = EEOP_BOOL_AND_STEP_FIRST; else if (off + 1 == nargs) scratch.opcode = EEOP_BOOL_AND_STEP_LAST; else scratch.opcode = EEOP_BOOL_AND_STEP; break; case OR_EXPR: Assert(nargs >= 2); if (off == 0) scratch.opcode = EEOP_BOOL_OR_STEP_FIRST; else if (off + 1 == nargs) scratch.opcode = EEOP_BOOL_OR_STEP_LAST; else scratch.opcode = EEOP_BOOL_OR_STEP; break; case NOT_EXPR: Assert(nargs == 1); scratch.opcode = EEOP_BOOL_NOT_STEP; break; default: elog(ERROR, "unrecognized boolop: %d", (int) boolexpr->boolop); break; } scratch.d.boolexpr.jumpdone = -1; ExprEvalPushStep(state, &scratch); adjust_jumps = lappend_int(adjust_jumps, state->steps_len - 1); off++; } /* adjust jump targets */ foreach(lc, adjust_jumps) { ExprEvalStep *as = &state->steps[lfirst_int(lc)]; Assert(as->d.boolexpr.jumpdone == -1); as->d.boolexpr.jumpdone = state->steps_len; } break; } case T_SubPlan: { SubPlan *subplan = (SubPlan *) node; /* * Real execution of a MULTIEXPR SubPlan has already been * done. What we have to do here is return a dummy NULL record * value in case this targetlist element is assigned * someplace. 
*/ if (subplan->subLinkType == MULTIEXPR_SUBLINK) { scratch.opcode = EEOP_CONST; scratch.d.constval.value = (Datum) 0; scratch.d.constval.isnull = true; ExprEvalPushStep(state, &scratch); break; } ExecInitSubPlanExpr(subplan, state, resv, resnull); break; } case T_FieldSelect: { FieldSelect *fselect = (FieldSelect *) node; /* evaluate row/record argument into result area */ ExecInitExprRec(fselect->arg, state, resv, resnull); /* and extract field */ scratch.opcode = EEOP_FIELDSELECT; scratch.d.fieldselect.fieldnum = fselect->fieldnum; scratch.d.fieldselect.resulttype = fselect->resulttype; scratch.d.fieldselect.rowcache.cacheptr = NULL; ExprEvalPushStep(state, &scratch); break; } case T_FieldStore: { FieldStore *fstore = (FieldStore *) node; TupleDesc tupDesc; ExprEvalRowtypeCache *rowcachep; Datum *values; bool *nulls; int ncolumns; ListCell *l1, *l2; /* find out the number of columns in the composite type */ tupDesc = lookup_rowtype_tupdesc(fstore->resulttype, -1); ncolumns = tupDesc->natts; ReleaseTupleDesc(tupDesc); /* create workspace for column values */ values = palloc_array(Datum, ncolumns); nulls = palloc_array(bool, ncolumns); /* create shared composite-type-lookup cache struct */ rowcachep = palloc_object(ExprEvalRowtypeCache); rowcachep->cacheptr = NULL; /* emit code to evaluate the composite input value */ ExecInitExprRec(fstore->arg, state, resv, resnull); /* next, deform the input tuple into our workspace */ scratch.opcode = EEOP_FIELDSTORE_DEFORM; scratch.d.fieldstore.fstore = fstore; scratch.d.fieldstore.rowcache = rowcachep; scratch.d.fieldstore.values = values; scratch.d.fieldstore.nulls = nulls; scratch.d.fieldstore.ncolumns = ncolumns; ExprEvalPushStep(state, &scratch); /* evaluate new field values, store in workspace columns */ forboth(l1, fstore->newvals, l2, fstore->fieldnums) { Expr *e = (Expr *) lfirst(l1); AttrNumber fieldnum = lfirst_int(l2); Datum *save_innermost_caseval; bool *save_innermost_casenull; if (fieldnum <= 0 || fieldnum > 
ncolumns) elog(ERROR, "field number %d is out of range in FieldStore", fieldnum); /* * Use the CaseTestExpr mechanism to pass down the old * value of the field being replaced; this is needed in * case the newval is itself a FieldStore or * SubscriptingRef that has to obtain and modify the old * value. It's safe to reuse the CASE mechanism because * there cannot be a CASE between here and where the value * would be needed, and a field assignment can't be within * a CASE either. (So saving and restoring * innermost_caseval is just paranoia, but let's do it * anyway.) * * Another non-obvious point is that it's safe to use the * field's values[]/nulls[] entries as both the caseval * source and the result address for this subexpression. * That's okay only because (1) both FieldStore and * SubscriptingRef evaluate their arg or refexpr inputs * first, and (2) any such CaseTestExpr is directly the * arg or refexpr input. So any read of the caseval will * occur before there's a chance to overwrite it. Also, * if multiple entries in the newvals/fieldnums lists * target the same field, they'll effectively be applied * left-to-right which is what we want. 
*/ save_innermost_caseval = state->innermost_caseval; save_innermost_casenull = state->innermost_casenull; state->innermost_caseval = &values[fieldnum - 1]; state->innermost_casenull = &nulls[fieldnum - 1]; ExecInitExprRec(e, state, &values[fieldnum - 1], &nulls[fieldnum - 1]); state->innermost_caseval = save_innermost_caseval; state->innermost_casenull = save_innermost_casenull; } /* finally, form result tuple */ scratch.opcode = EEOP_FIELDSTORE_FORM; scratch.d.fieldstore.fstore = fstore; scratch.d.fieldstore.rowcache = rowcachep; scratch.d.fieldstore.values = values; scratch.d.fieldstore.nulls = nulls; scratch.d.fieldstore.ncolumns = ncolumns; ExprEvalPushStep(state, &scratch); break; } case T_RelabelType: { /* relabel doesn't need to do anything at runtime */ RelabelType *relabel = (RelabelType *) node; ExecInitExprRec(relabel->arg, state, resv, resnull); break; } case T_CoerceViaIO: { CoerceViaIO *iocoerce = (CoerceViaIO *) node; Oid iofunc; bool typisvarlena; Oid typioparam; FunctionCallInfo fcinfo_in; /* evaluate argument into step's result area */ ExecInitExprRec(iocoerce->arg, state, resv, resnull); /* * Prepare both output and input function calls, to be * evaluated inside a single evaluation step for speed - this * can be a very common operation. * * We don't check permissions here as a type's input/output * function are assumed to be executable by everyone. 
*/ if (state->escontext == NULL) scratch.opcode = EEOP_IOCOERCE; else scratch.opcode = EEOP_IOCOERCE_SAFE; /* lookup the source type's output function */ scratch.d.iocoerce.finfo_out = palloc0_object(FmgrInfo); scratch.d.iocoerce.fcinfo_data_out = palloc0(SizeForFunctionCallInfo(1)); getTypeOutputInfo(exprType((Node *) iocoerce->arg), &iofunc, &typisvarlena); fmgr_info(iofunc, scratch.d.iocoerce.finfo_out); fmgr_info_set_expr((Node *) node, scratch.d.iocoerce.finfo_out); InitFunctionCallInfoData(*scratch.d.iocoerce.fcinfo_data_out, scratch.d.iocoerce.finfo_out, 1, InvalidOid, NULL, NULL); /* lookup the result type's input function */ scratch.d.iocoerce.finfo_in = palloc0_object(FmgrInfo); scratch.d.iocoerce.fcinfo_data_in = palloc0(SizeForFunctionCallInfo(3)); getTypeInputInfo(iocoerce->resulttype, &iofunc, &typioparam); fmgr_info(iofunc, scratch.d.iocoerce.finfo_in); fmgr_info_set_expr((Node *) node, scratch.d.iocoerce.finfo_in); InitFunctionCallInfoData(*scratch.d.iocoerce.fcinfo_data_in, scratch.d.iocoerce.finfo_in, 3, InvalidOid, NULL, NULL); /* * We can preload the second and third arguments for the input * function, since they're constants. 
*/ fcinfo_in = scratch.d.iocoerce.fcinfo_data_in; fcinfo_in->args[1].value = ObjectIdGetDatum(typioparam); fcinfo_in->args[1].isnull = false; fcinfo_in->args[2].value = Int32GetDatum(-1); fcinfo_in->args[2].isnull = false; fcinfo_in->context = (Node *) state->escontext; ExprEvalPushStep(state, &scratch); break; } case T_ArrayCoerceExpr: { ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; Oid resultelemtype; ExprState *elemstate; /* evaluate argument into step's result area */ ExecInitExprRec(acoerce->arg, state, resv, resnull); resultelemtype = get_element_type(acoerce->resulttype); if (!OidIsValid(resultelemtype)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("target type is not an array"))); /* * Construct a sub-expression for the per-element expression; * but don't ready it until after we check it for triviality. * We assume it hasn't any Var references, but does have a * CaseTestExpr representing the source array element values. */ elemstate = makeNode(ExprState); elemstate->expr = acoerce->elemexpr; elemstate->parent = state->parent; elemstate->ext_params = state->ext_params; elemstate->innermost_caseval = palloc_object(Datum); elemstate->innermost_casenull = palloc_object(bool); ExecInitExprRec(acoerce->elemexpr, elemstate, &elemstate->resvalue, &elemstate->resnull); if (elemstate->steps_len == 1 && elemstate->steps[0].opcode == EEOP_CASE_TESTVAL) { /* Trivial, so we need no per-element work at runtime */ elemstate = NULL; } else { /* Not trivial, so append a DONE step */ scratch.opcode = EEOP_DONE_RETURN; ExprEvalPushStep(elemstate, &scratch); /* and ready the subexpression */ ExecReadyExpr(elemstate); } scratch.opcode = EEOP_ARRAYCOERCE; scratch.d.arraycoerce.elemexprstate = elemstate; scratch.d.arraycoerce.resultelemtype = resultelemtype; if (elemstate) { /* Set up workspace for array_map */ scratch.d.arraycoerce.amstate = palloc0_object(ArrayMapState); } else { /* Don't need workspace if there's no subexpression */ 
scratch.d.arraycoerce.amstate = NULL; } ExprEvalPushStep(state, &scratch); break; } case T_ConvertRowtypeExpr: { ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node; ExprEvalRowtypeCache *rowcachep; /* cache structs must be out-of-line for space reasons */ rowcachep = palloc(2 * sizeof(ExprEvalRowtypeCache)); rowcachep[0].cacheptr = NULL; rowcachep[1].cacheptr = NULL; /* evaluate argument into step's result area */ ExecInitExprRec(convert->arg, state, resv, resnull); /* and push conversion step */ scratch.opcode = EEOP_CONVERT_ROWTYPE; scratch.d.convert_rowtype.inputtype = exprType((Node *) convert->arg); scratch.d.convert_rowtype.outputtype = convert->resulttype; scratch.d.convert_rowtype.incache = &rowcachep[0]; scratch.d.convert_rowtype.outcache = &rowcachep[1]; scratch.d.convert_rowtype.map = NULL; ExprEvalPushStep(state, &scratch); break; } /* note that CaseWhen expressions are handled within this block */ case T_CaseExpr: { CaseExpr *caseExpr = (CaseExpr *) node; List *adjust_jumps = NIL; Datum *caseval = NULL; bool *casenull = NULL; ListCell *lc; /* * If there's a test expression, we have to evaluate it and * save the value where the CaseTestExpr placeholders can find * it. */ if (caseExpr->arg != NULL) { /* Evaluate testexpr into caseval/casenull workspace */ caseval = palloc_object(Datum); casenull = palloc_object(bool); ExecInitExprRec(caseExpr->arg, state, caseval, casenull); /* * Since value might be read multiple times, force to R/O * - but only if it could be an expanded datum. 
*/ if (get_typlen(exprType((Node *) caseExpr->arg)) == -1) { /* change caseval in-place */ scratch.opcode = EEOP_MAKE_READONLY; scratch.resvalue = caseval; scratch.resnull = casenull; scratch.d.make_readonly.value = caseval; scratch.d.make_readonly.isnull = casenull; ExprEvalPushStep(state, &scratch); /* restore normal settings of scratch fields */ scratch.resvalue = resv; scratch.resnull = resnull; } } /* * Prepare to evaluate each of the WHEN clauses in turn; as * soon as one is true we return the value of the * corresponding THEN clause. If none are true then we return * the value of the ELSE clause, or NULL if there is none. */ foreach(lc, caseExpr->args) { CaseWhen *when = (CaseWhen *) lfirst(lc); Datum *save_innermost_caseval; bool *save_innermost_casenull; int whenstep; /* * Make testexpr result available to CaseTestExpr nodes * within the condition. We must save and restore prior * setting of innermost_caseval fields, in case this node * is itself within a larger CASE. * * If there's no test expression, we don't actually need * to save and restore these fields; but it's less code to * just do so unconditionally. */ save_innermost_caseval = state->innermost_caseval; save_innermost_casenull = state->innermost_casenull; state->innermost_caseval = caseval; state->innermost_casenull = casenull; /* evaluate condition into CASE's result variables */ ExecInitExprRec(when->expr, state, resv, resnull); state->innermost_caseval = save_innermost_caseval; state->innermost_casenull = save_innermost_casenull; /* If WHEN result isn't true, jump to next CASE arm */ scratch.opcode = EEOP_JUMP_IF_NOT_TRUE; scratch.d.jump.jumpdone = -1; /* computed later */ ExprEvalPushStep(state, &scratch); whenstep = state->steps_len - 1; /* * If WHEN result is true, evaluate THEN result, storing * it into the CASE's result variables. 
*/ ExecInitExprRec(when->result, state, resv, resnull); /* Emit JUMP step to jump to end of CASE's code */ scratch.opcode = EEOP_JUMP; scratch.d.jump.jumpdone = -1; /* computed later */ ExprEvalPushStep(state, &scratch); /* * Don't know address for that jump yet, compute once the * whole CASE expression is built. */ adjust_jumps = lappend_int(adjust_jumps, state->steps_len - 1); /* * But we can set WHEN test's jump target now, to make it * jump to the next WHEN subexpression or the ELSE. */ state->steps[whenstep].d.jump.jumpdone = state->steps_len; } /* transformCaseExpr always adds a default */ Assert(caseExpr->defresult); /* evaluate ELSE expr into CASE's result variables */ ExecInitExprRec(caseExpr->defresult, state, resv, resnull); /* adjust jump targets */ foreach(lc, adjust_jumps) { ExprEvalStep *as = &state->steps[lfirst_int(lc)]; Assert(as->opcode == EEOP_JUMP); Assert(as->d.jump.jumpdone == -1); as->d.jump.jumpdone = state->steps_len; } break; } case T_CaseTestExpr: { /* * Read from location identified by innermost_caseval. Note * that innermost_caseval could be NULL, if this node isn't * actually within a CaseExpr, ArrayCoerceExpr, etc structure. * That can happen because some parts of the system abuse * CaseTestExpr to cause a read of a value externally supplied * in econtext->caseValue_datum. We'll take care of that by * generating a specialized operation. */ if (state->innermost_caseval == NULL) scratch.opcode = EEOP_CASE_TESTVAL_EXT; else { scratch.opcode = EEOP_CASE_TESTVAL; scratch.d.casetest.value = state->innermost_caseval; scratch.d.casetest.isnull = state->innermost_casenull; } ExprEvalPushStep(state, &scratch); break; } case T_ArrayExpr: { ArrayExpr *arrayexpr = (ArrayExpr *) node; int nelems = list_length(arrayexpr->elements); ListCell *lc; int elemoff; /* * Evaluate by computing each element, and then forming the * array. Elements are computed into scratch arrays * associated with the ARRAYEXPR step. 
*/ scratch.opcode = EEOP_ARRAYEXPR; scratch.d.arrayexpr.elemvalues = palloc_array(Datum, nelems); scratch.d.arrayexpr.elemnulls = palloc_array(bool, nelems); scratch.d.arrayexpr.nelems = nelems; /* fill remaining fields of step */ scratch.d.arrayexpr.multidims = arrayexpr->multidims; scratch.d.arrayexpr.elemtype = arrayexpr->element_typeid; /* do one-time catalog lookup for type info */ get_typlenbyvalalign(arrayexpr->element_typeid, &scratch.d.arrayexpr.elemlength, &scratch.d.arrayexpr.elembyval, &scratch.d.arrayexpr.elemalign); /* prepare to evaluate all arguments */ elemoff = 0; foreach(lc, arrayexpr->elements) { Expr *e = (Expr *) lfirst(lc); ExecInitExprRec(e, state, &scratch.d.arrayexpr.elemvalues[elemoff], &scratch.d.arrayexpr.elemnulls[elemoff]); elemoff++; } /* and then collect all into an array */ ExprEvalPushStep(state, &scratch); break; } case T_RowExpr: { RowExpr *rowexpr = (RowExpr *) node; int nelems = list_length(rowexpr->args); TupleDesc tupdesc; int i; ListCell *l; /* Build tupdesc to describe result tuples */ if (rowexpr->row_typeid == RECORDOID) { /* generic record, use types of given expressions */ tupdesc = ExecTypeFromExprList(rowexpr->args); /* ... but adopt RowExpr's column aliases */ ExecTypeSetColNames(tupdesc, rowexpr->colnames); /* Bless the tupdesc so it can be looked up later */ BlessTupleDesc(tupdesc); } else { /* it's been cast to a named type, use that */ tupdesc = lookup_rowtype_tupdesc_copy(rowexpr->row_typeid, -1); } /* * In the named-type case, the tupdesc could have more columns * than are in the args list, since the type might have had * columns added since the ROW() was parsed. We want those * extra columns to go to nulls, so we make sure that the * workspace arrays are large enough and then initialize any * extra columns to read as NULLs. */ Assert(nelems <= tupdesc->natts); nelems = Max(nelems, tupdesc->natts); /* * Evaluate by first building datums for each field, and then * a final step forming the composite datum. 
*/ scratch.opcode = EEOP_ROW; scratch.d.row.tupdesc = tupdesc; /* space for the individual field datums */ scratch.d.row.elemvalues = palloc_array(Datum, nelems); scratch.d.row.elemnulls = palloc_array(bool, nelems); /* as explained above, make sure any extra columns are null */ memset(scratch.d.row.elemnulls, true, sizeof(bool) * nelems); /* Set up evaluation, skipping any deleted columns */ i = 0; foreach(l, rowexpr->args) { Form_pg_attribute att = TupleDescAttr(tupdesc, i); Expr *e = (Expr *) lfirst(l); if (!att->attisdropped) { /* * Guard against ALTER COLUMN TYPE on rowtype since * the RowExpr was created. XXX should we check * typmod too? Not sure we can be sure it'll be the * same. */ if (exprType((Node *) e) != att->atttypid) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("ROW() column has type %s instead of type %s", format_type_be(exprType((Node *) e)), format_type_be(att->atttypid)))); } else { /* * Ignore original expression and insert a NULL. We * don't really care what type of NULL it is, so * always make an int4 NULL. */ e = (Expr *) makeNullConst(INT4OID, -1, InvalidOid); } /* Evaluate column expr into appropriate workspace slot */ ExecInitExprRec(e, state, &scratch.d.row.elemvalues[i], &scratch.d.row.elemnulls[i]); i++; } /* And finally build the row value */ ExprEvalPushStep(state, &scratch); break; } case T_RowCompareExpr: { RowCompareExpr *rcexpr = (RowCompareExpr *) node; int nopers = list_length(rcexpr->opnos); List *adjust_jumps = NIL; ListCell *l_left_expr, *l_right_expr, *l_opno, *l_opfamily, *l_inputcollid; ListCell *lc; /* * Iterate over each field, prepare comparisons. To handle * NULL results, prepare jumps to after the expression. If a * comparison yields a != 0 result, jump to the final step. 
*/ Assert(list_length(rcexpr->largs) == nopers); Assert(list_length(rcexpr->rargs) == nopers); Assert(list_length(rcexpr->opfamilies) == nopers); Assert(list_length(rcexpr->inputcollids) == nopers); forfive(l_left_expr, rcexpr->largs, l_right_expr, rcexpr->rargs, l_opno, rcexpr->opnos, l_opfamily, rcexpr->opfamilies, l_inputcollid, rcexpr->inputcollids) { Expr *left_expr = (Expr *) lfirst(l_left_expr); Expr *right_expr = (Expr *) lfirst(l_right_expr); Oid opno = lfirst_oid(l_opno); Oid opfamily = lfirst_oid(l_opfamily); Oid inputcollid = lfirst_oid(l_inputcollid); int strategy; Oid lefttype; Oid righttype; Oid proc; FmgrInfo *finfo; FunctionCallInfo fcinfo; get_op_opfamily_properties(opno, opfamily, false, &strategy, &lefttype, &righttype); proc = get_opfamily_proc(opfamily, lefttype, righttype, BTORDER_PROC); if (!OidIsValid(proc)) elog(ERROR, "missing support function %d(%u,%u) in opfamily %u", BTORDER_PROC, lefttype, righttype, opfamily); /* Set up the primary fmgr lookup information */ finfo = palloc0_object(FmgrInfo); fcinfo = palloc0(SizeForFunctionCallInfo(2)); fmgr_info(proc, finfo); fmgr_info_set_expr((Node *) node, finfo); InitFunctionCallInfoData(*fcinfo, finfo, 2, inputcollid, NULL, NULL); /* * If we enforced permissions checks on index support * functions, we'd need to make a check here. But the * index support machinery doesn't do that, and thus * neither does this code. 
*/ /* evaluate left and right args directly into fcinfo */ ExecInitExprRec(left_expr, state, &fcinfo->args[0].value, &fcinfo->args[0].isnull); ExecInitExprRec(right_expr, state, &fcinfo->args[1].value, &fcinfo->args[1].isnull); scratch.opcode = EEOP_ROWCOMPARE_STEP; scratch.d.rowcompare_step.finfo = finfo; scratch.d.rowcompare_step.fcinfo_data = fcinfo; scratch.d.rowcompare_step.fn_addr = finfo->fn_addr; /* jump targets filled below */ scratch.d.rowcompare_step.jumpnull = -1; scratch.d.rowcompare_step.jumpdone = -1; ExprEvalPushStep(state, &scratch); adjust_jumps = lappend_int(adjust_jumps, state->steps_len - 1); } /* * We could have a zero-column rowtype, in which case the rows * necessarily compare equal. */ if (nopers == 0) { scratch.opcode = EEOP_CONST; scratch.d.constval.value = Int32GetDatum(0); scratch.d.constval.isnull = false; ExprEvalPushStep(state, &scratch); } /* Finally, examine the last comparison result */ scratch.opcode = EEOP_ROWCOMPARE_FINAL; scratch.d.rowcompare_final.cmptype = rcexpr->cmptype; ExprEvalPushStep(state, &scratch); /* adjust jump targets */ foreach(lc, adjust_jumps) { ExprEvalStep *as = &state->steps[lfirst_int(lc)]; Assert(as->opcode == EEOP_ROWCOMPARE_STEP); Assert(as->d.rowcompare_step.jumpdone == -1); Assert(as->d.rowcompare_step.jumpnull == -1); /* jump to comparison evaluation */ as->d.rowcompare_step.jumpdone = state->steps_len - 1; /* jump to the following expression */ as->d.rowcompare_step.jumpnull = state->steps_len; } break; } case T_CoalesceExpr: { CoalesceExpr *coalesce = (CoalesceExpr *) node; List *adjust_jumps = NIL; ListCell *lc; /* We assume there's at least one arg */ Assert(coalesce->args != NIL); /* * Prepare evaluation of all coalesced arguments, after each * one push a step that short-circuits if not null. 
*/ foreach(lc, coalesce->args) { Expr *e = (Expr *) lfirst(lc); /* evaluate argument, directly into result datum */ ExecInitExprRec(e, state, resv, resnull); /* if it's not null, skip to end of COALESCE expr */ scratch.opcode = EEOP_JUMP_IF_NOT_NULL; scratch.d.jump.jumpdone = -1; /* adjust later */ ExprEvalPushStep(state, &scratch); adjust_jumps = lappend_int(adjust_jumps, state->steps_len - 1); } /* * No need to add a constant NULL return - we only can get to * the end of the expression if a NULL already is being * returned. */ /* adjust jump targets */ foreach(lc, adjust_jumps) { ExprEvalStep *as = &state->steps[lfirst_int(lc)]; Assert(as->opcode == EEOP_JUMP_IF_NOT_NULL); Assert(as->d.jump.jumpdone == -1); as->d.jump.jumpdone = state->steps_len; } break; } case T_MinMaxExpr: { MinMaxExpr *minmaxexpr = (MinMaxExpr *) node; int nelems = list_length(minmaxexpr->args); TypeCacheEntry *typentry; FmgrInfo *finfo; FunctionCallInfo fcinfo; ListCell *lc; int off; /* Look up the btree comparison function for the datatype */ typentry = lookup_type_cache(minmaxexpr->minmaxtype, TYPECACHE_CMP_PROC); if (!OidIsValid(typentry->cmp_proc)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("could not identify a comparison function for type %s", format_type_be(minmaxexpr->minmaxtype)))); /* * If we enforced permissions checks on index support * functions, we'd need to make a check here. But the index * support machinery doesn't do that, and thus neither does * this code. 
*/ /* Perform function lookup */ finfo = palloc0_object(FmgrInfo); fcinfo = palloc0(SizeForFunctionCallInfo(2)); fmgr_info(typentry->cmp_proc, finfo); fmgr_info_set_expr((Node *) node, finfo); InitFunctionCallInfoData(*fcinfo, finfo, 2, minmaxexpr->inputcollid, NULL, NULL); scratch.opcode = EEOP_MINMAX; /* allocate space to store arguments */ scratch.d.minmax.values = palloc_array(Datum, nelems); scratch.d.minmax.nulls = palloc_array(bool, nelems); scratch.d.minmax.nelems = nelems; scratch.d.minmax.op = minmaxexpr->op; scratch.d.minmax.finfo = finfo; scratch.d.minmax.fcinfo_data = fcinfo; /* evaluate expressions into minmax->values/nulls */ off = 0; foreach(lc, minmaxexpr->args) { Expr *e = (Expr *) lfirst(lc); ExecInitExprRec(e, state, &scratch.d.minmax.values[off], &scratch.d.minmax.nulls[off]); off++; } /* and push the final comparison */ ExprEvalPushStep(state, &scratch); break; } case T_SQLValueFunction: { SQLValueFunction *svf = (SQLValueFunction *) node; scratch.opcode = EEOP_SQLVALUEFUNCTION; scratch.d.sqlvaluefunction.svf = svf; ExprEvalPushStep(state, &scratch); break; } case T_XmlExpr: { XmlExpr *xexpr = (XmlExpr *) node; int nnamed = list_length(xexpr->named_args); int nargs = list_length(xexpr->args); int off; ListCell *arg; scratch.opcode = EEOP_XMLEXPR; scratch.d.xmlexpr.xexpr = xexpr; /* allocate space for storing all the arguments */ if (nnamed) { scratch.d.xmlexpr.named_argvalue = palloc_array(Datum, nnamed); scratch.d.xmlexpr.named_argnull = palloc_array(bool, nnamed); } else { scratch.d.xmlexpr.named_argvalue = NULL; scratch.d.xmlexpr.named_argnull = NULL; } if (nargs) { scratch.d.xmlexpr.argvalue = palloc_array(Datum, nargs); scratch.d.xmlexpr.argnull = palloc_array(bool, nargs); } else { scratch.d.xmlexpr.argvalue = NULL; scratch.d.xmlexpr.argnull = NULL; } /* prepare argument execution */ off = 0; foreach(arg, xexpr->named_args) { Expr *e = (Expr *) lfirst(arg); ExecInitExprRec(e, state, &scratch.d.xmlexpr.named_argvalue[off], 
&scratch.d.xmlexpr.named_argnull[off]); off++; } off = 0; foreach(arg, xexpr->args) { Expr *e = (Expr *) lfirst(arg); ExecInitExprRec(e, state, &scratch.d.xmlexpr.argvalue[off], &scratch.d.xmlexpr.argnull[off]); off++; } /* and evaluate the actual XML expression */ ExprEvalPushStep(state, &scratch); break; } case T_JsonValueExpr: { JsonValueExpr *jve = (JsonValueExpr *) node; Assert(jve->raw_expr != NULL); ExecInitExprRec(jve->raw_expr, state, resv, resnull); Assert(jve->formatted_expr != NULL); ExecInitExprRec(jve->formatted_expr, state, resv, resnull); break; } case T_JsonConstructorExpr: { JsonConstructorExpr *ctor = (JsonConstructorExpr *) node; List *args = ctor->args; ListCell *lc; int nargs = list_length(args); int argno = 0; if (ctor->func) { ExecInitExprRec(ctor->func, state, resv, resnull); } else if ((ctor->type == JSCTOR_JSON_PARSE && !ctor->unique) || ctor->type == JSCTOR_JSON_SERIALIZE) { /* Use the value of the first argument as result */ ExecInitExprRec(linitial(args), state, resv, resnull); } else { JsonConstructorExprState *jcstate; jcstate = palloc0_object(JsonConstructorExprState); scratch.opcode = EEOP_JSON_CONSTRUCTOR; scratch.d.json_constructor.jcstate = jcstate; jcstate->constructor = ctor; jcstate->arg_values = palloc_array(Datum, nargs); jcstate->arg_nulls = palloc_array(bool, nargs); jcstate->arg_types = palloc_array(Oid, nargs); jcstate->nargs = nargs; foreach(lc, args) { Expr *arg = (Expr *) lfirst(lc); jcstate->arg_types[argno] = exprType((Node *) arg); if (IsA(arg, Const)) { /* Don't evaluate const arguments every round */ Const *con = (Const *) arg; jcstate->arg_values[argno] = con->constvalue; jcstate->arg_nulls[argno] = con->constisnull; } else { ExecInitExprRec(arg, state, &jcstate->arg_values[argno], &jcstate->arg_nulls[argno]); } argno++; } /* prepare type cache for datum_to_json[b]() */ if (ctor->type == JSCTOR_JSON_SCALAR) { bool is_jsonb = ctor->returning->format->format_type == JS_FORMAT_JSONB; jcstate->arg_type_cache = 
palloc(sizeof(*jcstate->arg_type_cache) * nargs); for (int i = 0; i < nargs; i++) { JsonTypeCategory category; Oid outfuncid; Oid typid = jcstate->arg_types[i]; json_categorize_type(typid, is_jsonb, &category, &outfuncid); jcstate->arg_type_cache[i].outfuncid = outfuncid; jcstate->arg_type_cache[i].category = (int) category; } } ExprEvalPushStep(state, &scratch); } if (ctor->coercion) { Datum *innermost_caseval = state->innermost_caseval; bool *innermost_isnull = state->innermost_casenull; state->innermost_caseval = resv; state->innermost_casenull = resnull; ExecInitExprRec(ctor->coercion, state, resv, resnull); state->innermost_caseval = innermost_caseval; state->innermost_casenull = innermost_isnull; } } break; case T_JsonIsPredicate: { JsonIsPredicate *pred = (JsonIsPredicate *) node; ExecInitExprRec((Expr *) pred->expr, state, resv, resnull); scratch.opcode = EEOP_IS_JSON; scratch.d.is_json.pred = pred; ExprEvalPushStep(state, &scratch); break; } case T_JsonExpr: { JsonExpr *jsexpr = castNode(JsonExpr, node); /* * No need to initialize a full JsonExprState For * JSON_TABLE(), because the upstream caller tfuncFetchRows() * is only interested in the value of formatted_expr. 
*/ if (jsexpr->op == JSON_TABLE_OP) ExecInitExprRec((Expr *) jsexpr->formatted_expr, state, resv, resnull); else ExecInitJsonExpr(jsexpr, state, resv, resnull, &scratch); break; } case T_NullTest: { NullTest *ntest = (NullTest *) node; if (ntest->nulltesttype == IS_NULL) { if (ntest->argisrow) scratch.opcode = EEOP_NULLTEST_ROWISNULL; else scratch.opcode = EEOP_NULLTEST_ISNULL; } else if (ntest->nulltesttype == IS_NOT_NULL) { if (ntest->argisrow) scratch.opcode = EEOP_NULLTEST_ROWISNOTNULL; else scratch.opcode = EEOP_NULLTEST_ISNOTNULL; } else { elog(ERROR, "unrecognized nulltesttype: %d", (int) ntest->nulltesttype); } /* initialize cache in case it's a row test */ scratch.d.nulltest_row.rowcache.cacheptr = NULL; /* first evaluate argument into result variable */ ExecInitExprRec(ntest->arg, state, resv, resnull); /* then push the test of that argument */ ExprEvalPushStep(state, &scratch); break; } case T_BooleanTest: { BooleanTest *btest = (BooleanTest *) node; /* * Evaluate argument, directly into result datum. That's ok, * because resv/resnull is definitely not used anywhere else, * and will get overwritten by the below EEOP_BOOLTEST_IS_* * step. 
*/ ExecInitExprRec(btest->arg, state, resv, resnull); switch (btest->booltesttype) { case IS_TRUE: scratch.opcode = EEOP_BOOLTEST_IS_TRUE; break; case IS_NOT_TRUE: scratch.opcode = EEOP_BOOLTEST_IS_NOT_TRUE; break; case IS_FALSE: scratch.opcode = EEOP_BOOLTEST_IS_FALSE; break; case IS_NOT_FALSE: scratch.opcode = EEOP_BOOLTEST_IS_NOT_FALSE; break; case IS_UNKNOWN: /* Same as scalar IS NULL test */ scratch.opcode = EEOP_NULLTEST_ISNULL; break; case IS_NOT_UNKNOWN: /* Same as scalar IS NOT NULL test */ scratch.opcode = EEOP_NULLTEST_ISNOTNULL; break; default: elog(ERROR, "unrecognized booltesttype: %d", (int) btest->booltesttype); } ExprEvalPushStep(state, &scratch); break; } case T_CoerceToDomain: { CoerceToDomain *ctest = (CoerceToDomain *) node; ExecInitCoerceToDomain(&scratch, ctest, state, resv, resnull); break; } case T_CoerceToDomainValue: { /* * Read from location identified by innermost_domainval. Note * that innermost_domainval could be NULL, if we're compiling * a standalone domain check rather than one embedded in a * larger expression. In that case we must read from * econtext->domainValue_datum. We'll take care of that by * generating a specialized operation. 
*/ if (state->innermost_domainval == NULL) scratch.opcode = EEOP_DOMAIN_TESTVAL_EXT; else { scratch.opcode = EEOP_DOMAIN_TESTVAL; /* we share instruction union variant with case testval */ scratch.d.casetest.value = state->innermost_domainval; scratch.d.casetest.isnull = state->innermost_domainnull; } ExprEvalPushStep(state, &scratch); break; } case T_CurrentOfExpr: { scratch.opcode = EEOP_CURRENTOFEXPR; ExprEvalPushStep(state, &scratch); break; } case T_NextValueExpr: { NextValueExpr *nve = (NextValueExpr *) node; scratch.opcode = EEOP_NEXTVALUEEXPR; scratch.d.nextvalueexpr.seqid = nve->seqid; scratch.d.nextvalueexpr.seqtypid = nve->typeId; ExprEvalPushStep(state, &scratch); break; } case T_ReturningExpr: { ReturningExpr *rexpr = (ReturningExpr *) node; int retstep; /* Skip expression evaluation if OLD/NEW row doesn't exist */ scratch.opcode = EEOP_RETURNINGEXPR; scratch.d.returningexpr.nullflag = rexpr->retold ? EEO_FLAG_OLD_IS_NULL : EEO_FLAG_NEW_IS_NULL; scratch.d.returningexpr.jumpdone = -1; /* set below */ ExprEvalPushStep(state, &scratch); retstep = state->steps_len - 1; /* Steps to evaluate expression to return */ ExecInitExprRec(rexpr->retexpr, state, resv, resnull); /* Jump target used if OLD/NEW row doesn't exist */ state->steps[retstep].d.returningexpr.jumpdone = state->steps_len; /* Update ExprState flags */ if (rexpr->retold) state->flags |= EEO_FLAG_HAS_OLD; else state->flags |= EEO_FLAG_HAS_NEW; break; } default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); break; } } /* * Add another expression evaluation step to ExprState->steps. * * Note that this potentially re-allocates es->steps, therefore no pointer * into that array may be used while the expression is still being built. 
 */
void
ExprEvalPushStep(ExprState *es, const ExprEvalStep *s)
{
	/* first step ever pushed: allocate a small initial steps array */
	if (es->steps_alloc == 0)
	{
		es->steps_alloc = 16;
		es->steps = palloc_array(ExprEvalStep, es->steps_alloc);
	}
	else if (es->steps_alloc == es->steps_len)
	{
		/* array full: double capacity (note repalloc may move es->steps) */
		es->steps_alloc *= 2;
		es->steps = repalloc(es->steps,
							 sizeof(ExprEvalStep) * es->steps_alloc);
	}

	/* append a copy of the caller's step and advance the length */
	memcpy(&es->steps[es->steps_len++], s, sizeof(ExprEvalStep));
}

/*
 * Perform setup necessary for the evaluation of a function-like expression,
 * appending argument evaluation steps to the steps list in *state, and
 * setting up *scratch so it is ready to be pushed.
 *
 * *scratch is not pushed here, so that callers may override the opcode,
 * which is useful for function-like cases like DISTINCT.
 */
static void
ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args, Oid funcid,
			 Oid inputcollid, ExprState *state)
{
	int			nargs = list_length(args);
	AclResult	aclresult;
	FmgrInfo   *flinfo;
	FunctionCallInfo fcinfo;
	int			argno;
	ListCell   *lc;

	/* Check permission to call function */
	aclresult = object_aclcheck(ProcedureRelationId, funcid, GetUserId(),
								ACL_EXECUTE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(funcid));
	InvokeFunctionExecuteHook(funcid);

	/*
	 * Safety check on nargs.  Under normal circumstances this should never
	 * fail, as parser should check sooner.  But possibly it might fail if
	 * server has been compiled with FUNC_MAX_ARGS smaller than some functions
	 * declared in pg_proc?
	 */
	if (nargs > FUNC_MAX_ARGS)
		ereport(ERROR,
				(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
				 errmsg_plural("cannot pass more than %d argument to a function",
							   "cannot pass more than %d arguments to a function",
							   FUNC_MAX_ARGS,
							   FUNC_MAX_ARGS)));

	/* Allocate function lookup data and parameter workspace for this call */
	scratch->d.func.finfo = palloc0_object(FmgrInfo);
	scratch->d.func.fcinfo_data = palloc0(SizeForFunctionCallInfo(nargs));
	flinfo = scratch->d.func.finfo;
	fcinfo = scratch->d.func.fcinfo_data;

	/* Set up the primary fmgr lookup information */
	fmgr_info(funcid, flinfo);
	fmgr_info_set_expr((Node *) node, flinfo);

	/* Initialize function call parameter structure too */
	InitFunctionCallInfoData(*fcinfo, flinfo, nargs, inputcollid, NULL, NULL);

	/* Keep extra copies of this info to save an indirection at runtime */
	scratch->d.func.fn_addr = flinfo->fn_addr;
	scratch->d.func.nargs = nargs;

	/* We only support non-set functions here */
	if (flinfo->fn_retset)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set"),
				 state->parent ?
				 executor_errposition(state->parent->state,
									  exprLocation((Node *) node)) : 0));

	/* Build code to evaluate arguments directly into the fcinfo struct */
	argno = 0;
	foreach(lc, args)
	{
		Expr	   *arg = (Expr *) lfirst(lc);

		if (IsA(arg, Const))
		{
			/*
			 * Don't evaluate const arguments every round; especially
			 * interesting for constants in comparisons.
			 */
			Const	   *con = (Const *) arg;

			fcinfo->args[argno].value = con->constvalue;
			fcinfo->args[argno].isnull = con->constisnull;
		}
		else
		{
			/* non-constant: emit steps evaluating into the fcinfo slot */
			ExecInitExprRec(arg, state,
							&fcinfo->args[argno].value,
							&fcinfo->args[argno].isnull);
		}
		argno++;
	}

	/* Insert appropriate opcode depending on strictness and stats level */
	if (pgstat_track_functions <= flinfo->fn_stats)
	{
		if (flinfo->fn_strict && nargs > 0)
		{
			/*
			 * Choose nargs optimized implementation if available.
			 */
			if (nargs == 1)
				scratch->opcode = EEOP_FUNCEXPR_STRICT_1;
			else if (nargs == 2)
				scratch->opcode = EEOP_FUNCEXPR_STRICT_2;
			else
				scratch->opcode = EEOP_FUNCEXPR_STRICT;
		}
		else
			scratch->opcode = EEOP_FUNCEXPR;
	}
	else
	{
		/* function-usage tracking wanted: use the *_FUSAGE step variants */
		if (flinfo->fn_strict && nargs > 0)
			scratch->opcode = EEOP_FUNCEXPR_STRICT_FUSAGE;
		else
			scratch->opcode = EEOP_FUNCEXPR_FUSAGE;
	}
}

/*
 * Append the steps necessary for the evaluation of a SubPlan node to
 * ExprState->steps.
 *
 * subplan - SubPlan expression to evaluate
 * state - ExprState to whose ->steps to append the necessary operations
 * resv / resnull - where to store the result of the node into
 */
static void
ExecInitSubPlanExpr(SubPlan *subplan, ExprState *state,
					Datum *resv, bool *resnull)
{
	ExprEvalStep scratch = {0};
	SubPlanState *sstate;
	ListCell   *pvar;
	ListCell   *l;

	if (!state->parent)
		elog(ERROR, "SubPlan found with no parent plan");

	/*
	 * Generate steps to evaluate input arguments for the subplan.
	 *
	 * We evaluate the argument expressions into resv/resnull, and then use
	 * PARAM_SET to update the parameter.  We do that, instead of evaluating
	 * directly into the param, to avoid depending on the pointer value
	 * remaining stable / being included in the generated expression.  It's ok
	 * to use resv/resnull for multiple params, as each parameter evaluation
	 * is immediately followed by an EEOP_PARAM_SET (and thus are saved before
	 * they could be overwritten again).
	 *
	 * Any calculation we have to do can be done in the parent econtext, since
	 * the Param values don't need to have per-query lifetime.
	 */
	Assert(list_length(subplan->parParam) == list_length(subplan->args));

	forboth(l, subplan->parParam, pvar, subplan->args)
	{
		int			paramid = lfirst_int(l);
		Expr	   *arg = (Expr *) lfirst(pvar);

		/* evaluate the argument, then immediately copy it into the Param */
		ExecInitExprRec(arg, state, resv, resnull);

		scratch.opcode = EEOP_PARAM_SET;
		scratch.resvalue = resv;
		scratch.resnull = resnull;
		scratch.d.param.paramid = paramid;
		/* paramtype's not actually used, but we might as well fill it */
		scratch.d.param.paramtype = exprType((Node *) arg);
		ExprEvalPushStep(state, &scratch);
	}

	sstate = ExecInitSubPlan(subplan, state->parent);

	/* add SubPlanState nodes to state->parent->subPlan */
	state->parent->subPlan = lappend(state->parent->subPlan,
									 sstate);

	/* finally, the step that actually runs the subplan at execution time */
	scratch.opcode = EEOP_SUBPLAN;
	scratch.resvalue = resv;
	scratch.resnull = resnull;
	scratch.d.subplan.sstate = sstate;

	ExprEvalPushStep(state, &scratch);
}

/*
 * Add expression steps performing setup that's needed before any of the
 * main execution of the expression.
 */
static void
ExecCreateExprSetupSteps(ExprState *state, Node *node)
{
	ExprSetupInfo info = {0, 0, 0, 0, 0, NIL};

	/* Prescan to find out what we need. */
	expr_setup_walker(node, &info);

	/* And generate those steps. */
	ExecPushExprSetupSteps(state, &info);
}

/*
 * Add steps performing expression setup as indicated by "info".
 * This is useful when building an ExprState covering more than one expression.
 */
static void
ExecPushExprSetupSteps(ExprState *state, ExprSetupInfo *info)
{
	ExprEvalStep scratch = {0};
	ListCell   *lc;

	scratch.resvalue = NULL;
	scratch.resnull = NULL;

	/*
	 * Add steps deforming the ExprState's inner/outer/scan/old/new slots as
	 * much as required by any Vars appearing in the expression.
	 */
	if (info->last_inner > 0)
	{
		scratch.opcode = EEOP_INNER_FETCHSOME;
		scratch.d.fetch.last_var = info->last_inner;
		scratch.d.fetch.fixed = false;
		scratch.d.fetch.kind = NULL;
		scratch.d.fetch.known_desc = NULL;
		/* ExecComputeSlotInfo() decides whether a deform step is needed */
		if (ExecComputeSlotInfo(state, &scratch))
			ExprEvalPushStep(state, &scratch);
	}
	if (info->last_outer > 0)
	{
		scratch.opcode = EEOP_OUTER_FETCHSOME;
		scratch.d.fetch.last_var = info->last_outer;
		scratch.d.fetch.fixed = false;
		scratch.d.fetch.kind = NULL;
		scratch.d.fetch.known_desc = NULL;
		if (ExecComputeSlotInfo(state, &scratch))
			ExprEvalPushStep(state, &scratch);
	}
	if (info->last_scan > 0)
	{
		scratch.opcode = EEOP_SCAN_FETCHSOME;
		scratch.d.fetch.last_var = info->last_scan;
		scratch.d.fetch.fixed = false;
		scratch.d.fetch.kind = NULL;
		scratch.d.fetch.known_desc = NULL;
		if (ExecComputeSlotInfo(state, &scratch))
			ExprEvalPushStep(state, &scratch);
	}
	if (info->last_old > 0)
	{
		scratch.opcode = EEOP_OLD_FETCHSOME;
		scratch.d.fetch.last_var = info->last_old;
		scratch.d.fetch.fixed = false;
		scratch.d.fetch.kind = NULL;
		scratch.d.fetch.known_desc = NULL;
		if (ExecComputeSlotInfo(state, &scratch))
			ExprEvalPushStep(state, &scratch);
	}
	if (info->last_new > 0)
	{
		scratch.opcode = EEOP_NEW_FETCHSOME;
		scratch.d.fetch.last_var = info->last_new;
		scratch.d.fetch.fixed = false;
		scratch.d.fetch.kind = NULL;
		scratch.d.fetch.known_desc = NULL;
		if (ExecComputeSlotInfo(state, &scratch))
			ExprEvalPushStep(state, &scratch);
	}

	/*
	 * Add steps to execute any MULTIEXPR SubPlans appearing in the
	 * expression.  We need to evaluate these before any of the Params
	 * referencing their outputs are used, but after we've prepared for any
	 * Var references they may contain.  (There cannot be cross-references
	 * between MULTIEXPR SubPlans, so we needn't worry about their order.)
	 */
	foreach(lc, info->multiexpr_subplans)
	{
		SubPlan    *subplan = (SubPlan *) lfirst(lc);

		Assert(subplan->subLinkType == MULTIEXPR_SUBLINK);

		/* The result can be ignored, but we better put it somewhere */
		ExecInitSubPlanExpr(subplan, state,
							&state->resvalue, &state->resnull);
	}
}

/*
 * expr_setup_walker: expression walker for ExecCreateExprSetupSteps
 */
static bool
expr_setup_walker(Node *node, ExprSetupInfo *info)
{
	if (node == NULL)
		return false;
	if (IsA(node, Var))
	{
		Var		   *variable = (Var *) node;
		AttrNumber	attnum = variable->varattno;

		/* record the highest attribute number referenced from each slot */
		switch (variable->varno)
		{
			case INNER_VAR:
				info->last_inner = Max(info->last_inner, attnum);
				break;

			case OUTER_VAR:
				info->last_outer = Max(info->last_outer, attnum);
				break;

				/* INDEX_VAR is handled by default case */

			default:
				switch (variable->varreturningtype)
				{
					case VAR_RETURNING_DEFAULT:
						info->last_scan = Max(info->last_scan, attnum);
						break;
					case VAR_RETURNING_OLD:
						info->last_old = Max(info->last_old, attnum);
						break;
					case VAR_RETURNING_NEW:
						info->last_new = Max(info->last_new, attnum);
						break;
				}
				break;
		}
		return false;
	}

	/* Collect all MULTIEXPR SubPlans, too */
	if (IsA(node, SubPlan))
	{
		SubPlan    *subplan = (SubPlan *) node;

		if (subplan->subLinkType == MULTIEXPR_SUBLINK)
			info->multiexpr_subplans = lappend(info->multiexpr_subplans,
											   subplan);
	}

	/*
	 * Don't examine the arguments or filters of Aggrefs or WindowFuncs,
	 * because those do not represent expressions to be evaluated within the
	 * calling expression's econtext.  GroupingFunc arguments are never
	 * evaluated at all.
	 */
	if (IsA(node, Aggref))
		return false;
	if (IsA(node, WindowFunc))
		return false;
	if (IsA(node, GroupingFunc))
		return false;
	return expression_tree_walker(node, expr_setup_walker, info);
}

/*
 * Compute additional information for EEOP_*_FETCHSOME ops.
 *
 * The goal is to determine whether a slot is 'fixed', that is, every
 * evaluation of the expression will have the same type of slot, with an
 * equivalent descriptor.
 *
 * EEOP_OLD_FETCHSOME and EEOP_NEW_FETCHSOME are used to process RETURNING, if
 * OLD/NEW columns are referred to explicitly.  In both cases, the tuple
 * descriptor comes from the parent scan node, so we treat them the same as
 * EEOP_SCAN_FETCHSOME.
 *
 * Returns true if the deforming step is required, false otherwise.
 */
static bool
ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op)
{
	PlanState  *parent = state->parent;
	TupleDesc	desc = NULL;
	const TupleTableSlotOps *tts_ops = NULL;
	bool		isfixed = false;
	ExprEvalOp	opcode = op->opcode;

	Assert(opcode == EEOP_INNER_FETCHSOME ||
		   opcode == EEOP_OUTER_FETCHSOME ||
		   opcode == EEOP_SCAN_FETCHSOME ||
		   opcode == EEOP_OLD_FETCHSOME ||
		   opcode == EEOP_NEW_FETCHSOME);

	if (op->d.fetch.known_desc != NULL)
	{
		/* the caller supplied a descriptor up front; use that */
		desc = op->d.fetch.known_desc;
		tts_ops = op->d.fetch.kind;
		isfixed = op->d.fetch.kind != NULL;
	}
	else if (!parent)
	{
		/* standalone expression: nothing is known about the slot */
		isfixed = false;
	}
	else if (opcode == EEOP_INNER_FETCHSOME)
	{
		PlanState  *is = innerPlanState(parent);

		if (parent->inneropsset && !parent->inneropsfixed)
		{
			isfixed = false;
		}
		else if (parent->inneropsset && parent->innerops)
		{
			isfixed = true;
			tts_ops = parent->innerops;
			desc = ExecGetResultType(is);
		}
		else if (is)
		{
			/* derive slot ops and descriptor from the inner child plan */
			tts_ops = ExecGetResultSlotOps(is, &isfixed);
			desc = ExecGetResultType(is);
		}
	}
	else if (opcode == EEOP_OUTER_FETCHSOME)
	{
		PlanState  *os = outerPlanState(parent);

		if (parent->outeropsset && !parent->outeropsfixed)
		{
			isfixed = false;
		}
		else if (parent->outeropsset && parent->outerops)
		{
			isfixed = true;
			tts_ops = parent->outerops;
			desc = ExecGetResultType(os);
		}
		else if (os)
		{
			/* derive slot ops and descriptor from the outer child plan */
			tts_ops = ExecGetResultSlotOps(os, &isfixed);
			desc = ExecGetResultType(os);
		}
	}
	else if (opcode == EEOP_SCAN_FETCHSOME ||
			 opcode == EEOP_OLD_FETCHSOME ||
			 opcode == EEOP_NEW_FETCHSOME)
	{
		desc = parent->scandesc;

		if (parent->scanops)
			tts_ops = parent->scanops;

		if (parent->scanopsset)
			isfixed = parent->scanopsfixed;
	}

	/* record the conclusion in the step */
	if (isfixed && desc != NULL && tts_ops != NULL)
	{
		op->d.fetch.fixed = true;
		op->d.fetch.kind = tts_ops;
		op->d.fetch.known_desc = desc;
	}
	else
	{
		op->d.fetch.fixed = false;
		op->d.fetch.kind = NULL;
		op->d.fetch.known_desc = NULL;
	}

	/* if the slot is known to always virtual we never need to deform */
	if (op->d.fetch.fixed && op->d.fetch.kind == &TTSOpsVirtual)
		return false;

	return true;
}

/*
 * Prepare step for the evaluation of a whole-row variable.
 * The caller still has to push the step.
 */
static void
ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable, ExprState *state)
{
	PlanState  *parent = state->parent;

	/* fill in all but the target */
	scratch->opcode = EEOP_WHOLEROW;
	scratch->d.wholerow.var = variable;
	scratch->d.wholerow.first = true;
	scratch->d.wholerow.slow = false;
	scratch->d.wholerow.tupdesc = NULL; /* filled at runtime */
	scratch->d.wholerow.junkFilter = NULL;

	/* update ExprState flags if Var refers to OLD/NEW */
	if (variable->varreturningtype == VAR_RETURNING_OLD)
		state->flags |= EEO_FLAG_HAS_OLD;
	else if (variable->varreturningtype == VAR_RETURNING_NEW)
		state->flags |= EEO_FLAG_HAS_NEW;

	/*
	 * If the input tuple came from a subquery, it might contain "resjunk"
	 * columns (such as GROUP BY or ORDER BY columns), which we don't want to
	 * keep in the whole-row result.  We can get rid of such columns by
	 * passing the tuple through a JunkFilter --- but to make one, we have to
	 * lay our hands on the subquery's targetlist.  Fortunately, there are not
	 * very many cases where this can happen, and we can identify all of them
	 * by examining our parent PlanState.  We assume this is not an issue in
	 * standalone expressions that don't have parent plans.  (Whole-row Vars
	 * can occur in such expressions, but they will always be referencing
	 * table rows.)
*/ if (parent) { PlanState *subplan = NULL; switch (nodeTag(parent)) { case T_SubqueryScanState: subplan = ((SubqueryScanState *) parent)->subplan; break; case T_CteScanState: subplan = ((CteScanState *) parent)->cteplanstate; break; default: break; } if (subplan) { bool junk_filter_needed = false; ListCell *tlist; /* Detect whether subplan tlist actually has any junk columns */ foreach(tlist, subplan->plan->targetlist) { TargetEntry *tle = (TargetEntry *) lfirst(tlist); if (tle->resjunk) { junk_filter_needed = true; break; } } /* If so, build the junkfilter now */ if (junk_filter_needed) { scratch->d.wholerow.junkFilter = ExecInitJunkFilter(subplan->plan->targetlist, ExecInitExtraTupleSlot(parent->state, NULL, &TTSOpsVirtual)); } } } } /* * Prepare evaluation of a SubscriptingRef expression. */ static void ExecInitSubscriptingRef(ExprEvalStep *scratch, SubscriptingRef *sbsref, ExprState *state, Datum *resv, bool *resnull) { bool isAssignment = (sbsref->refassgnexpr != NULL); int nupper = list_length(sbsref->refupperindexpr); int nlower = list_length(sbsref->reflowerindexpr); const SubscriptRoutines *sbsroutines; SubscriptingRefState *sbsrefstate; SubscriptExecSteps methods; char *ptr; List *adjust_jumps = NIL; ListCell *lc; int i; /* Look up the subscripting support methods */ sbsroutines = getSubscriptingRoutines(sbsref->refcontainertype, NULL); if (!sbsroutines) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot subscript type %s because it does not support subscripting", format_type_be(sbsref->refcontainertype)), state->parent ? 
				 executor_errposition(state->parent->state,
									  exprLocation((Node *) sbsref)) : 0));

	/* Allocate sbsrefstate, with enough space for per-subscript arrays too */
	sbsrefstate = palloc0(MAXALIGN(sizeof(SubscriptingRefState)) +
						  (nupper + nlower) * (sizeof(Datum) +
											   2 * sizeof(bool)));

	/* Fill constant fields of SubscriptingRefState */
	sbsrefstate->isassignment = isAssignment;
	sbsrefstate->numupper = nupper;
	sbsrefstate->numlower = nlower;

	/*
	 * Set up per-subscript arrays.  Note that the index/indexnull arrays
	 * also serve as the evaluation targets for the subscript expressions
	 * emitted below.
	 */
	ptr = ((char *) sbsrefstate) + MAXALIGN(sizeof(SubscriptingRefState));
	sbsrefstate->upperindex = (Datum *) ptr;
	ptr += nupper * sizeof(Datum);
	sbsrefstate->lowerindex = (Datum *) ptr;
	ptr += nlower * sizeof(Datum);
	sbsrefstate->upperprovided = (bool *) ptr;
	ptr += nupper * sizeof(bool);
	sbsrefstate->lowerprovided = (bool *) ptr;
	ptr += nlower * sizeof(bool);
	sbsrefstate->upperindexnull = (bool *) ptr;
	ptr += nupper * sizeof(bool);
	sbsrefstate->lowerindexnull = (bool *) ptr;
	/* ptr += nlower * sizeof(bool); */

	/*
	 * Let the container-type-specific code have a chance.  It must fill the
	 * "methods" struct with function pointers for us to possibly use in
	 * execution steps below; and it can optionally set up some data pointed
	 * to by the workspace field.
	 */
	memset(&methods, 0, sizeof(methods));
	sbsroutines->exec_setup(sbsref, sbsrefstate, &methods);

	/*
	 * Evaluate array input.  It's safe to do so into resv/resnull, because
	 * we won't use that as target for any of the other subexpressions, and
	 * it'll be overwritten by the final EEOP_SBSREF_FETCH/ASSIGN step, which
	 * is pushed last.
	 */
	ExecInitExprRec(sbsref->refexpr, state, resv, resnull);

	/*
	 * If refexpr yields NULL, and the operation should be strict, then
	 * result is NULL.  We can implement this with just JUMP_IF_NULL, since
	 * we evaluated the array into the desired target location.
	 */
	if (!isAssignment && sbsroutines->fetch_strict)
	{
		scratch->opcode = EEOP_JUMP_IF_NULL;
		scratch->d.jump.jumpdone = -1;	/* adjust later */
		ExprEvalPushStep(state, scratch);
		adjust_jumps = lappend_int(adjust_jumps,
								   state->steps_len - 1);
	}

	/* Evaluate upper subscripts */
	i = 0;
	foreach(lc, sbsref->refupperindexpr)
	{
		Expr	   *e = (Expr *) lfirst(lc);

		/* When slicing, individual subscript bounds can be omitted */
		if (!e)
		{
			sbsrefstate->upperprovided[i] = false;
			sbsrefstate->upperindexnull[i] = true;
		}
		else
		{
			sbsrefstate->upperprovided[i] = true;
			/* Each subscript is evaluated into appropriate array entry */
			ExecInitExprRec(e, state,
							&sbsrefstate->upperindex[i],
							&sbsrefstate->upperindexnull[i]);
		}
		i++;
	}

	/* Evaluate lower subscripts similarly */
	i = 0;
	foreach(lc, sbsref->reflowerindexpr)
	{
		Expr	   *e = (Expr *) lfirst(lc);

		/* When slicing, individual subscript bounds can be omitted */
		if (!e)
		{
			sbsrefstate->lowerprovided[i] = false;
			sbsrefstate->lowerindexnull[i] = true;
		}
		else
		{
			sbsrefstate->lowerprovided[i] = true;
			/* Each subscript is evaluated into appropriate array entry */
			ExecInitExprRec(e, state,
							&sbsrefstate->lowerindex[i],
							&sbsrefstate->lowerindexnull[i]);
		}
		i++;
	}

	/* SBSREF_SUBSCRIPTS checks and converts all the subscripts at once */
	if (methods.sbs_check_subscripts)
	{
		scratch->opcode = EEOP_SBSREF_SUBSCRIPTS;
		scratch->d.sbsref_subscript.subscriptfunc = methods.sbs_check_subscripts;
		scratch->d.sbsref_subscript.state = sbsrefstate;
		scratch->d.sbsref_subscript.jumpdone = -1;	/* adjust later */
		ExprEvalPushStep(state, scratch);
		adjust_jumps = lappend_int(adjust_jumps,
								   state->steps_len - 1);
	}

	if (isAssignment)
	{
		Datum	   *save_innermost_caseval;
		bool	   *save_innermost_casenull;

		/*
		 * Check for unimplemented methods: subscripted assignment requires a
		 * type-provided assign method.
		 */
		if (!methods.sbs_assign)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("type %s does not support subscripted assignment",
							format_type_be(sbsref->refcontainertype))));

		/*
		 * We might have a nested-assignment situation, in which the
		 * refassgnexpr is itself a FieldStore or SubscriptingRef that needs
		 * to obtain and modify the previous value of the array element or
		 * slice being replaced.  If so, we have to extract that value from
		 * the array and pass it down via the CaseTestExpr mechanism.  It's
		 * safe to reuse the CASE mechanism because there cannot be a CASE
		 * between here and where the value would be needed, and an array
		 * assignment can't be within a CASE either.  (So saving and
		 * restoring innermost_caseval is just paranoia, but let's do it
		 * anyway.)
		 *
		 * Since fetching the old element might be a nontrivial expense, do
		 * it only if the argument actually needs it.
		 */
		if (isAssignmentIndirectionExpr(sbsref->refassgnexpr))
		{
			if (!methods.sbs_fetch_old)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("type %s does not support subscripted assignment",
								format_type_be(sbsref->refcontainertype))));
			scratch->opcode = EEOP_SBSREF_OLD;
			scratch->d.sbsref.subscriptfunc = methods.sbs_fetch_old;
			scratch->d.sbsref.state = sbsrefstate;
			ExprEvalPushStep(state, scratch);
		}

		/* SBSREF_OLD puts extracted value into prevvalue/prevnull */
		save_innermost_caseval = state->innermost_caseval;
		save_innermost_casenull = state->innermost_casenull;
		state->innermost_caseval = &sbsrefstate->prevvalue;
		state->innermost_casenull = &sbsrefstate->prevnull;

		/* evaluate replacement value into replacevalue/replacenull */
		ExecInitExprRec(sbsref->refassgnexpr, state,
						&sbsrefstate->replacevalue, &sbsrefstate->replacenull);

		state->innermost_caseval = save_innermost_caseval;
		state->innermost_casenull = save_innermost_casenull;

		/* and perform the assignment */
		scratch->opcode = EEOP_SBSREF_ASSIGN;
		scratch->d.sbsref.subscriptfunc = methods.sbs_assign;
		scratch->d.sbsref.state = sbsrefstate;
		ExprEvalPushStep(state, scratch);
	}
	else
	{
		/* array fetch is much simpler */
		scratch->opcode = EEOP_SBSREF_FETCH;
		scratch->d.sbsref.subscriptfunc = methods.sbs_fetch;
		scratch->d.sbsref.state = sbsrefstate;
		ExprEvalPushStep(state, scratch);
	}
	/* adjust jump targets */
	foreach(lc, adjust_jumps)
	{
		ExprEvalStep *as = &state->steps[lfirst_int(lc)];

		if (as->opcode == EEOP_SBSREF_SUBSCRIPTS)
		{
			Assert(as->d.sbsref_subscript.jumpdone == -1);
			as->d.sbsref_subscript.jumpdone = state->steps_len;
		}
		else
		{
			Assert(as->opcode == EEOP_JUMP_IF_NULL);
			Assert(as->d.jump.jumpdone == -1);
			as->d.jump.jumpdone = state->steps_len;
		}
	}
}

/*
 * Helper for preparing SubscriptingRef expressions for evaluation: is expr
 * a nested FieldStore or SubscriptingRef that needs the old element value
 * passed down?
 *
 * (We could use this in FieldStore too, but in that case passing the old
 * value is so cheap there's no need.)
 *
 * Note: it might seem that this needs to recurse, but in most cases it does
 * not; the CaseTestExpr, if any, will be directly the arg or refexpr of the
 * top-level node.  Nested-assignment situations give rise to expression
 * trees in which each level of assignment has its own CaseTestExpr, and the
 * recursive structure appears within the newvals or refassgnexpr field.
 * There is an exception, though: if the array is an array-of-domain, we will
 * have a CoerceToDomain or RelabelType as the refassgnexpr, and we need to
 * be able to look through that.
 */
static bool
isAssignmentIndirectionExpr(Expr *expr)
{
	if (expr == NULL)
		return false;			/* just paranoia */
	if (IsA(expr, FieldStore))
	{
		FieldStore *fstore = (FieldStore *) expr;

		if (fstore->arg && IsA(fstore->arg, CaseTestExpr))
			return true;
	}
	else if (IsA(expr, SubscriptingRef))
	{
		SubscriptingRef *sbsRef = (SubscriptingRef *) expr;

		if (sbsRef->refexpr && IsA(sbsRef->refexpr, CaseTestExpr))
			return true;
	}
	else if (IsA(expr, CoerceToDomain))
	{
		/* look through coercions added for array-of-domain (see above) */
		CoerceToDomain *cd = (CoerceToDomain *) expr;

		return isAssignmentIndirectionExpr(cd->arg);
	}
	else if (IsA(expr, RelabelType))
	{
		RelabelType *r = (RelabelType *) expr;

		return isAssignmentIndirectionExpr(r->arg);
	}
	return false;
}

/*
 * Prepare evaluation of a CoerceToDomain expression.
 */
static void
ExecInitCoerceToDomain(ExprEvalStep *scratch, CoerceToDomain *ctest,
					   ExprState *state, Datum *resv, bool *resnull)
{
	DomainConstraintRef *constraint_ref;
	Datum	   *domainval = NULL;
	bool	   *domainnull = NULL;
	ListCell   *l;

	scratch->d.domaincheck.resulttype = ctest->resulttype;
	/* we'll allocate workspace only if needed */
	scratch->d.domaincheck.checkvalue = NULL;
	scratch->d.domaincheck.checknull = NULL;
	scratch->d.domaincheck.escontext = state->escontext;

	/*
	 * Evaluate argument - it's fine to directly store it into resv/resnull,
	 * if there's constraint failures there'll be errors, otherwise it's what
	 * needs to be returned.
	 */
	ExecInitExprRec(ctest->arg, state, resv, resnull);

	/*
	 * Note: if the argument is of varlena type, it could be a R/W expanded
	 * object.  We want to return the R/W pointer as the final result, but we
	 * have to pass a R/O pointer as the value to be tested by any functions
	 * in check expressions.  We don't bother to emit a MAKE_READONLY step
	 * unless there's actually at least one check expression, though.  Until
	 * we've tested that, domainval/domainnull are NULL.
	 */

	/*
	 * Collect the constraints associated with the domain.
	 *
	 * Note: before PG v10 we'd recheck the set of constraints during each
	 * evaluation of the expression.  Now we bake them into the ExprState
	 * during executor initialization.  That means we don't need typcache.c
	 * to provide compiled exprs.
	 */
	constraint_ref = palloc_object(DomainConstraintRef);
	InitDomainConstraintRef(ctest->resulttype,
							constraint_ref,
							CurrentMemoryContext,
							false);

	/*
	 * Compile code to check each domain constraint.  NOTNULL constraints can
	 * just be applied on the resv/resnull value, but for CHECK constraints
	 * we need more pushups.
	 */
	foreach(l, constraint_ref->constraints)
	{
		DomainConstraintState *con = (DomainConstraintState *) lfirst(l);
		Datum	   *save_innermost_domainval;
		bool	   *save_innermost_domainnull;

		/* remember which constraint the emitted step checks */
		scratch->d.domaincheck.constraintname = con->name;

		switch (con->constrainttype)
		{
			case DOM_CONSTRAINT_NOTNULL:
				scratch->opcode = EEOP_DOMAIN_NOTNULL;
				ExprEvalPushStep(state, scratch);
				break;
			case DOM_CONSTRAINT_CHECK:
				/* Allocate workspace for CHECK output if we didn't yet */
				if (scratch->d.domaincheck.checkvalue == NULL)
				{
					scratch->d.domaincheck.checkvalue = palloc_object(Datum);
					scratch->d.domaincheck.checknull = palloc_object(bool);
				}

				/*
				 * If first time through, determine where CoerceToDomainValue
				 * nodes should read from.
				 */
				if (domainval == NULL)
				{
					/*
					 * Since value might be read multiple times, force to R/O
					 * - but only if it could be an expanded datum.
					 */
					if (get_typlen(ctest->resulttype) == -1)
					{
						ExprEvalStep scratch2 = {0};

						/* Yes, so make output workspace for MAKE_READONLY */
						domainval = palloc_object(Datum);
						domainnull = palloc_object(bool);

						/* Emit MAKE_READONLY */
						scratch2.opcode = EEOP_MAKE_READONLY;
						scratch2.resvalue = domainval;
						scratch2.resnull = domainnull;
						scratch2.d.make_readonly.value = resv;
						scratch2.d.make_readonly.isnull = resnull;
						ExprEvalPushStep(state, &scratch2);
					}
					else
					{
						/* No, so it's fine to read from resv/resnull */
						domainval = resv;
						domainnull = resnull;
					}
				}

				/*
				 * Set up value to be returned by CoerceToDomainValue nodes.
				 * We must save and restore innermost_domainval/null fields,
				 * in case this node is itself within a check expression for
				 * another domain.
				 */
				save_innermost_domainval = state->innermost_domainval;
				save_innermost_domainnull = state->innermost_domainnull;
				state->innermost_domainval = domainval;
				state->innermost_domainnull = domainnull;

				/* evaluate check expression value */
				ExecInitExprRec(con->check_expr, state,
								scratch->d.domaincheck.checkvalue,
								scratch->d.domaincheck.checknull);

				state->innermost_domainval = save_innermost_domainval;
				state->innermost_domainnull = save_innermost_domainnull;

				/* now test result */
				scratch->opcode = EEOP_DOMAIN_CHECK;
				ExprEvalPushStep(state, scratch);

				break;
			default:
				elog(ERROR, "unrecognized constraint type: %d",
					 (int) con->constrainttype);
				break;
		}
	}
}

/*
 * Build transition/combine function invocations for all aggregate transition
 * / combination function invocations in a grouping sets phase.  This has to
 * invoke all sort based transitions in a phase (if doSort is true), all hash
 * based transitions (if doHash is true), or both (both true).
 *
 * The resulting expression will, for each set of transition values, first
 * check for filters, evaluate aggregate input, check that that input is not
 * NULL for a strict transition function, and then finally invoke the
 * transition for each of the concurrently computed grouping sets.
 *
 * If nullcheck is true, the generated code will check for a NULL pointer to
 * the array of AggStatePerGroup, and skip evaluation if so.
 */
ExprState *
ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase,
				  bool doSort, bool doHash, bool nullcheck)
{
	ExprState  *state = makeNode(ExprState);
	PlanState  *parent = &aggstate->ss.ps;
	ExprEvalStep scratch = {0};
	bool		isCombine = DO_AGGSPLIT_COMBINE(aggstate->aggsplit);
	ExprSetupInfo deform = {0, 0, 0, 0, 0, NIL};

	state->expr = (Expr *) aggstate;
	state->parent = parent;

	scratch.resvalue = &state->resvalue;
	scratch.resnull = &state->resnull;

	/*
	 * First figure out which slots, and how many columns from each, we're
	 * going to need.
	 */
	for (int transno = 0; transno < aggstate->numtrans; transno++)
	{
		AggStatePerTrans pertrans = &aggstate->pertrans[transno];

		expr_setup_walker((Node *) pertrans->aggref->aggdirectargs,
						  &deform);
		expr_setup_walker((Node *) pertrans->aggref->args,
						  &deform);
		expr_setup_walker((Node *) pertrans->aggref->aggorder,
						  &deform);
		expr_setup_walker((Node *) pertrans->aggref->aggdistinct,
						  &deform);
		expr_setup_walker((Node *) pertrans->aggref->aggfilter,
						  &deform);
	}
	/* push deform steps covering everything the transitions will read */
	ExecPushExprSetupSteps(state, &deform);

	/*
	 * Emit instructions for each transition value / grouping set
	 * combination.
	 */
	for (int transno = 0; transno < aggstate->numtrans; transno++)
	{
		AggStatePerTrans pertrans = &aggstate->pertrans[transno];
		FunctionCallInfo trans_fcinfo = pertrans->transfn_fcinfo;
		List	   *adjust_bailout = NIL;
		NullableDatum *strictargs = NULL;
		bool	   *strictnulls = NULL;
		int			argno;
		ListCell   *bail;

		/*
		 * If filter present, emit.  Do so before evaluating the input, to
		 * avoid potentially unneeded computations, or even worse, unintended
		 * side-effects.  When combining, all the necessary filtering has
		 * already been done.
		 */
		if (pertrans->aggref->aggfilter && !isCombine)
		{
			/* evaluate filter expression */
			ExecInitExprRec(pertrans->aggref->aggfilter, state,
							&state->resvalue, &state->resnull);
			/* and jump out if false */
			scratch.opcode = EEOP_JUMP_IF_NOT_TRUE;
			scratch.d.jump.jumpdone = -1;	/* adjust later */
			ExprEvalPushStep(state, &scratch);
			adjust_bailout = lappend_int(adjust_bailout,
										 state->steps_len - 1);
		}

		/*
		 * Evaluate arguments to aggregate/combine function.
		 */
		argno = 0;
		if (isCombine)
		{
			/*
			 * Combining two aggregate transition values.  Instead of
			 * directly coming from a tuple the input is a, potentially
			 * deserialized, transition value.
			 */
			TargetEntry *source_tle;

			Assert(pertrans->numSortCols == 0);
			Assert(list_length(pertrans->aggref->args) == 1);

			strictargs = trans_fcinfo->args + 1;
			source_tle = (TargetEntry *) linitial(pertrans->aggref->args);

			/*
			 * deserialfn_oid will be set if we must deserialize the input
			 * state before calling the combine function.
			 */
			if (!OidIsValid(pertrans->deserialfn_oid))
			{
				/*
				 * Start from 1, since the 0th arg will be the transition
				 * value
				 */
				ExecInitExprRec(source_tle->expr, state,
								&trans_fcinfo->args[argno + 1].value,
								&trans_fcinfo->args[argno + 1].isnull);
			}
			else
			{
				FunctionCallInfo ds_fcinfo = pertrans->deserialfn_fcinfo;

				/* evaluate argument */
				ExecInitExprRec(source_tle->expr, state,
								&ds_fcinfo->args[0].value,
								&ds_fcinfo->args[0].isnull);

				/* Dummy second argument for type-safety reasons */
				ds_fcinfo->args[1].value = PointerGetDatum(NULL);
				ds_fcinfo->args[1].isnull = false;

				/*
				 * Don't call a strict deserialization function with NULL
				 * input
				 */
				if (pertrans->deserialfn.fn_strict)
					scratch.opcode = EEOP_AGG_STRICT_DESERIALIZE;
				else
					scratch.opcode = EEOP_AGG_DESERIALIZE;

				scratch.d.agg_deserialize.fcinfo_data = ds_fcinfo;
				scratch.d.agg_deserialize.jumpnull = -1;	/* adjust later */
				scratch.resvalue = &trans_fcinfo->args[argno + 1].value;
				scratch.resnull = &trans_fcinfo->args[argno + 1].isnull;

				ExprEvalPushStep(state, &scratch);
				/* don't add an adjustment unless the function is strict */
				if (pertrans->deserialfn.fn_strict)
					adjust_bailout = lappend_int(adjust_bailout,
												 state->steps_len - 1);

				/* restore normal settings of scratch fields */
				scratch.resvalue = &state->resvalue;
				scratch.resnull = &state->resnull;
			}
			argno++;

			Assert(pertrans->numInputs == argno);
		}
		else if (!pertrans->aggsortrequired)
		{
			ListCell   *arg;

			/*
			 * Normal transition function without ORDER BY / DISTINCT or with
			 * ORDER BY / DISTINCT but the planner has given us pre-sorted
			 * input.
			 */
			strictargs = trans_fcinfo->args + 1;

			foreach(arg, pertrans->aggref->args)
			{
				TargetEntry *source_tle = (TargetEntry *) lfirst(arg);

				/*
				 * Don't initialize args for any ORDER BY clause that might
				 * exist in a presorted aggregate.
				 */
				if (argno == pertrans->numTransInputs)
					break;

				/*
				 * Start from 1, since the 0th arg will be the transition
				 * value
				 */
				ExecInitExprRec(source_tle->expr, state,
								&trans_fcinfo->args[argno + 1].value,
								&trans_fcinfo->args[argno + 1].isnull);
				argno++;
			}
			Assert(pertrans->numTransInputs == argno);
		}
		else if (pertrans->numInputs == 1)
		{
			/*
			 * Non-presorted DISTINCT and/or ORDER BY case, with a single
			 * column sorted on.
			 */
			TargetEntry *source_tle =
				(TargetEntry *) linitial(pertrans->aggref->args);

			Assert(list_length(pertrans->aggref->args) == 1);

			ExecInitExprRec(source_tle->expr, state,
							&state->resvalue,
							&state->resnull);
			strictnulls = &state->resnull;
			argno++;

			Assert(pertrans->numInputs == argno);
		}
		else
		{
			/*
			 * Non-presorted DISTINCT and/or ORDER BY case, with multiple
			 * columns sorted on.
			 */
			Datum	   *values = pertrans->sortslot->tts_values;
			bool	   *nulls = pertrans->sortslot->tts_isnull;
			ListCell   *arg;

			strictnulls = nulls;

			foreach(arg, pertrans->aggref->args)
			{
				TargetEntry *source_tle = (TargetEntry *) lfirst(arg);

				ExecInitExprRec(source_tle->expr, state,
								&values[argno], &nulls[argno]);
				argno++;
			}
			Assert(pertrans->numInputs == argno);
		}

		/*
		 * For a strict transfn, nothing happens when there's a NULL input;
		 * we just keep the prior transValue.  This is true for both plain
		 * and sorted/distinct aggregates.
		 */
		if (trans_fcinfo->flinfo->fn_strict && pertrans->numTransInputs > 0)
		{
			if (strictnulls)
				scratch.opcode = EEOP_AGG_STRICT_INPUT_CHECK_NULLS;
			else if (strictargs && pertrans->numTransInputs == 1)
				scratch.opcode = EEOP_AGG_STRICT_INPUT_CHECK_ARGS_1;
			else
				scratch.opcode = EEOP_AGG_STRICT_INPUT_CHECK_ARGS;
			scratch.d.agg_strict_input_check.nulls = strictnulls;
			scratch.d.agg_strict_input_check.args = strictargs;
			scratch.d.agg_strict_input_check.jumpnull = -1; /* adjust later */
			scratch.d.agg_strict_input_check.nargs = pertrans->numTransInputs;
			ExprEvalPushStep(state, &scratch);
			adjust_bailout = lappend_int(adjust_bailout,
										 state->steps_len - 1);
		}

		/* Handle DISTINCT aggregates which have pre-sorted input */
		if (pertrans->numDistinctCols > 0 && !pertrans->aggsortrequired)
		{
			if (pertrans->numDistinctCols > 1)
				scratch.opcode = EEOP_AGG_PRESORTED_DISTINCT_MULTI;
			else
				scratch.opcode = EEOP_AGG_PRESORTED_DISTINCT_SINGLE;

			scratch.d.agg_presorted_distinctcheck.pertrans = pertrans;
			scratch.d.agg_presorted_distinctcheck.jumpdistinct = -1;	/* adjust later */
			ExprEvalPushStep(state, &scratch);
			adjust_bailout = lappend_int(adjust_bailout,
										 state->steps_len - 1);
		}

		/*
		 * Call transition function (once for each concurrently evaluated
		 * grouping set).  Do so for both sort and hash based computations,
		 * as applicable.
		 */
		if (doSort)
		{
			int			processGroupingSets = Max(phase->numsets, 1);
			int			setoff = 0;

			for (int setno = 0; setno < processGroupingSets; setno++)
			{
				ExecBuildAggTransCall(state, aggstate, &scratch, trans_fcinfo,
									  pertrans, transno, setno, setoff, false,
									  nullcheck);
				setoff++;
			}
		}

		if (doHash)
		{
			int			numHashes = aggstate->num_hashes;
			int			setoff;

			/* in MIXED mode, there'll be preceding transition values */
			if (aggstate->aggstrategy != AGG_HASHED)
				setoff = aggstate->maxsets;
			else
				setoff = 0;

			for (int setno = 0; setno < numHashes; setno++)
			{
				ExecBuildAggTransCall(state, aggstate, &scratch, trans_fcinfo,
									  pertrans, transno, setno, setoff, true,
									  nullcheck);
				setoff++;
			}
		}

		/* adjust early bail out jump target(s) */
		foreach(bail, adjust_bailout)
		{
			ExprEvalStep *as = &state->steps[lfirst_int(bail)];

			if (as->opcode == EEOP_JUMP_IF_NOT_TRUE)
			{
				Assert(as->d.jump.jumpdone == -1);
				as->d.jump.jumpdone = state->steps_len;
			}
			else if (as->opcode == EEOP_AGG_STRICT_INPUT_CHECK_ARGS ||
					 as->opcode == EEOP_AGG_STRICT_INPUT_CHECK_ARGS_1 ||
					 as->opcode == EEOP_AGG_STRICT_INPUT_CHECK_NULLS)
			{
				Assert(as->d.agg_strict_input_check.jumpnull == -1);
				as->d.agg_strict_input_check.jumpnull = state->steps_len;
			}
			else if (as->opcode == EEOP_AGG_STRICT_DESERIALIZE)
			{
				Assert(as->d.agg_deserialize.jumpnull == -1);
				as->d.agg_deserialize.jumpnull = state->steps_len;
			}
			else if (as->opcode == EEOP_AGG_PRESORTED_DISTINCT_SINGLE ||
					 as->opcode == EEOP_AGG_PRESORTED_DISTINCT_MULTI)
			{
				Assert(as->d.agg_presorted_distinctcheck.jumpdistinct == -1);
				as->d.agg_presorted_distinctcheck.jumpdistinct = state->steps_len;
			}
			else
				Assert(false);
		}
	}

	scratch.resvalue = NULL;
	scratch.resnull = NULL;
	scratch.opcode = EEOP_DONE_NO_RETURN;
	ExprEvalPushStep(state, &scratch);

	ExecReadyExpr(state);

	return state;
}

/*
 * Build transition/combine function invocation for a single transition
 * value.  This is separated from ExecBuildAggTrans() because there are
 * multiple callsites (hash and sort in some grouping set cases).
 */
static void
ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
					  ExprEvalStep *scratch,
					  FunctionCallInfo fcinfo, AggStatePerTrans pertrans,
					  int transno, int setno, int setoff, bool ishash,
					  bool nullcheck)
{
	ExprContext *aggcontext;
	int			adjust_jumpnull = -1;

	/* hash transitions use hashcontext; sort-based ones the per-set context */
	if (ishash)
		aggcontext = aggstate->hashcontext;
	else
		aggcontext = aggstate->aggcontexts[setno];

	/* add check for NULL pointer? */
	if (nullcheck)
	{
		scratch->opcode = EEOP_AGG_PLAIN_PERGROUP_NULLCHECK;
		scratch->d.agg_plain_pergroup_nullcheck.setoff = setoff;
		/* adjust later */
		scratch->d.agg_plain_pergroup_nullcheck.jumpnull = -1;
		ExprEvalPushStep(state, scratch);
		adjust_jumpnull = state->steps_len - 1;
	}

	/*
	 * Determine appropriate transition implementation.
	 *
	 * For non-ordered aggregates and ORDER BY / DISTINCT aggregates with
	 * presorted input:
	 *
	 * If the initial value for the transition state doesn't exist in the
	 * pg_aggregate table then we will let the first non-NULL value returned
	 * from the outer procNode become the initial value.  (This is useful for
	 * aggregates like max() and min().)  The noTransValue flag signals that
	 * we need to do so.  If true, generate a
	 * EEOP_AGG_INIT_STRICT_PLAIN_TRANS{,_BYVAL} step.  This step also needs
	 * to do the work described next:
	 *
	 * If the function is strict, but does have an initial value, choose
	 * EEOP_AGG_STRICT_PLAIN_TRANS{,_BYVAL}, which skips the transition
	 * function if the transition value has become NULL (because a previous
	 * transition function returned NULL).  This step also needs to do the
	 * work described next:
	 *
	 * Otherwise we call EEOP_AGG_PLAIN_TRANS{,_BYVAL}, which does not have
	 * to perform either of the above checks.
	 *
	 * Having steps with overlapping responsibilities is not nice, but
	 * aggregations are very performance sensitive, making this worthwhile.
	 *
	 * For ordered aggregates:
	 *
	 * Only need to choose between the faster path for a single ordered
	 * column, and the one between multiple columns.  Checking strictness
	 * etc is done when finalizing the aggregate.  See
	 * process_ordered_aggregate_{single, multi} and
	 * advance_transition_function.
	 */
	if (!pertrans->aggsortrequired)
	{
		if (pertrans->transtypeByVal)
		{
			if (fcinfo->flinfo->fn_strict &&
				pertrans->initValueIsNull)
				scratch->opcode = EEOP_AGG_PLAIN_TRANS_INIT_STRICT_BYVAL;
			else if (fcinfo->flinfo->fn_strict)
				scratch->opcode = EEOP_AGG_PLAIN_TRANS_STRICT_BYVAL;
			else
				scratch->opcode = EEOP_AGG_PLAIN_TRANS_BYVAL;
		}
		else
		{
			if (fcinfo->flinfo->fn_strict &&
				pertrans->initValueIsNull)
				scratch->opcode = EEOP_AGG_PLAIN_TRANS_INIT_STRICT_BYREF;
			else if (fcinfo->flinfo->fn_strict)
				scratch->opcode = EEOP_AGG_PLAIN_TRANS_STRICT_BYREF;
			else
				scratch->opcode = EEOP_AGG_PLAIN_TRANS_BYREF;
		}
	}
	else if (pertrans->numInputs == 1)
		scratch->opcode = EEOP_AGG_ORDERED_TRANS_DATUM;
	else
		scratch->opcode = EEOP_AGG_ORDERED_TRANS_TUPLE;

	scratch->d.agg_trans.pertrans = pertrans;
	scratch->d.agg_trans.setno = setno;
	scratch->d.agg_trans.setoff = setoff;
	scratch->d.agg_trans.transno = transno;
	scratch->d.agg_trans.aggcontext = aggcontext;
	ExprEvalPushStep(state, scratch);

	/* fix up jumpnull */
	if (adjust_jumpnull != -1)
	{
		ExprEvalStep *as = &state->steps[adjust_jumpnull];

		Assert(as->opcode == EEOP_AGG_PLAIN_PERGROUP_NULLCHECK);
		Assert(as->d.agg_plain_pergroup_nullcheck.jumpnull == -1);
		as->d.agg_plain_pergroup_nullcheck.jumpnull = state->steps_len;
	}
}

/*
 * Build an ExprState that calls the given hash function(s) on the attnums
 * given by 'keyColIdx' .  When numCols > 1, the hash values returned by each
 * hash function are combined to produce a single hash value.
 *
 * desc: tuple descriptor for the to-be-hashed columns
 * ops: TupleTableSlotOps to use for the give TupleDesc
 * hashfunctions: FmgrInfos for each hash function to call, one per numCols.
 * These are used directly in the returned ExprState so must remain
 * allocated.
 * collations: collation to use when calling the hash function.
 * numCols: array length of hashfunctions, collations and keyColIdx.
 * parent: PlanState node that the resulting ExprState will be evaluated at
 * init_value: Normally 0, but can be set to other values to seed the hash
 * with.  Non-zero is marginally slower, so best to only use if it's provably
 * worthwhile.
 */
ExprState *
ExecBuildHash32FromAttrs(TupleDesc desc, const TupleTableSlotOps *ops,
						 FmgrInfo *hashfunctions, Oid *collations,
						 int numCols, AttrNumber *keyColIdx,
						 PlanState *parent, uint32 init_value)
{
	ExprState  *state = makeNode(ExprState);
	ExprEvalStep scratch = {0};
	NullableDatum *iresult = NULL;
	intptr_t	opcode;
	AttrNumber	last_attnum = 0;

	Assert(numCols >= 0);

	state->parent = parent;

	/*
	 * Make a place to store intermediate hash values between subsequent
	 * hashing of individual columns.  We only need this if there is more
	 * than one column to hash or an initial value plus one column.
	 */
	if ((int64) numCols + (init_value != 0) > 1)
		iresult = palloc_object(NullableDatum);

	/* find the highest attnum so we deform the tuple to that point */
	for (int i = 0; i < numCols; i++)
		last_attnum = Max(last_attnum, keyColIdx[i]);

	scratch.opcode = EEOP_INNER_FETCHSOME;
	scratch.d.fetch.last_var = last_attnum;
	scratch.d.fetch.fixed = false;
	scratch.d.fetch.kind = ops;
	scratch.d.fetch.known_desc = desc;
	if (ExecComputeSlotInfo(state, &scratch))
		ExprEvalPushStep(state, &scratch);

	if (init_value == 0)
	{
		/*
		 * No initial value, so we can assign the result of the hash function
		 * for the first attribute without having to concern ourselves with
		 * combining the result with any initial value.
		 */
		opcode = EEOP_HASHDATUM_FIRST;
	}
	else
	{
		/*
		 * Set up operation to set the initial value.  Normally we store this
		 * in the intermediate hash value location, but if there are no
		 * columns to hash, store it in the ExprState's result field.
		 */
		scratch.opcode = EEOP_HASHDATUM_SET_INITVAL;
		scratch.d.hashdatum_initvalue.init_value = UInt32GetDatum(init_value);
		scratch.resvalue = numCols > 0 ? &iresult->value : &state->resvalue;
		scratch.resnull = numCols > 0 ? &iresult->isnull : &state->resnull;

		ExprEvalPushStep(state, &scratch);

		/*
		 * When using an initial value use the NEXT32 ops as the FIRST ops
		 * would overwrite the stored initial value.
		 */
		opcode = EEOP_HASHDATUM_NEXT32;
	}

	for (int i = 0; i < numCols; i++)
	{
		FmgrInfo   *finfo;
		FunctionCallInfo fcinfo;
		Oid			inputcollid = collations[i];
		AttrNumber	attnum = keyColIdx[i] - 1;	/* 0-based from here on */

		finfo = &hashfunctions[i];
		fcinfo = palloc0(SizeForFunctionCallInfo(1));

		/* Initialize function call parameter structure too */
		InitFunctionCallInfoData(*fcinfo, finfo, 1, inputcollid, NULL, NULL);

		/*
		 * Fetch inner Var for this attnum and store it in the 1st arg of
		 * the hash func.
		 */
		scratch.opcode = EEOP_INNER_VAR;
		scratch.resvalue = &fcinfo->args[0].value;
		scratch.resnull = &fcinfo->args[0].isnull;
		scratch.d.var.attnum = attnum;
		scratch.d.var.vartype = TupleDescAttr(desc, attnum)->atttypid;
		scratch.d.var.varreturningtype = VAR_RETURNING_DEFAULT;

		ExprEvalPushStep(state, &scratch);

		/* Call the hash function */
		scratch.opcode = opcode;

		if (i == numCols - 1)
		{
			/*
			 * The result for hashing the final column is stored in the
			 * ExprState.
			 */
			scratch.resvalue = &state->resvalue;
			scratch.resnull = &state->resnull;
		}
		else
		{
			Assert(iresult != NULL);

			/* intermediate values are stored in an intermediate result */
			scratch.resvalue = &iresult->value;
			scratch.resnull = &iresult->isnull;
		}

		/*
		 * NEXT32 opcodes need to look at the intermediate result.  We might
		 * as well just set this for all ops.  FIRSTs won't look at it.
		 */
		scratch.d.hashdatum.iresult = iresult;

		scratch.d.hashdatum.finfo = finfo;
		scratch.d.hashdatum.fcinfo_data = fcinfo;
		scratch.d.hashdatum.fn_addr = finfo->fn_addr;

		scratch.d.hashdatum.jumpdone = -1;

		ExprEvalPushStep(state, &scratch);

		/* subsequent attnums must be combined with the previous */
		opcode = EEOP_HASHDATUM_NEXT32;
	}

	scratch.resvalue = NULL;
	scratch.resnull = NULL;
	scratch.opcode = EEOP_DONE_RETURN;
	ExprEvalPushStep(state, &scratch);

	ExecReadyExpr(state);

	return state;
}

/*
 * Build an ExprState that calls the given hash function(s) on the given
 * 'hash_exprs'.  When multiple expressions are present, the hash values
 * returned by each hash function are combined to produce a single hash
 * value.
 *
 * desc: tuple descriptor for the to-be-hashed expressions
 * ops: TupleTableSlotOps for the TupleDesc
 * hashfunc_oids: Oid for each hash function to call, one for each
 * 'hash_expr'
 * collations: collation to use when calling the hash function.
 * hash_expr: list of expressions to hash the value of
 * opstrict: array corresponding to the 'hashfunc_oids' to store op_strict()
 * parent: PlanState node that the 'hash_exprs' will be evaluated at
 * init_value: Normally 0, but can be set to other values to seed the hash
 * with some other value.  Using non-zero is slightly less efficient but can
 * be useful.
 * keep_nulls: if true, evaluation of the returned ExprState will abort early
 * returning NULL if the given hash function is strict and the Datum to hash
 * is null.  When set to false, any NULL input Datums are skipped.
 */
ExprState *
ExecBuildHash32Expr(TupleDesc desc, const TupleTableSlotOps *ops,
					const Oid *hashfunc_oids, const List *collations,
					const List *hash_exprs, const bool *opstrict,
					PlanState *parent, uint32 init_value, bool keep_nulls)
{
	ExprState  *state = makeNode(ExprState);
	ExprEvalStep scratch = {0};
	NullableDatum *iresult = NULL;
	List	   *adjust_jumps = NIL;
	ListCell   *lc;
	ListCell   *lc2;
	intptr_t	strict_opcode;
	intptr_t	opcode;
	int			num_exprs = list_length(hash_exprs);

	/* hash_exprs and collations must be parallel lists */
	Assert(num_exprs == list_length(collations));

	state->parent = parent;

	/* Insert setup steps as needed. */
	ExecCreateExprSetupSteps(state, (Node *) hash_exprs);

	/*
	 * Make a place to store intermediate hash values between subsequent
	 * hashing of individual expressions.  We only need this if there is more
	 * than one expression to hash or an initial value plus one expression.
	 */
	if ((int64) num_exprs + (init_value != 0) > 1)
		iresult = palloc_object(NullableDatum);

	if (init_value == 0)
	{
		/*
		 * No initial value, so we can assign the result of the hash function
		 * for the first hash_expr without having to concern ourselves with
		 * combining the result with any initial value.
		 */
		strict_opcode = EEOP_HASHDATUM_FIRST_STRICT;
		opcode = EEOP_HASHDATUM_FIRST;
	}
	else
	{
		/*
		 * Set up operation to set the initial value.  Normally we store this
		 * in the intermediate hash value location, but if there are no exprs
		 * to hash, store it in the ExprState's result field.
		 */
		scratch.opcode = EEOP_HASHDATUM_SET_INITVAL;
		scratch.d.hashdatum_initvalue.init_value = UInt32GetDatum(init_value);
		scratch.resvalue = num_exprs > 0 ? &iresult->value : &state->resvalue;
		scratch.resnull = num_exprs > 0 ? &iresult->isnull : &state->resnull;

		ExprEvalPushStep(state, &scratch);

		/*
		 * When using an initial value use the NEXT32/NEXT32_STRICT ops as the
		 * FIRST/FIRST_STRICT ops would overwrite the stored initial value.
		 */
		strict_opcode = EEOP_HASHDATUM_NEXT32_STRICT;
		opcode = EEOP_HASHDATUM_NEXT32;
	}

	/* emit one hashing step per expression/collation pair */
	forboth(lc, hash_exprs, lc2, collations)
	{
		Expr	   *expr = (Expr *) lfirst(lc);
		FmgrInfo   *finfo;
		FunctionCallInfo fcinfo;
		int			i = foreach_current_index(lc);
		Oid			funcid;
		Oid			inputcollid = lfirst_oid(lc2);

		funcid = hashfunc_oids[i];

		/* Allocate hash function lookup data. */
		finfo = palloc0_object(FmgrInfo);
		fcinfo = palloc0(SizeForFunctionCallInfo(1));

		fmgr_info(funcid, finfo);

		/*
		 * Build the steps to evaluate the hash function's argument have it so
		 * the value of that is stored in the 0th argument of the hash func.
		 */
		ExecInitExprRec(expr,
						state,
						&fcinfo->args[0].value,
						&fcinfo->args[0].isnull);

		if (i == num_exprs - 1)
		{
			/* the result for hashing the final expr is stored in the state */
			scratch.resvalue = &state->resvalue;
			scratch.resnull = &state->resnull;
		}
		else
		{
			Assert(iresult != NULL);

			/* intermediate values are stored in an intermediate result */
			scratch.resvalue = &iresult->value;
			scratch.resnull = &iresult->isnull;
		}

		/*
		 * NEXT32 opcodes need to look at the intermediate result.  We might
		 * as well just set this for all ops.  FIRSTs won't look at it.
		 */
		scratch.d.hashdatum.iresult = iresult;

		/* Initialize function call parameter structure too */
		InitFunctionCallInfoData(*fcinfo, finfo, 1, inputcollid, NULL, NULL);

		scratch.d.hashdatum.finfo = finfo;
		scratch.d.hashdatum.fcinfo_data = fcinfo;
		scratch.d.hashdatum.fn_addr = finfo->fn_addr;

		/* strict hash fns get the _STRICT opcode unless NULLs must be kept */
		scratch.opcode = opstrict[i] && !keep_nulls ? strict_opcode : opcode;
		scratch.d.hashdatum.jumpdone = -1;

		ExprEvalPushStep(state, &scratch);
		adjust_jumps = lappend_int(adjust_jumps, state->steps_len - 1);

		/*
		 * For subsequent keys we must combine the hash value with the
		 * previous hashes.
		 */
		strict_opcode = EEOP_HASHDATUM_NEXT32_STRICT;
		opcode = EEOP_HASHDATUM_NEXT32;
	}

	/* adjust jump targets */
	foreach(lc, adjust_jumps)
	{
		ExprEvalStep *as = &state->steps[lfirst_int(lc)];

		Assert(as->opcode == EEOP_HASHDATUM_FIRST ||
			   as->opcode == EEOP_HASHDATUM_FIRST_STRICT ||
			   as->opcode == EEOP_HASHDATUM_NEXT32 ||
			   as->opcode == EEOP_HASHDATUM_NEXT32_STRICT);
		Assert(as->d.hashdatum.jumpdone == -1);
		as->d.hashdatum.jumpdone = state->steps_len;
	}

	scratch.resvalue = NULL;
	scratch.resnull = NULL;
	scratch.opcode = EEOP_DONE_RETURN;
	ExprEvalPushStep(state, &scratch);

	ExecReadyExpr(state);

	return state;
}

/*
 * Build equality expression that can be evaluated using ExecQual(), returning
 * true if the expression context's inner/outer tuple are NOT DISTINCT. I.e
 * two nulls match, a null and a not-null don't match.
 *
 * desc: tuple descriptor of the to-be-compared tuples
 * numCols: the number of attributes to be examined
 * keyColIdx: array of attribute column numbers
 * eqFunctions: array of function oids of the equality functions to use
 * parent: parent executor node
 */
ExprState *
ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
					   const TupleTableSlotOps *lops, const TupleTableSlotOps *rops,
					   int numCols,
					   const AttrNumber *keyColIdx,
					   const Oid *eqfunctions,
					   const Oid *collations,
					   PlanState *parent)
{
	ExprState  *state = makeNode(ExprState);
	ExprEvalStep scratch = {0};
	int			maxatt = -1;
	List	   *adjust_jumps = NIL;
	ListCell   *lc;

	/*
	 * When no columns are actually compared, the result's always true. See
	 * special case in ExecQual().
	 *
	 * NB: callers must be prepared for a NULL return, not an ExprState.
	 */
	if (numCols == 0)
		return NULL;

	state->expr = NULL;
	state->flags = EEO_FLAG_IS_QUAL;
	state->parent = parent;

	scratch.resvalue = &state->resvalue;
	scratch.resnull = &state->resnull;

	/* compute max needed attribute */
	for (int natt = 0; natt < numCols; natt++)
	{
		int			attno = keyColIdx[natt];

		if (attno > maxatt)
			maxatt = attno;
	}
	Assert(maxatt >= 0);

	/* push deform steps */
	scratch.opcode = EEOP_INNER_FETCHSOME;
	scratch.d.fetch.last_var = maxatt;
	scratch.d.fetch.fixed = false;
	scratch.d.fetch.known_desc = ldesc;
	scratch.d.fetch.kind = lops;
	if (ExecComputeSlotInfo(state, &scratch))
		ExprEvalPushStep(state, &scratch);

	scratch.opcode = EEOP_OUTER_FETCHSOME;
	scratch.d.fetch.last_var = maxatt;
	scratch.d.fetch.fixed = false;
	scratch.d.fetch.known_desc = rdesc;
	scratch.d.fetch.kind = rops;
	if (ExecComputeSlotInfo(state, &scratch))
		ExprEvalPushStep(state, &scratch);

	/*
	 * Start comparing at the last field (least significant sort key). That's
	 * the most likely to be different if we are dealing with sorted input.
	 */
	for (int natt = numCols; --natt >= 0;)
	{
		int			attno = keyColIdx[natt];
		Form_pg_attribute latt = TupleDescAttr(ldesc, attno - 1);
		Form_pg_attribute ratt = TupleDescAttr(rdesc, attno - 1);
		Oid			foid = eqfunctions[natt];
		Oid			collid = collations[natt];
		FmgrInfo   *finfo;
		FunctionCallInfo fcinfo;
		AclResult	aclresult;

		/* Check permission to call function */
		aclresult = object_aclcheck(ProcedureRelationId, foid, GetUserId(), ACL_EXECUTE);
		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(foid));

		InvokeFunctionExecuteHook(foid);

		/* Set up the primary fmgr lookup information */
		finfo = palloc0_object(FmgrInfo);
		fcinfo = palloc0(SizeForFunctionCallInfo(2));
		fmgr_info(foid, finfo);
		fmgr_info_set_expr(NULL, finfo);
		InitFunctionCallInfoData(*fcinfo, finfo, 2, collid, NULL, NULL);

		/* left arg */
		scratch.opcode = EEOP_INNER_VAR;
		scratch.d.var.attnum = attno - 1;
		scratch.d.var.vartype = latt->atttypid;
		scratch.d.var.varreturningtype = VAR_RETURNING_DEFAULT;
		scratch.resvalue = &fcinfo->args[0].value;
		scratch.resnull = &fcinfo->args[0].isnull;
		ExprEvalPushStep(state, &scratch);

		/* right arg */
		scratch.opcode = EEOP_OUTER_VAR;
		scratch.d.var.attnum = attno - 1;
		scratch.d.var.vartype = ratt->atttypid;
		scratch.d.var.varreturningtype = VAR_RETURNING_DEFAULT;
		scratch.resvalue = &fcinfo->args[1].value;
		scratch.resnull = &fcinfo->args[1].isnull;
		ExprEvalPushStep(state, &scratch);

		/* evaluate distinctness */
		scratch.opcode = EEOP_NOT_DISTINCT;
		scratch.d.func.finfo = finfo;
		scratch.d.func.fcinfo_data = fcinfo;
		scratch.d.func.fn_addr = finfo->fn_addr;
		scratch.d.func.nargs = 2;
		scratch.resvalue = &state->resvalue;
		scratch.resnull = &state->resnull;
		ExprEvalPushStep(state, &scratch);

		/* then emit EEOP_QUAL to detect if result is false (or null) */
		scratch.opcode = EEOP_QUAL;
		scratch.d.qualexpr.jumpdone = -1;
		scratch.resvalue = &state->resvalue;
		scratch.resnull = &state->resnull;
		ExprEvalPushStep(state, &scratch);
		adjust_jumps = lappend_int(adjust_jumps,
								   state->steps_len - 1);
	}

	/* adjust jump targets */
	foreach(lc, adjust_jumps)
	{
		ExprEvalStep *as = &state->steps[lfirst_int(lc)];

		Assert(as->opcode == EEOP_QUAL);
		Assert(as->d.qualexpr.jumpdone == -1);
		as->d.qualexpr.jumpdone = state->steps_len;
	}

	scratch.resvalue = NULL;
	scratch.resnull = NULL;
	scratch.opcode = EEOP_DONE_RETURN;
	ExprEvalPushStep(state, &scratch);

	ExecReadyExpr(state);

	return state;
}

/*
 * Build equality expression that can be evaluated using ExecQual(), returning
 * true if the expression context's inner/outer tuples are equal.  Datums in
 * the inner/outer slots are assumed to be in the same order and quantity as
 * the 'eqfunctions' parameter.  NULLs are treated as equal.
 *
 * desc: tuple descriptor of the to-be-compared tuples
 * lops: the slot ops for the inner tuple slots
 * rops: the slot ops for the outer tuple slots
 * eqFunctions: array of function oids of the equality functions to use
 * this must be the same length as the 'param_exprs' list.
 * collations: collation Oids to use for equality comparison. Must be the
 * same length as the 'param_exprs' list.
 * parent: parent executor node
 */
ExprState *
ExecBuildParamSetEqual(TupleDesc desc,
					   const TupleTableSlotOps *lops,
					   const TupleTableSlotOps *rops,
					   const Oid *eqfunctions,
					   const Oid *collations,
					   const List *param_exprs,
					   PlanState *parent)
{
	ExprState  *state = makeNode(ExprState);
	ExprEvalStep scratch = {0};
	/* one attribute per parameter expression; slots match positionally */
	int			maxatt = list_length(param_exprs);
	List	   *adjust_jumps = NIL;
	ListCell   *lc;

	state->expr = NULL;
	state->flags = EEO_FLAG_IS_QUAL;
	state->parent = parent;

	scratch.resvalue = &state->resvalue;
	scratch.resnull = &state->resnull;

	/* push deform steps */
	scratch.opcode = EEOP_INNER_FETCHSOME;
	scratch.d.fetch.last_var = maxatt;
	scratch.d.fetch.fixed = false;
	scratch.d.fetch.known_desc = desc;
	scratch.d.fetch.kind = lops;
	if (ExecComputeSlotInfo(state, &scratch))
		ExprEvalPushStep(state, &scratch);

	scratch.opcode = EEOP_OUTER_FETCHSOME;
	scratch.d.fetch.last_var = maxatt;
	scratch.d.fetch.fixed = false;
	scratch.d.fetch.known_desc = desc;
	scratch.d.fetch.kind = rops;
	if (ExecComputeSlotInfo(state, &scratch))
		ExprEvalPushStep(state, &scratch);

	for (int attno = 0; attno < maxatt; attno++)
	{
		Form_pg_attribute att = TupleDescAttr(desc, attno);
		Oid			foid = eqfunctions[attno];
		Oid			collid = collations[attno];
		FmgrInfo   *finfo;
		FunctionCallInfo fcinfo;
		AclResult	aclresult;

		/* Check permission to call function */
		aclresult = object_aclcheck(ProcedureRelationId, foid, GetUserId(), ACL_EXECUTE);
		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(foid));

		InvokeFunctionExecuteHook(foid);

		/* Set up the primary fmgr lookup information */
		finfo = palloc0_object(FmgrInfo);
		fcinfo = palloc0(SizeForFunctionCallInfo(2));
		fmgr_info(foid, finfo);
		fmgr_info_set_expr(NULL, finfo);
		InitFunctionCallInfoData(*fcinfo, finfo, 2, collid, NULL, NULL);

		/* left arg */
		scratch.opcode = EEOP_INNER_VAR;
		scratch.d.var.attnum = attno;
		scratch.d.var.vartype = att->atttypid;
		scratch.d.var.varreturningtype = VAR_RETURNING_DEFAULT;
		scratch.resvalue = &fcinfo->args[0].value;
		scratch.resnull = &fcinfo->args[0].isnull;
		ExprEvalPushStep(state, &scratch);

		/* right arg */
		scratch.opcode = EEOP_OUTER_VAR;
		scratch.d.var.attnum = attno;
		scratch.d.var.vartype = att->atttypid;
		scratch.d.var.varreturningtype = VAR_RETURNING_DEFAULT;
		scratch.resvalue = &fcinfo->args[1].value;
		scratch.resnull = &fcinfo->args[1].isnull;
		ExprEvalPushStep(state, &scratch);

		/* evaluate distinctness */
		scratch.opcode = EEOP_NOT_DISTINCT;
		scratch.d.func.finfo = finfo;
		scratch.d.func.fcinfo_data = fcinfo;
		scratch.d.func.fn_addr = finfo->fn_addr;
		scratch.d.func.nargs = 2;
		scratch.resvalue = &state->resvalue;
		scratch.resnull = &state->resnull;
		ExprEvalPushStep(state, &scratch);

		/* then emit EEOP_QUAL to detect if result is false (or null) */
		scratch.opcode = EEOP_QUAL;
		scratch.d.qualexpr.jumpdone = -1;
		scratch.resvalue = &state->resvalue;
		scratch.resnull = &state->resnull;
		ExprEvalPushStep(state, &scratch);
		adjust_jumps = lappend_int(adjust_jumps,
								   state->steps_len - 1);
	}

	/* adjust jump targets */
	foreach(lc, adjust_jumps)
	{
		ExprEvalStep *as = &state->steps[lfirst_int(lc)];

		Assert(as->opcode == EEOP_QUAL);
		Assert(as->d.qualexpr.jumpdone == -1);
		as->d.qualexpr.jumpdone = state->steps_len;
	}

	scratch.resvalue = NULL;
	scratch.resnull = NULL;
	scratch.opcode = EEOP_DONE_RETURN;
	ExprEvalPushStep(state, &scratch);

	ExecReadyExpr(state);

	return state;
}

/*
 * Push steps to evaluate a JsonExpr and its various subsidiary expressions.
 */
static void
ExecInitJsonExpr(JsonExpr *jsexpr, ExprState *state,
				 Datum *resv, bool *resnull,
				 ExprEvalStep *scratch)
{
	JsonExprState *jsestate = palloc0_object(JsonExprState);
	ListCell   *argexprlc;
	ListCell   *argnamelc;
	List	   *jumps_return_null = NIL;
	List	   *jumps_to_end = NIL;
	ListCell   *lc;
	ErrorSaveContext *escontext;
	bool		returning_domain =
		get_typtype(jsexpr->returning->typid) == TYPTYPE_DOMAIN;

	Assert(jsexpr->on_error != NULL);

	jsestate->jsexpr = jsexpr;

	/*
	 * Evaluate formatted_expr storing the result into
	 * jsestate->formatted_expr.
	 */
	ExecInitExprRec((Expr *) jsexpr->formatted_expr, state,
					&jsestate->formatted_expr.value,
					&jsestate->formatted_expr.isnull);

	/* JUMP to return NULL if formatted_expr evaluates to NULL */
	jumps_return_null = lappend_int(jumps_return_null, state->steps_len);
	scratch->opcode = EEOP_JUMP_IF_NULL;
	scratch->resnull = &jsestate->formatted_expr.isnull;
	scratch->d.jump.jumpdone = -1;	/* set below */
	ExprEvalPushStep(state, scratch);

	/*
	 * Evaluate pathspec expression storing the result into
	 * jsestate->pathspec.
	 */
	ExecInitExprRec((Expr *) jsexpr->path_spec, state,
					&jsestate->pathspec.value,
					&jsestate->pathspec.isnull);

	/* JUMP to return NULL if path_spec evaluates to NULL */
	jumps_return_null = lappend_int(jumps_return_null, state->steps_len);
	scratch->opcode = EEOP_JUMP_IF_NULL;
	scratch->resnull = &jsestate->pathspec.isnull;
	scratch->d.jump.jumpdone = -1;	/* set below */
	ExprEvalPushStep(state, scratch);

	/* Steps to compute PASSING args. */
	jsestate->args = NIL;
	forboth(argexprlc, jsexpr->passing_values,
			argnamelc, jsexpr->passing_names)
	{
		Expr	   *argexpr = (Expr *) lfirst(argexprlc);
		String	   *argname = lfirst_node(String, argnamelc);
		JsonPathVariable *var = palloc_object(JsonPathVariable);

		var->name = argname->sval;
		var->namelen = strlen(var->name);
		var->typid = exprType((Node *) argexpr);
		var->typmod = exprTypmod((Node *) argexpr);

		ExecInitExprRec(argexpr, state, &var->value, &var->isnull);

		jsestate->args = lappend(jsestate->args, var);
	}

	/* Step for jsonpath evaluation; see ExecEvalJsonExprPath(). */
	scratch->opcode = EEOP_JSONEXPR_PATH;
	scratch->resvalue = resv;
	scratch->resnull = resnull;
	scratch->d.jsonexpr.jsestate = jsestate;
	ExprEvalPushStep(state, scratch);

	/*
	 * Step to return NULL after jumping to skip the EEOP_JSONEXPR_PATH step
	 * when either formatted_expr or pathspec is NULL.  Adjust jump target
	 * addresses of JUMPs that we added above.
	 */
	foreach(lc, jumps_return_null)
	{
		ExprEvalStep *as = &state->steps[lfirst_int(lc)];

		as->d.jump.jumpdone = state->steps_len;
	}
	scratch->opcode = EEOP_CONST;
	scratch->resvalue = resv;
	scratch->resnull = resnull;
	scratch->d.constval.value = (Datum) 0;
	scratch->d.constval.isnull = true;
	ExprEvalPushStep(state, scratch);

	/* errors are saved (not thrown) whenever ON ERROR is not ERROR */
	escontext = jsexpr->on_error->btype != JSON_BEHAVIOR_ERROR ?
		&jsestate->escontext : NULL;

	/*
	 * To handle coercion errors softly, use the following ErrorSaveContext to
	 * pass to ExecInitExprRec() when initializing the coercion expressions
	 * and in the EEOP_JSONEXPR_COERCION step.
	 */
	jsestate->escontext.type = T_ErrorSaveContext;

	/*
	 * Steps to coerce the result value computed by EEOP_JSONEXPR_PATH or the
	 * NULL returned on NULL input as described above.
	 */
	jsestate->jump_eval_coercion = -1;
	if (jsexpr->use_json_coercion)
	{
		jsestate->jump_eval_coercion = state->steps_len;
		ExecInitJsonCoercion(state, jsexpr->returning, escontext,
							 jsexpr->omit_quotes,
							 jsexpr->op == JSON_EXISTS_OP,
							 resv, resnull);
	}
	else if (jsexpr->use_io_coercion)
	{
		/*
		 * Here we only need to initialize the FunctionCallInfo for the target
		 * type's input function, which is called by ExecEvalJsonExprPath()
		 * itself, so no additional step is necessary.
		 */
		Oid			typinput;
		Oid			typioparam;
		FmgrInfo   *finfo;
		FunctionCallInfo fcinfo;

		getTypeInputInfo(jsexpr->returning->typid, &typinput, &typioparam);
		finfo = palloc0_object(FmgrInfo);
		fcinfo = palloc0(SizeForFunctionCallInfo(3));
		fmgr_info(typinput, finfo);
		fmgr_info_set_expr((Node *) jsexpr->returning, finfo);
		InitFunctionCallInfoData(*fcinfo, finfo, 3, InvalidOid, NULL, NULL);

		/*
		 * We can preload the second and third arguments for the input
		 * function, since they're constants.
		 */
		fcinfo->args[1].value = ObjectIdGetDatum(typioparam);
		fcinfo->args[1].isnull = false;
		fcinfo->args[2].value = Int32GetDatum(jsexpr->returning->typmod);
		fcinfo->args[2].isnull = false;
		fcinfo->context = (Node *) escontext;

		jsestate->input_fcinfo = fcinfo;
	}

	/*
	 * Add a special step, if needed, to check if the coercion evaluation ran
	 * into an error but was not thrown because the ON ERROR behavior is not
	 * ERROR.  It will set jsestate->error if an error did occur.
	 */
	if (jsestate->jump_eval_coercion >= 0 && escontext != NULL)
	{
		scratch->opcode = EEOP_JSONEXPR_COERCION_FINISH;
		scratch->d.jsonexpr.jsestate = jsestate;
		ExprEvalPushStep(state, scratch);
	}

	jsestate->jump_empty = jsestate->jump_error = -1;

	/*
	 * Step to check jsestate->error and return the ON ERROR expression if
	 * there is one.  This handles both the errors that occur during jsonpath
	 * evaluation in EEOP_JSONEXPR_PATH and subsequent coercion evaluation.
	 *
	 * Speed up common cases by avoiding extra steps for a NULL-valued ON
	 * ERROR expression unless RETURNING a domain type, where constraints must
	 * be checked. ExecEvalJsonExprPath() already returns NULL on error,
	 * making additional steps unnecessary in typical scenarios. Note that the
	 * default ON ERROR behavior for JSON_VALUE() and JSON_QUERY() is to
	 * return NULL.
	 */
	if (jsexpr->on_error->btype != JSON_BEHAVIOR_ERROR &&
		(!(IsA(jsexpr->on_error->expr, Const) &&
		   ((Const *) jsexpr->on_error->expr)->constisnull) ||
		 returning_domain))
	{
		ErrorSaveContext *saved_escontext;

		jsestate->jump_error = state->steps_len;

		/* JUMP to end if false, that is, skip the ON ERROR expression. */
		jumps_to_end = lappend_int(jumps_to_end, state->steps_len);
		scratch->opcode = EEOP_JUMP_IF_NOT_TRUE;
		scratch->resvalue = &jsestate->error.value;
		scratch->resnull = &jsestate->error.isnull;
		scratch->d.jump.jumpdone = -1;	/* set below */
		ExprEvalPushStep(state, scratch);

		/*
		 * Steps to evaluate the ON ERROR expression; handle errors softly to
		 * rethrow them in COERCION_FINISH step that will be added later.
		 */
		saved_escontext = state->escontext;
		state->escontext = escontext;
		ExecInitExprRec((Expr *) jsexpr->on_error->expr, state,
						resv, resnull);
		state->escontext = saved_escontext;

		/* Step to coerce the ON ERROR expression if needed */
		if (jsexpr->on_error->coerce)
			ExecInitJsonCoercion(state, jsexpr->returning, escontext,
								 jsexpr->omit_quotes, false,
								 resv, resnull);

		/*
		 * Add a COERCION_FINISH step to check for errors that may occur when
		 * coercing and rethrow them.
		 */
		if (jsexpr->on_error->coerce ||
			IsA(jsexpr->on_error->expr, CoerceViaIO) ||
			IsA(jsexpr->on_error->expr, CoerceToDomain))
		{
			scratch->opcode = EEOP_JSONEXPR_COERCION_FINISH;
			scratch->resvalue = resv;
			scratch->resnull = resnull;
			scratch->d.jsonexpr.jsestate = jsestate;
			ExprEvalPushStep(state, scratch);
		}

		/* JUMP to end to skip the ON EMPTY steps added below. */
		jumps_to_end = lappend_int(jumps_to_end, state->steps_len);
		scratch->opcode = EEOP_JUMP;
		scratch->d.jump.jumpdone = -1;
		ExprEvalPushStep(state, scratch);
	}

	/*
	 * Step to check jsestate->empty and return the ON EMPTY expression if
	 * there is one.
	 *
	 * See the comment above for details on the optimization for NULL-valued
	 * expressions.
	 */
	if (jsexpr->on_empty != NULL &&
		jsexpr->on_empty->btype != JSON_BEHAVIOR_ERROR &&
		(!(IsA(jsexpr->on_empty->expr, Const) &&
		   ((Const *) jsexpr->on_empty->expr)->constisnull) ||
		 returning_domain))
	{
		ErrorSaveContext *saved_escontext;

		jsestate->jump_empty = state->steps_len;

		/* JUMP to end if false, that is, skip the ON EMPTY expression. */
		jumps_to_end = lappend_int(jumps_to_end, state->steps_len);
		scratch->opcode = EEOP_JUMP_IF_NOT_TRUE;
		scratch->resvalue = &jsestate->empty.value;
		scratch->resnull = &jsestate->empty.isnull;
		scratch->d.jump.jumpdone = -1;	/* set below */
		ExprEvalPushStep(state, scratch);

		/*
		 * Steps to evaluate the ON EMPTY expression; handle errors softly to
		 * rethrow them in COERCION_FINISH step that will be added later.
		 */
		saved_escontext = state->escontext;
		state->escontext = escontext;
		ExecInitExprRec((Expr *) jsexpr->on_empty->expr, state,
						resv, resnull);
		state->escontext = saved_escontext;

		/* Step to coerce the ON EMPTY expression if needed */
		if (jsexpr->on_empty->coerce)
			ExecInitJsonCoercion(state, jsexpr->returning, escontext,
								 jsexpr->omit_quotes, false,
								 resv, resnull);

		/*
		 * Add a COERCION_FINISH step to check for errors that may occur when
		 * coercing and rethrow them.
		 */
		if (jsexpr->on_empty->coerce ||
			IsA(jsexpr->on_empty->expr, CoerceViaIO) ||
			IsA(jsexpr->on_empty->expr, CoerceToDomain))
		{
			scratch->opcode = EEOP_JSONEXPR_COERCION_FINISH;
			scratch->resvalue = resv;
			scratch->resnull = resnull;
			scratch->d.jsonexpr.jsestate = jsestate;
			ExprEvalPushStep(state, scratch);
		}
	}

	/* resolve all forward jumps recorded above to land here */
	foreach(lc, jumps_to_end)
	{
		ExprEvalStep *as = &state->steps[lfirst_int(lc)];

		as->d.jump.jumpdone = state->steps_len;
	}

	jsestate->jump_end = state->steps_len;
}

/*
 * Initialize a EEOP_JSONEXPR_COERCION step to coerce the value given in resv
 * to the given RETURNING type.
 */
static void
ExecInitJsonCoercion(ExprState *state, JsonReturning *returning,
					 ErrorSaveContext *escontext, bool omit_quotes,
					 bool exists_coerce,
					 Datum *resv, bool *resnull)
{
	ExprEvalStep scratch = {0};

	/* For json_populate_type() */
	scratch.opcode = EEOP_JSONEXPR_COERCION;
	scratch.resvalue = resv;
	scratch.resnull = resnull;
	scratch.d.jsonexpr_coercion.targettype = returning->typid;
	scratch.d.jsonexpr_coercion.targettypmod = returning->typmod;
	scratch.d.jsonexpr_coercion.json_coercion_cache = NULL;
	scratch.d.jsonexpr_coercion.escontext = escontext;
	scratch.d.jsonexpr_coercion.omit_quotes = omit_quotes;
	scratch.d.jsonexpr_coercion.exists_coerce = exists_coerce;
	/* flags derived from the RETURNING type when coercing a JSON_EXISTS */
	scratch.d.jsonexpr_coercion.exists_cast_to_int = exists_coerce &&
		getBaseType(returning->typid) == INT4OID;
	scratch.d.jsonexpr_coercion.exists_check_domain = exists_coerce &&
		DomainHasConstraints(returning->typid);
	ExprEvalPushStep(state, &scratch);
}
c
github
https://github.com/postgres/postgres
src/backend/executor/execExpr.c
#!/usr/bin/env python

# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Tests for analyzer
"""

import TestGyp

found = 'Found dependency\n'
not_found = 'No dependencies\n'


def __CreateTestFile(files):
  """Writes one path per line into 'test_file', the list the analyzer
  consumes via the file_path generator flag."""
  # Use a context manager so the handle is closed even if a write fails,
  # and avoid shadowing the (Python 2) builtin name 'file'.
  with open('test_file', 'w') as out:
    for path in files:
      out.write(path + '\n')

test = TestGyp.TestGypCustom(format='analyzer')

# Verifies file_path must be specified.
test.run_gyp('test.gyp',
             stdout='Must specify files to analyze via file_path generator '
             'flag\n')

# Trivial test of a source.
__CreateTestFile(['foo.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=found)

# Conditional source that is excluded.
__CreateTestFile(['conditional_source.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=not_found)

# Conditional source that is included by way of argument.
__CreateTestFile(['conditional_source.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', '-Dtest_variable=1',
             stdout=found)

# Two unknown files.
__CreateTestFile(['unknown1.c', 'unoknow2.cc'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=not_found)

# Two unknown files.
__CreateTestFile(['unknown1.c', 'subdir/subdir_sourcex.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=not_found)

# Included dependency
__CreateTestFile(['unknown1.c', 'subdir/subdir_source.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=found)

# Included inputs to actions.
__CreateTestFile(['action_input.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=found)

# Don't consider outputs.
__CreateTestFile(['action_output.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=not_found)

# Rule inputs.
__CreateTestFile(['rule_input.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=found)

# Ignore path specified with PRODUCT_DIR.
__CreateTestFile(['product_dir_input.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=not_found)

# Path specified via a variable.
__CreateTestFile(['subdir/subdir_source2.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=found)

# Verifies paths with // are fixed up correctly.
__CreateTestFile(['parent_source.c'])
test.run_gyp('test.gyp', '-Gfile_path=test_file', stdout=found)

test.pass_test()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from osv import fields, osv
import tools


def _type_get(self, cr, uid, context=None):
    # Selection helper: one (name, name) pair per auction.lot.category row.
    obj = self.pool.get('auction.lot.category')
    ids = obj.search(cr, uid, [])
    res = obj.read(cr, uid, ids, ['name'], context)
    res = [(r['name'], r['name']) for r in res]
    return res


class report_auction(osv.osv):
    """Auction Report"""
    # Read-only reporting model backed by a SQL view (_auto=False),
    # see init() below for the view definition.
    _name = "report.auction"
    _description = "Auction's Summary"
    _auto = False
    _columns = {
        'year': fields.char('Year', size=4, readonly=True),
        'month': fields.selection([('01', 'January'), ('02', 'February'), ('03', 'March'),
            ('04', 'April'), ('05', 'May'), ('06', 'June'), ('07', 'July'), ('08', 'August'),
            ('09', 'September'), ('10', 'October'), ('11', 'November'), ('12', 'December')],
            'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'buyer_login': fields.char('Buyer Login', size=64, readonly=True, select=1),
        'buyer': fields.many2one('res.partner', 'Buyer', readonly=True, select=2),
        'seller': fields.many2one('res.partner', 'Seller', readonly=True, select=1),
        'object': fields.integer('No of objects', readonly=True, select=1),
        'total_price': fields.float('Total Price', digits=(16, 2), readonly=True, select=2),
        'lot_type': fields.selection(_type_get, 'Object category', size=64),
        'avg_price': fields.float('Avg Price.', digits=(16, 2), readonly=True, select=2),
        'date': fields.date('Create Date', select=1),
        'auction': fields.many2one('auction.dates', 'Auction date', readonly=True, select=1),
        'gross_revenue': fields.float('Gross Revenue', readonly=True),
        'net_revenue': fields.float('Net Revenue', readonly=True),
        'net_margin': fields.float('Net Margin', readonly=True),
        'avg_estimation': fields.float('Avg estimation', readonly=True),
        'user_id': fields.many2one('res.users', 'User', select=1),
        'state': fields.selection((('draft', 'Draft'), ('unsold', 'Unsold'), ('sold', 'Sold')), 'State', readonly=True, select=1),
    }

    def init(self, cr):
        # (Re)create the backing SQL view; aggregates auction lots per
        # auction date / buyer / seller / lot type.
        tools.drop_view_if_exists(cr, 'report_auction')
        cr.execute('''
         create or replace view report_auction  as (
            select
                min(al.id) as id,
                al.ach_login as "buyer_login",
                ad.auction1 as date,
                al.state,
                al.create_uid as user_id,
                to_char(ad.auction1, 'YYYY') as year,
                to_char(ad.auction1, 'MM') as month,
                to_char(ad.auction1, 'YYYY-MM-DD') as day,
                al.ach_uid as "buyer",
                al.lot_type as lot_type,
                ade.partner_id as seller,
                ad.id as auction,
                count(al.id) as "object",
                sum(al.obj_price) as "total_price",
                (sum(al.obj_price)/count(al.id)) as "avg_price",
                sum(al.gross_revenue) as gross_revenue,
                sum(al.net_revenue) as net_revenue,
                avg(al.net_margin) as net_margin,
                sum(al.lot_est1+al.lot_est2)/2 as avg_estimation
            from
                auction_lots al,
                auction_dates ad,
                auction_deposit ade
            where
                ad.id=al.auction_id and ade.id=al.bord_vnd_id
            group by
                ad.auction1, al.ach_uid, ad.id, al.ach_login, ade.partner_id,
                al.state, al.create_uid, al.lot_type
         )
        ''')
report_auction()

#==========================
#Dashboard Report
#==========================
class report_auction_object_date(osv.osv):
    _name = "report.auction.object.date"
    _description = "Objects per day"
    # SQL-view-backed model: lot counts grouped per creation day.
    _auto = False
    _columns = {
        'obj_num': fields.integer('# of Objects'),
        'name': fields.date('Created date', select=2),
        'month': fields.date('Month', select=1),
        'user_id': fields.many2one('res.users', 'User', select=1),
    }

    def init(self, cr):
        tools.drop_view_if_exists(cr, 'report_auction_object_date')
        cr.execute("""create or replace view report_auction_object_date as
            (select
               min(l.id) as id,
               to_char(l.create_date, 'YYYY-MM-DD') as name,
               to_char(l.create_date, 'YYYY-MM-01') as month,
               count(l.obj_num) as obj_num,
               l.create_uid as user_id
            from
                auction_lots l
            group by
                to_char(l.create_date, 'YYYY-MM-DD'),
                to_char(l.create_date, 'YYYY-MM-01'),
                l.create_uid
            )
        """)
report_auction_object_date()

class report_auction_adjudication(osv.osv):
    _name = "report.auction.adjudication"
    _description = "report_auction_adjudication"
    # SQL-view-backed model: total adjudication amount per auction date.
    _auto = False
    _columns = {
        'name': fields.many2one('auction.dates', 'Auction date', readonly=True, select=1),
        'state': fields.selection((('draft', 'Draft'), ('close', 'Closed')), 'Status', select=1),
        'adj_total': fields.float('Total Adjudication'),
        'date': fields.date('Date', readonly=True, select=1),
        'user_id': fields.many2one('res.users', 'User', select=1)
    }

    def init(self, cr):
        tools.drop_view_if_exists(cr, 'report_auction_adjudication')
        cr.execute("""
            create or replace view report_auction_adjudication as (
                select
                    l.id as id,
                    l.id as name,
                    sum(m.obj_price) as adj_total,
                    to_char(l.create_date, 'YYYY-MM-01') as date,
                    l.create_uid as user_id,
                    l.state
                from
                    auction_dates l ,auction_lots m
                where
                    m.auction_id=l.id
                group by
                    l.id,l.state,l.name,l.create_uid,to_char(l.create_date, 'YYYY-MM-01')
            )
        """)
report_auction_adjudication()

class report_object_encoded(osv.osv):
    _name = "report.object.encoded"
    _description = "Object encoded"
    # SQL-view-backed model: encoding statistics for priced lots only.
    _auto = False
    _columns = {
        'user_id': fields.many2one('res.users', 'User', select=True),
        'estimation': fields.float('Estimation', select=True),
        'date': fields.date('Create Date', required=True),
        'gross_revenue': fields.float('Gross revenue', readonly=True, select=True),
        'net_revenue': fields.float('Net revenue', readonly=True, select=True),
        'obj_margin': fields.float('Net margin', readonly=True, select=True),
        'obj_ret': fields.integer('# obj ret', readonly=True, select=True),
        'adj': fields.integer('Adj.', readonly=True, select=True),
        'obj_num': fields.integer('# of Encoded obj.', readonly=True, select=True),
        'state': fields.selection((('draft', 'Draft'), ('unsold', 'Unsold'), ('paid', 'Paid'), ('invoiced', 'Invoiced')), 'Status', required=True, select=1),
    }

    def init(self, cr):
        tools.drop_view_if_exists(cr, 'report_object_encoded')
        cr.execute('''create or replace view report_object_encoded  as
            (select
                min(al.id) as id,
                to_char(al.create_date, 'YYYY-MM-DD') as date,
                al.state as state,
                al.create_uid as user_id,
                sum((100*lot_est1)/obj_price) as estimation,
                (SELECT count(1) FROM auction_lots WHERE obj_ret>0) as obj_ret,
                SUM(al.gross_revenue) as "gross_revenue",
                SUM(al.net_revenue) as "net_revenue",
                SUM(al.net_revenue)/count(al.id) as "obj_margin",
                COUNT(al.product_id) as obj_num,
                SUM(al.obj_price) as "adj"
            from auction_lots al
            where al.obj_price>0
            group by to_char(al.create_date, 'YYYY-MM-DD'), al.state, al.create_uid)
        ''')
report_object_encoded()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
# Evergreen OWNERS file: maps file patterns in this directory to the GitHub
# teams whose approval is required for changes touching matching files.
# Later, more specific patterns add to (not replace) the catch-all "*" entry.
version: 2.0.0
filters:
  - "*":
      approvers:
        - 10gen/devprod-correctness
  - "/bazel*":
      approvers:
        - 10gen/devprod-build
  - "/compile*":
      approvers:
        - 10gen/devprod-build
  - "/failed_unittests_gather.sh":
      approvers:
        - 10gen/devprod-build
  - "/generate_clang_tidy_report.py":
      approvers:
        - 10gen/devprod-build
  - "/generate_evergreen_bazelrc.sh":
      approvers:
        - 10gen/devprod-build
  - "get_custom_build_promotion_expansions.sh":
      approvers:
        - 10gen/devprod-release-infrastructure
  - "get_custom_build_promotion_expansions_debug.sh":
      approvers:
        - 10gen/devprod-release-infrastructure
  - "upload_custom_build_to_cloud_env.sh":
      approvers:
        - 10gen/devprod-release-infrastructure
  - "/package*":
      approvers:
        - 10gen/devprod-build
  - "validate_compile_commands.py":
      approvers:
        - 10gen/devprod-build
  - "packager_crypt_py_run.sh":
      approvers:
        - 10gen/devprod-build
  - "perf-submission.sh":
      approvers:
        - 10gen/devprod-performance-infrastructure
  - "streams*":
      approvers:
        - 10gen/streams-engine
  - "selinux_*":
      approvers:
        - 10gen/server-security
  - "generate_sast_report.sh":
      approvers:
        - 10gen/devprod-release-infrastructure
  - "write_sast_report_env_file.sh":
      approvers:
        - 10gen/devprod-release-infrastructure
  - "generate_symbol_check_report.py":
      approvers:
        - 10gen/devprod-build
  - "verify_extension_visibility_test.sh":
      approvers:
        - 10gen/query-integration-extensions-api
unknown
github
https://github.com/mongodb/mongo
evergreen/OWNERS.yml
#include "test/jemalloc_test.h" #include "jemalloc/internal/ticker.h" TEST_BEGIN(test_ticker_tick) { #define NREPS 2 #define NTICKS 3 ticker_t ticker; int32_t i, j; ticker_init(&ticker, NTICKS); for (i = 0; i < NREPS; i++) { for (j = 0; j < NTICKS; j++) { expect_u_eq(ticker_read(&ticker), NTICKS - j, "Unexpected ticker value (i=%d, j=%d)", i, j); expect_false(ticker_tick(&ticker), "Unexpected ticker fire (i=%d, j=%d)", i, j); } expect_u32_eq(ticker_read(&ticker), 0, "Expected ticker depletion"); expect_true(ticker_tick(&ticker), "Expected ticker fire (i=%d)", i); expect_u32_eq(ticker_read(&ticker), NTICKS, "Expected ticker reset"); } #undef NTICKS } TEST_END TEST_BEGIN(test_ticker_ticks) { #define NTICKS 3 ticker_t ticker; ticker_init(&ticker, NTICKS); expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); expect_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire"); expect_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value"); expect_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire"); expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); expect_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire"); expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); #undef NTICKS } TEST_END TEST_BEGIN(test_ticker_copy) { #define NTICKS 3 ticker_t ta, tb; ticker_init(&ta, NTICKS); ticker_copy(&tb, &ta); expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); expect_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire"); expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); ticker_tick(&ta); ticker_copy(&tb, &ta); expect_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value"); expect_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire"); expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); #undef NTICKS } TEST_END TEST_BEGIN(test_ticker_geom) { const int32_t ticks = 100; const uint64_t niters = 100 * 1000; ticker_geom_t ticker; ticker_geom_init(&ticker, 
ticks); uint64_t total_ticks = 0; /* Just some random constant. */ uint64_t prng_state = 0x343219f93496db9fULL; for (uint64_t i = 0; i < niters; i++) { while(!ticker_geom_tick(&ticker, &prng_state)) { total_ticks++; } } /* * In fact, with this choice of random seed and the PRNG implementation * used at the time this was tested, total_ticks is 95.1% of the * expected ticks. */ expect_u64_ge(total_ticks , niters * ticks * 9 / 10, "Mean off by > 10%%"); expect_u64_le(total_ticks , niters * ticks * 11 / 10, "Mean off by > 10%%"); } TEST_END int main(void) { return test( test_ticker_tick, test_ticker_ticks, test_ticker_copy, test_ticker_geom); }
c
github
https://github.com/redis/redis
deps/jemalloc/test/unit/ticker.c
''' Created on Dec 1, 2011 @author: t4aalton ''' from socialDevices.device import precondition, body, deviceInterface import time from socialDevices.misc import TriggeringEvent class CalendarEvent: def __init__(self, eid, subject, location, time): self.eid = eid self.subject = subject self.location = location self.time = time class EventApproaching(TriggeringEvent): def __init__(self, f, t): TriggeringEvent.__init__(self, f) #super(EventApproaching, self).__init__(f) self.eid = t @deviceInterface class CalendarSource(): def __init__(self): currentTime = time.time() self.events = [CalendarEvent(0, 'weekly meeting', 'H5', currentTime - 1*600), CalendarEvent(1, 'super meeting', 'Rower', currentTime + 10 * 600), CalendarEvent(3, 'nice meeting', 'home', currentTime + 15 * 600)] @body def getCalendarEvent(self, eid): return self.events[eid] @precondition def eventApproaching(self, eid, eid): currentTime = time.time() event = self.events[eid] return event.time >= currentTime and event.time <= currentTime + 10*600 # currentTime = time.time() # for event in self.events: # if event.time >= currentTime and event.time <= currentTime + 10*600: # return event.eid # return -1
unknown
codeparrot/codeparrot-clean
#include "ruby.h" #define init(n) {void Init_##n(VALUE klass); Init_##n(klass);} static VALUE sym_find(VALUE dummy, VALUE sym) { return rb_check_symbol(&sym); } static VALUE sym_pinneddown_p(VALUE dummy, VALUE sym) { ID id = rb_check_id(&sym); if (!id) return Qnil; #ifdef ULL2NUM return ULL2NUM(id); #else return ULONG2NUM(id); #endif } static VALUE sym_iv_get(VALUE dummy, VALUE obj, VALUE name) { const char *n = StringValueCStr(name); return rb_iv_get(obj, n); } void Init_symbol(void) { VALUE mBug = rb_define_module("Bug"); VALUE klass = rb_define_class_under(mBug, "Symbol", rb_cSymbol); rb_define_singleton_method(klass, "find", sym_find, 1); rb_define_singleton_method(klass, "pinneddown?", sym_pinneddown_p, 1); rb_define_singleton_method(klass, "iv_get", sym_iv_get, 2); TEST_INIT_FUNCS(init); }
c
github
https://github.com/ruby/ruby
ext/-test-/symbol/init.c
from cvxopt import matrix, spmatrix from chompack.misc import symmetrize from itertools import chain def maxchord(A, ve = None): """ Maximal chordal subgraph of sparsity graph. Returns a lower triangular sparse matrix which is the projection of :math:`A` on a maximal chordal subgraph and a perfect elimination order :math:`p`. Only the lower triangular part of :math:`A` is accessed. The optional argument `ve` is the index of the last vertex to be eliminated (the default value is `n-1`). If :math:`A` is chordal, then the matrix returned is equal to :math:`A`. :param A: :py:class:`spmatrix` :param ve: integer between 0 and `A.size[0]`-1 (optional) .. seealso:: P. M. Dearing, D. R. Shier, D. D. Warner, `Maximal chordal subgraphs <http://dx.doi.org/10.1016/0166-218X(88)90075-3>`_, Discrete Applied Mathematics, 20:3, 1988, pp. 181-190. """ n = A.size[0] assert A.size[1] == n, "A must be a square matrix" assert type(A) is spmatrix, "A must be a sparse matrix" if ve is None: ve = n-1 else: assert type(ve) is int and 0<=ve<n,\ "ve must be an integer between 0 and A.size[0]-1" As = symmetrize(A) cp,ri,val = As.CCS # permutation vector p = matrix(0,(n,1)) # weight array w = matrix(0,(n,1)) max_w = 0 S = [list(range(ve))+list(range(ve+1,n))+[ve]] + [[] for i in range(n-1)] C = [set() for i in range(n)] E = [[] for i in range(n)] # edge list V = [[] for i in range(n)] # num. values for i in range(n-1,-1,-1): # select next node to number while True: if len(S[max_w]) > 0: v = S[max_w].pop() if w[v] >= 0: break else: max_w -= 1 p[i] = v w[v] = -1 # set w[v] = -1 to mark that node v has been numbered # loop over unnumbered neighbors of node v for ii in range(cp[v],cp[v+1]): u = ri[ii] d = val[ii] if w[u] >= 0: if C[u].issubset(C[v]): C[u].update([v]) w[u] += 1 S[w[u]].append(u) # bump up u to S[w[u]] max_w = max(max_w,w[u]) # update max deg. E[min(u,v)].append(max(u,v)) V[min(u,v)].append(d) elif u == v: E[u].append(u) V[u].append(d) # build adjacency matrix of reordered max. 
chordal subgraph Am = spmatrix([d for d in chain.from_iterable(V)],[i for i in chain.from_iterable(E)],\ [i for i in chain.from_iterable([len(Ej)*[j] for j,Ej in enumerate(E)])],(n,n)) return Am,p
unknown
codeparrot/codeparrot-clean
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fake LDAP server for test harness. This class does very little error checking, and knows nothing about ldap class definitions. It implements the minimum emulation of the python ldap library to work with nova. """ import fnmatch from nova.openstack.common import jsonutils class Store(object): def __init__(self): if hasattr(self.__class__, '_instance'): raise Exception(_('Attempted to instantiate singleton')) @classmethod def instance(cls): if not hasattr(cls, '_instance'): cls._instance = _StorageDict() return cls._instance class _StorageDict(dict): def keys(self, pat=None): ret = super(_StorageDict, self).keys() if pat is not None: ret = fnmatch.filter(ret, pat) return ret def delete(self, key): try: del self[key] except KeyError: pass def flushdb(self): self.clear() def hgetall(self, key): """Returns the hash for the given key; creates the hash if the key doesn't exist.""" try: return self[key] except KeyError: self[key] = {} return self[key] def hget(self, key, field): hashdict = self.hgetall(key) try: return hashdict[field] except KeyError: hashdict[field] = {} return hashdict[field] def hset(self, key, field, val): hashdict = self.hgetall(key) hashdict[field] = val def hmset(self, key, value_dict): hashdict = self.hgetall(key) 
for field, val in value_dict.items(): hashdict[field] = val SCOPE_BASE = 0 SCOPE_ONELEVEL = 1 # Not implemented SCOPE_SUBTREE = 2 MOD_ADD = 0 MOD_DELETE = 1 MOD_REPLACE = 2 class NO_SUCH_OBJECT(Exception): # pylint: disable=C0103 """Duplicate exception class from real LDAP module.""" pass class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103 """Duplicate exception class from real LDAP module.""" pass class SERVER_DOWN(Exception): # pylint: disable=C0103 """Duplicate exception class from real LDAP module.""" pass def initialize(_uri): """Opens a fake connection with an LDAP server.""" return FakeLDAP() def _match_query(query, attrs): """Match an ldap query to an attribute dictionary. The characters &, |, and ! are supported in the query. No syntax checking is performed, so malformed queries will not work correctly. """ # cut off the parentheses inner = query[1:-1] if inner.startswith('&'): # cut off the & l, r = _paren_groups(inner[1:]) return _match_query(l, attrs) and _match_query(r, attrs) if inner.startswith('|'): # cut off the | l, r = _paren_groups(inner[1:]) return _match_query(l, attrs) or _match_query(r, attrs) if inner.startswith('!'): # cut off the ! and the nested parentheses return not _match_query(query[2:-1], attrs) (k, _sep, v) = inner.partition('=') return _match(k, v, attrs) def _paren_groups(source): """Split a string into parenthesized groups.""" count = 0 start = 0 result = [] for pos in xrange(len(source)): if source[pos] == '(': if count == 0: start = pos count += 1 if source[pos] == ')': count -= 1 if count == 0: result.append(source[start:pos + 1]) return result def _match(key, value, attrs): """Match a given key and value against an attribute list.""" if key not in attrs: return False # This is a wild card search. Implemented as all or nothing for now. 
if value == "*": return True if key != "objectclass": return value in attrs[key] # it is an objectclass check, so check subclasses values = _subs(value) for v in values: if v in attrs[key]: return True return False def _subs(value): """Returns a list of subclass strings. The strings represent the ldap object class plus any subclasses that inherit from it. Fakeldap doesn't know about the ldap object structure, so subclasses need to be defined manually in the dictionary below. """ subs = {'groupOfNames': ['novaProject']} if value in subs: return [value] + subs[value] return [value] def _from_json(encoded): """Convert attribute values from json representation. Args: encoded -- a json encoded string Returns a list of strings """ return [str(x) for x in jsonutils.loads(encoded)] def _to_json(unencoded): """Convert attribute values into json representation. Args: unencoded -- an unencoded string or list of strings. If it is a single string, it will be converted into a list. Returns a json string """ return jsonutils.dumps(list(unencoded)) server_fail = False class FakeLDAP(object): """Fake LDAP connection.""" def simple_bind_s(self, dn, password): """This method is ignored, but provided for compatibility.""" if server_fail: raise SERVER_DOWN pass def unbind_s(self): """This method is ignored, but provided for compatibility.""" if server_fail: raise SERVER_DOWN pass def add_s(self, dn, attr): """Add an object with the specified attributes at dn.""" if server_fail: raise SERVER_DOWN key = "%s%s" % (self.__prefix, dn) value_dict = dict([(k, _to_json(v)) for k, v in attr]) Store.instance().hmset(key, value_dict) def delete_s(self, dn): """Remove the ldap object at specified dn.""" if server_fail: raise SERVER_DOWN Store.instance().delete("%s%s" % (self.__prefix, dn)) def modify_s(self, dn, attrs): """Modify the object at dn using the attribute list. 
:param dn: a dn :param attrs: a list of tuples in the following form:: ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ if server_fail: raise SERVER_DOWN store = Store.instance() key = "%s%s" % (self.__prefix, dn) for cmd, k, v in attrs: values = _from_json(store.hget(key, k)) if cmd == MOD_ADD: values.append(v) elif cmd == MOD_REPLACE: values = [v] else: values.remove(v) values = store.hset(key, k, _to_json(values)) def modrdn_s(self, dn, newrdn): oldobj = self.search_s(dn, SCOPE_BASE) if not oldobj: raise NO_SUCH_OBJECT() newdn = "%s,%s" % (newrdn, dn.partition(',')[2]) newattrs = oldobj[0][1] modlist = [] for attrtype in newattrs.keys(): modlist.append((attrtype, newattrs[attrtype])) self.add_s(newdn, modlist) self.delete_s(dn) def search_s(self, dn, scope, query=None, fields=None): """Search for all matching objects under dn using the query. Args: dn -- dn to search under scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported query -- query to filter objects by fields -- fields to return. Returns all fields if not specified """ if server_fail: raise SERVER_DOWN if scope != SCOPE_BASE and scope != SCOPE_SUBTREE: raise NotImplementedError(str(scope)) store = Store.instance() if scope == SCOPE_BASE: pattern = "%s%s" % (self.__prefix, dn) keys = store.keys(pattern) else: keys = store.keys("%s*%s" % (self.__prefix, dn)) if not keys: raise NO_SUCH_OBJECT() objects = [] for key in keys: # get the attributes from the store attrs = store.hgetall(key) # turn the values from the store into lists # pylint: disable=E1103 attrs = dict([(k, _from_json(v)) for k, v in attrs.iteritems()]) # filter the objects by query if not query or _match_query(query, attrs): # filter the attributes by fields attrs = dict([(k, v) for k, v in attrs.iteritems() if not fields or k in fields]) objects.append((key[len(self.__prefix):], attrs)) return objects @property def __prefix(self): # pylint: disable=R0201 """Get the prefix to use for all keys.""" return 'ldap:'
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python from twitter import json, TwitterError # TwitterError not used class UserStatus(object): """A class representing the UserStatus structure used by the twitter API. The UserStatus structure exposes the following properties: userstatus.name userstatus.id_str userstatus.id userstatus.screen_name userstatus.following userstatus.followed_by """ def __init__(self, **kwargs): """An object to hold a Twitter user status message. This class is normally instantiated by the twitter.Api class and returned in a sequence. Args: id: The unique id of this status message. [Optional] id_str: The string version of the unique id of this status message. [Optional] """ param_defaults = { 'name': None, 'id': None, 'id_str': None, 'screen_name': None, 'following': None, 'followed_by': None} for (param, default) in param_defaults.iteritems(): setattr(self, param, kwargs.get(param, default)) def GetFollowedBy(self): return self.followed_by or False def GetFollowing(self): return self.following or False def GetScreenName(self): return self.screen_name def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.name == other.name and \ self.id == other.id and \ self.id_str == other.id_str and \ self.screen_name == other.screen_name and \ self.following == other.following and \ self.followed_by == other.followed_by except AttributeError: return False def __str__(self): """A string representation of this twitter.UserStatus instance. The return value is the same as the JSON string representation. Returns: A string representation of this twitter.UserStatus instance. """ return self.AsJsonString() def AsJsonString(self): """A JSON string representation of this twitter.UserStatus instance. Returns: A JSON string representation of this twitter.UserStatus instance """ return json.dumps(self.AsDict(), sort_keys=True) def AsDict(self): """A dict representation of this twitter.UserStatus instance. 
The return value uses the same key names as the JSON representation. Return: A dict representing this twitter.UserStatus instance """ data = {} if self.name: data['name'] = self.name if self.id: data['id'] = self.id if self.id_str: data['id_str'] = self.id_str if self.screen_name: data['screen_name'] = self.screen_name if self.following: data['following'] = self.following if self.followed_by: data['followed_by'] = self.followed_by return data @staticmethod def NewFromJsonDict(data): """Create a new instance based on a JSON dict. Args: data: A JSON dict, as converted from the JSON in the twitter API Returns: A twitter.UserStatus instance """ following = None followed_by = None if 'connections' in data: if 'following' in data['connections']: following = True if 'followed_by' in data['connections']: followed_by = True return UserStatus(name=data.get('name', None), id=data.get('id', None), id_str=data.get('id_str', None), screen_name=data.get('screen_name', None), following=following, followed_by=followed_by) class User(object): """A class representing the User structure used by the twitter API. 
The User structure exposes the following properties: user.id user.name user.screen_name user.location user.description user.default_profile user.default_profile_image user.profile_image_url user.profile_background_tile user.profile_background_image_url user.profile_banner_url user.profile_sidebar_fill_color user.profile_background_color user.profile_link_color user.profile_text_color user.protected user.utc_offset user.time_zone user.url user.status user.statuses_count user.followers_count user.friends_count user.favourites_count user.geo_enabled user.verified user.lang user.notifications user.contributors_enabled user.created_at user.listed_count """ def __init__(self, **kwargs): param_defaults = { 'id': None, 'name': None, 'screen_name': None, 'location': None, 'description': None, 'default_profile': None, 'default_profile_image': None, 'profile_image_url': None, 'profile_background_tile': None, 'profile_background_image_url': None, 'profile_banner_url': None, 'profile_sidebar_fill_color': None, 'profile_background_color': None, 'profile_link_color': None, 'profile_text_color': None, 'protected': None, 'utc_offset': None, 'time_zone': None, 'followers_count': None, 'friends_count': None, 'statuses_count': None, 'favourites_count': None, 'url': None, 'status': None, 'geo_enabled': None, 'verified': None, 'lang': None, 'notifications': None, 'contributors_enabled': None, 'created_at': None, 'listed_count': None} for (param, default) in param_defaults.iteritems(): setattr(self, param, kwargs.get(param, default)) def GetId(self): """Get the unique id of this user. Returns: The unique id of this user """ return self._id def SetId(self, id): """Set the unique id of this user. Args: id: The unique id of this user. """ self._id = id id = property(GetId, SetId, doc='The unique id of this user.') def GetName(self): """Get the real name of this user. Returns: The real name of this user """ return self._name def SetName(self, name): """Set the real name of this user. 
Args: name: The real name of this user """ self._name = name name = property(GetName, SetName, doc='The real name of this user.') def GetScreenName(self): """Get the short twitter name of this user. Returns: The short twitter name of this user """ return self._screen_name def SetScreenName(self, screen_name): """Set the short twitter name of this user. Args: screen_name: the short twitter name of this user """ self._screen_name = screen_name screen_name = property(GetScreenName, SetScreenName, doc='The short twitter name of this user.') def GetLocation(self): """Get the geographic location of this user. Returns: The geographic location of this user """ return self._location def SetLocation(self, location): """Set the geographic location of this user. Args: location: The geographic location of this user """ self._location = location location = property(GetLocation, SetLocation, doc='The geographic location of this user.') def GetDescription(self): """Get the short text description of this user. Returns: The short text description of this user """ return self._description def SetDescription(self, description): """Set the short text description of this user. Args: description: The short text description of this user """ self._description = description description = property(GetDescription, SetDescription, doc='The short text description of this user.') def GetUrl(self): """Get the homepage url of this user. Returns: The homepage url of this user """ return self._url def SetUrl(self, url): """Set the homepage url of this user. Args: url: The homepage url of this user """ self._url = url url = property(GetUrl, SetUrl, doc='The homepage url of this user.') def GetProfileImageUrl(self): """Get the url of the thumbnail of this user. Returns: The url of the thumbnail of this user """ return self._profile_image_url def SetProfileImageUrl(self, profile_image_url): """Set the url of the thumbnail of this user. 
Args: profile_image_url: The url of the thumbnail of this user """ self._profile_image_url = profile_image_url profile_image_url = property(GetProfileImageUrl, SetProfileImageUrl, doc='The url of the thumbnail of this user.') def GetProfileBackgroundTile(self): """Boolean for whether to tile the profile background image. Returns: True if the background is to be tiled, False if not, None if unset. """ return self._profile_background_tile def SetProfileBackgroundTile(self, profile_background_tile): """Set the boolean flag for whether to tile the profile background image. Args: profile_background_tile: Boolean flag for whether to tile or not. """ self._profile_background_tile = profile_background_tile profile_background_tile = property(GetProfileBackgroundTile, SetProfileBackgroundTile, doc='Boolean for whether to tile the background image.') def GetProfileBackgroundImageUrl(self): return self._profile_background_image_url def SetProfileBackgroundImageUrl(self, profile_background_image_url): self._profile_background_image_url = profile_background_image_url profile_background_image_url = property(GetProfileBackgroundImageUrl, SetProfileBackgroundImageUrl, doc='The url of the profile background of this user.') def GetProfileBannerUrl(self): return self._profile_banner_url def SetProfileBannerUrl(self, profile_banner_url): self._profile_banner_url = profile_banner_url profile_banner_url = property(GetProfileBannerUrl, SetProfileBannerUrl, doc='The url of the profile banner of this user.') def GetProfileSidebarFillColor(self): return self._profile_sidebar_fill_color def SetProfileSidebarFillColor(self, profile_sidebar_fill_color): self._profile_sidebar_fill_color = profile_sidebar_fill_color profile_sidebar_fill_color = property(GetProfileSidebarFillColor, SetProfileSidebarFillColor) def GetProfileBackgroundColor(self): return self._profile_background_color def SetProfileBackgroundColor(self, profile_background_color): self._profile_background_color = 
profile_background_color profile_background_color = property(GetProfileBackgroundColor, SetProfileBackgroundColor) def GetProfileLinkColor(self): return self._profile_link_color def SetProfileLinkColor(self, profile_link_color): self._profile_link_color = profile_link_color profile_link_color = property(GetProfileLinkColor, SetProfileLinkColor) def GetProfileTextColor(self): return self._profile_text_color def SetProfileTextColor(self, profile_text_color): self._profile_text_color = profile_text_color profile_text_color = property(GetProfileTextColor, SetProfileTextColor) def GetProtected(self): return self._protected def SetProtected(self, protected): self._protected = protected protected = property(GetProtected, SetProtected) def GetUtcOffset(self): return self._utc_offset def SetUtcOffset(self, utc_offset): self._utc_offset = utc_offset utc_offset = property(GetUtcOffset, SetUtcOffset) def GetTimeZone(self): """Returns the current time zone string for the user. Returns: The descriptive time zone string for the user. """ return self._time_zone def SetTimeZone(self, time_zone): """Sets the user's time zone string. Args: time_zone: The descriptive time zone to assign for the user. """ self._time_zone = time_zone time_zone = property(GetTimeZone, SetTimeZone) def GetStatus(self): """Get the latest twitter.Status of this user. Returns: The latest twitter.Status of this user """ return self._status def SetStatus(self, status): """Set the latest twitter.Status of this user. Args: status: The latest twitter.Status of this user """ self._status = status status = property(GetStatus, SetStatus, doc='The latest twitter.Status of this user.') def GetFriendsCount(self): """Get the friend count for this user. Returns: The number of users this user has befriended. """ return self._friends_count def SetFriendsCount(self, count): """Set the friend count for this user. Args: count: The number of users this user has befriended. 
""" self._friends_count = count friends_count = property(GetFriendsCount, SetFriendsCount, doc='The number of friends for this user.') def GetListedCount(self): """Get the listed count for this user. Returns: The number of lists this user belongs to. """ return self._listed_count def SetListedCount(self, count): """Set the listed count for this user. Args: count: The number of lists this user belongs to. """ self._listed_count = count listed_count = property(GetListedCount, SetListedCount, doc='The number of lists this user belongs to.') def GetFollowersCount(self): """Get the follower count for this user. Returns: The number of users following this user. """ return self._followers_count def SetFollowersCount(self, count): """Set the follower count for this user. Args: count: The number of users following this user. """ self._followers_count = count followers_count = property(GetFollowersCount, SetFollowersCount, doc='The number of users following this user.') def GetStatusesCount(self): """Get the number of status updates for this user. Returns: The number of status updates for this user. """ return self._statuses_count def SetStatusesCount(self, count): """Set the status update count for this user. Args: count: The number of updates for this user. """ self._statuses_count = count statuses_count = property(GetStatusesCount, SetStatusesCount, doc='The number of updates for this user.') def GetFavouritesCount(self): """Get the number of favourites for this user. Returns: The number of favourites for this user. """ return self._favourites_count def SetFavouritesCount(self, count): """Set the favourite count for this user. Args: count: The number of favourites for this user. """ self._favourites_count = count favourites_count = property(GetFavouritesCount, SetFavouritesCount, doc='The number of favourites for this user.') def GetGeoEnabled(self): """Get the setting of geo_enabled for this user. 
Returns: True/False if Geo tagging is enabled """ return self._geo_enabled def SetGeoEnabled(self, geo_enabled): """Set the latest twitter.geo_enabled of this user. Args: geo_enabled: True/False if Geo tagging is to be enabled """ self._geo_enabled = geo_enabled geo_enabled = property(GetGeoEnabled, SetGeoEnabled, doc='The value of twitter.geo_enabled for this user.') def GetVerified(self): """Get the setting of verified for this user. Returns: True/False if user is a verified account """ return self._verified def SetVerified(self, verified): """Set twitter.verified for this user. Args: verified: True/False if user is a verified account """ self._verified = verified verified = property(GetVerified, SetVerified, doc='The value of twitter.verified for this user.') def GetLang(self): """Get the setting of lang for this user. Returns: language code of the user """ return self._lang def SetLang(self, lang): """Set twitter.lang for this user. Args: lang: language code for the user """ self._lang = lang lang = property(GetLang, SetLang, doc='The value of twitter.lang for this user.') def GetNotifications(self): """Get the setting of notifications for this user. Returns: True/False for the notifications setting of the user """ return self._notifications def SetNotifications(self, notifications): """Set twitter.notifications for this user. Args: notifications: True/False notifications setting for the user """ self._notifications = notifications notifications = property(GetNotifications, SetNotifications, doc='The value of twitter.notifications for this user.') def GetContributorsEnabled(self): """Get the setting of contributors_enabled for this user. Returns: True/False contributors_enabled of the user """ return self._contributors_enabled def SetContributorsEnabled(self, contributors_enabled): """Set twitter.contributors_enabled for this user. 
Args: contributors_enabled: True/False contributors_enabled setting for the user """ self._contributors_enabled = contributors_enabled contributors_enabled = property(GetContributorsEnabled, SetContributorsEnabled, doc='The value of twitter.contributors_enabled for this user.') def GetCreatedAt(self): """Get the setting of created_at for this user. Returns: created_at value of the user """ return self._created_at def SetCreatedAt(self, created_at): """Set twitter.created_at for this user. Args: created_at: created_at value for the user """ self._created_at = created_at created_at = property(GetCreatedAt, SetCreatedAt, doc='The value of twitter.created_at for this user.') def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.id == other.id and \ self.name == other.name and \ self.screen_name == other.screen_name and \ self.location == other.location and \ self.description == other.description and \ self.default_profile == other.default_profile and \ self.default_profile_image == other.default_profile_image and \ self.profile_image_url == other.profile_image_url and \ self.profile_background_tile == other.profile_background_tile and \ self.profile_background_image_url == other.profile_background_image_url and \ self.profile_banner_url == other.profile_banner_url and \ self.profile_sidebar_fill_color == other.profile_sidebar_fill_color and \ self.profile_background_color == other.profile_background_color and \ self.profile_link_color == other.profile_link_color and \ self.profile_text_color == other.profile_text_color and \ self.protected == other.protected and \ self.utc_offset == other.utc_offset and \ self.time_zone == other.time_zone and \ self.url == other.url and \ self.statuses_count == other.statuses_count and \ self.followers_count == other.followers_count and \ self.favourites_count == other.favourites_count and \ self.friends_count == other.friends_count and \ self.status == other.status and \ 
self.geo_enabled == other.geo_enabled and \ self.verified == other.verified and \ self.lang == other.lang and \ self.notifications == other.notifications and \ self.contributors_enabled == other.contributors_enabled and \ self.created_at == other.created_at and \ self.listed_count == other.listed_count except AttributeError: return False def __str__(self): """A string representation of this twitter.User instance. The return value is the same as the JSON string representation. Returns: A string representation of this twitter.User instance. """ return self.AsJsonString() def AsJsonString(self): """A JSON string representation of this twitter.User instance. Returns: A JSON string representation of this twitter.User instance """ return json.dumps(self.AsDict(), sort_keys=True) def AsDict(self): """A dict representation of this twitter.User instance. The return value uses the same key names as the JSON representation. Return: A dict representing this twitter.User instance """ data = {} if self.id: data['id'] = self.id if self.name: data['name'] = self.name if self.screen_name: data['screen_name'] = self.screen_name if self.location: data['location'] = self.location if self.description: data['description'] = self.description if self.default_profile: data['default_profile'] = self.default_profile if self.default_profile_image: data['default_profile_image'] = self.default_profile_image if self.profile_image_url: data['profile_image_url'] = self.profile_image_url if self.profile_background_tile is not None: data['profile_background_tile'] = self.profile_background_tile if self.profile_background_image_url: data['profile_background_image_url'] = self.profile_background_image_url if self.profile_banner_url: data['profile_banner_url'] = self.profile_banner_url if self.profile_sidebar_fill_color: data['profile_sidebar_fill_color'] = self.profile_sidebar_fill_color if self.profile_background_color: data['profile_background_color'] = self.profile_background_color if 
self.profile_link_color: data['profile_link_color'] = self.profile_link_color if self.profile_text_color: data['profile_text_color'] = self.profile_text_color if self.protected is not None: data['protected'] = self.protected if self.utc_offset: data['utc_offset'] = self.utc_offset if self.time_zone: data['time_zone'] = self.time_zone if self.url: data['url'] = self.url if self.status: data['status'] = self.status.AsDict() if self.friends_count: data['friends_count'] = self.friends_count if self.followers_count: data['followers_count'] = self.followers_count if self.statuses_count: data['statuses_count'] = self.statuses_count if self.favourites_count: data['favourites_count'] = self.favourites_count if self.geo_enabled: data['geo_enabled'] = self.geo_enabled if self.verified: data['verified'] = self.verified if self.lang: data['lang'] = self.lang if self.notifications: data['notifications'] = self.notifications if self.contributors_enabled: data['contributors_enabled'] = self.contributors_enabled if self.created_at: data['created_at'] = self.created_at if self.listed_count: data['listed_count'] = self.listed_count return data @staticmethod def NewFromJsonDict(data): """Create a new instance based on a JSON dict. 
Args: data: A JSON dict, as converted from the JSON in the twitter API Returns: A twitter.User instance """ if 'status' in data: from twitter import Status # Have to do the import here to prevent cyclic imports in the __init__.py # file status = Status.NewFromJsonDict(data['status']) else: status = None return User(id=data.get('id', None), name=data.get('name', None), screen_name=data.get('screen_name', None), location=data.get('location', None), description=data.get('description', None), statuses_count=data.get('statuses_count', None), followers_count=data.get('followers_count', None), favourites_count=data.get('favourites_count', None), default_profile=data.get('default_profile', None), default_profile_image=data.get('default_profile_image', None), friends_count=data.get('friends_count', None), profile_image_url=data.get('profile_image_url_https', data.get('profile_image_url', None)), profile_background_tile=data.get('profile_background_tile', None), profile_background_image_url=data.get('profile_background_image_url', None), profile_banner_url=data.get('profile_banner_url', None), profile_sidebar_fill_color=data.get('profile_sidebar_fill_color', None), profile_background_color=data.get('profile_background_color', None), profile_link_color=data.get('profile_link_color', None), profile_text_color=data.get('profile_text_color', None), protected=data.get('protected', None), utc_offset=data.get('utc_offset', None), time_zone=data.get('time_zone', None), url=data.get('url', None), status=status, geo_enabled=data.get('geo_enabled', None), verified=data.get('verified', None), lang=data.get('lang', None), notifications=data.get('notifications', None), contributors_enabled=data.get('contributors_enabled', None), created_at=data.get('created_at', None), listed_count=data.get('listed_count', None))
unknown
codeparrot/codeparrot-clean
{ "kind": "Dashboard", "apiVersion": "dashboard.grafana.app/v2beta1", "metadata": { "name": "v0alpha1.mysql_unittest.v42" }, "spec": { "annotations": [ { "kind": "AnnotationQuery", "spec": { "query": { "kind": "DataQuery", "group": "", "version": "v0", "spec": {} }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations \u0026 Alerts", "builtIn": true, "legacyOptions": { "type": "dashboard" } } }, { "kind": "AnnotationQuery", "spec": { "query": { "kind": "DataQuery", "group": "", "version": "v0", "spec": {} }, "enable": false, "hide": false, "iconColor": "#6ed0e0", "name": "Deploys", "legacyOptions": { "limit": 100, "rawQuery": "SELECT\n time_sec,\n description as text,\n tags\n FROM event\n WHERE $__unixEpochFilter(time_sec) AND tags='deploy'\n ORDER BY 1 ASC\n ", "showIn": 0, "tags": [], "type": "tags" } } }, { "kind": "AnnotationQuery", "spec": { "query": { "kind": "DataQuery", "group": "", "version": "v0", "spec": {} }, "enable": false, "hide": false, "iconColor": "rgba(255, 96, 96, 1)", "name": "Tickets", "legacyOptions": { "limit": 100, "rawQuery": "SELECT\n time_sec as time,\n description as text,\n tags\n FROM event\n WHERE $__unixEpochFilter(time_sec) AND tags='ticket'\n ORDER BY 1 ASC\n ", "showIn": 0, "tags": [], "type": "tags" } } }, { "kind": "AnnotationQuery", "spec": { "query": { "kind": "DataQuery", "group": "", "version": "v0", "spec": {} }, "enable": false, "hide": false, "iconColor": "#7eb26d", "name": "Metric Values timeEpoch macro", "legacyOptions": { "limit": 100, "rawQuery": "SELECT \n $__timeEpoch(time), \n measurement as text, \n '' as tags\nFROM\n metric_values \nWHERE\n $__timeFilter(time)\nORDER BY 1", "showIn": 0, "tags": [], "type": "tags" } } }, { "kind": "AnnotationQuery", "spec": { "query": { "kind": "DataQuery", "group": "", "version": "v0", "spec": {} }, "enable": false, "hide": false, "iconColor": "#1f78c1", "name": "Metric Values native time", "legacyOptions": { "limit": 100, "rawQuery": "SELECT 
\n time, \n measurement as text, \n '' as tags\nFROM\n metric_values \nWHERE\n $__timeFilter(time)\nORDER BY 1", "showIn": 0, "tags": [], "type": "tags" } } } ], "cursorSync": "Off", "description": "Run the mysql unit tests to generate the data backing this dashboard", "editable": true, "elements": { "panel-10": { "kind": "Panel", "spec": { "id": 10, "title": "timeGroup macro 5m with fill(10.0)", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeGroupAlias(time, '5m', 10.0), avg(value) as value FROM metric WHERE $__timeFilter(time) GROUP BY 1 ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": true, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-12": { "kind": "Panel", "spec": { "id": 12, "title": "Metrics - timeGroup macro $summarize with fill(NULL)", "description": "", 
"links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeGroupAlias(time, '$summarize', NULL), sum(value) as value FROM metric WHERE $__timeFilter(time) GROUP BY 1 ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": false, "linewidth": 2, "nullPointMode": "null as zero", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-13": { "kind": "Panel", "spec": { "id": 13, "title": "Metrics - timeGroup macro $summarize with fill(100.0)", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeGroupAlias(time, '$summarize', 100.0), sum(value) as value FROM metric WHERE $__timeFilter(time) GROUP BY 1 ORDER BY 
1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": false, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-14": { "kind": "Panel", "spec": { "id": 14, "title": "Multiple series with metric column - series mode", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__time(time), measurement as metric, valueOne, valueTwo FROM metric_values WHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "bargauge", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 1, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": 
true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": false, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], "tooltip": { "shared": false, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "series", "show": true, "values": [ "total" ] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-15": { "kind": "Panel", "spec": { "id": 15, "title": "Multiple series without metric column - series mode", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeEpoch(time), valueOne, valueTwo FROM metric_values\nWHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "bargauge", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 1, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": false, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], "tooltip": { "shared": false, "sort": 0, 
"value_type": "individual" }, "xaxis": { "mode": "series", "show": true, "values": [ "total" ] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-16": { "kind": "Panel", "spec": { "id": 16, "title": "Metrics - timeGroup macro $summarize without fill", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeGroupAlias(time, '$summarize'), avg(value) as value FROM metric WHERE $__timeFilter(time) GROUP BY 1 ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": false, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-17": { "kind": "Panel", "spec": { "id": 17, "title": "Multiple series with metric column - stacked 
percent", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__time(time), measurement as metric, valueOne, valueTwo FROM metric_values WHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": false, "hideZero": false, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": true, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-18": { "kind": "Panel", "spec": { "id": 18, "title": "Multiple series without metric column - stacked", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT 
$__timeEpoch(time), valueOne, valueTwo FROM metric_values\nWHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-19": { "kind": "Panel", "spec": { "id": 19, "title": "Multiple series with metric column - stacked", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__time(time), measurement as metric, valueOne, valueTwo FROM metric_values WHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", 
"originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": false, "hideZero": false, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-2": { "kind": "Panel", "spec": { "id": 2, "title": "Data types", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "table", "rawSql": "SELECT * from mysql_types" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "table", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "table-old", "originalOptions": { "columns": [], "fontSize": "100%", "scroll": true, "showHeader": true, "sort": { "col": 0, "desc": true }, "styles": [ { "alias": "", "align": "auto", "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "decimals": 2, "pattern": "/.*/", "thresholds": [], "type": "string", "unit": "short" } ], "transform": "table" } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-20": { "kind": "Panel", "spec": { "id": 
20, "title": "Multiple series without metric column - stacked percent", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeEpoch(time), valueOne, valueTwo FROM metric_values\nWHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": true, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-21": { "kind": "Panel", "spec": { "id": 21, "title": "Multiple series with metric column - histogram stacked", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": 
"SELECT $__time(time), measurement as metric, valueOne, valueTwo FROM metric_values WHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "histogram", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 1, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": false, "total": true, "values": true }, "lines": false, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "buckets": 20, "mode": "histogram", "show": true, "values": [ "current" ] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-22": { "kind": "Panel", "spec": { "id": 22, "title": "Multiple series without metric column - histogram", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeEpoch(time), valueOne, valueTwo FROM metric_values\nWHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "histogram", "version": "", "spec": { "options": { 
"__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 1, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": false, "total": true, "values": true }, "lines": false, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "buckets": 100, "mode": "histogram", "show": true, "values": [ "total" ] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-23": { "kind": "Panel", "spec": { "id": 23, "title": "Multiple series with metric column - histogram stacked percent", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__time(time), measurement as metric, valueOne, valueTwo FROM metric_values WHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "histogram", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 1, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": false, "total": true, "values": true }, "lines": 
false, "linewidth": 1, "nullPointMode": "null", "percentage": true, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "buckets": 20, "mode": "histogram", "show": true, "values": [ "current" ] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-24": { "kind": "Panel", "spec": { "id": 24, "title": "Multiple series without metric column - histogram stacked percent", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeEpoch(time), valueOne, valueTwo FROM metric_values\nWHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "histogram", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 1, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": false, "total": true, "values": true }, "lines": false, "linewidth": 1, "nullPointMode": "null", "percentage": true, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "buckets": 20, "mode": 
"histogram", "show": true, "values": [ "total" ] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-25": { "kind": "Panel", "spec": { "id": 25, "title": "Multiple series with metric column - histogram", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__time(time), measurement as metric, valueOne, valueTwo FROM metric_values WHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "histogram", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 1, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": false, "total": true, "values": true }, "lines": false, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "buckets": 50, "mode": "histogram", "show": true, "values": [ "current" ] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-26": { "kind": "Panel", "spec": { "id": 26, "title": 
"Multiple series without metric column - histogram stacked", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeEpoch(time), valueOne, valueTwo FROM metric_values\nWHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "histogram", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 1, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": false, "total": true, "values": true }, "lines": false, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "buckets": 20, "mode": "histogram", "show": true, "values": [ "total" ] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-27": { "kind": "Panel", "spec": { "id": 27, "title": "Multiple series with metric column using timeGroup macro ($summarize)", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", 
"format": "time_series", "rawSql": "SELECT \n $__timeGroupAlias(time, '$summarize'), \n measurement as metric, \n avg(valueOne) as valueOne,\n avg(valueTwo) as valueTwo\nFROM\n metric_values \nWHERE\n $__timeFilter(time) AND\n measurement IN($metric)\nGROUP BY 1, 2\nORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": false, "hideZero": false, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-28": { "kind": "Panel", "spec": { "id": 28, "title": "Multiple series without metric column", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__time(time), valueOne, valueTwo FROM metric_values ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": 
"timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-32": { "kind": "Panel", "spec": { "id": 32, "title": "cast(null as unsigned integer) as time", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "table", "rawSql": "SELECT cast(null as unsigned integer) as time_sec", "target": "" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "table", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "table-old", "originalOptions": { "columns": [], "fontSize": "100%", "scroll": true, "showHeader": true, "sort": { "col": 0, "desc": true }, "styles": [ { "alias": "Time", "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "pattern": "time_sec", "type": "date" }, { "alias": "", "align": "auto", "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", 
"rgba(50, 172, 45, 0.97)" ], "decimals": 2, "pattern": "/.*/", "thresholds": [], "type": "number", "unit": "short" } ], "transform": "table" } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-33": { "kind": "Panel", "spec": { "id": 33, "title": "cast(null as datetime) as time", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "table", "rawSql": "SELECT cast(null as datetime) as time_sec", "target": "" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "table", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "table-old", "originalOptions": { "columns": [], "fontSize": "100%", "scroll": true, "showHeader": true, "sort": { "col": 0, "desc": true }, "styles": [ { "alias": "Time", "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "pattern": "time_sec", "type": "date" }, { "alias": "", "align": "auto", "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "decimals": 2, "pattern": "/.*/", "thresholds": [], "type": "number", "unit": "short" } ], "transform": "table" } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-34": { "kind": "Panel", "spec": { "id": 34, "title": "cast()NOW() as datetime) as time", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "table", "rawSql": "SELECT cast(NOW() as datetime) as time_sec", "target": "" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, 
"vizConfig": { "kind": "VizConfig", "group": "table", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "table-old", "originalOptions": { "columns": [], "fontSize": "100%", "scroll": true, "showHeader": true, "sort": { "col": 0, "desc": true }, "styles": [ { "alias": "Time", "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "pattern": "time_sec", "type": "date" }, { "alias": "", "align": "auto", "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "decimals": 2, "pattern": "/.*/", "thresholds": [], "type": "number", "unit": "short" } ], "transform": "table" } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-35": { "kind": "Panel", "spec": { "id": 35, "title": "NOW() as time", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "table", "rawSql": "SELECT NOW() as time", "target": "" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "table", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "table-old", "originalOptions": { "columns": [], "fontSize": "100%", "scroll": true, "showHeader": true, "sort": { "col": 0, "desc": true }, "styles": [ { "alias": "Time", "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "pattern": "time", "type": "date" }, { "alias": "", "align": "auto", "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "decimals": 2, "pattern": "/.*/", "thresholds": [], "type": "number", "unit": "short" } ], "transform": "table" } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-36": { "kind": "Panel", "spec": { "id": 36, "title": "timeGroup macro 5m with 
fill(previous)", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeGroupAlias(time, '5m', previous), avg(value) as value FROM metric WHERE $__timeFilter(time) GROUP BY 1 ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": true, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-37": { "kind": "Panel", "spec": { "id": 37, "title": "Metrics - timeGroup macro $summarize with fill(previous)", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeGroupAlias(time, '$summarize', previous), sum(value) as value FROM metric WHERE 
$__timeFilter(time) GROUP BY 1 ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": true, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": false, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-38": { "kind": "Panel", "spec": { "id": 38, "title": "Multiple series with metric column using unixEpochGroup macro ($summarize)", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT \n $__unixEpochGroupAlias(timeInt32, '$summarize'), \n measurement, \n avg(valueOne) as valueOne,\n avg(valueTwo) as valueTwo\nFROM\n metric_values \nWHERE\n $__unixEpochFilter(timeInt32) AND\n measurement in($metric)\nGROUP BY 1, 2\nORDER BY 1, 2" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": 
"graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": false, "hideZero": false, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-39": { "kind": "Panel", "spec": { "id": 39, "title": "Multiple series without metric column using unixEpochGroup macro ($summarize)", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT \n $__unixEpochGroupAlias(timeInt32, '$summarize'), \n avg(valueOne) as valueOne,\n avg(valueTwo) as valueTwo\nFROM\n metric_values \nWHERE\n $__unixEpochFilter(timeInt32) AND\n measurement in($metric)\nGROUP BY 1\nORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, 
"rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "MovingAverageValueOne", "dashes": true, "lines": false }, { "alias": "MovingAverageValueTwo", "dashes": true, "lines": false, "yaxis": 1 } ], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-4": { "kind": "Panel", "spec": { "id": 4, "title": "Multiple series with metric column", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__time(time), measurement as metric, valueOne, valueTwo FROM metric_values WHERE $__timeFilter(time) AND measurement IN($metric) ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": false, "hideZero": false, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, 
"renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-5": { "kind": "Panel", "spec": { "id": 5, "title": "Multiple series without metric column using timeGroup macro ($summarize)", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT \n $__timeGroupAlias(time, '$summarize'), \n avg(valueOne) as valueOne, \n avg(valueTwo) as valueTwo \nFROM\n metric_values \nWHERE \n $__timeFilter(time) AND \n measurement IN($metric)\nGROUP BY 1\nORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "rightSide": true, "show": true, "total": true, "values": true }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "MovingAverageValueOne", "dashes": true, "lines": false }, { "alias": "MovingAverageValueTwo", "dashes": true, "lines": false, "yaxis": 1 } ], "spaceLength": 10, "stack": false, "steppedLine": false, "thresholds": [], 
"tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-7": { "kind": "Panel", "spec": { "id": 7, "title": "timeGroup macro 5m without fill", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeGroupAlias(time, '5m'), avg(value) as value FROM metric WHERE $__timeFilter(time) GROUP BY 1 ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "nullPointMode": "null", "percentage": false, "pointradius": 3, "points": true, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } }, "panel-9": { "kind": "Panel", "spec": { "id": 9, "title": "timeGroup macro 5m with fill(NULL) and 
null as zero", "description": "", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "datasource": { "name": "gdev-mysql-ds-tests" }, "spec": { "alias": "", "format": "time_series", "rawSql": "SELECT $__timeGroupAlias(time, '5m', NULL), avg(value) as value FROM metric WHERE $__timeFilter(time) GROUP BY 1 ORDER BY 1" } }, "refId": "A", "hidden": false } } ], "transformations": [], "queryOptions": {} } }, "vizConfig": { "kind": "VizConfig", "group": "timeseries", "version": "", "spec": { "options": { "__angularMigration": { "autoMigrateFrom": "graph", "originalOptions": { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "fill": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "nullPointMode": "null as zero", "percentage": false, "pointradius": 3, "points": true, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": true, "thresholds": [], "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "xaxis": { "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "min": "0", "show": true }, { "format": "short", "logBase": 1, "show": true } ], "yaxis": { "align": false } } } }, "fieldConfig": { "defaults": {}, "overrides": [] } } } } } }, "layout": { "kind": "GridLayout", "spec": { "items": [ { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 0, "width": 24, "height": 4, "element": { "kind": "ElementReference", "name": "panel-2" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 4, "width": 6, "height": 3, "element": { "kind": "ElementReference", "name": "panel-32" } } }, { "kind": "GridLayoutItem", "spec": { "x": 6, "y": 4, "width": 6, "height": 3, "element": { "kind": "ElementReference", "name": "panel-33" } } }, { "kind": 
"GridLayoutItem", "spec": { "x": 12, "y": 4, "width": 6, "height": 3, "element": { "kind": "ElementReference", "name": "panel-34" } } }, { "kind": "GridLayoutItem", "spec": { "x": 18, "y": 4, "width": 6, "height": 3, "element": { "kind": "ElementReference", "name": "panel-35" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 7, "width": 6, "height": 6, "element": { "kind": "ElementReference", "name": "panel-7" } } }, { "kind": "GridLayoutItem", "spec": { "x": 6, "y": 7, "width": 6, "height": 6, "element": { "kind": "ElementReference", "name": "panel-9" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 7, "width": 6, "height": 6, "element": { "kind": "ElementReference", "name": "panel-10" } } }, { "kind": "GridLayoutItem", "spec": { "x": 18, "y": 7, "width": 6, "height": 6, "element": { "kind": "ElementReference", "name": "panel-36" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 13, "width": 6, "height": 6, "element": { "kind": "ElementReference", "name": "panel-16" } } }, { "kind": "GridLayoutItem", "spec": { "x": 6, "y": 13, "width": 6, "height": 6, "element": { "kind": "ElementReference", "name": "panel-12" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 13, "width": 6, "height": 6, "element": { "kind": "ElementReference", "name": "panel-13" } } }, { "kind": "GridLayoutItem", "spec": { "x": 18, "y": 13, "width": 6, "height": 6, "element": { "kind": "ElementReference", "name": "panel-37" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 19, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-27" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 19, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-5" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 27, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-38" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 27, "width": 12, "height": 8, "element": { 
"kind": "ElementReference", "name": "panel-39" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 35, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-4" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 35, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-28" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 43, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-19" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 43, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-18" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 51, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-17" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 51, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-20" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 59, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-14" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 59, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-15" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 67, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-25" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 67, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-22" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 75, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-21" } } }, { "kind": "GridLayoutItem", "spec": { "x": 12, "y": 75, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-26" } } }, { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 83, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-23" } } }, { "kind": 
"GridLayoutItem", "spec": { "x": 12, "y": 83, "width": 12, "height": 8, "element": { "kind": "ElementReference", "name": "panel-24" } } } ] } }, "links": [], "liveNow": false, "preload": false, "tags": [ "gdev", "mysql", "datasource-test" ], "timeSettings": { "timezone": "", "from": "2018-03-15T12:30:00.000Z", "to": "2018-03-15T13:55:01.000Z", "autoRefresh": "", "autoRefreshIntervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "hideTimepicker": false, "fiscalYearStartMonth": 0 }, "title": "Datasource tests - MySQL (unittest)", "variables": [ { "kind": "QueryVariable", "spec": { "name": "metric", "current": { "text": [ "All" ], "value": [ "$__all" ] }, "label": "Metric", "hide": "dontHide", "refresh": "onDashboardLoad", "skipUrlSync": false, "query": { "kind": "DataQuery", "group": "prometheus", "version": "v0", "spec": { "__legacyStringValue": "SELECT DISTINCT measurement FROM metric_values" } }, "regex": "", "sort": "disabled", "options": [], "multi": true, "includeAll": true, "allowCustomValue": true } }, { "kind": "IntervalVariable", "spec": { "name": "summarize", "query": "1s,10s,30s,1m,5m,10m", "current": { "text": "10m", "value": "10m" }, "options": [ { "selected": false, "text": "1s", "value": "1s" }, { "selected": false, "text": "10s", "value": "10s" }, { "selected": false, "text": "30s", "value": "30s" }, { "selected": false, "text": "1m", "value": "1m" }, { "selected": false, "text": "5m", "value": "5m" }, { "selected": true, "text": "10m", "value": "10m" } ], "auto": false, "auto_min": "10s", "auto_count": 30, "refresh": "onTimeRangeChanged", "label": "Interval", "hide": "dontHide", "skipUrlSync": false } } ] }, "status": { "conversion": { "failed": false, "storedVersion": "v0alpha1" } } }
json
github
https://github.com/grafana/grafana
apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/datasource-mysql/v0alpha1.mysql_unittest.v42.v2beta1.json
"""Unit tests for host collections. :Requirement: Hostcollection :CaseAutomation: Automated :CaseLevel: Acceptance :CaseComponent: HostCollections :Assignee: swadeley :TestType: Functional :CaseImportance: High :Upstream: No """ from random import choice from random import randint import pytest from broker import VMBroker from nailgun import entities from requests.exceptions import HTTPError from robottelo.datafactory import invalid_values_list from robottelo.datafactory import parametrized from robottelo.datafactory import valid_data_list from robottelo.hosts import ContentHost @pytest.fixture(scope='module') def fake_hosts(module_org): """Create content hosts that can be shared by tests.""" hosts = [entities.Host(organization=module_org).create() for _ in range(2)] return hosts @pytest.mark.parametrize('name', **parametrized(valid_data_list())) @pytest.mark.tier1 def test_positive_create_with_name(module_org, name): """Create host collections with different names. :id: 8f2b9223-f5be-4cb1-8316-01ea747cae14 :parametrized: yes :expectedresults: The host collection was successfully created and has appropriate name. :CaseImportance: Critical """ host_collection = entities.HostCollection(name=name, organization=module_org).create() assert host_collection.name == name @pytest.mark.tier1 def test_positive_list(module_org): """Create new host collection and then retrieve list of all existing host collections :id: 6ae32df2-b917-4830-8709-15fb272b76c1 :BZ: 1331875 :expectedresults: Returned list of host collections for the system contains at least one collection :CaseImportance: Critical """ entities.HostCollection(organization=module_org).create() hc_list = entities.HostCollection().search() assert len(hc_list) >= 1 @pytest.mark.tier1 def test_positive_list_for_organization(): """Create host collection for specific organization. 
Retrieve list of host collections for that organization :id: 5f9de8ab-2c53-401b-add3-57d86c97563a :expectedresults: The host collection was successfully created and present in the list of collections for specific organization :CaseImportance: Critical """ org = entities.Organization().create() hc = entities.HostCollection(organization=org).create() hc_list = entities.HostCollection(organization=org).search() assert len(hc_list) == 1 assert hc_list[0].id == hc.id @pytest.mark.parametrize('desc', **parametrized(valid_data_list())) @pytest.mark.tier1 def test_positive_create_with_description(module_org, desc): """Create host collections with different descriptions. :id: 9d13392f-8d9d-4ff1-8909-4233e4691055 :parametrized: yes :expectedresults: The host collection was successfully created and has appropriate description. :CaseImportance: Critical """ host_collection = entities.HostCollection(description=desc, organization=module_org).create() assert host_collection.description == desc @pytest.mark.tier1 def test_positive_create_with_limit(module_org): """Create host collections with different limits. :id: 86d9387b-7036-4794-96fd-5a3472dd9160 :expectedresults: The host collection was successfully created and has appropriate limit. :CaseImportance: Critical """ for _ in range(5): limit = randint(1, 30) host_collection = entities.HostCollection(max_hosts=limit, organization=module_org).create() assert host_collection.max_hosts == limit @pytest.mark.parametrize("unlimited", [False, True]) @pytest.mark.tier1 def test_positive_create_with_unlimited_hosts(module_org, unlimited): """Create host collection with different values of 'unlimited hosts' parameter. :id: d385574e-5794-4442-b6cd-e5ded001d877 :parametrized: yes :expectedresults: The host collection was successfully created and has appropriate 'unlimited hosts' parameter value. 
:CaseImportance: Critical """ host_collection = entities.HostCollection( max_hosts=None if unlimited else 1, organization=module_org, unlimited_hosts=unlimited, ).create() assert host_collection.unlimited_hosts == unlimited @pytest.mark.tier1 def test_positive_create_with_host(module_org, fake_hosts): """Create a host collection that contains a host. :id: 9dc0ad72-58c2-4079-b1ca-2c4373472f0f :expectedresults: The host collection can be read back, and it includes one host. :CaseImportance: Critical :BZ: 1325989 """ host_collection = entities.HostCollection( host=[fake_hosts[0]], organization=module_org ).create() assert len(host_collection.host) == 1 @pytest.mark.tier1 def test_positive_create_with_hosts(module_org, fake_hosts): """Create a host collection that contains hosts. :id: bb8d2b42-9a8b-4c4f-ba0c-c56ae5a7eb1d :expectedresults: The host collection can be read back, and it references two hosts. :CaseImportance: Critical :BZ: 1325989 """ host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create() assert len(host_collection.host) == len(fake_hosts) @pytest.mark.tier2 def test_positive_add_host(module_org, fake_hosts): """Add a host to host collection. :id: da8bc901-7ac8-4029-bb62-af21aa4d3a88 :expectedresults: Host was added to the host collection. :CaseLevel: Integration :BZ:1325989 """ host_collection = entities.HostCollection(organization=module_org).create() host_collection.host_ids = [fake_hosts[0].id] host_collection = host_collection.update(['host_ids']) assert len(host_collection.host) == 1 @pytest.mark.upgrade @pytest.mark.tier2 def test_positive_add_hosts(module_org, fake_hosts): """Add hosts to host collection. :id: f76b4db1-ccd5-47ab-be15-8c7d91d03b22 :expectedresults: Hosts were added to the host collection. 
:CaseLevel: Integration :BZ: 1325989 """ host_collection = entities.HostCollection(organization=module_org).create() host_ids = [str(host.id) for host in fake_hosts] host_collection.host_ids = host_ids host_collection = host_collection.update(['host_ids']) assert len(host_collection.host) == len(fake_hosts) @pytest.mark.tier1 def test_positive_read_host_ids(module_org, fake_hosts): """Read a host collection and look at the ``host_ids`` field. :id: 444a1528-64c8-41b6-ba2b-6c49799d5980 :expectedresults: The ``host_ids`` field matches the host IDs passed in when creating the host collection. :CaseImportance: Critical :BZ:1325989 """ host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create() assert frozenset(host.id for host in host_collection.host) == frozenset( host.id for host in fake_hosts ) @pytest.mark.parametrize('new_name', **parametrized(valid_data_list())) @pytest.mark.tier1 def test_positive_update_name(module_org, new_name): """Check if host collection name can be updated :id: b2dedb99-6dd7-41be-8aaa-74065c820ac6 :parametrized: yes :expectedresults: Host collection name was successfully updated :CaseImportance: Critical """ host_collection = entities.HostCollection(organization=module_org).create() host_collection.name = new_name assert host_collection.update().name == new_name @pytest.mark.parametrize('new_desc', **parametrized(valid_data_list())) @pytest.mark.tier1 def test_positive_update_description(module_org, new_desc): """Check if host collection description can be updated :id: f8e9bd1c-1525-4b5f-a07c-eb6b6e7aa628 :parametrized: yes :expectedresults: Host collection description was updated :CaseImportance: Critical """ host_collection = entities.HostCollection(organization=module_org).create() host_collection.description = new_desc assert host_collection.update().description == new_desc @pytest.mark.tier1 def test_positive_update_limit(module_org): """Check if host collection limit can be updated :id: 
4eda7796-cd81-453b-9b72-4ef84b2c1d8c :expectedresults: Host collection limit was updated :CaseImportance: Critical """ host_collection = entities.HostCollection( max_hosts=1, organization=module_org, unlimited_hosts=False ).create() for limit in (1, 3, 5, 10, 20): host_collection.max_hosts = limit assert host_collection.update().max_hosts == limit @pytest.mark.tier1 def test_positive_update_unlimited_hosts(module_org): """Check if host collection 'unlimited hosts' parameter can be updated :id: 09a3973d-9832-4255-87bf-f9eaeab4aee8 :expectedresults: Host collection 'unlimited hosts' parameter was updated :CaseImportance: Critical """ random_unlimited = choice([True, False]) host_collection = entities.HostCollection( max_hosts=1 if not random_unlimited else None, organization=module_org, unlimited_hosts=random_unlimited, ).create() for unlimited in (not random_unlimited, random_unlimited): host_collection.max_hosts = 1 if not unlimited else None host_collection.unlimited_hosts = unlimited host_collection = host_collection.update(['max_hosts', 'unlimited_hosts']) assert host_collection.unlimited_hosts == unlimited @pytest.mark.tier1 def test_positive_update_host(module_org, fake_hosts): """Update host collection's host. :id: 23082854-abcf-4085-be9c-a5d155446acb :expectedresults: The host collection was updated with a new host. :CaseImportance: Critical """ host_collection = entities.HostCollection( host=[fake_hosts[0]], organization=module_org ).create() host_collection.host_ids = [fake_hosts[1].id] host_collection = host_collection.update(['host_ids']) assert host_collection.host[0].id == fake_hosts[1].id @pytest.mark.upgrade @pytest.mark.tier1 def test_positive_update_hosts(module_org, fake_hosts): """Update host collection's hosts. :id: 0433b37d-ae16-456f-a51d-c7b800334861 :expectedresults: The host collection was updated with new hosts. 
:CaseImportance: Critical """ host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create() new_hosts = [entities.Host(organization=module_org).create() for _ in range(2)] host_ids = [str(host.id) for host in new_hosts] host_collection.host_ids = host_ids host_collection = host_collection.update(['host_ids']) assert {host.id for host in host_collection.host} == {host.id for host in new_hosts} @pytest.mark.upgrade @pytest.mark.tier1 def test_positive_delete(module_org): """Check if host collection can be deleted :id: 13a16cd2-16ce-4966-8c03-5d821edf963b :expectedresults: Host collection was successfully deleted :CaseImportance: Critical """ host_collection = entities.HostCollection(organization=module_org).create() host_collection.delete() with pytest.raises(HTTPError): host_collection.read() @pytest.mark.parametrize('name', **parametrized(invalid_values_list())) @pytest.mark.tier1 def test_negative_create_with_invalid_name(module_org, name): """Try to create host collections with different invalid names :id: 38f67d04-a19d-4eab-a577-21b8d62c7389 :parametrized: yes :expectedresults: The host collection was not created :CaseImportance: Critical """ with pytest.raises(HTTPError): entities.HostCollection(name=name, organization=module_org).create() @pytest.mark.tier1 def test_positive_add_remove_subscription(module_org, module_ak_cv_lce): """Try to bulk add and remove a subscription to members of a host collection. :id: c4ec5727-eb25-452e-a91f-87cafb16666b :steps: 1. Create HC, add AK to HC 2. Create product so we can use it's subscription 3. Create some VMs and register them with AK so they are in HC 4. Add the subscription to the members of the Host Collection 5. Assert subscription is added 6. Bulk remove subscription 7. 
Assert it is removed :expectedresults: subscription added to, and removed from, members of host collection :CaseImportance: Critical """ # this command creates a host collection and "appends", makes available, to the AK module_ak_cv_lce.host_collection.append( entities.HostCollection(organization=module_org).create() ) # Move HC from Add tab to List tab on AK view module_ak_cv_lce = module_ak_cv_lce.update(['host_collection']) # Create a product so we have a subscription to use product = entities.Product(organization=module_org).create() prod_name = product.name product_subscription = entities.Subscription(organization=module_org).search( query={'search': f'name={prod_name}'} )[0] # Create and register VMs as members of Host Collection with VMBroker(nick='rhel7', host_classes={'host': ContentHost}, _count=2) as hosts: for client in hosts: client.install_katello_ca() client.register_contenthost(module_org.label, module_ak_cv_lce.name) # Read host_collection back from Satellite to get host_ids host_collection = module_ak_cv_lce.host_collection[0].read() host_ids = [host.id for host in host_collection.host] # Add subscription # Call nailgun to make the API PUT to members of Host Collection entities.Host().bulk_add_subscriptions( data={ "organization_id": module_org.id, "included": {"ids": host_ids}, "subscriptions": [{"id": product_subscription.id, "quantity": 1}], } ) # GET the subscriptions from hosts and assert they are there for host_id in host_ids: req = entities.HostSubscription(host=host_id).subscriptions() assert ( prod_name in req['results'][0]['product_name'] ), 'Subscription not applied to HC members' # Remove the subscription # Call nailgun to make the API PUT to members of Host Collection entities.Host().bulk_remove_subscriptions( data={ "organization_id": module_org.id, "included": {"ids": host_ids}, "subscriptions": [{"id": product_subscription.id, "quantity": 1}], } ) # GET the subscriptions from hosts and assert they are gone for host_id in host_ids: 
req = entities.HostSubscription(host=host_id).subscriptions() assert not req['results'], 'Subscription not removed from HC members'
unknown
codeparrot/codeparrot-clean
from optparse import make_option from django.core.management import BaseCommand from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed from corehq.apps.change_feed.exceptions import UnavailableKafkaOffset from corehq.apps.change_feed.topics import validate_offsets from pillowtop import get_all_pillow_instances class Command(BaseCommand): help = ("Validates that all pillows that use kafka have checkpoints that still exist " "in the kafka feed.") option_list = BaseCommand.option_list + ( make_option('--print-only', action='store_true', dest='print_only', default=False, help="Only print information, don't fail if checkpoints aren't valid."), ) def handle(self, *args, **options): print_only = options['print_only'] validate_checkpoints(print_only) def validate_checkpoints(print_only): for pillow in get_all_pillow_instances(): if isinstance(pillow.get_change_feed(), KafkaChangeFeed): checkpoint_dict = _get_checkpoint_dict(pillow) try: validate_offsets(checkpoint_dict) except UnavailableKafkaOffset as e: message = u'Problem with checkpoint for {}: {}'.format( pillow.pillow_id, e ) if print_only: print message else: raise Exception(message) def _get_checkpoint_dict(pillow): sequence = pillow.get_last_checkpoint_sequence() if isinstance(sequence, dict): sequence_dict = sequence else: try: sequence_int = int(sequence) except ValueError: # assume this is an old/legacy checkpoint return {} else: sequence_dict = { pillow.get_change_feed()._get_single_topic_or_fail(): sequence_int } # filter out 0's since we don't want to check those as they are likely new pillows return { k: v for k, v in sequence_dict.items() if v > 0 }
unknown
codeparrot/codeparrot-clean
import os ROBOT_LISTENER_API_VERSION = '2' OUTFILE = open(os.path.join(os.getenv('TEMPDIR'), 'listener_attrs.txt'), 'w') START_ATTRS = 'doc starttime ' END_ATTRS = START_ATTRS + 'endtime elapsedtime status ' KW_ATTRS = 'args assign kwname libname type' EXPECTED_TYPES = {'elapsedtime': (int, long), 'tags': list, 'args': list, 'assign': list, 'metadata': dict, 'tests': list, 'suites': list, 'totaltests': int} def start_suite(name, attrs): _verify_attrs('START SUITE', attrs, START_ATTRS + 'id longname metadata source tests suites totaltests') def end_suite(name, attrs): _verify_attrs('END SUITE', attrs, END_ATTRS + 'id longname metadata source tests suites totaltests statistics message') def start_test(name, attrs): _verify_attrs('START TEST', attrs, START_ATTRS + 'id longname tags critical template') def end_test(name, attrs): _verify_attrs('END TEST', attrs, END_ATTRS + 'id longname tags critical message template') def start_keyword(name, attrs): _verify_attrs('START KEYWORD', attrs, START_ATTRS + KW_ATTRS) _verify_name(name, **attrs) def end_keyword(name, attrs): _verify_attrs('END KEYWORD', attrs, END_ATTRS + KW_ATTRS) _verify_name(name, **attrs) def _verify_attrs(method_name, attrs, names): names = names.split() OUTFILE.write(method_name + '\n') if len(names) != len(attrs): OUTFILE.write('FAILED: wrong number of attributes\n') OUTFILE.write('Expected: %s\nActual: %s\n' % (names, attrs.keys())) return for name in names: value = attrs[name] exp_type = EXPECTED_TYPES.get(name, basestring) if isinstance(value, exp_type): OUTFILE.write('PASSED | %s: %s\n' % (name, value)) else: OUTFILE.write('FAILED | %s: %r, Expected: %s, Actual: %s\n' % (name, value, exp_type, type(value))) def _verify_name(name, kwname=None, libname=None, **ignored): if libname: if name != '%s.%s' % (libname, kwname): OUTFILE.write("FAILED | KW NAME: '%s' != '%s.%s'\n" % (name, libname, kwname)) else: if name != kwname: OUTFILE.write("FAILED | KW NAME: '%s' != '%s'\n" % (name, kwname)) if libname 
!= '': OUTFILE.write("FAILED | LIB NAME: '%s' != ''\n" % libname) def close(): OUTFILE.close()
unknown
codeparrot/codeparrot-clean
import logging from pyvisdk.exceptions import InvalidArgumentError ######################################## # Automatically generated, do not edit. ######################################## log = logging.getLogger(__name__) def DVSFeatureCapability(vim, *args, **kwargs): '''Dataobject representing the feature capabilities supported by the vSphere Distributed Switch.''' obj = vim.client.factory.create('ns0:DVSFeatureCapability') # do some validation checking... if (len(args) + len(kwargs)) < 2: raise IndexError('Expected at least 3 arguments got: %d' % len(args)) required = [ 'networkResourceManagementSupported', 'vmDirectPathGen2Supported' ] optional = [ 'networkResourceManagementCapability', 'networkResourcePoolHighShareValue', 'nicTeamingPolicy', 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
unknown
codeparrot/codeparrot-clean
/* * Copyright (C) Igor Sysoev * Copyright (C) Nginx, Inc. */ #include <ngx_config.h> #include <ngx_core.h> #include <ngx_http.h> typedef struct { ngx_uint_t hash_max_size; ngx_uint_t hash_bucket_size; } ngx_http_map_conf_t; typedef struct { ngx_hash_keys_arrays_t keys; ngx_array_t *values_hash; #if (NGX_PCRE) ngx_array_t regexes; #endif ngx_http_variable_value_t *default_value; ngx_conf_t *cf; unsigned hostnames:1; unsigned no_cacheable:1; } ngx_http_map_conf_ctx_t; typedef struct { ngx_http_map_t map; ngx_http_complex_value_t value; ngx_http_variable_value_t *default_value; ngx_uint_t hostnames; /* unsigned hostnames:1 */ } ngx_http_map_ctx_t; static int ngx_libc_cdecl ngx_http_map_cmp_dns_wildcards(const void *one, const void *two); static void *ngx_http_map_create_conf(ngx_conf_t *cf); static char *ngx_http_map_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static char *ngx_http_map(ngx_conf_t *cf, ngx_command_t *dummy, void *conf); static ngx_command_t ngx_http_map_commands[] = { { ngx_string("map"), NGX_HTTP_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_TAKE2, ngx_http_map_block, NGX_HTTP_MAIN_CONF_OFFSET, 0, NULL }, { ngx_string("map_hash_max_size"), NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1, ngx_conf_set_num_slot, NGX_HTTP_MAIN_CONF_OFFSET, offsetof(ngx_http_map_conf_t, hash_max_size), NULL }, { ngx_string("map_hash_bucket_size"), NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1, ngx_conf_set_num_slot, NGX_HTTP_MAIN_CONF_OFFSET, offsetof(ngx_http_map_conf_t, hash_bucket_size), NULL }, ngx_null_command }; static ngx_http_module_t ngx_http_map_module_ctx = { NULL, /* preconfiguration */ NULL, /* postconfiguration */ ngx_http_map_create_conf, /* create main configuration */ NULL, /* init main configuration */ NULL, /* create server configuration */ NULL, /* merge server configuration */ NULL, /* create location configuration */ NULL /* merge location configuration */ }; ngx_module_t ngx_http_map_module = { NGX_MODULE_V1, &ngx_http_map_module_ctx, /* module context */ 
ngx_http_map_commands, /* module directives */ NGX_HTTP_MODULE, /* module type */ NULL, /* init master */ NULL, /* init module */ NULL, /* init process */ NULL, /* init thread */ NULL, /* exit thread */ NULL, /* exit process */ NULL, /* exit master */ NGX_MODULE_V1_PADDING }; static ngx_int_t ngx_http_map_variable(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data) { ngx_http_map_ctx_t *map = (ngx_http_map_ctx_t *) data; ngx_str_t val, str; ngx_http_complex_value_t *cv; ngx_http_variable_value_t *value; ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http map started"); if (ngx_http_complex_value(r, &map->value, &val) != NGX_OK) { return NGX_ERROR; } if (map->hostnames && val.len > 0 && val.data[val.len - 1] == '.') { val.len--; } value = ngx_http_map_find(r, &map->map, &val); if (value == NULL) { value = map->default_value; } if (!value->valid) { cv = (ngx_http_complex_value_t *) value->data; if (ngx_http_complex_value(r, cv, &str) != NGX_OK) { return NGX_ERROR; } v->valid = 1; v->no_cacheable = 0; v->not_found = 0; v->len = str.len; v->data = str.data; } else { *v = *value; } ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http map: \"%V\" \"%v\"", &val, v); return NGX_OK; } static void * ngx_http_map_create_conf(ngx_conf_t *cf) { ngx_http_map_conf_t *mcf; mcf = ngx_palloc(cf->pool, sizeof(ngx_http_map_conf_t)); if (mcf == NULL) { return NULL; } mcf->hash_max_size = NGX_CONF_UNSET_UINT; mcf->hash_bucket_size = NGX_CONF_UNSET_UINT; return mcf; } static char * ngx_http_map_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { ngx_http_map_conf_t *mcf = conf; char *rv; ngx_str_t *value, name; ngx_conf_t save; ngx_pool_t *pool; ngx_hash_init_t hash; ngx_http_map_ctx_t *map; ngx_http_variable_t *var; ngx_http_map_conf_ctx_t ctx; ngx_http_compile_complex_value_t ccv; if (mcf->hash_max_size == NGX_CONF_UNSET_UINT) { mcf->hash_max_size = 2048; } if (mcf->hash_bucket_size == NGX_CONF_UNSET_UINT) { mcf->hash_bucket_size = 
ngx_cacheline_size; } else { mcf->hash_bucket_size = ngx_align(mcf->hash_bucket_size, ngx_cacheline_size); } map = ngx_pcalloc(cf->pool, sizeof(ngx_http_map_ctx_t)); if (map == NULL) { return NGX_CONF_ERROR; } value = cf->args->elts; ngx_memzero(&ccv, sizeof(ngx_http_compile_complex_value_t)); ccv.cf = cf; ccv.value = &value[1]; ccv.complex_value = &map->value; if (ngx_http_compile_complex_value(&ccv) != NGX_OK) { return NGX_CONF_ERROR; } name = value[2]; if (name.data[0] != '$') { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid variable name \"%V\"", &name); return NGX_CONF_ERROR; } name.len--; name.data++; var = ngx_http_add_variable(cf, &name, NGX_HTTP_VAR_CHANGEABLE); if (var == NULL) { return NGX_CONF_ERROR; } var->get_handler = ngx_http_map_variable; var->data = (uintptr_t) map; pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, cf->log); if (pool == NULL) { return NGX_CONF_ERROR; } ctx.keys.pool = cf->pool; ctx.keys.temp_pool = pool; if (ngx_hash_keys_array_init(&ctx.keys, NGX_HASH_LARGE) != NGX_OK) { ngx_destroy_pool(pool); return NGX_CONF_ERROR; } ctx.values_hash = ngx_pcalloc(pool, sizeof(ngx_array_t) * ctx.keys.hsize); if (ctx.values_hash == NULL) { ngx_destroy_pool(pool); return NGX_CONF_ERROR; } #if (NGX_PCRE) if (ngx_array_init(&ctx.regexes, cf->pool, 2, sizeof(ngx_http_map_regex_t)) != NGX_OK) { ngx_destroy_pool(pool); return NGX_CONF_ERROR; } #endif ctx.default_value = NULL; ctx.cf = &save; ctx.hostnames = 0; ctx.no_cacheable = 0; save = *cf; cf->pool = pool; cf->ctx = &ctx; cf->handler = ngx_http_map; cf->handler_conf = conf; rv = ngx_conf_parse(cf, NULL); *cf = save; if (rv != NGX_CONF_OK) { ngx_destroy_pool(pool); return rv; } if (ctx.no_cacheable) { var->flags |= NGX_HTTP_VAR_NOCACHEABLE; } map->default_value = ctx.default_value ? 
ctx.default_value: &ngx_http_variable_null_value; map->hostnames = ctx.hostnames; hash.key = ngx_hash_key_lc; hash.max_size = mcf->hash_max_size; hash.bucket_size = mcf->hash_bucket_size; hash.name = "map_hash"; hash.pool = cf->pool; if (ctx.keys.keys.nelts) { hash.hash = &map->map.hash.hash; hash.temp_pool = NULL; if (ngx_hash_init(&hash, ctx.keys.keys.elts, ctx.keys.keys.nelts) != NGX_OK) { ngx_destroy_pool(pool); return NGX_CONF_ERROR; } } if (ctx.keys.dns_wc_head.nelts) { ngx_qsort(ctx.keys.dns_wc_head.elts, (size_t) ctx.keys.dns_wc_head.nelts, sizeof(ngx_hash_key_t), ngx_http_map_cmp_dns_wildcards); hash.hash = NULL; hash.temp_pool = pool; if (ngx_hash_wildcard_init(&hash, ctx.keys.dns_wc_head.elts, ctx.keys.dns_wc_head.nelts) != NGX_OK) { ngx_destroy_pool(pool); return NGX_CONF_ERROR; } map->map.hash.wc_head = (ngx_hash_wildcard_t *) hash.hash; } if (ctx.keys.dns_wc_tail.nelts) { ngx_qsort(ctx.keys.dns_wc_tail.elts, (size_t) ctx.keys.dns_wc_tail.nelts, sizeof(ngx_hash_key_t), ngx_http_map_cmp_dns_wildcards); hash.hash = NULL; hash.temp_pool = pool; if (ngx_hash_wildcard_init(&hash, ctx.keys.dns_wc_tail.elts, ctx.keys.dns_wc_tail.nelts) != NGX_OK) { ngx_destroy_pool(pool); return NGX_CONF_ERROR; } map->map.hash.wc_tail = (ngx_hash_wildcard_t *) hash.hash; } #if (NGX_PCRE) if (ctx.regexes.nelts) { map->map.regex = ctx.regexes.elts; map->map.nregex = ctx.regexes.nelts; } #endif ngx_destroy_pool(pool); return rv; } static int ngx_libc_cdecl ngx_http_map_cmp_dns_wildcards(const void *one, const void *two) { ngx_hash_key_t *first, *second; first = (ngx_hash_key_t *) one; second = (ngx_hash_key_t *) two; return ngx_dns_strcmp(first->key.data, second->key.data); } static char * ngx_http_map(ngx_conf_t *cf, ngx_command_t *dummy, void *conf) { u_char *data; size_t len; ngx_int_t rv; ngx_str_t *value, v; ngx_uint_t i, key; ngx_http_map_conf_ctx_t *ctx; ngx_http_complex_value_t cv, *cvp; ngx_http_variable_value_t *var, **vp; ngx_http_compile_complex_value_t ccv; ctx = 
cf->ctx; value = cf->args->elts; if (cf->args->nelts == 1 && ngx_strcmp(value[0].data, "hostnames") == 0) { ctx->hostnames = 1; return NGX_CONF_OK; } if (cf->args->nelts == 1 && ngx_strcmp(value[0].data, "volatile") == 0) { ctx->no_cacheable = 1; return NGX_CONF_OK; } if (cf->args->nelts != 2) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid number of the map parameters"); return NGX_CONF_ERROR; } if (ngx_strcmp(value[0].data, "include") == 0) { return ngx_conf_include(cf, dummy, conf); } key = 0; for (i = 0; i < value[1].len; i++) { key = ngx_hash(key, value[1].data[i]); } key %= ctx->keys.hsize; vp = ctx->values_hash[key].elts; if (vp) { for (i = 0; i < ctx->values_hash[key].nelts; i++) { if (vp[i]->valid) { data = vp[i]->data; len = vp[i]->len; } else { cvp = (ngx_http_complex_value_t *) vp[i]->data; data = cvp->value.data; len = cvp->value.len; } if (value[1].len != len) { continue; } if (ngx_strncmp(value[1].data, data, len) == 0) { var = vp[i]; goto found; } } } else { if (ngx_array_init(&ctx->values_hash[key], cf->pool, 4, sizeof(ngx_http_variable_value_t *)) != NGX_OK) { return NGX_CONF_ERROR; } } var = ngx_palloc(ctx->keys.pool, sizeof(ngx_http_variable_value_t)); if (var == NULL) { return NGX_CONF_ERROR; } v.len = value[1].len; v.data = ngx_pstrdup(ctx->keys.pool, &value[1]); if (v.data == NULL) { return NGX_CONF_ERROR; } ngx_memzero(&ccv, sizeof(ngx_http_compile_complex_value_t)); ccv.cf = ctx->cf; ccv.value = &v; ccv.complex_value = &cv; if (ngx_http_compile_complex_value(&ccv) != NGX_OK) { return NGX_CONF_ERROR; } if (cv.lengths != NULL) { cvp = ngx_palloc(ctx->keys.pool, sizeof(ngx_http_complex_value_t)); if (cvp == NULL) { return NGX_CONF_ERROR; } *cvp = cv; var->len = 0; var->data = (u_char *) cvp; var->valid = 0; } else { var->len = v.len; var->data = v.data; var->valid = 1; } var->no_cacheable = 0; var->not_found = 0; vp = ngx_array_push(&ctx->values_hash[key]); if (vp == NULL) { return NGX_CONF_ERROR; } *vp = var; found: if 
(ngx_strcmp(value[0].data, "default") == 0) { if (ctx->default_value) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "duplicate default map parameter"); return NGX_CONF_ERROR; } ctx->default_value = var; return NGX_CONF_OK; } #if (NGX_PCRE) if (value[0].len && value[0].data[0] == '~') { ngx_regex_compile_t rc; ngx_http_map_regex_t *regex; u_char errstr[NGX_MAX_CONF_ERRSTR]; regex = ngx_array_push(&ctx->regexes); if (regex == NULL) { return NGX_CONF_ERROR; } value[0].len--; value[0].data++; ngx_memzero(&rc, sizeof(ngx_regex_compile_t)); if (value[0].data[0] == '*') { value[0].len--; value[0].data++; rc.options = NGX_REGEX_CASELESS; } rc.pattern = value[0]; rc.err.len = NGX_MAX_CONF_ERRSTR; rc.err.data = errstr; regex->regex = ngx_http_regex_compile(ctx->cf, &rc); if (regex->regex == NULL) { return NGX_CONF_ERROR; } regex->value = var; return NGX_CONF_OK; } #endif if (value[0].len && value[0].data[0] == '\\') { value[0].len--; value[0].data++; } rv = ngx_hash_add_key(&ctx->keys, &value[0], var, (ctx->hostnames) ? NGX_HASH_WILDCARD_KEY : 0); if (rv == NGX_OK) { return NGX_CONF_OK; } if (rv == NGX_DECLINED) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid hostname or wildcard \"%V\"", &value[0]); } if (rv == NGX_BUSY) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "conflicting parameter \"%V\"", &value[0]); } return NGX_CONF_ERROR; }
c
github
https://github.com/nginx/nginx
src/http/modules/ngx_http_map_module.c
//===------- DifferentiationMangler.h --------- differentiation -*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #ifndef SWIFT_SIL_UTILS_DIFFERENTIATIONMANGLER_H #define SWIFT_SIL_UTILS_DIFFERENTIATIONMANGLER_H #include "swift/AST/ASTContext.h" #include "swift/AST/ASTMangler.h" #include "swift/AST/AutoDiff.h" #include "swift/Basic/NullablePtr.h" #include "swift/Demangling/Demangler.h" #include "swift/SIL/SILFunction.h" namespace swift { namespace Mangle { /// A mangler for generated differentiation functions. class DifferentiationMangler : public ASTMangler { public: DifferentiationMangler(ASTContext &Ctx) : ASTMangler(Ctx) {} /// Returns the mangled name for a differentiation function of the given kind. std::string mangleAutoDiffFunction(StringRef originalName, Demangle::AutoDiffFunctionKind kind, const AutoDiffConfig &config); /// Returns the mangled name for a derivative function of the given kind. std::string mangleDerivativeFunction(StringRef originalName, AutoDiffDerivativeFunctionKind kind, const AutoDiffConfig &config); /// Returns the mangled name for a linear map of the given kind. std::string mangleLinearMap(StringRef originalName, AutoDiffLinearMapKind kind, const AutoDiffConfig &config); /// Returns the mangled name for a derivative function subset parameters /// thunk. 
std::string mangleDerivativeFunctionSubsetParametersThunk( StringRef originalName, CanType toType, AutoDiffDerivativeFunctionKind linearMapKind, IndexSubset *fromParamIndices, IndexSubset *fromResultIndices, IndexSubset *toParamIndices); /// Returns the mangled name for a linear map subset parameters thunk. std::string mangleLinearMapSubsetParametersThunk( CanType fromType, AutoDiffLinearMapKind linearMapKind, IndexSubset *fromParamIndices, IndexSubset *fromResultIndices, IndexSubset *toParamIndices); }; } // end namespace Mangle } // end namespace swift #endif /* SWIFT_SIL_UTILS_DIFFERENTIATIONMANGLER_H */
c
github
https://github.com/apple/swift
include/swift/SILOptimizer/Utils/DifferentiationMangler.h
#!/usr/bin/env python # coding:utf-8 # manning 2015-1-27 import lxml.html import urlparse import time import sys sys.path.append("..") from fetch import fetcher from config.config import * from node import UrlNode,HtmlNode def timestamp(): return str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) def crawler(html_node): link_list = [] html = html_node.html url = html_node.url if html == '': return [] else: #获取页面内的links try: tmp = lxml.html.document_fromstring(html) tmp.make_links_absolute(url) links = tmp.iterlinks() link_list = list(set([i[2] for i in links])) except Exception, e: pass #过滤不期待页面后缀 try: temp_list = [] for i in link_list: if urlparse.urlparse(i)[2].split('.')[-1].lower() not in IGNORE_EXT: temp_list.append(i) link_list = temp_list except Exception, e: print str(e) tmp_url_node = [] for i in link_list: tmp_url_node.append(UrlNode(urlparse.urlunparse((urlparse.urlparse(i)[0],urlparse.urlparse(i)[1],urlparse.urlparse(i)[2],urlparse.urlparse(i)[3],urlparse.urlparse(i)[4],'')),url,len(html),timestamp(),'',html_node.depth)) return tmp_url_node if __name__ == '__main__': pass
unknown
codeparrot/codeparrot-clean
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controlplane import ( "testing" pkiutiltesting "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil/testing" ) func TestMain(m *testing.M) { pkiutiltesting.RunWithPrivateKeyFixtureDirectory(m) }
go
github
https://github.com/kubernetes/kubernetes
cmd/kubeadm/app/phases/controlplane/main_test.go
# -*- coding: utf-8 -*- """ This is the common settings file, intended to set sane defaults. If you have a piece of configuration that's dependent on a set of feature flags being set, then create a function that returns the calculated value based on the value of FEATURES[...]. Modules that extend this one can change the feature configuration in an environment specific config file and re-calculate those values. We should make a method that calls all these config methods so that you just make one call at the end of your site-specific dev file to reset all the dependent variables (like INSTALLED_APPS) for you. Longer TODO: 1. Right now our treatment of static content in general and in particular course-specific static content is haphazard. 2. We should have a more disciplined approach to feature flagging, even if it just means that we stick them in a dict called FEATURES. 3. We need to handle configuration for multiple courses. This could be as multiple sites, but we do need a way to map their data assets. When refering to XBlocks, we use the entry-point name. For example, | setup( | name='xblock-foobar', | version='0.1', | packages=[ | 'foobar_xblock', | ], | entry_points={ | 'xblock.v1': [ | 'foobar-block = foobar_xblock:FoobarBlock', | # ^^^^^^^^^^^^ This is the one you want. | ] | }, | ) """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=unused-import from __future__ import absolute_import import imp import os import sys from datetime import timedelta import lms.envs.common # Although this module itself may not use these imported variables, other dependent modules may. 
from lms.envs.common import ( USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, PLATFORM_DESCRIPTION, BUGS_EMAIL, DOC_STORE_CONFIG, DATA_DIR, ALL_LANGUAGES, WIKI_ENABLED, update_module_store_settings, ASSET_IGNORE_REGEX, PARENTAL_CONSENT_AGE_LIMIT, REGISTRATION_EMAIL_PATTERNS_ALLOWED, # The following PROFILE_IMAGE_* settings are included as they are # indirectly accessed through the email opt-in API, which is # technically accessible through the CMS via legacy URLs. PROFILE_IMAGE_BACKEND, PROFILE_IMAGE_DEFAULT_FILENAME, PROFILE_IMAGE_DEFAULT_FILE_EXTENSION, PROFILE_IMAGE_SECRET_KEY, PROFILE_IMAGE_MIN_BYTES, PROFILE_IMAGE_MAX_BYTES, PROFILE_IMAGE_SIZES_MAP, # The following setting is included as it is used to check whether to # display credit eligibility table on the CMS or not. ENABLE_CREDIT_ELIGIBILITY, YOUTUBE_API_KEY, COURSE_MODE_DEFAULTS, DEFAULT_COURSE_ABOUT_IMAGE_URL, # User-uploaded content MEDIA_ROOT, MEDIA_URL, # Lazy Gettext _, # Django REST framework configuration REST_FRAMEWORK, STATICI18N_OUTPUT_DIR, # Heartbeat HEARTBEAT_CHECKS, HEARTBEAT_EXTENDED_CHECKS, HEARTBEAT_CELERY_TIMEOUT, # Theme to use when no site or site theme is defined, DEFAULT_SITE_THEME, # Default site to use if no site exists matching request headers SITE_ID, # Enable or disable theming ENABLE_COMPREHENSIVE_THEMING, COMPREHENSIVE_THEME_LOCALE_PATHS, COMPREHENSIVE_THEME_DIRS, # constants for redirects app REDIRECT_CACHE_TIMEOUT, REDIRECT_CACHE_KEY_PREFIX, # This is required for the migrations in oauth_dispatch.models # otherwise it fails saying this attribute is not present in Settings # Although Studio does not enable OAuth2 Provider capability, the new approach # to generating test databases will discover and try to create all tables # and this setting needs to be present OAUTH2_PROVIDER_APPLICATION_MODEL, JWT_AUTH, USERNAME_REGEX_PARTIAL, USERNAME_PATTERN, # django-debug-toolbar DEBUG_TOOLBAR_PATCH_SETTINGS, BLOCK_STRUCTURES_SETTINGS, # File upload defaults FILE_UPLOAD_STORAGE_BUCKET_NAME, 
FILE_UPLOAD_STORAGE_PREFIX, COURSE_ENROLLMENT_MODES, CONTENT_TYPE_GATE_GROUP_IDS, HELP_TOKENS_BOOKS, SUPPORT_SITE_LINK, PASSWORD_RESET_SUPPORT_LINK, ACTIVATION_EMAIL_SUPPORT_LINK, DEFAULT_COURSE_VISIBILITY_IN_CATALOG, DEFAULT_MOBILE_AVAILABLE, CONTACT_EMAIL, DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH, # Video Image settings VIDEO_IMAGE_SETTINGS, VIDEO_TRANSCRIPTS_SETTINGS, RETIRED_USERNAME_PREFIX, RETIRED_USERNAME_FMT, RETIRED_EMAIL_PREFIX, RETIRED_EMAIL_DOMAIN, RETIRED_EMAIL_FMT, RETIRED_USER_SALTS, RETIREMENT_SERVICE_WORKER_USERNAME, RETIREMENT_STATES, # Methods to derive settings _make_mako_template_dirs, _make_locale_paths, ) from path import Path as path from django.core.urlresolvers import reverse_lazy from lms.djangoapps.lms_xblock.mixin import LmsBlockMixin from cms.lib.xblock.authoring_mixin import AuthoringMixin from xmodule.modulestore.edit_info import EditInfoMixin from openedx.core.djangoapps.theming.helpers_dirs import ( get_themes_unchecked, get_theme_base_dirs_from_settings ) from openedx.core.lib.license import LicenseMixin from openedx.core.lib.derived import derived, derived_collection_entry from openedx.core.release import doc_version ############################ FEATURE CONFIGURATION ############################# # Dummy secret key for dev/test SECRET_KEY = 'dev key' STUDIO_NAME = _("Your Platform Studio") STUDIO_SHORT_NAME = _("Studio") FEATURES = { 'GITHUB_PUSH': False, # for consistency in user-experience, keep the value of the following 3 settings # in sync with the ones in lms/envs/common.py 'ENABLE_DISCUSSION_SERVICE': True, 'ENABLE_TEXTBOOK': True, 'ENABLE_STUDENT_NOTES': True, # DO NOT SET TO True IN THIS FILE # Doing so will cause all courses to be released on production 'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date 'AUTH_USE_CERTIFICATES': False, # email address for studio staff (eg to request course creation) 'STUDIO_REQUEST_EMAIL': '', # Segment - must explicitly turn it on for 
production 'CMS_SEGMENT_KEY': None, # Enable URL that shows information about the status of various services 'ENABLE_SERVICE_STATUS': False, # Don't autoplay videos for course authors 'AUTOPLAY_VIDEOS': False, # Move the course author to next page when a video finishes. Set to True to # show an auto-advance button in videos. If False, videos never auto-advance. 'ENABLE_AUTOADVANCE_VIDEOS': False, # If set to True, new Studio users won't be able to author courses unless # an Open edX admin has added them to the course creator group. 'ENABLE_CREATOR_GROUP': True, # Turn off account locking if failed login attempts exceeds a limit 'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False, # Allow editing of short description in course settings in cms 'EDITABLE_SHORT_DESCRIPTION': True, # Hide any Personally Identifiable Information from application logs 'SQUELCH_PII_IN_LOGS': False, # Toggles the embargo functionality, which blocks users # based on their location. 'EMBARGO': False, # Turn on/off Microsites feature 'USE_MICROSITES': False, # Allow creating courses with non-ascii characters in the course id 'ALLOW_UNICODE_COURSE_ID': False, # Prevent concurrent logins per user 'PREVENT_CONCURRENT_LOGINS': False, # Turn off Video Upload Pipeline through Studio, by default 'ENABLE_VIDEO_UPLOAD_PIPELINE': False, # let students save and manage their annotations # for consistency in user-experience, keep the value of this feature flag # in sync with the one in lms/envs/common.py 'ENABLE_EDXNOTES': False, # Show a new field in "Advanced settings" that can store custom data about a # course and that can be read from themes 'ENABLE_OTHER_COURSE_SETTINGS': False, # Enable support for content libraries. Note that content libraries are # only supported in courses using split mongo. 
'ENABLE_CONTENT_LIBRARIES': True, # Milestones application flag 'MILESTONES_APP': False, # Prerequisite courses feature flag 'ENABLE_PREREQUISITE_COURSES': False, # Toggle course entrance exams feature 'ENTRANCE_EXAMS': False, # Toggle platform-wide course licensing 'LICENSING': False, # Enable the courseware search functionality 'ENABLE_COURSEWARE_INDEX': False, # Enable content libraries search functionality 'ENABLE_LIBRARY_INDEX': False, # Enable course reruns, which will always use the split modulestore 'ALLOW_COURSE_RERUNS': True, # Certificates Web/HTML Views 'CERTIFICATES_HTML_VIEW': False, # Teams feature 'ENABLE_TEAMS': True, # Show video bumper in Studio 'ENABLE_VIDEO_BUMPER': False, # Show issue open badges in Studio 'ENABLE_OPENBADGES': False, # How many seconds to show the bumper again, default is 7 days: 'SHOW_BUMPER_PERIODICITY': 7 * 24 * 3600, # Enable credit eligibility feature 'ENABLE_CREDIT_ELIGIBILITY': ENABLE_CREDIT_ELIGIBILITY, # Special Exams, aka Timed and Proctored Exams 'ENABLE_SPECIAL_EXAMS': False, 'ORGANIZATIONS_APP': False, # Show the language selector in the header 'SHOW_HEADER_LANGUAGE_SELECTOR': False, # At edX it's safe to assume that English transcripts are always available # This is not the case for all installations. # The default value in {lms,cms}/envs/common.py and xmodule/tests/test_video.py should be consistent. 'FALLBACK_TO_ENGLISH_TRANSCRIPTS': True, # Set this to False to facilitate cleaning up invalid xml from your modulestore. 'ENABLE_XBLOCK_XML_VALIDATION': True, # Allow public account creation 'ALLOW_PUBLIC_ACCOUNT_CREATION': True, # Whether or not the dynamic EnrollmentTrackUserPartition should be registered. 'ENABLE_ENROLLMENT_TRACK_USER_PARTITION': True, # Whether to send an email for failed password reset attempts or not. This is mainly useful for notifying users # that they don't have an account associated with email addresses they believe they've registered with. 
'ENABLE_PASSWORD_RESET_FAILURE_EMAIL': False, # Whether archived courses (courses with end dates in the past) should be # shown in Studio in a separate list. 'ENABLE_SEPARATE_ARCHIVED_COURSES': True, # For acceptance and load testing 'AUTOMATIC_AUTH_FOR_TESTING': False, # Prevent auto auth from creating superusers or modifying existing users 'RESTRICT_AUTOMATIC_AUTH': True, # Set this to true to make API docs available at /api-docs/. 'ENABLE_API_DOCS': False, } ENABLE_JASMINE = False ############################# SOCIAL MEDIA SHARING ############################# SOCIAL_SHARING_SETTINGS = { # Note: Ensure 'CUSTOM_COURSE_URLS' has a matching value in lms/envs/common.py 'CUSTOM_COURSE_URLS': False } ############################# SET PATH INFORMATION ############################# PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms REPO_ROOT = PROJECT_ROOT.dirname() COMMON_ROOT = REPO_ROOT / "common" OPENEDX_ROOT = REPO_ROOT / "openedx" CMS_ROOT = REPO_ROOT / "cms" LMS_ROOT = REPO_ROOT / "lms" ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in GITHUB_REPO_ROOT = ENV_ROOT / "data" sys.path.append(REPO_ROOT) sys.path.append(PROJECT_ROOT / 'djangoapps') sys.path.append(COMMON_ROOT / 'djangoapps') # For geolocation ip database GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat" GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat" ############################# TEMPLATE CONFIGURATION ############################# # Mako templating import tempfile MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_cms') MAKO_TEMPLATE_DIRS_BASE = [ PROJECT_ROOT / 'templates', COMMON_ROOT / 'templates', COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates', COMMON_ROOT / 'static', # required to statically include common Underscore templates OPENEDX_ROOT / 'core' / 'djangoapps' / 'cors_csrf' / 'templates', OPENEDX_ROOT / 'core' / 'djangoapps' / 'dark_lang' / 'templates', OPENEDX_ROOT / 'core' / 'lib' / 'license' / 
'templates', CMS_ROOT / 'djangoapps' / 'pipeline_js' / 'templates', ] CONTEXT_PROCESSORS = ( 'django.template.context_processors.request', 'django.template.context_processors.static', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.i18n', 'django.contrib.auth.context_processors.auth', # this is required for admin 'django.template.context_processors.csrf', 'help_tokens.context_processor', 'openedx.core.djangoapps.site_configuration.context_processors.configuration_context', ) # Django templating TEMPLATES = [ { 'NAME': 'django', 'BACKEND': 'django.template.backends.django.DjangoTemplates', # Don't look for template source files inside installed applications. 'APP_DIRS': False, # Instead, look for template source files in these dirs. 'DIRS': _make_mako_template_dirs, # Options specific to this backend. 'OPTIONS': { 'loaders': ( # We have to use mako-aware template loaders to be able to include # mako templates inside django templates (such as main_django.html). 'openedx.core.djangoapps.theming.template_loaders.ThemeTemplateLoader', 'edxmako.makoloader.MakoFilesystemLoader', 'edxmako.makoloader.MakoAppDirectoriesLoader', ), 'context_processors': CONTEXT_PROCESSORS, # Change 'debug' in your environment settings files - not here. 
'debug': False } }, { 'NAME': 'mako', 'BACKEND': 'edxmako.backend.Mako', 'APP_DIRS': False, 'DIRS': _make_mako_template_dirs, 'OPTIONS': { 'context_processors': CONTEXT_PROCESSORS, 'debug': False, } }, { # This separate copy of the Mako backend is used to render previews using the LMS templates 'NAME': 'preview', 'BACKEND': 'edxmako.backend.Mako', 'APP_DIRS': False, 'DIRS': lms.envs.common.MAKO_TEMPLATE_DIRS_BASE, 'OPTIONS': { 'context_processors': CONTEXT_PROCESSORS, 'debug': False, 'namespace': 'lms.main', } }, ] derived_collection_entry('TEMPLATES', 0, 'DIRS') derived_collection_entry('TEMPLATES', 1, 'DIRS') DEFAULT_TEMPLATE_ENGINE = TEMPLATES[0] ############################################################################## EDX_ROOT_URL = '' LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/home/' LOGIN_URL = reverse_lazy('login_redirect_to_lms') # use the ratelimit backend to prevent brute force attacks AUTHENTICATION_BACKENDS = [ 'rules.permissions.ObjectPermissionBackend', 'ratelimitbackend.backends.RateLimitModelBackend', ] LMS_BASE = None LMS_ROOT_URL = "http://localhost:8000" LMS_INTERNAL_ROOT_URL = LMS_ROOT_URL LMS_ENROLLMENT_API_PATH = "/api/enrollment/v1/" ENTERPRISE_API_URL = LMS_INTERNAL_ROOT_URL + '/enterprise/api/v1/' ENTERPRISE_CONSENT_API_URL = LMS_INTERNAL_ROOT_URL + '/consent/api/v1/' # These are standard regexes for pulling out info like course_ids, usage_ids, etc. # They are used so that URLs with deprecated-format strings still work. 
from lms.envs.common import ( COURSE_KEY_PATTERN, COURSE_KEY_REGEX, COURSE_ID_PATTERN, USAGE_KEY_PATTERN, ASSET_KEY_PATTERN ) ######################### CSRF ######################################### # Forwards-compatibility with Django 1.7 CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52 # It is highly recommended that you override this in any environment accessed by # end users CSRF_COOKIE_SECURE = False #################### CAPA External Code Evaluation ############################# XQUEUE_INTERFACE = { 'url': 'http://localhost:8888', 'django_auth': {'username': 'local', 'password': 'local'}, 'basic_auth': None, } ################################# Middleware ################################### MIDDLEWARE_CLASSES = [ 'openedx.core.lib.x_forwarded_for.middleware.XForwardedForMiddleware', 'crum.CurrentRequestUserMiddleware', # A newer and safer request cache. 'edx_django_utils.cache.middleware.RequestCacheMiddleware', 'edx_django_utils.monitoring.middleware.MonitoringMemoryMiddleware', 'openedx.core.djangoapps.header_control.middleware.HeaderControlMiddleware', 'django.middleware.cache.UpdateCacheMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.sites.middleware.CurrentSiteMiddleware', 'edx_rest_framework_extensions.auth.jwt.middleware.JwtAuthCookieMiddleware', # Allows us to define redirects via Django admin 'django_sites_extensions.middleware.RedirectMiddleware', # Instead of SessionMiddleware, we use a more secure version # 'django.contrib.sessions.middleware.SessionMiddleware', 'openedx.core.djangoapps.safe_sessions.middleware.SafeSessionMiddleware', 'method_override.middleware.MethodOverrideMiddleware', # Instead of AuthenticationMiddleware, we use a cache-backed version 'openedx.core.djangoapps.cache_toolbox.middleware.CacheBackedAuthenticationMiddleware', 'student.middleware.UserStandingMiddleware', 'openedx.core.djangoapps.contentserver.middleware.StaticContentServer', 
'django.contrib.messages.middleware.MessageMiddleware', 'track.middleware.TrackMiddleware', # This is used to set or update the user language preferences. 'openedx.core.djangoapps.lang_pref.middleware.LanguagePreferenceMiddleware', # Allows us to dark-launch particular languages 'openedx.core.djangoapps.dark_lang.middleware.DarkLangMiddleware', 'openedx.core.djangoapps.embargo.middleware.EmbargoMiddleware', # Detects user-requested locale from 'accept-language' header in http request 'django.middleware.locale.LocaleMiddleware', 'codejail.django_integration.ConfigureCodeJailMiddleware', # catches any uncaught RateLimitExceptions and returns a 403 instead of a 500 'ratelimitbackend.middleware.RateLimitMiddleware', # for expiring inactive sessions 'openedx.core.djangoapps.session_inactivity_timeout.middleware.SessionInactivityTimeout', 'openedx.core.djangoapps.theming.middleware.CurrentSiteThemeMiddleware', # use Django built in clickjacking protection 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'waffle.middleware.WaffleMiddleware', # Enables force_django_cache_miss functionality for TieredCache. 'edx_django_utils.cache.middleware.TieredCacheMiddleware', # Outputs monitoring metrics for a request. 
'edx_rest_framework_extensions.middleware.RequestMetricsMiddleware', 'edx_rest_framework_extensions.auth.jwt.middleware.EnsureJWTAuthSettingsMiddleware', # This must be last so that it runs first in the process_response chain 'openedx.core.djangoapps.site_configuration.middleware.SessionCookieDomainOverrideMiddleware', ] # Clickjacking protection can be disabled by setting this to 'ALLOW' X_FRAME_OPTIONS = 'DENY' # Platform for Privacy Preferences header P3P_HEADER = 'CP="Open EdX does not have a P3P policy."' ############# XBlock Configuration ########## # Import after sys.path fixup from xmodule.modulestore.inheritance import InheritanceMixin from xmodule.modulestore import prefer_xmodules from xmodule.x_module import XModuleMixin # These are the Mixins that should be added to every XBlock. # This should be moved into an XBlock Runtime/Application object # once the responsibility of XBlock creation is moved out of modulestore - cpennington XBLOCK_MIXINS = ( LmsBlockMixin, InheritanceMixin, XModuleMixin, EditInfoMixin, AuthoringMixin, ) XBLOCK_SELECT_FUNCTION = prefer_xmodules # Paths to wrapper methods which should be applied to every XBlock's FieldData. 
XBLOCK_FIELD_DATA_WRAPPERS = () ############################ Modulestore Configuration ################################ MODULESTORE_BRANCH = 'draft-preferred' MODULESTORE = { 'default': { 'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore', 'OPTIONS': { 'mappings': {}, 'stores': [ { 'NAME': 'split', 'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore', 'DOC_STORE_CONFIG': DOC_STORE_CONFIG, 'OPTIONS': { 'default_class': 'xmodule.hidden_module.HiddenDescriptor', 'fs_root': DATA_DIR, 'render_template': 'edxmako.shortcuts.render_to_string', } }, { 'NAME': 'draft', 'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore', 'DOC_STORE_CONFIG': DOC_STORE_CONFIG, 'OPTIONS': { 'default_class': 'xmodule.hidden_module.HiddenDescriptor', 'fs_root': DATA_DIR, 'render_template': 'edxmako.shortcuts.render_to_string', } } ] } } } # Modulestore-level field override providers. These field override providers don't # require student context. MODULESTORE_FIELD_OVERRIDE_PROVIDERS = () #################### Python sandbox ############################################ CODE_JAIL = { # Path to a sandboxed Python executable. None means don't bother. 'python_bin': None, # User to run as in the sandbox. 'user': 'sandbox', # Configurable limits. 'limits': { # How many CPU seconds can jailed code use? 
'CPU': 1, }, } ############################ DJANGO_BUILTINS ################################ # Change DEBUG in your environment settings files, not here DEBUG = False SESSION_COOKIE_SECURE = False SESSION_SAVE_EVERY_REQUEST = False SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer' # Site info SITE_NAME = "localhost:8001" HTTPS = 'on' ROOT_URLCONF = 'cms.urls' # Email EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' EMAIL_HOST = 'localhost' EMAIL_PORT = 25 EMAIL_USE_TLS = False EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = '' DEFAULT_FROM_EMAIL = 'registration@example.com' DEFAULT_FEEDBACK_EMAIL = 'feedback@example.com' SERVER_EMAIL = 'devops@example.com' ADMINS = [] MANAGERS = ADMINS # Initialize to 'unknown', but read from JSON in aws.py EDX_PLATFORM_REVISION = 'unknown' # Static content STATIC_URL = '/static/studio/' STATIC_ROOT = ENV_ROOT / "staticfiles" / 'studio' STATICFILES_DIRS = [ COMMON_ROOT / "static", PROJECT_ROOT / "static", # This is how you would use the textbook images locally # ("book", ENV_ROOT / "book_images"), ] # Locale/Internationalization CELERY_TIMEZONE = 'UTC' TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGES_BIDI = lms.envs.common.LANGUAGES_BIDI LANGUAGE_COOKIE = lms.envs.common.LANGUAGE_COOKIE LANGUAGES = lms.envs.common.LANGUAGES LANGUAGE_DICT = dict(LANGUAGES) USE_I18N = True USE_L10N = True STATICI18N_ROOT = PROJECT_ROOT / "static" # Localization strings (e.g. 
django.po) are under these directories LOCALE_PATHS = _make_locale_paths derived('LOCALE_PATHS') # Messages MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' COURSE_IMPORT_EXPORT_STORAGE = 'django.core.files.storage.FileSystemStorage' ##### EMBARGO ##### EMBARGO_SITE_REDIRECT_URL = None ############################### PIPELINE ####################################### PIPELINE_ENABLED = True STATICFILES_STORAGE = 'openedx.core.storage.ProductionStorage' # List of finder classes that know how to find static files in various locations. # Note: the pipeline finder is included to be able to discover optimized files STATICFILES_FINDERS = [ 'openedx.core.djangoapps.theming.finders.ThemeFilesFinder', 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'openedx.core.lib.xblock_pipeline.finder.XBlockPipelineFinder', 'pipeline.finders.PipelineFinder', ] # Don't use compression by default PIPELINE_CSS_COMPRESSOR = None PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.uglifyjs.UglifyJSCompressor' # Don't wrap JavaScript as there is code that depends upon updating the global namespace PIPELINE_DISABLE_WRAPPER = True # Specify the UglifyJS binary to use PIPELINE_UGLIFYJS_BINARY = 'node_modules/.bin/uglifyjs' from openedx.core.lib.rooted_paths import rooted_glob PIPELINE_CSS = { 'style-vendor': { 'source_filenames': [ 'css/vendor/normalize.css', 'css/vendor/font-awesome.css', 'css/vendor/html5-input-polyfills/number-polyfill.css', 'js/vendor/CodeMirror/codemirror.css', 'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css', 'css/vendor/jquery.qtip.min.css', 'js/vendor/markitup/skins/simple/style.css', 'js/vendor/markitup/sets/wiki/style.css', ], 'output_filename': 'css/cms-style-vendor.css', }, 'style-vendor-tinymce-content': { 'source_filenames': [ 'css/tinymce-studio-content-fonts.css', 'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css', 'css/tinymce-studio-content.css' ], 
'output_filename': 'css/cms-style-vendor-tinymce-content.css', }, 'style-vendor-tinymce-skin': { 'source_filenames': [ 'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css' ], 'output_filename': 'css/cms-style-vendor-tinymce-skin.css', }, 'style-main-v1': { 'source_filenames': [ 'css/studio-main-v1.css', ], 'output_filename': 'css/studio-main-v1.css', }, 'style-main-v1-rtl': { 'source_filenames': [ 'css/studio-main-v1-rtl.css', ], 'output_filename': 'css/studio-main-v1-rtl.css', }, 'style-main-v2': { 'source_filenames': [ 'css/studio-main-v2.css', ], 'output_filename': 'css/studio-main-v2.css', }, 'style-main-v2-rtl': { 'source_filenames': [ 'css/studio-main-v2-rtl.css', ], 'output_filename': 'css/studio-main-v2-rtl.css', }, 'style-xmodule-annotations': { 'source_filenames': [ 'css/vendor/ova/annotator.css', 'css/vendor/ova/edx-annotator.css', 'css/vendor/ova/video-js.min.css', 'css/vendor/ova/rangeslider.css', 'css/vendor/ova/share-annotator.css', 'css/vendor/ova/richText-annotator.css', 'css/vendor/ova/tags-annotator.css', 'css/vendor/ova/flagging-annotator.css', 'css/vendor/ova/diacritic-annotator.css', 'css/vendor/ova/grouping-annotator.css', 'css/vendor/ova/ova.css', 'js/vendor/ova/catch/css/main.css' ], 'output_filename': 'css/cms-style-xmodule-annotations.css', }, } base_vendor_js = [ 'js/src/utility.js', 'js/src/logger.js', 'common/js/vendor/jquery.js', 'common/js/vendor/jquery-migrate.js', 'js/vendor/jquery.cookie.js', 'js/vendor/url.min.js', 'common/js/vendor/underscore.js', 'common/js/vendor/underscore.string.js', 'common/js/vendor/backbone.js', 'js/vendor/URI.min.js', # Make some edX UI Toolkit utilities available in the global "edx" namespace 'edx-ui-toolkit/js/utils/global-loader.js', 'edx-ui-toolkit/js/utils/string-utils.js', 'edx-ui-toolkit/js/utils/html-utils.js', # Load Bootstrap and supporting libraries 'common/js/vendor/popper.js', 'common/js/vendor/bootstrap.js', # Finally load RequireJS 'common/js/vendor/require.js' ] # test_order: 
Determines the position of this chunk of javascript on # the jasmine test page PIPELINE_JS = { 'base_vendor': { 'source_filenames': base_vendor_js, 'output_filename': 'js/cms-base-vendor.js', }, 'module-js': { 'source_filenames': ( rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') + rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') + rooted_glob(COMMON_ROOT / 'static/', 'common/js/discussion/*.js') ), 'output_filename': 'js/cms-modules.js', 'test_order': 1 }, } PIPELINE_COMPILERS = () PIPELINE_CSS_COMPRESSOR = None PIPELINE_JS_COMPRESSOR = None STATICFILES_IGNORE_PATTERNS = ( "*.py", "*.pyc", # It would be nice if we could do, for example, "**/*.scss", # but these strings get passed down to the `fnmatch` module, # which doesn't support that. :( # http://docs.python.org/2/library/fnmatch.html "sass/*.scss", "sass/*/*.scss", "sass/*/*/*.scss", "sass/*/*/*/*.scss", # Ignore tests "spec", "spec_helpers", # Symlinks used by js-test-tool "xmodule_js", "common_static", ) PIPELINE_YUI_BINARY = 'yui-compressor' ################################# DJANGO-REQUIRE ############################### # The baseUrl to pass to the r.js optimizer, relative to STATIC_ROOT. REQUIRE_BASE_URL = "./" # The name of a build profile to use for your project, relative to REQUIRE_BASE_URL. # A sensible value would be 'app.build.js'. Leave blank to use the built-in default build profile. # Set to False to disable running the default profile (e.g. if only using it to build Standalone # Modules) REQUIRE_BUILD_PROFILE = "cms/js/build.js" # The name of the require.js script used by your project, relative to REQUIRE_BASE_URL. REQUIRE_JS = "js/vendor/requiresjs/require.js" # Whether to run django-require in debug mode. 
REQUIRE_DEBUG = False ########################## DJANGO WEBPACK LOADER ############################## WEBPACK_LOADER = { 'DEFAULT': { 'BUNDLE_DIR_NAME': 'bundles/', 'STATS_FILE': os.path.join(STATIC_ROOT, 'webpack-stats.json') }, 'WORKERS': { 'BUNDLE_DIR_NAME': 'bundles/', 'STATS_FILE': os.path.join(STATIC_ROOT, 'webpack-worker-stats.json') } } WEBPACK_CONFIG_PATH = 'webpack.prod.config.js' ################################# CELERY ###################################### # Auto discover tasks fails to detect contentstore tasks CELERY_IMPORTS = ('cms.djangoapps.contentstore.tasks') # Message configuration CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_MESSAGE_COMPRESSION = 'gzip' # Results configuration CELERY_IGNORE_RESULT = False CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True # Events configuration CELERY_TRACK_STARTED = True CELERY_SEND_EVENTS = True CELERY_SEND_TASK_SENT_EVENT = True # Exchange configuration CELERY_DEFAULT_EXCHANGE = 'edx.core' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' # Queues configuration HIGH_PRIORITY_QUEUE = 'edx.core.high' DEFAULT_PRIORITY_QUEUE = 'edx.core.default' CELERY_QUEUE_HA_POLICY = 'all' CELERY_CREATE_MISSING_QUEUES = True CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE CELERY_QUEUES = { HIGH_PRIORITY_QUEUE: {}, DEFAULT_PRIORITY_QUEUE: {} } ############################## Video ########################################## YOUTUBE = { # YouTube JavaScript API 'API': 'https://www.youtube.com/iframe_api', 'TEST_TIMEOUT': 1500, # URL to get YouTube metadata 'METADATA_URL': 'https://www.googleapis.com/youtube/v3/videos', # Current youtube api for requesting transcripts. # For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g. 
'TEXT_API': { 'url': 'video.google.com/timedtext', 'params': { 'lang': 'en', 'v': 'set_youtube_id_of_11_symbols_here', }, }, 'IMAGE_API': 'http://img.youtube.com/vi/{youtube_id}/0.jpg', # /maxresdefault.jpg for 1920*1080 } ############################# VIDEO UPLOAD PIPELINE ############################# VIDEO_UPLOAD_PIPELINE = { 'BUCKET': '', 'ROOT_PATH': '', 'CONCURRENT_UPLOAD_LIMIT': 4, } ############################ APPS ##################################### # The order of INSTALLED_APPS is important, when adding new apps here # remember to check that you are not creating new # RemovedInDjango19Warnings in the test logs. INSTALLED_APPS = [ # Standard apps 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.humanize', 'django.contrib.redirects', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'djcelery', 'method_override', # Common Initialization 'openedx.core.djangoapps.common_initialization.apps.CommonInitializationConfig', # Common views 'openedx.core.djangoapps.common_views', # API access administration 'openedx.core.djangoapps.api_admin', # History tables 'simple_history', # Database-backed configuration 'config_models', 'openedx.core.djangoapps.config_model_utils', 'waffle', # Monitor the status of services 'openedx.core.djangoapps.service_status', # Video module configs (This will be moved to Video once it becomes an XBlock) 'openedx.core.djangoapps.video_config', # edX Video Pipeline integration 'openedx.core.djangoapps.video_pipeline', # For CMS 'contentstore.apps.ContentstoreConfig', 'openedx.core.djangoapps.contentserver', 'course_creators', 'openedx.core.djangoapps.external_auth', 'student.apps.StudentConfig', # misleading name due to sharing with lms 'openedx.core.djangoapps.course_groups', # not used in cms (yet), but tests run 'xblock_config.apps.XBlockConfig', # Maintenance tools 'maintenance', 'openedx.core.djangoapps.util.apps.UtilConfig', # Tracking 'track', 
'eventtracking.django.apps.EventTrackingConfig', # For asset pipelining 'edxmako.apps.EdxMakoConfig', 'pipeline', 'static_replace', 'require', 'webpack_loader', # Site configuration for theming and behavioral modification 'openedx.core.djangoapps.site_configuration', # Ability to detect and special-case crawler behavior 'openedx.core.djangoapps.crawlers', # comment common 'django_comment_common', # for course creator table 'django.contrib.admin', # for managing course modes 'course_modes.apps.CourseModesConfig', # Verified Track Content Cohorting (Beta feature that will hopefully be removed) 'openedx.core.djangoapps.verified_track_content', # Dark-launching languages 'openedx.core.djangoapps.dark_lang', # # User preferences 'wiki', 'django_notify', 'course_wiki', # Our customizations 'mptt', 'sekizai', 'openedx.core.djangoapps.user_api', 'django_openid_auth', # Country embargo support 'openedx.core.djangoapps.embargo', # Course action state 'course_action_state', # Additional problem types 'edx_jsme', # Molecular Structure 'openedx.core.djangoapps.content.course_overviews.apps.CourseOverviewsConfig', 'openedx.core.djangoapps.content.block_structure.apps.BlockStructureConfig', # edx-milestones service 'milestones', # Self-paced course configuration 'openedx.core.djangoapps.self_paced', # Coursegraph 'openedx.core.djangoapps.coursegraph.apps.CoursegraphConfig', # Credit courses 'openedx.core.djangoapps.credit.apps.CreditConfig', 'xblock_django', # Catalog integration 'openedx.core.djangoapps.catalog', # django-oauth2-provider (deprecated) 'provider', 'provider.oauth2', 'edx_oauth2_provider', # django-oauth-toolkit 'oauth2_provider', # These are apps that aren't strictly needed by Studio, but are imported by # other apps that are. Django 1.8 wants to have imported models supported # by installed apps. 
'openedx.core.djangoapps.oauth_dispatch.apps.OAuthDispatchAppConfig', 'oauth_provider', 'courseware', 'survey.apps.SurveyConfig', 'lms.djangoapps.verify_student.apps.VerifyStudentConfig', 'completion', # Microsite configuration application 'microsite_configuration', # Static i18n support 'statici18n', # Tagging 'cms.lib.xblock.tagging', # Enables default site and redirects 'django_sites_extensions', # additional release utilities to ease automation 'release_util', # rule-based authorization 'rules.apps.AutodiscoverRulesConfig', # management of user-triggered async tasks (course import/export, etc.) 'user_tasks', # CMS specific user task handling 'cms_user_tasks.apps.CmsUserTasksConfig', # Unusual migrations 'database_fixups', # Customized celery tasks, including persisting failed tasks so they can # be retried 'celery_utils', # Waffle related utilities 'openedx.core.djangoapps.waffle_utils', # DRF filters 'django_filters', 'cms.djangoapps.api', # Entitlements, used in openedx tests 'entitlements', # Asset management for mako templates 'pipeline_mako', # API Documentation 'rest_framework_swagger', 'openedx.features.course_duration_limits', 'openedx.features.content_type_gating', 'experiments', ] ################# EDX MARKETING SITE ################################## EDXMKTG_LOGGED_IN_COOKIE_NAME = 'edxloggedin' EDXMKTG_USER_INFO_COOKIE_NAME = 'edx-user-info' EDXMKTG_USER_INFO_COOKIE_VERSION = 1 MKTG_URLS = {} MKTG_URL_LINK_MAP = { } COURSES_WITH_UNSAFE_CODE = [] ############################## EVENT TRACKING ################################# TRACK_MAX_EVENT = 50000 TRACKING_BACKENDS = { 'logger': { 'ENGINE': 'track.backends.logger.LoggerBackend', 'OPTIONS': { 'name': 'tracking' } } } # We're already logging events, and we don't want to capture user # names/passwords. Heartbeat events are likely not interesting. 
# URL patterns whose requests are never written to the tracking log
# (/login is excluded so user names/passwords are not captured; /event and
# /heartbeat are high-volume noise).
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
EVENT_TRACKING_ENABLED = True
# eventtracking configuration: each top-level entry wires a RoutingBackend to
# nested backends plus a processor pipeline that filters/reshapes events.
EVENT_TRACKING_BACKENDS = {
    'tracking_logs': {
        'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
        'OPTIONS': {
            'backends': {
                'logger': {
                    'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
                    'OPTIONS': {
                        'name': 'tracking',
                        # Events larger than this are subject to the logger
                        # backend's size limit (see TRACK_MAX_EVENT above).
                        'max_event_size': TRACK_MAX_EVENT,
                    }
                }
            },
            'processors': [
                {'ENGINE': 'track.shim.LegacyFieldMappingProcessor'},
                {'ENGINE': 'track.shim.PrefixedEventProcessor'}
            ]
        }
    },
    'segmentio': {
        'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
        'OPTIONS': {
            'backends': {
                'segment': {'ENGINE': 'eventtracking.backends.segment.SegmentBackend'}
            },
            'processors': [
                {
                    'ENGINE': 'eventtracking.processors.whitelist.NameWhitelistProcessor',
                    'OPTIONS': {
                        # NOTE(review): empty whitelist here — presumably no
                        # event names reach Segment until a deployment fills
                        # this in; confirm against NameWhitelistProcessor docs.
                        'whitelist': []
                    }
                },
                {
                    'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
                }
            ]
        }
    }
}
EVENT_TRACKING_PROCESSORS = []

#### PASSWORD POLICY SETTINGS #####
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "util.password_policy_validators.MinimumLengthValidator",
        "OPTIONS": {
            # NOTE(review): min_length of 2 is extremely permissive for a base
            # default; environment-specific settings presumably tighten this —
            # confirm before relying on it.
            "min_length": 2
        }
    },
    {
        "NAME": "util.password_policy_validators.MaximumLengthValidator",
        "OPTIONS": {
            "max_length": 75
        }
    },
]

##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
# Account-lockout defaults: number of allowed failed login attempts and the
# lockout duration in seconds (15 minutes).
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60

### Apps only installed in some instances
# The order of INSTALLED_APPS matters, so this tuple is the app name and the item in INSTALLED_APPS
# that this app should be inserted *before*. A None here means it should be appended to the list.
# (app_name, insert_before) pairs: each optional app is inserted before the
# named INSTALLED_APPS entry, or appended when insert_before is None.
OPTIONAL_APPS = (
    ('problem_builder', 'openedx.core.djangoapps.content.course_overviews.apps.CourseOverviewsConfig'),
    ('edx_sga', None),

    # edx-ora2
    ('submissions', 'openedx.core.djangoapps.content.course_overviews.apps.CourseOverviewsConfig'),
    ('openassessment', 'openedx.core.djangoapps.content.course_overviews.apps.CourseOverviewsConfig'),
    ('openassessment.assessment', 'openedx.core.djangoapps.content.course_overviews.apps.CourseOverviewsConfig'),
    ('openassessment.fileupload', 'openedx.core.djangoapps.content.course_overviews.apps.CourseOverviewsConfig'),
    ('openassessment.workflow', 'openedx.core.djangoapps.content.course_overviews.apps.CourseOverviewsConfig'),
    ('openassessment.xblock', 'openedx.core.djangoapps.content.course_overviews.apps.CourseOverviewsConfig'),

    # edxval
    ('edxval', 'openedx.core.djangoapps.content.course_overviews.apps.CourseOverviewsConfig'),

    # Organizations App (http://github.com/edx/edx-organizations)
    ('organizations', None),

    # Enterprise App (http://github.com/edx/edx-enterprise)
    ('enterprise', None),
    ('consent', None),
    ('integrated_channels.integrated_channel', None),
    ('integrated_channels.degreed', None),
    ('integrated_channels.sap_success_factors', None),
    ('integrated_channels.xapi', None),
)

for app_name, insert_before in OPTIONAL_APPS:
    # First attempt to only find the module rather than actually importing it,
    # to avoid circular references - only try to import if it can't be found
    # by find_module, which doesn't work with import hooks
    # NOTE(review): the `imp` module is deprecated in Python 3
    # (importlib.util.find_spec is the modern equivalent) — confirm the
    # targeted Python version before modernizing.
    try:
        imp.find_module(app_name)
    except ImportError:
        try:
            __import__(app_name)
        except ImportError:
            continue

    # When insert_before is None, INSTALLED_APPS.index(None) raises
    # ValueError, which deliberately falls through to the append branch.
    try:
        INSTALLED_APPS.insert(INSTALLED_APPS.index(insert_before), app_name)
    except (IndexError, ValueError):
        INSTALLED_APPS.append(app_name)

### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'

# Set request limits for maximum size of a request body and maximum number of GET/POST parameters. (>=Django 1.10)
# Limits are currently disabled - but can be used for finer-grained denial-of-service protection.
DATA_UPLOAD_MAX_MEMORY_SIZE = None
DATA_UPLOAD_MAX_NUMBER_FIELDS = None

### Size of chunks into which asset uploads will be divided
UPLOAD_CHUNK_SIZE_IN_MB = 10

### Max size of asset uploads to GridFS
MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB = 10

# FAQ url to direct users to if they upload
# a file that exceeds the above size
MAX_ASSET_UPLOAD_FILE_SIZE_URL = ""

### Default value for entrance exam minimum score
ENTRANCE_EXAM_MIN_SCORE_PCT = 50

### Default language for a new course
DEFAULT_COURSE_LANGUAGE = "en"

# Specify XBlocks that should be treated as advanced problems. Each entry is a
# dict:
#     'component': the entry-point name of the XBlock.
#     'boilerplate_name': an optional YAML template to be used.  Specify as
#         None to omit.
#
ADVANCED_PROBLEM_TYPES = [
    {
        'component': 'openassessment',
        'boilerplate_name': None,
    },
    {
        'component': 'drag-and-drop-v2',
        'boilerplate_name': None
    }
]

# Files and Uploads type filter values: maps a UI filter label to the MIME
# types it matches.
FILES_AND_UPLOAD_TYPE_FILTERS = {
    "Images": ['image/png', 'image/jpeg', 'image/jpg', 'image/gif', 'image/tiff', 'image/tif', 'image/x-icon',
               'image/svg+xml', 'image/bmp', 'image/x-ms-bmp', ],
    "Documents": [
        'application/pdf',
        'text/plain',
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
        'application/vnd.openxmlformats-officedocument.presentationml.presentation',
        'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
        'application/vnd.openxmlformats-officedocument.presentationml.template',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
        'application/msword',
        'application/vnd.ms-excel',
        'application/vnd.ms-powerpoint',
        'application/csv',
        'application/vnd.ms-excel.sheet.macroEnabled.12',
        'text/x-tex',
        'application/x-pdf',
        'application/vnd.ms-excel.sheet.macroenabled.12',
        'file/pdf',
        'image/pdf',
        'text/csv',
        'text/pdf',
        'text/x-sh',
        '\"application/pdf\"',
    ],
    "Audio": ['audio/mpeg', 'audio/mp3', 'audio/x-wav', 'audio/ogg', 'audio/wav', 'audio/aac', 'audio/x-m4a',
              'audio/mp4', 'audio/x-ms-wma', ],
    "Code": ['application/json', 'text/html', 'text/javascript', 'application/javascript', 'text/css',
             'text/x-python', 'application/x-java-jnlp-file', 'application/xml', 'application/postscript',
             'application/x-javascript', 'application/java-vm', 'text/x-c++src', 'text/xml', 'text/x-scss',
             'application/x-python-code', 'application/java-archive', 'text/x-python-script',
             'application/x-ruby', 'application/mathematica', 'text/coffeescript', 'text/x-matlab',
             'application/sql', 'text/php', ]
}

# Default to no Search Engine
SEARCH_ENGINE = None
ELASTIC_FIELD_MAPPINGS = {
    "start_date": {
        "type": "date"
    }
}

XBLOCK_SETTINGS = {
    "VideoDescriptor": {
        "licensing_enabled": FEATURES.get("LICENSING", False)
    },
    'VideoModule': {
        'YOUTUBE_API_KEY': YOUTUBE_API_KEY
    }
}

STUDIO_FRONTEND_CONTAINER_URL = None

################################ Settings for Credit Course Requirements ################################

# Initial delay used for retrying tasks.
# Additional retries use longer delays.
# Value is in seconds.
CREDIT_TASK_DEFAULT_RETRY_DELAY = 30

# Maximum number of retries per task for errors that are not related
# to throttling.
CREDIT_TASK_MAX_RETRIES = 5

# Maximum age in seconds of timestamps we will accept
# when a credit provider notifies us that a student has been approved
# or denied for credit.
CREDIT_PROVIDER_TIMESTAMP_EXPIRATION = 15 * 60

################################ Settings for Microsites ################################

### Select an implementation for the microsite backend
# for MICROSITE_BACKEND possible choices are
# 1. microsite_configuration.backends.filebased.FilebasedMicrositeBackend
# 2. microsite_configuration.backends.database.DatabaseMicrositeBackend
MICROSITE_BACKEND = 'microsite_configuration.backends.filebased.FilebasedMicrositeBackend'
# for MICROSITE_TEMPLATE_BACKEND possible choices are
# 1. microsite_configuration.backends.filebased.FilebasedMicrositeTemplateBackend
# 2. microsite_configuration.backends.database.DatabaseMicrositeTemplateBackend
MICROSITE_TEMPLATE_BACKEND = 'microsite_configuration.backends.filebased.FilebasedMicrositeTemplateBackend'
# TTL for microsite database template cache
MICROSITE_DATABASE_TEMPLATE_CACHE_TTL = 5 * 60

############################ Global Database Configuration #####################

DATABASE_ROUTERS = [
    'openedx.core.lib.django_courseware_routers.StudentModuleHistoryExtendedRouter',
]

############################ OAUTH2 Provider ###################################

# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
OAUTH_OIDC_ISSUER = 'https://www.example.com/oauth2'

# 5 minute expiration time for JWT id tokens issued for external API requests.
OAUTH_ID_TOKEN_EXPIRATION = 5 * 60

# Partner support link for CMS footer
PARTNER_SUPPORT_EMAIL = ''

# Affiliate cookie tracking
AFFILIATE_COOKIE_NAME = 'affiliate_id'

############## Settings for Studio Context Sensitive Help ##############

HELP_TOKENS_INI_FILE = REPO_ROOT / "cms" / "envs" / "help_tokens.ini"
# NOTE(review): these lambdas plus the derived() call presumably register the
# settings for lazy evaluation once the final settings module is assembled —
# confirm against the derived-settings utility that defines derived().
HELP_TOKENS_LANGUAGE_CODE = lambda settings: settings.LANGUAGE_CODE
HELP_TOKENS_VERSION = lambda settings: doc_version()
derived('HELP_TOKENS_LANGUAGE_CODE', 'HELP_TOKENS_VERSION')

# Used with Email sending
RETRY_ACTIVATION_EMAIL_MAX_ATTEMPTS = 5
RETRY_ACTIVATION_EMAIL_TIMEOUT = 0.5

############## DJANGO-USER-TASKS ##############

# How long until database records about the outcome of a task and its artifacts get deleted?
USER_TASKS_MAX_AGE = timedelta(days=7) ############## Settings for the Enterprise App ###################### ENTERPRISE_ENROLLMENT_API_URL = LMS_ROOT_URL + LMS_ENROLLMENT_API_PATH ENTERPRISE_SERVICE_WORKER_USERNAME = 'enterprise_worker' ENTERPRISE_API_CACHE_TIMEOUT = 3600 # Value is in seconds # The default value of this needs to be a 16 character string ENTERPRISE_REPORTING_SECRET = '0000000000000000' ENTERPRISE_CUSTOMER_CATALOG_DEFAULT_CONTENT_FILTER = {} ############## Settings for the Discovery App ###################### COURSE_CATALOG_API_URL = None ############################# Persistent Grades #################################### # Queue to use for updating persistent grades RECALCULATE_GRADES_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE # Queue to use for updating grades due to grading policy change POLICY_CHANGE_GRADES_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE # Rate limit for regrading tasks that a grading policy change can kick off POLICY_CHANGE_TASK_RATE_LIMIT = '300/h' ############## Settings for CourseGraph ############################ COURSEGRAPH_JOB_QUEUE = DEFAULT_PRIORITY_QUEUE ########## Settings for video transcript migration tasks ############ VIDEO_TRANSCRIPT_MIGRATIONS_JOB_QUEUE = DEFAULT_PRIORITY_QUEUE ########## Settings youtube thumbnails scraper tasks ############ SCRAPE_YOUTUBE_THUMBNAILS_JOB_QUEUE = DEFAULT_PRIORITY_QUEUE ###################### VIDEO IMAGE STORAGE ###################### VIDEO_IMAGE_DEFAULT_FILENAME = 'images/video-images/default_video_image.png' VIDEO_IMAGE_SUPPORTED_FILE_FORMATS = { '.bmp': 'image/bmp', '.bmp2': 'image/x-ms-bmp', # PIL gives x-ms-bmp format '.gif': 'image/gif', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.png': 'image/png' } VIDEO_IMAGE_MAX_FILE_SIZE_MB = '2 MB' VIDEO_IMAGE_MIN_FILE_SIZE_KB = '2 KB' VIDEO_IMAGE_MAX_WIDTH = 1280 VIDEO_IMAGE_MAX_HEIGHT = 720 VIDEO_IMAGE_MIN_WIDTH = 640 VIDEO_IMAGE_MIN_HEIGHT = 360 VIDEO_IMAGE_ASPECT_RATIO = 16 / 9.0 VIDEO_IMAGE_ASPECT_RATIO_TEXT = '16:9' 
VIDEO_IMAGE_ASPECT_RATIO_ERROR_MARGIN = 0.1 ###################### ZENDESK ###################### ZENDESK_URL = None ZENDESK_USER = None ZENDESK_API_KEY = None ZENDESK_CUSTOM_FIELDS = {} ############## Settings for Completion API ######################### # Once a user has watched this percentage of a video, mark it as complete: # (0.0 = 0%, 1.0 = 100%) COMPLETION_VIDEO_COMPLETE_PERCENTAGE = 0.95 ############## Installed Django Apps ######################### from openedx.core.djangoapps.plugins import plugin_apps, plugin_settings, constants as plugin_constants INSTALLED_APPS.extend(plugin_apps.get_apps(plugin_constants.ProjectType.CMS)) plugin_settings.add_plugins(__name__, plugin_constants.ProjectType.CMS, plugin_constants.SettingsType.COMMON) # Course exports streamed in blocks of this size. 8192 or 8kb is the default # setting for the FileWrapper class used to iterate over the export file data. # See: https://docs.python.org/2/library/wsgiref.html#wsgiref.util.FileWrapper COURSE_EXPORT_DOWNLOAD_CHUNK_SIZE = 8192
unknown
codeparrot/codeparrot-clean
""" Classes to support "biological sequence" files. :Author: Bob Harris (rsharris@bx.psu.edu) """ # DNA reverse complement table DNA_COMP = " - " \ " TVGH CD M KN YSA BWXR tvgh cd m kn ysa bwxr " \ " " \ " " class SeqFile: """ A biological sequence is a sequence of bytes or characters. Usually these represent DNA (A,C,G,T), proteins, or some variation of those. class attributes: file: file object containing the sequence revcomp: whether gets from this sequence should be reverse-complemented False => no reverse complement True => (same as "-5'") "maf" => (same as "-5'") "+5'" => minus strand is from plus strand's 5' end (same as "-3'") "+3'" => minus strand is from plus strand's 3' end (same as "-5'") "-5'" => minus strand is from its 5' end (as per MAF file format) "-3'" => minus strand is from its 3' end (as per genome browser, but with origin-zero) name: usually a species and/or chromosome name (e.g. "mule.chr5"); if the file contains a name, that overrides this one gap: gap character that aligners should use for gaps in this sequence """ def __init__(self, file=None, revcomp=False, name="", gap=None): self.file = file if revcomp: self.revcomp = "-5'" elif revcomp == "+3'": self.revcomp = "-5'" elif revcomp == "+5'": self.revcomp = "-3'" elif revcomp == "maf": self.revcomp = "-5'" else: self.revcomp = revcomp self.name = name if gap is None: self.gap = "-" else: self.gap = gap self.text = None # (subclasses fill in text and self.length = 0 # length or they most override get()) def close(self): assert (self.file is not None) self.file.close() self.file = None def extract_name(self, line): try: return line.split()[0] except Exception: return "" def set_text(self, text): self.text = text self.length = len(text) def __str__(self): text = "" if self.name is not None: text += self.name + " " text += self.get(0, self.length) return text def get(self, start, length): """ Fetch subsequence starting at position `start` with length `length`. 
This method is picky about parameters, the requested interval must have non-negative length and fit entirely inside the NIB sequence, the returned string will contain exactly 'length' characters, or an AssertionError will be generated. """ # Check parameters assert length >= 0, "Length must be non-negative (got %d)" % length assert start >= 0, "Start must be greater than 0 (got %d)" % start assert start + length <= self.length, \ "Interval beyond end of sequence ({}..{} > {})".format(start, start + length, self.length) # Fetch sequence and reverse complement if necesary if not self.revcomp: return self.raw_fetch(start, length) if self.revcomp == "-3'": return self.reverse_complement(self.raw_fetch(start, length)) assert self.revcomp == "-5'", "unrecognized reverse complement scheme" start = self.length - (start+length) return self.reverse_complement(self.raw_fetch(start, length)) def raw_fetch(self, start, length): return self.text[start:start+length] def reverse_complement(self, text): comp = [ch for ch in text.translate(DNA_COMP)] comp.reverse() return "".join(comp) class SeqReader: """Iterate over all sequences in a file in order""" def __init__(self, file, revcomp=False, name="", gap=None): self.file = file self.revcomp = revcomp self.name = name self.gap = gap self.seqs_read = 0 def close(self): self.file.close() def __iter__(self): return SeqReaderIter(self) def __next__(self): # subclasses should override this method and return the return # .. next sequence (of type SeqFile or a subclass) read from self.file class SeqReaderIter: def __init__(self, reader): self.reader = reader def __iter__(self): return self def __next__(self): v = next(self.reader) if not v: raise StopIteration return v
unknown
codeparrot/codeparrot-clean