code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v7.timepicker.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana",
"version": "v0",
"datasource": {
"name": "-- Grafana --"
},
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {
"expr": "up"
}
},
"refId": "A",
"hidden": false
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "prometheus",
"version": "v0",
"datasource": {
"name": "default-ds-uid"
},
"spec": {
"expr": "cpu_usage"
}
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "No Title",
"variables": []
},
"status": {}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/input/migrated_dashboards_from_v0_to_v2/v2beta1.v7.timepicker.json |
# frozen_string_literal: true
require "cases/helper"
require "models/default"
require "support/schema_dumping_helper"
require "active_support/core_ext/object/with"
# Test-helper mixin for temporarily overriding PostgreSQL-specific
# connection settings around a block of test code.
module PGSchemaHelper
  # Switches the connection's schema_search_path to +path+ for the duration
  # of the given block, then restores the default ('$user', public) path and
  # clears the cached schema either way.
  def with_schema_search_path(path)
    @connection.schema_search_path = path
    @connection.schema_cache.clear!
    yield if block_given?
  ensure
    @connection.schema_search_path = "'$user', public"
    @connection.schema_cache.clear!
  end

  # Runs the block with ActiveRecord's dump_schemas setting overridden to
  # +setting+; the previous value is restored by ActiveRecord.with.
  def with_dump_schemas(setting, &block)
    ActiveRecord.with(dump_schemas: setting, &block)
  end
end
# Exercises PostgreSQL schema (namespace) support in the adapter: creating,
# renaming and dropping schemas; resolving tables, indexes, primary keys and
# sequences through schema-qualified names or the schema_search_path; and
# schema dumping. Issues real DDL, so transactional test wrapping is off.
class SchemaTest < ActiveRecord::PostgreSQLTestCase
  include PGSchemaHelper
  include SchemaDumpingHelper
  self.use_transactional_tests = false

  SCHEMA_NAME = "test_schema"
  SCHEMA2_NAME = "test_schema2"
  TABLE_NAME = "things"
  CAPITALIZED_TABLE_NAME = "Things"
  INDEX_A_NAME = "a_index_things_on_name"
  INDEX_B_NAME = "b_index_things_on_different_columns_in_each_schema"
  INDEX_C_NAME = "c_index_full_text_search"
  INDEX_D_NAME = "d_index_things_on_description_desc"
  INDEX_E_NAME = "e_index_things_on_name_vector"
  INDEX_A_COLUMN = "name"
  INDEX_B_COLUMN_S1 = "email"
  INDEX_B_COLUMN_S2 = "moment"
  # Expression index column: a full-text search vector built from things.name.
  INDEX_C_COLUMN = "(to_tsvector('english', coalesce(things.name, '')))"
  INDEX_D_COLUMN = "description"
  INDEX_E_COLUMN = "name_vector"
  COLUMNS = [
    "id integer",
    "name character varying(50)",
    "email character varying(50)",
    "description character varying(100)",
    "name_vector tsvector",
    "moment timestamp without time zone default now()"
  ]
  PK_TABLE_NAME = "table_with_pk"
  UNMATCHED_SEQUENCE_NAME = "unmatched_primary_key_default_value_seq"
  UNMATCHED_PK_TABLE_NAME = "table_with_unmatched_sequence_for_pk"
  PARTITIONED_TABLE = "measurements"
  PARTITIONED_TABLE_INDEX = "index_measurements_on_logdate_and_city_id"

  # Models pinned to variously qualified / quoted table names.
  class Thing1 < ActiveRecord::Base
    self.table_name = "test_schema.things"
  end

  class Thing2 < ActiveRecord::Base
    self.table_name = "test_schema2.things"
  end

  # Table name containing a dot, so it must be quoted inside the schema.
  class Thing3 < ActiveRecord::Base
    self.table_name = 'test_schema."things.table"'
  end

  # Capitalized table name, preserved only when quoted.
  class Thing4 < ActiveRecord::Base
    self.table_name = 'test_schema."Things"'
  end

  # Unqualified name: resolved through the current schema_search_path.
  class Thing5 < ActiveRecord::Base
    self.table_name = "things"
  end

  class Song < ActiveRecord::Base
    self.table_name = "music.songs"
    has_and_belongs_to_many :albums
  end

  class Album < ActiveRecord::Base
    self.table_name = "music.albums"
    has_and_belongs_to_many :songs

    def self.default_scope; where(deleted: false); end
  end

  def setup
    @connection = ActiveRecord::Base.lease_connection
    # CREATE SCHEMA ... CREATE TABLE creates the schema and a table inside it
    # in a single statement.
    @connection.execute "CREATE SCHEMA #{SCHEMA_NAME} CREATE TABLE #{TABLE_NAME} (#{COLUMNS.join(',')})"
    @connection.execute "CREATE TABLE #{SCHEMA_NAME}.\"#{TABLE_NAME}.table\" (#{COLUMNS.join(',')})"
    @connection.execute "CREATE TABLE #{SCHEMA_NAME}.\"#{CAPITALIZED_TABLE_NAME}\" (#{COLUMNS.join(',')})"
    @connection.execute "CREATE SCHEMA #{SCHEMA2_NAME} CREATE TABLE #{TABLE_NAME} (#{COLUMNS.join(',')})"
    # Identically named indexes in both schemas; index B intentionally covers
    # a different column in each schema so lookups can be told apart.
    @connection.execute "CREATE INDEX #{INDEX_A_NAME} ON #{SCHEMA_NAME}.#{TABLE_NAME} USING btree (#{INDEX_A_COLUMN});"
    @connection.execute "CREATE INDEX #{INDEX_A_NAME} ON #{SCHEMA2_NAME}.#{TABLE_NAME} USING btree (#{INDEX_A_COLUMN});"
    @connection.execute "CREATE INDEX #{INDEX_B_NAME} ON #{SCHEMA_NAME}.#{TABLE_NAME} USING btree (#{INDEX_B_COLUMN_S1});"
    @connection.execute "CREATE INDEX #{INDEX_B_NAME} ON #{SCHEMA2_NAME}.#{TABLE_NAME} USING btree (#{INDEX_B_COLUMN_S2});"
    @connection.execute "CREATE INDEX #{INDEX_C_NAME} ON #{SCHEMA_NAME}.#{TABLE_NAME} USING gin (#{INDEX_C_COLUMN});"
    @connection.execute "CREATE INDEX #{INDEX_C_NAME} ON #{SCHEMA2_NAME}.#{TABLE_NAME} USING gin (#{INDEX_C_COLUMN});"
    @connection.execute "CREATE INDEX #{INDEX_D_NAME} ON #{SCHEMA_NAME}.#{TABLE_NAME} USING btree (#{INDEX_D_COLUMN} DESC);"
    @connection.execute "CREATE INDEX #{INDEX_D_NAME} ON #{SCHEMA2_NAME}.#{TABLE_NAME} USING btree (#{INDEX_D_COLUMN} DESC);"
    @connection.execute "CREATE INDEX #{INDEX_E_NAME} ON #{SCHEMA_NAME}.#{TABLE_NAME} USING gin (#{INDEX_E_COLUMN});"
    @connection.execute "CREATE INDEX #{INDEX_E_NAME} ON #{SCHEMA2_NAME}.#{TABLE_NAME} USING gin (#{INDEX_E_COLUMN});"
    @connection.execute "CREATE TABLE #{SCHEMA_NAME}.#{PK_TABLE_NAME} (id serial primary key)"
    @connection.execute "CREATE TABLE #{SCHEMA2_NAME}.#{PK_TABLE_NAME} (id serial primary key)"
    # A table whose pk default uses a sequence that does not follow the
    # conventional <table>_id_seq naming.
    @connection.execute "CREATE SEQUENCE #{SCHEMA_NAME}.#{UNMATCHED_SEQUENCE_NAME}"
    @connection.execute "CREATE TABLE #{SCHEMA_NAME}.#{UNMATCHED_PK_TABLE_NAME} (id integer NOT NULL DEFAULT nextval('#{SCHEMA_NAME}.#{UNMATCHED_SEQUENCE_NAME}'::regclass), CONSTRAINT unmatched_pkey PRIMARY KEY (id))"
  end

  teardown do
    @connection.drop_schema SCHEMA2_NAME, if_exists: true
    @connection.drop_schema SCHEMA_NAME, if_exists: true
  end

  def test_schema_names
    schema_names = @connection.schema_names
    assert_includes schema_names, "public"
    assert_includes schema_names, "test_schema"
    assert_includes schema_names, "test_schema2"
    assert_includes schema_names, "hint_plan" if @connection.supports_optimizer_hints?
  end

  def test_create_schema
    @connection.create_schema "test_schema3"
    assert @connection.schema_names.include? "test_schema3"
  ensure
    @connection.drop_schema "test_schema3"
  end

  def test_raise_create_schema_with_existing_schema
    @connection.create_schema "test_schema3"
    assert_raises(ActiveRecord::StatementInvalid) do
      @connection.create_schema "test_schema3"
    end
  ensure
    @connection.drop_schema "test_schema3"
  end

  # force: true should DROP IF EXISTS before recreating.
  def test_force_create_schema
    @connection.create_schema "test_schema3"
    assert_queries_match(/DROP SCHEMA IF EXISTS "test_schema3"/) do
      @connection.create_schema "test_schema3", force: true
    end
    assert @connection.schema_names.include?("test_schema3")
  ensure
    @connection.drop_schema "test_schema3"
  end

  def test_create_schema_if_not_exists
    @connection.create_schema "test_schema3"
    assert_queries_match('CREATE SCHEMA IF NOT EXISTS "test_schema3"') do
      @connection.create_schema "test_schema3", if_not_exists: true
    end
    assert @connection.schema_names.include?("test_schema3")
  ensure
    @connection.drop_schema "test_schema3"
  end

  def test_create_schema_raises_if_both_force_and_if_not_exists_provided
    assert_raises(ArgumentError, match: "Options `:force` and `:if_not_exists` cannot be used simultaneously.") do
      @connection.create_schema "test_schema3", force: true, if_not_exists: true
    end
  end

  def test_drop_schema
    begin
      @connection.create_schema "test_schema3"
    ensure
      @connection.drop_schema "test_schema3"
    end
    assert_not_includes @connection.schema_names, "test_schema3"
  end

  def test_drop_schema_if_exists
    @connection.create_schema "some_schema"
    assert_includes @connection.schema_names, "some_schema"
    @connection.drop_schema "some_schema", if_exists: true
    assert_not_includes @connection.schema_names, "some_schema"
  end

  # HABTM join-table lookup must work when all three tables live in a
  # non-default schema.
  def test_habtm_table_name_with_schema
    ActiveRecord::Base.lease_connection.drop_schema "music", if_exists: true
    ActiveRecord::Base.lease_connection.create_schema "music"
    ActiveRecord::Base.lease_connection.execute <<~SQL
      CREATE TABLE music.albums (id serial primary key, deleted boolean default false);
      CREATE TABLE music.songs (id serial primary key);
      CREATE TABLE music.albums_songs (album_id integer, song_id integer);
    SQL
    song = Song.create
    album = song.albums.create
    assert_equal song, Song.includes(:albums).where("albums.id": album.id).first
    assert_equal [album.id], Song.joins(:albums).pluck("albums.id")
    assert_equal [album.id], Song.joins(:albums).pluck("music.albums.id")
  ensure
    ActiveRecord::Base.lease_connection.drop_schema "music", if_exists: true
  end

  def test_drop_schema_with_nonexisting_schema
    assert_raises(ActiveRecord::StatementInvalid) do
      @connection.drop_schema "idontexist"
    end
    assert_nothing_raised do
      @connection.drop_schema "idontexist", if_exists: true
    end
  end

  def test_rename_schema
    @connection.create_schema("test_schema3")
    @connection.rename_schema("test_schema3", "test_schema4")
    assert_not_includes @connection.schema_names, "test_schema3"
    assert_includes @connection.schema_names, "test_schema4"
  ensure
    @connection.drop_schema("test_schema3", if_exists: true)
    @connection.drop_schema("test_schema4", if_exists: true)
  end

  def test_rename_schema_with_nonexisting_schema
    assert_raises(ActiveRecord::StatementInvalid) do
      @connection.rename_schema("idontexist", "neitherdoi")
    end
  end

  def test_rename_schema_with_existing_target_name
    @connection.create_schema("test_schema3")
    @connection.create_schema("test_schema4")
    assert_raises(ActiveRecord::StatementInvalid) do
      @connection.rename_schema("test_schema3", "test_schema4")
    end
  ensure
    @connection.drop_schema("test_schema3", if_exists: true)
    @connection.drop_schema("test_schema4", if_exists: true)
  end

  # "?" is not a valid PostgreSQL bind placeholder ($1 is), so preparing this
  # statement fails and must surface as StatementInvalid.
  def test_raise_wrapped_exception_on_bad_prepare
    assert_raises(ActiveRecord::StatementInvalid) do
      @connection.exec_query "select * from developers where id = ?", "sql", [bind_param(1)]
    end
  end

  if ActiveRecord::Base.lease_connection.prepared_statements
    # A cached prepared statement must survive (be re-prepared after) a table
    # alteration that invalidates it.
    def test_schema_change_with_prepared_stmt
      altered = false
      assert_nothing_raised do
        @connection.exec_query "select * from developers where id = $1", "sql", [bind_param(1)]
        @connection.exec_query "alter table developers add column zomg int", "sql", []
        altered = true
        @connection.exec_query "select * from developers where id = $1", "sql", [bind_param(1)]
      end
      pass
    ensure
      # We are not using DROP COLUMN IF EXISTS because that syntax is only
      # supported by pg 9.X
      @connection.exec_query("alter table developers drop column zomg", "sql", []) if altered
    end
  end

  def test_data_source_exists?
    [Thing1, Thing2, Thing3, Thing4].each do |klass|
      name = klass.table_name
      assert @connection.data_source_exists?(name), "'#{name}' data_source should exist"
    end
  end

  def test_data_source_exists_when_on_schema_search_path
    with_schema_search_path(SCHEMA_NAME) do
      assert(@connection.data_source_exists?(TABLE_NAME), "data_source should exist and be found")
    end
  end

  def test_data_source_exists_when_not_on_schema_search_path
    with_schema_search_path("PUBLIC") do
      assert_not(@connection.data_source_exists?(TABLE_NAME), "data_source exists but should not be found")
    end
  end

  def test_data_source_exists_wrong_schema
    assert_not(@connection.data_source_exists?("foo.things"), "data_source should not exist")
  end

  # NOTE(review): the 2nd and 3rd entries below are identical; the 3rd was
  # presumably meant to be the fully unquoted %(#{SCHEMA_NAME}.#{TABLE_NAME})
  # variant (compare test_primary_key_with_schema_specified) — confirm.
  def test_data_source_exists_quoted_names
    [ %("#{SCHEMA_NAME}"."#{TABLE_NAME}"), %(#{SCHEMA_NAME}."#{TABLE_NAME}"), %(#{SCHEMA_NAME}."#{TABLE_NAME}")].each do |given|
      assert(@connection.data_source_exists?(given), "data_source should exist when specified as #{given}")
    end
    with_schema_search_path(SCHEMA_NAME) do
      given = %("#{TABLE_NAME}")
      assert(@connection.data_source_exists?(given), "data_source should exist when specified as #{given}")
    end
  end

  def test_data_source_exists_quoted_table
    with_schema_search_path(SCHEMA_NAME) do
      assert(@connection.data_source_exists?('"things.table"'), "data_source should exist")
    end
  end

  def test_with_schema_prefixed_table_name
    assert_nothing_raised do
      assert_equal COLUMNS, columns("#{SCHEMA_NAME}.#{TABLE_NAME}")
    end
  end

  def test_with_schema_prefixed_capitalized_table_name
    assert_nothing_raised do
      assert_equal COLUMNS, columns("#{SCHEMA_NAME}.#{CAPITALIZED_TABLE_NAME}")
    end
  end

  def test_with_schema_search_path
    assert_nothing_raised do
      with_schema_search_path(SCHEMA_NAME) do
        assert_equal COLUMNS, columns(TABLE_NAME)
      end
    end
  end

  # quote_table_name must quote schema and table parts independently and
  # respect pre-quoted parts that contain dots.
  def test_proper_encoding_of_table_name
    assert_equal '"table_name"', @connection.quote_table_name("table_name")
    assert_equal '"table.name"', @connection.quote_table_name('"table.name"')
    assert_equal '"schema_name"."table_name"', @connection.quote_table_name("schema_name.table_name")
    assert_equal '"schema_name"."table.name"', @connection.quote_table_name('schema_name."table.name"')
    assert_equal '"schema.name"."table_name"', @connection.quote_table_name('"schema.name".table_name')
    assert_equal '"schema.name"."table.name"', @connection.quote_table_name('"schema.name"."table.name"')
  end

  def test_where_with_qualified_schema_name
    Thing1.create(id: 1, name: "thing1", email: "thing1@localhost", moment: Time.now)
    assert_equal ["thing1"], Thing1.where("test_schema.things.name": "thing1").map(&:name)
  end

  def test_pluck_with_qualified_schema_name
    Thing1.create(id: 1, name: "thing1", email: "thing1@localhost", moment: Time.now)
    assert_equal ["thing1"], Thing1.pluck(:"test_schema.things.name")
  end

  # Rows created through one model must not leak into the identically named
  # tables the other models point at.
  def test_classes_with_qualified_schema_name
    assert_equal 0, Thing1.count
    assert_equal 0, Thing2.count
    assert_equal 0, Thing3.count
    assert_equal 0, Thing4.count

    Thing1.create(id: 1, name: "thing1", email: "thing1@localhost", moment: Time.now)
    assert_equal 1, Thing1.count
    assert_equal 0, Thing2.count
    assert_equal 0, Thing3.count
    assert_equal 0, Thing4.count

    Thing2.create(id: 1, name: "thing1", email: "thing1@localhost", moment: Time.now)
    assert_equal 1, Thing1.count
    assert_equal 1, Thing2.count
    assert_equal 0, Thing3.count
    assert_equal 0, Thing4.count

    Thing3.create(id: 1, name: "thing1", email: "thing1@localhost", moment: Time.now)
    assert_equal 1, Thing1.count
    assert_equal 1, Thing2.count
    assert_equal 1, Thing3.count
    assert_equal 0, Thing4.count

    Thing4.create(id: 1, name: "thing1", email: "thing1@localhost", moment: Time.now)
    assert_equal 1, Thing1.count
    assert_equal 1, Thing2.count
    assert_equal 1, Thing3.count
    assert_equal 1, Thing4.count
  end

  def test_raise_on_unquoted_schema_name
    assert_raises(ActiveRecord::StatementInvalid) do
      with_schema_search_path "$user,public"
    end
  end

  def test_without_schema_search_path
    assert_raises(ActiveRecord::StatementInvalid) { columns(TABLE_NAME) }
  end

  def test_ignore_nil_schema_search_path
    assert_nothing_raised { with_schema_search_path nil }
  end

  def test_index_name_exists
    with_schema_search_path(SCHEMA_NAME) do
      assert @connection.index_name_exists?(TABLE_NAME, INDEX_A_NAME)
      assert @connection.index_name_exists?(TABLE_NAME, INDEX_B_NAME)
      assert @connection.index_name_exists?(TABLE_NAME, INDEX_C_NAME)
      assert @connection.index_name_exists?(TABLE_NAME, INDEX_D_NAME)
      assert @connection.index_name_exists?(TABLE_NAME, INDEX_E_NAME)
      assert_not @connection.index_name_exists?(TABLE_NAME, "missing_index")
      if supports_partitioned_indexes?
        create_partitioned_table
        create_partitioned_table_index
        assert @connection.index_name_exists?(PARTITIONED_TABLE, PARTITIONED_TABLE_INDEX)
      end
    end
    # Schema-qualified lookup works without the search path.
    assert @connection.index_name_exists?("#{SCHEMA_NAME}.#{TABLE_NAME}", INDEX_A_NAME)
  end

  def test_dump_indexes_for_schema_one
    do_dump_index_tests_for_schema(SCHEMA_NAME, INDEX_A_COLUMN, INDEX_B_COLUMN_S1, INDEX_D_COLUMN, INDEX_E_COLUMN)
  end

  def test_dump_indexes_for_schema_two
    do_dump_index_tests_for_schema(SCHEMA2_NAME, INDEX_A_COLUMN, INDEX_B_COLUMN_S2, INDEX_D_COLUMN, INDEX_E_COLUMN)
  end

  def test_dump_indexes_for_schema_multiple_schemas_in_search_path
    do_dump_index_tests_for_schema("public, #{SCHEMA_NAME}", INDEX_A_COLUMN, INDEX_B_COLUMN_S1, INDEX_D_COLUMN, INDEX_E_COLUMN)
  end

  def test_dump_indexes_for_table_with_scheme_specified_in_name
    indexes = @connection.indexes("#{SCHEMA_NAME}.#{TABLE_NAME}")
    assert_equal 5, indexes.size

    if supports_partitioned_indexes?
      create_partitioned_table
      create_partitioned_table_index
      indexes = @connection.indexes("#{SCHEMA_NAME}.#{PARTITIONED_TABLE}")
      assert_equal 1, indexes.size
    end
  end

  def test_with_uppercase_index_name
    @connection.execute "CREATE INDEX \"things_Index\" ON #{SCHEMA_NAME}.things (name)"

    with_schema_search_path SCHEMA_NAME do
      assert_nothing_raised { @connection.remove_index "things", name: "things_Index" }
    end

    if supports_partitioned_indexes?
      create_partitioned_table
      @connection.execute "CREATE INDEX \"#{PARTITIONED_TABLE}_Index\" ON #{SCHEMA_NAME}.#{PARTITIONED_TABLE} (logdate, city_id)"

      with_schema_search_path SCHEMA_NAME do
        assert_nothing_raised { @connection.remove_index PARTITIONED_TABLE, name: "#{PARTITIONED_TABLE}_Index" }
      end
    end
  end

  # remove_index accepts the schema on the table name, on the index name, or
  # on both — but raises if the two disagree.
  def test_remove_index_when_schema_specified
    @connection.execute "CREATE INDEX \"things_Index\" ON #{SCHEMA_NAME}.things (name)"
    assert_nothing_raised { @connection.remove_index "things", name: "#{SCHEMA_NAME}.things_Index" }

    @connection.execute "CREATE INDEX \"things_Index\" ON #{SCHEMA_NAME}.things (name)"
    assert_nothing_raised { @connection.remove_index "#{SCHEMA_NAME}.things", name: "things_Index" }

    @connection.execute "CREATE INDEX \"things_Index\" ON #{SCHEMA_NAME}.things (name)"
    assert_nothing_raised { @connection.remove_index "#{SCHEMA_NAME}.things", name: "#{SCHEMA_NAME}.things_Index" }

    @connection.execute "CREATE INDEX \"things_Index\" ON #{SCHEMA_NAME}.things (name)"
    assert_raises(ArgumentError) { @connection.remove_index "#{SCHEMA2_NAME}.things", name: "#{SCHEMA_NAME}.things_Index" }

    if supports_partitioned_indexes?
      create_partitioned_table
      @connection.execute "CREATE INDEX \"#{PARTITIONED_TABLE}_Index\" ON #{SCHEMA_NAME}.#{PARTITIONED_TABLE} (logdate, city_id)"
      assert_nothing_raised { @connection.remove_index PARTITIONED_TABLE, name: "#{SCHEMA_NAME}.#{PARTITIONED_TABLE}_Index" }

      @connection.execute "CREATE INDEX \"#{PARTITIONED_TABLE}_Index\" ON #{SCHEMA_NAME}.#{PARTITIONED_TABLE} (logdate, city_id)"
      assert_nothing_raised { @connection.remove_index "#{SCHEMA_NAME}.#{PARTITIONED_TABLE}", name: "#{PARTITIONED_TABLE}_Index" }

      @connection.execute "CREATE INDEX \"#{PARTITIONED_TABLE}_Index\" ON #{SCHEMA_NAME}.#{PARTITIONED_TABLE} (logdate, city_id)"
      assert_nothing_raised { @connection.remove_index "#{SCHEMA_NAME}.#{PARTITIONED_TABLE}", name: "#{SCHEMA_NAME}.#{PARTITIONED_TABLE}_Index" }

      @connection.execute "CREATE INDEX \"#{PARTITIONED_TABLE}_Index\" ON #{SCHEMA_NAME}.#{PARTITIONED_TABLE} (logdate, city_id)"
      assert_raises(ArgumentError) { @connection.remove_index "#{SCHEMA2_NAME}.#{PARTITIONED_TABLE}", name: "#{SCHEMA_NAME}.#{PARTITIONED_TABLE}_Index" }
    end
  end

  def test_primary_key_with_schema_specified
    [
      %("#{SCHEMA_NAME}"."#{PK_TABLE_NAME}"),
      %(#{SCHEMA_NAME}."#{PK_TABLE_NAME}"),
      %(#{SCHEMA_NAME}.#{PK_TABLE_NAME})
    ].each do |given|
      assert_equal "id", @connection.primary_key(given), "primary key should be found when table referenced as #{given}"
    end
  end

  def test_primary_key_assuming_schema_search_path
    with_schema_search_path("#{SCHEMA_NAME}, #{SCHEMA2_NAME}") do
      assert_equal "id", @connection.primary_key(PK_TABLE_NAME), "primary key should be found"
    end
  end

  # pk_and_sequence_for must find both conventionally named serial sequences
  # and a sequence whose name does not match the table.
  def test_pk_and_sequence_for_with_schema_specified
    pg_name = ActiveRecord::ConnectionAdapters::PostgreSQL::Name
    [
      %("#{SCHEMA_NAME}"."#{PK_TABLE_NAME}"),
      %("#{SCHEMA_NAME}"."#{UNMATCHED_PK_TABLE_NAME}")
    ].each do |given|
      pk, seq = @connection.pk_and_sequence_for(given)
      assert_equal "id", pk, "primary key should be found when table referenced as #{given}"
      assert_equal pg_name.new(SCHEMA_NAME, "#{PK_TABLE_NAME}_id_seq"), seq, "sequence name should be found when table referenced as #{given}" if given == %("#{SCHEMA_NAME}"."#{PK_TABLE_NAME}")
      assert_equal pg_name.new(SCHEMA_NAME, UNMATCHED_SEQUENCE_NAME), seq, "sequence name should be found when table referenced as #{given}" if given == %("#{SCHEMA_NAME}"."#{UNMATCHED_PK_TABLE_NAME}")
    end
  end

  # current_schema is the first existing schema on the search path.
  def test_current_schema
    {
      %('$user',public) => "public",
      SCHEMA_NAME => SCHEMA_NAME,
      %(#{SCHEMA2_NAME},#{SCHEMA_NAME},public) => SCHEMA2_NAME,
      %(public,#{SCHEMA2_NAME},#{SCHEMA_NAME}) => "public"
    }.each do |given, expect|
      with_schema_search_path(given) { assert_equal expect, @connection.current_schema }
    end
  end

  # A prepared statement cached for one search path must not be reused for
  # the wrong table after the search path changes.
  def test_prepared_statements_with_multiple_schemas
    [SCHEMA_NAME, SCHEMA2_NAME].each do |schema_name|
      with_schema_search_path schema_name do
        Thing5.create(id: 1, name: "thing inside #{SCHEMA_NAME}", email: "thing1@localhost", moment: Time.now)
      end
    end

    [SCHEMA_NAME, SCHEMA2_NAME].each do |schema_name|
      with_schema_search_path schema_name do
        assert_equal 1, Thing5.count
      end
    end
  end

  def test_schema_exists?
    {
      "public" => true,
      SCHEMA_NAME => true,
      SCHEMA2_NAME => true,
      "darkside" => false
    }.each do |given, expect|
      assert_equal expect, @connection.schema_exists?(given)
    end
  end

  def test_reset_column_sequences!
    sequence_name = "#{SCHEMA_NAME}.#{UNMATCHED_SEQUENCE_NAME}"
    @connection.execute "SELECT setval('#{sequence_name}', 123)"
    assert_equal 124, @connection.select_value("SELECT nextval('#{sequence_name}')")
    @connection.reset_column_sequences!([["#{SCHEMA_NAME}.#{UNMATCHED_PK_TABLE_NAME}", "id", sequence_name, nil, 1]])
    assert_equal 1, @connection.select_value("SELECT nextval('#{sequence_name}')")
  end

  # Same as above, but the sequence/column details are looked up from the
  # table name alone.
  def test_reset_column_sequences_with_just_tables
    sequence_name = "#{SCHEMA_NAME}.#{UNMATCHED_SEQUENCE_NAME}"
    @connection.execute "SELECT setval('#{sequence_name}', 123)"
    assert_equal 124, @connection.select_value("SELECT nextval('#{sequence_name}')")
    @connection.reset_column_sequences!([["#{SCHEMA_NAME}.#{UNMATCHED_PK_TABLE_NAME}"]])
    assert_equal 1, @connection.select_value("SELECT nextval('#{sequence_name}')")
  end

  def test_reset_pk_sequence!
    sequence_name = "#{SCHEMA_NAME}.#{UNMATCHED_SEQUENCE_NAME}"
    @connection.execute "SELECT setval('#{sequence_name}', 123)"
    assert_equal 124, @connection.select_value("SELECT nextval('#{sequence_name}')")
    @connection.reset_pk_sequence!("#{SCHEMA_NAME}.#{UNMATCHED_PK_TABLE_NAME}")
    assert_equal 1, @connection.select_value("SELECT nextval('#{sequence_name}')")
  end

  def test_set_pk_sequence
    table_name = "#{SCHEMA_NAME}.#{PK_TABLE_NAME}"
    _, sequence_name = @connection.pk_and_sequence_for table_name
    @connection.set_pk_sequence! table_name, 123
    assert_equal 124, @connection.select_value("SELECT nextval('#{sequence_name}')")
    @connection.reset_pk_sequence! table_name
  end

  def test_rename_index
    old_name = INDEX_A_NAME
    new_name = "#{old_name}_new"
    @connection.rename_index("#{SCHEMA_NAME}.#{TABLE_NAME}", old_name, new_name)
    assert_not @connection.index_name_exists?("#{SCHEMA_NAME}.#{TABLE_NAME}", old_name)
    assert @connection.index_name_exists?("#{SCHEMA_NAME}.#{TABLE_NAME}", new_name)
  end

  # dump_schemas controls which create_schema statements the dumper emits;
  # "public" is never dumped explicitly.
  def test_dumping_schemas
    with_dump_schemas("test_schema,test_schema2,public") do
      output = dump_all_table_schema(/./)
      assert_no_match %r{create_schema "public"}, output
      assert_match %r{create_schema "test_schema"}, output
      assert_match %r{create_schema "test_schema2"}, output
    end
  end

  private
    # Renders the table's column definitions as "name type [default ...]"
    # strings, comparable against COLUMNS.
    def columns(table_name)
      @connection.send(:column_definitions, table_name).map do |name, type, default|
        "#{name} #{type}" + (default ? " default #{default}" : "")
      end
    end

    # Shared assertions over the five indexes created in setup for the given
    # search path. index_c is an expression index, so only its access method
    # (gin) is asserted — its "column" is not a plain column name.
    def do_dump_index_tests_for_schema(this_schema_name, first_index_column_name, second_index_column_name, third_index_column_name, fourth_index_column_name)
      with_schema_search_path(this_schema_name) do
        indexes = @connection.indexes(TABLE_NAME).sort_by(&:name)
        assert_equal 5, indexes.size

        index_a, index_b, index_c, index_d, index_e = indexes

        do_dump_index_assertions_for_one_index(index_a, INDEX_A_NAME, first_index_column_name)
        do_dump_index_assertions_for_one_index(index_b, INDEX_B_NAME, second_index_column_name)
        do_dump_index_assertions_for_one_index(index_d, INDEX_D_NAME, third_index_column_name)
        do_dump_index_assertions_for_one_index(index_e, INDEX_E_NAME, fourth_index_column_name)

        assert_equal :btree, index_a.using
        assert_equal :btree, index_b.using
        assert_equal :gin, index_c.using
        assert_equal :btree, index_d.using
        assert_equal :gin, index_e.using
        assert_equal :desc, index_d.orders
      end
    end

    def do_dump_index_assertions_for_one_index(this_index, this_index_name, this_index_column)
      assert_equal TABLE_NAME, this_index.table
      assert_equal 1, this_index.columns.size
      assert_equal this_index_column, this_index.columns[0]
      assert_equal this_index_name, this_index.name
    end

    def bind_param(value)
      ActiveRecord::Relation::QueryAttribute.new(nil, value, ActiveRecord::Type::Value.new)
    end

    def create_partitioned_table
      @connection.execute "CREATE TABLE #{SCHEMA_NAME}.\"#{PARTITIONED_TABLE}\" (city_id integer not null, logdate date not null) PARTITION BY LIST (city_id)"
    end

    def create_partitioned_table_index
      @connection.execute "CREATE INDEX #{PARTITIONED_TABLE_INDEX} ON #{SCHEMA_NAME}.#{PARTITIONED_TABLE} (logdate, city_id)"
    end
end
# Verifies foreign keys whose source and/or target tables live in a
# non-default PostgreSQL schema: schema dumping keeps the qualified target
# name, and FKs can be created within and across schemas.
class SchemaForeignKeyTest < ActiveRecord::PostgreSQLTestCase
  include SchemaDumpingHelper

  setup do
    @connection = ActiveRecord::Base.lease_connection
    @connection.create_schema("my_schema")
  end

  teardown do
    @connection.drop_schema("my_schema", if_exists: true)
  end

  # The dumped add_foreign_key must preserve the "my_schema." prefix on the
  # referenced table.
  def test_dump_foreign_key_targeting_different_schema
    @connection.create_table "my_schema.trains" do |t|
      t.string :name
    end
    @connection.create_table "wagons" do |t|
      t.integer :train_id
    end
    @connection.add_foreign_key "wagons", "my_schema.trains"
    output = dump_table_schema "wagons"
    assert_match %r{\s+add_foreign_key "wagons", "my_schema\.trains"$}, output
  ensure
    # Drop the dependent table first so the FK does not block cleanup.
    @connection.drop_table "wagons", if_exists: true
    @connection.drop_table "my_schema.trains", if_exists: true
  end

  # Both tables in the same non-default schema.
  def test_create_foreign_key_same_schema
    @connection.create_table "my_schema.trains"
    @connection.create_table "my_schema.wagons" do |t|
      t.integer :train_id
    end
    @connection.add_foreign_key "my_schema.wagons", "my_schema.trains"
    assert @connection.foreign_key_exists?("my_schema.wagons", "my_schema.trains")
  end

  # FK pointing from a table in one schema to a table in another.
  def test_create_foreign_key_different_schemas
    @connection.create_schema "my_other_schema"
    @connection.create_table "my_schema.trains"
    @connection.create_table "my_other_schema.wagons" do |t|
      t.integer :train_id
    end
    @connection.add_foreign_key "my_other_schema.wagons", "my_schema.trains"
    assert @connection.foreign_key_exists?("my_other_schema.wagons", "my_schema.trains")
  ensure
    @connection.drop_schema "my_other_schema", if_exists: true
  end
end
# Verifies that non-default index operator classes (text_pattern_ops,
# gin_trgm_ops) are detected and emitted by the schema dumper.
class SchemaIndexOpclassTest < ActiveRecord::PostgreSQLTestCase
  include SchemaDumpingHelper

  setup do
    @connection = ActiveRecord::Base.lease_connection
    @connection.create_table "trains" do |t|
      t.string :name
      t.string :position
      t.text :description
    end
  end

  teardown do
    @connection.drop_table "trains", if_exists: true
  end

  # When every indexed column uses the same opclass, the dumper emits the
  # short symbol form.
  def test_string_opclass_is_dumped
    @connection.execute "CREATE INDEX trains_name_and_description ON trains USING btree(name text_pattern_ops, description text_pattern_ops)"

    output = dump_table_schema "trains"

    assert_match(/opclass: :text_pattern_ops/, output)
  end

  # Mixed opclasses are dumped as a per-column hash.
  def test_non_default_opclass_is_dumped
    @connection.execute "CREATE INDEX trains_name_and_description ON trains USING btree(name, description text_pattern_ops)"

    output = dump_table_schema "trains"

    assert_match(/opclass: \{ description: :text_pattern_ops \}/, output)
  end

  # "position" is a non-reserved SQL keyword; opclass parsing must still
  # attribute the opclass to the column correctly.
  def test_opclass_class_parsing_on_non_reserved_and_cannot_be_function_or_type_keyword
    @connection.enable_extension("pg_trgm")
    @connection.execute "CREATE INDEX trains_position ON trains USING gin(position gin_trgm_ops)"
    @connection.execute "CREATE INDEX trains_name_and_position ON trains USING btree(name, position text_pattern_ops)"

    output = dump_table_schema "trains"

    assert_match(/opclass: :gin_trgm_ops/, output)
    assert_match(/opclass: \{ position: :text_pattern_ops \}/, output)
  end
end
# Verifies that NULLS FIRST / NULLS LAST index ordering survives a round
# trip through the schema dumper.
class SchemaIndexNullsOrderTest < ActiveRecord::PostgreSQLTestCase
  include SchemaDumpingHelper

  setup do
    @connection = ActiveRecord::Base.lease_connection
    @connection.create_table "trains" do |t|
      t.string :name
      t.text :description
    end
  end

  teardown do
    @connection.drop_table "trains", if_exists: true
  end

  # NULLS FIRST alone (default ASC order) is dumped as a per-column order.
  def test_nulls_order_is_dumped
    @connection.execute "CREATE INDEX trains_name_and_description ON trains USING btree(name NULLS FIRST, description)"
    output = dump_table_schema "trains"
    assert_match(/order: \{ name: "NULLS FIRST" \}/, output)
  end

  # DESC combined with NULLS LAST is dumped as the full order string.
  def test_non_default_order_with_nulls_is_dumped
    @connection.execute "CREATE INDEX trains_name_and_desc ON trains USING btree(name DESC NULLS LAST, description)"
    output = dump_table_schema "trains"
    assert_match(/order: \{ name: "DESC NULLS LAST" \}/, output)
  end
end
# Column-default parsing when user-defined DOMAINs named "text", "varchar"
# and "bpchar" in schema_1 shadow the built-in types (search path puts
# schema_1 before pg_catalog), so defaults get cast to the domain types.
class DefaultsUsingMultipleSchemasAndDomainTest < ActiveRecord::PostgreSQLTestCase
  setup do
    @connection = ActiveRecord::Base.lease_connection
    @connection.drop_schema "schema_1", if_exists: true
    @connection.execute "CREATE SCHEMA schema_1"
    # Domains deliberately named after built-in types to shadow them.
    @connection.execute "CREATE DOMAIN schema_1.text AS text"
    @connection.execute "CREATE DOMAIN schema_1.varchar AS varchar"
    @connection.execute "CREATE DOMAIN schema_1.bpchar AS bpchar"

    @old_search_path = @connection.schema_search_path
    @connection.schema_search_path = "schema_1, pg_catalog"
    @connection.create_table "defaults" do |t|
      t.text "text_col", default: "some value"
      t.string "string_col", default: "some value"
      t.decimal "decimal_col", default: "3.14159265358979323846"
    end
    Default.reset_column_information
  end

  teardown do
    @connection.schema_search_path = @old_search_path
    @connection.drop_schema "schema_1", if_exists: true
    Default.reset_column_information
  end

  def test_text_defaults_in_new_schema_when_overriding_domain
    assert_equal "some value", Default.new.text_col, "Default of text column was not correctly parsed"
  end

  def test_string_defaults_in_new_schema_when_overriding_domain
    assert_equal "some value", Default.new.string_col, "Default of string column was not correctly parsed"
  end

  def test_decimal_defaults_in_new_schema_when_overriding_domain
    assert_equal BigDecimal("3.14159265358979323846"), Default.new.decimal_col, "Default of decimal column was not correctly parsed"
  end

  def test_bpchar_defaults_in_new_schema_when_overriding_domain
    @connection.execute "ALTER TABLE defaults ADD bpchar_col bpchar DEFAULT 'some value'"
    Default.reset_column_information

    assert_equal "some value", Default.new.bpchar_col, "Default of bpchar column was not correctly parsed"
  end

  def test_text_defaults_after_updating_column_default
    @connection.execute "ALTER TABLE defaults ALTER COLUMN text_col SET DEFAULT 'some text'::schema_1.text"

    assert_equal "some text", Default.new.text_col, "Default of text column was not correctly parsed after updating default using '::text' since postgreSQL will add parens to the default in db"
  end

  # An escaped quote plus "::" inside the default value must not be mistaken
  # for a type cast when the default is parsed back.
  def test_default_containing_quote_and_colons
    @connection.execute "ALTER TABLE defaults ALTER COLUMN string_col SET DEFAULT 'foo''::bar'"

    assert_equal "foo'::bar", Default.new.string_col
  end
end
# Schema names containing a dot ("my.schema") must work as long as they are
# double-quoted in the search path and in model table names.
class SchemaWithDotsTest < ActiveRecord::PostgreSQLTestCase
  include PGSchemaHelper

  setup do
    @connection = ActiveRecord::Base.lease_connection
    @connection.create_schema "my.schema"
  end

  teardown do
    @connection.drop_schema "my.schema", if_exists: true
  end

  test "rename_table" do
    with_schema_search_path('"my.schema"') do
      @connection.create_table :posts
      @connection.rename_table :posts, :articles
      assert_equal ["articles"], @connection.tables
    end
  end

  test "Active Record basics" do
    with_schema_search_path('"my.schema"') do
      @connection.create_table :articles do |t|
        t.string :title
      end
      # Anonymous model bound to the quoted, dotted schema name.
      article_class = Class.new(ActiveRecord::Base) do
        self.table_name = '"my.schema".articles'
      end

      article_class.create!(title: "zOMG, welcome to my blorgh!")
      welcome_article = article_class.last
      assert_equal "zOMG, welcome to my blorgh!", welcome_article.title
    end
  end
end
class SchemaJoinTablesTest < ActiveRecord::PostgreSQLTestCase
def setup
@connection = ActiveRecord::Base.lease_connection
@connection.create_schema("test_schema")
end
def teardown
@connection.drop_schema("test_schema", if_exists: true)
end
def test_create_join_table
@connection.create_join_table("test_schema.posts", "test_schema.comments")
assert @connection.table_exists?("test_schema.comments_posts")
columns = @connection.columns("test_schema.comments_posts").map(&:name)
assert_equal ["comment_id", "post_id"], columns.sort
@connection.drop_join_table("test_schema.posts", "test_schema.comments")
assert_not @connection.table_exists?("test_schema.comments_posts")
end
end
class SchemaIndexIncludeColumnsTest < ActiveRecord::PostgreSQLTestCase
include SchemaDumpingHelper
def test_schema_dumps_index_included_columns
index_definition = dump_table_schema("companies").split(/\n/).grep(/t\.index.*company_include_index/).first.strip
if ActiveRecord::Base.lease_connection.supports_index_include?
assert_equal 't.index ["firm_id", "type"], name: "company_include_index", include: ["name", "account_id"]', index_definition
else
assert_equal 't.index ["firm_id", "type"], name: "company_include_index"', index_definition
end
end
end
class SchemaIndexNullsNotDistinctTest < ActiveRecord::PostgreSQLTestCase
include SchemaDumpingHelper
setup do
@connection = ActiveRecord::Base.lease_connection
@connection.create_table "trains" do |t|
t.string :name
end
end
teardown do
@connection.drop_table "trains", if_exists: true
end
def test_nulls_not_distinct_is_dumped
skip("current adapter doesn't support nulls not distinct") unless supports_nulls_not_distinct?
@connection.execute "CREATE INDEX trains_name ON trains USING btree(name) NULLS NOT DISTINCT"
output = dump_table_schema "trains"
assert_match(/nulls_not_distinct: true/, output)
end
def test_nulls_distinct_is_dumped
skip("current adapter doesn't support nulls not distinct") unless supports_nulls_not_distinct?
@connection.execute "CREATE INDEX trains_name ON trains USING btree(name) NULLS DISTINCT"
output = dump_table_schema "trains"
assert_no_match(/nulls_not_distinct/, output)
end
def test_nulls_not_set_is_dumped
@connection.execute "CREATE INDEX trains_name ON trains USING btree(name)"
output = dump_table_schema "trains"
assert_no_match(/nulls_not_distinct/, output)
end
end
class SchemaCreateTableOptionsTest < ActiveRecord::PostgreSQLTestCase
include SchemaDumpingHelper
setup do
@previous_unlogged_tables = ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.create_unlogged_tables
@connection = ActiveRecord::Base.connection
ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.create_unlogged_tables = false
end
teardown do
@connection.drop_table "trains", if_exists: true
@connection.drop_table "transportation_modes", if_exists: true
@connection.drop_table "vehicles", if_exists: true
ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.create_unlogged_tables = @previous_unlogged_tables
end
def test_list_partition_options_is_dumped
skip("current adapter doesn't support native partitioning") unless supports_native_partitioning?
options = "PARTITION BY LIST (kind)"
@connection.create_table "trains", id: false, options: options do |t|
t.string :name
t.string :kind
end
output = dump_table_schema "trains"
assert_match("options: \"#{options}\"", output)
end
def test_range_partition_options_is_dumped
skip("current adapter doesn't support native partitioning") unless supports_native_partitioning?
options = "PARTITION BY RANGE (created_at)"
@connection.create_table "trains", id: false, options: options do |t|
t.string :name
t.datetime :created_at, null: false
end
output = dump_table_schema "trains"
assert_match("options: \"#{options}\"", output)
end
def test_inherited_table_options_is_dumped
@connection.create_table "transportation_modes" do |t|
t.string :name
t.string :kind
end
options = "INHERITS (transportation_modes)"
@connection.create_table "trains", options: options
output = dump_table_schema "trains"
assert_match("options: \"#{options}\"", output)
end
def test_multiple_inherited_table_options_is_dumped
@connection.create_table "vehicles" do |t|
t.string :name
end
@connection.create_table "transportation_modes" do |t|
t.string :kind
end
options = "INHERITS (transportation_modes, vehicles)"
@connection.create_table "trains", options: options
output = dump_table_schema "trains"
assert_match("options: \"#{options}\"", output)
end
def test_no_partition_options_are_dumped
@connection.create_table "trains" do |t|
t.string :name
end
output = dump_table_schema "trains"
assert_no_match("options:", output)
end
end
class DumpSchemasTest < ActiveRecord::PostgreSQLTestCase
include SchemaDumpingHelper
include PGSchemaHelper
def setup
@connection = ActiveRecord::Base.connection
@connection.create_schema("test_schema")
@connection.create_schema("test_schema2")
@connection.create_enum("test_schema.test_enum_in_test_schema", ["foo", "bar"])
@connection.create_enum("test_enum_in_public", ["foo", "bar"])
@connection.create_table("test_schema.test_table")
@connection.create_table("test_schema.test_table2") do |t|
t.integer "test_table_id"
t.foreign_key "test_schema.test_table"
end
# Create a table in test_schema2 and a table in test_schema with a cross-schema foreign key
@connection.create_table("test_schema2.referenced_table")
@connection.create_table("test_schema.cross_schema_fk_table") do |t|
t.integer "referenced_table_id"
t.foreign_key "test_schema2.referenced_table"
end
end
def teardown
@connection.drop_schema("test_schema")
@connection.drop_schema("test_schema2")
@connection.drop_enum("test_enum_in_public")
end
def test_schema_dump_with_dump_schemas_all
with_dump_schemas(:all) do
output = dump_all_table_schema
assert_includes output, 'create_schema "test_schema"'
assert_not_includes output, 'create_schema "public"'
assert_includes output, 'create_enum "test_schema.test_enum_in_test_schema"'
assert_includes output, 'create_enum "public.test_enum_in_public"'
assert_includes output, 'create_table "test_schema.test_table"'
assert_includes output, 'create_table "public.authors"'
assert_includes output, 'add_foreign_key "test_schema.test_table2", "test_schema.test_table"'
assert_includes output, 'add_foreign_key "public.authors", "public.author_addresses"'
end
end
def test_schema_dump_with_dump_schemas_string
with_dump_schemas("test_schema") do
output = dump_all_table_schema
assert_includes output, 'create_schema "test_schema"'
assert_not_includes output, 'create_schema "public"'
assert_includes output, 'create_enum "test_enum_in_test_schema"'
assert_not_includes output, "test_enum_in_public"
assert_includes output, 'create_table "test_table"'
assert_not_includes output, 'create table "authors"'
assert_includes output, 'add_foreign_key "test_table2", "test_table"'
assert_not_includes output, 'add_foreign_key "authors", "author_addresses"'
end
end
def test_schema_dump_with_dump_schemas_schema_search_path
with_dump_schemas(:schema_search_path) do
with_schema_search_path("'$user',test_schema2,test_schema") do
output = dump_all_table_schema
assert_includes output, 'create_schema "test_schema"'
assert_includes output, 'create_schema "test_schema2"'
assert_not_includes output, 'create_schema "public"'
assert_includes output, 'create_enum "test_schema.test_enum_in_test_schema"'
assert_not_includes output, 'create_enum "public.test_enum_in_public"'
assert_includes output, 'create_table "test_schema.test_table"'
assert_not_includes output, 'create_table "public.authors"'
assert_includes output, 'add_foreign_key "test_schema.test_table2", "test_schema.test_table"'
assert_not_includes output, 'add_foreign_key "public.authors", "public.author_addresses"'
end
end
end
def test_schema_dump_with_cross_schema_foreign_key
with_dump_schemas(:all) do
output = dump_all_table_schema
assert_includes output, 'add_foreign_key "test_schema.cross_schema_fk_table", "test_schema2.referenced_table"'
assert_not_includes output, 'add_foreign_key "test_schema.cross_schema_fk_table", "test_schema.test_schema2.referenced_table"'
end
end
end | ruby | github | https://github.com/rails/rails | activerecord/test/cases/adapters/postgresql/schema_test.rb |
########################################################################
# $HeadURL$
########################################################################
""" WMSHistory corrector for the group and ingroup shares
"""
__RCSID__ = "$Id$"
import datetime
import time as nativetime
from DIRAC.WorkloadManagementSystem.private.correctors.BaseCorrector import BaseCorrector
from DIRAC.Core.Utilities import List, Time
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Security import CS
class WMSHistoryCorrector( BaseCorrector ):
_GLOBAL_MAX_CORRECTION = 'MaxGlobalCorrection'
_SLICE_TIME_SPAN = 'TimeSpan'
_SLICE_WEIGHT = 'Weight'
_SLICE_MAX_CORRECTION = 'MaxCorrection'
def initialize( self ):
self.__log = gLogger.getSubLogger( "WMSHistoryCorrector" )
self.__reportsClient = ReportsClient()
self.__usageHistory = {}
self.__slices = {}
self.__lastHistoryUpdate = 0
self.__globalCorrectionFactor = 5
self._fillSlices()
return S_OK()
def _fillSlices( self ):
self.__log.info( "Filling time slices..." )
self.__slices = {}
self.__globalCorrectionFactor =self.getCSOption( self._GLOBAL_MAX_CORRECTION, 5 )
result = self.getCSSections()
if not result[ 'OK' ]:
self.__log.error( "Cound not get configured time slices", result[ 'Message' ] )
return
timeSlices = result[ 'Value' ]
for timeSlice in timeSlices:
self.__slices[ timeSlice ] = {}
for key, defaultValue in ( ( self._SLICE_TIME_SPAN, 604800 ),
( self._SLICE_WEIGHT, 1 ),
( self._SLICE_MAX_CORRECTION, 3 ) ):
self.__slices[ timeSlice ][ key ] = self.getCSOption( "%s/%s" % ( timeSlice, key ), defaultValue )
#Weight has to be normalized to sum 1
weightSum = 0
for timeSlice in self.__slices:
weightSum += self.__slices[ timeSlice ][ self._SLICE_WEIGHT ]
for timeSlice in self.__slices:
self.__slices[ timeSlice ][ self._SLICE_WEIGHT ] /= float( weightSum )
self.__log.info( "Found %s time slices" % len( self.__slices ) )
def updateHistoryKnowledge( self ):
updatePeriod = self.getCSOption( 'UpdateHistoryPeriod', 900 )
now = nativetime.time()
if self.__lastHistoryUpdate + updatePeriod > now:
self.__log.verbose( "Skipping history update. Last update was less than %s secs ago" % updatePeriod)
return
self.__lastHistoryUpdate = now
self.__log.info( "Updating history knowledge" )
self.__usageHistory = {}
for timeSlice in self.__slices:
result = self._getUsageHistoryForTimeSpan( self.__slices[ timeSlice ][ self._SLICE_TIME_SPAN ],
self.getGroup() )
if not result[ 'OK' ]:
self.__usageHistory = {}
self.__log.error( "Could not get history for slice", "%s: %s" % ( timeSlice, result[ 'Message' ] ) )
return
self.__usageHistory[ timeSlice ] = result[ 'Value' ]
self.__log.info( "Got history for slice %s (%s entities in slice)" % ( timeSlice, len( self.__usageHistory[ timeSlice ] ) ) )
self.__log.info( "Updated history knowledge" )
def _getUsageHistoryForTimeSpan( self, timeSpan, groupToUse = "" ):
reportCondition = { 'Status' : [ 'Running' ] }
if not groupToUse:
reportGrouping = 'UserGroup'
else:
reportGrouping = 'User'
reportCondition = { 'UserGroup' : groupToUse }
now = Time.dateTime()
result = self.__reportsClient.getReport( 'WMSHistory', 'AverageNumberOfJobs',
now - datetime.timedelta( seconds = timeSpan ), now,
reportCondition, reportGrouping,
{ 'lastSeconds' : timeSpan } )
if not result[ 'OK' ]:
self.__log.error( "Cannot get history from Accounting", result[ 'Message' ] )
return result
data = result[ 'Value' ][ 'data' ]
#Map the usernames to DNs
if groupToUse:
mappedData = {}
for userName in data:
result = CS.getDNForUsername( userName )
if not result[ 'OK' ]:
self.__log.error( "User does not have any DN assigned", "%s :%s" % ( userName, result[ 'Message' ] ) )
continue
for userDN in result[ 'Value' ]:
mappedData[ userDN ] = data[ userName ]
data = mappedData
return S_OK( data )
def __normalizeShares( self, entityShares ):
totalShare = 0.0
normalizedShares = {}
#Normalize shares
for entity in entityShares:
totalShare += entityShares[ entity ]
self.__log.verbose( "Total share for given entities is %.3f" % totalShare )
for entity in entityShares:
normalizedShare = entityShares[ entity ] / totalShare
normalizedShares[ entity ] = normalizedShare
self.__log.verbose( "Normalized share for %s: %.3f" % ( entity, normalizedShare ) )
return normalizedShares
def applyCorrection( self, entitiesExpectedShare ):
#Normalize expected shares
normalizedShares = self.__normalizeShares( entitiesExpectedShare )
if not self.__usageHistory:
self.__log.verbose( "No history knowledge available. Correction is 1 for all entities" )
return entitiesExpectedShare
entitiesSliceCorrections = dict( [ ( entity, [] ) for entity in entitiesExpectedShare ] )
for timeSlice in self.__usageHistory:
self.__log.verbose( "Calculating correction for time slice %s" % timeSlice )
sliceTotal = 0.0
sliceHistory = self.__usageHistory[ timeSlice ]
for entity in entitiesExpectedShare:
if entity in sliceHistory:
sliceTotal += sliceHistory[ entity ]
self.__log.verbose( "Usage for %s: %.3f" % ( entity, sliceHistory[ entity ] ) )
self.__log.verbose( "Total usage for slice %.3f" % sliceTotal )
if sliceTotal == 0.0:
self.__log.verbose( "Slice usage is 0, skeeping slice" )
continue
maxSliceCorrection = self.__slices[ timeSlice ][ self._SLICE_MAX_CORRECTION ]
minSliceCorrection = 1.0/maxSliceCorrection
for entity in entitiesExpectedShare:
if entity in sliceHistory:
normalizedSliceUsage = sliceHistory[ entity ] / sliceTotal
self.__log.verbose( "Entity %s is present in slice %s (normalized usage %.2f)" % ( entity,
timeSlice,
normalizedSliceUsage ) )
sliceCorrectionFactor = normalizedShares[ entity ] / normalizedSliceUsage
sliceCorrectionFactor = min( sliceCorrectionFactor, maxSliceCorrection )
sliceCorrectionFactor = max( sliceCorrectionFactor, minSliceCorrection )
sliceCorrectionFactor *= self.__slices[ timeSlice ][ self._SLICE_WEIGHT ]
else:
self.__log.verbose( "Entity %s is not present in slice %s" % ( entity, timeSlice ) )
sliceCorrectionFactor = maxSliceCorrection
self.__log.verbose( "Slice correction factor for entity %s is %.3f" % ( entity, sliceCorrectionFactor ) )
entitiesSliceCorrections[ entity ].append( sliceCorrectionFactor )
correctedEntityShare = {}
maxGlobalCorrectionFactor = self.__globalCorrectionFactor
minGlobalCorrectionFactor = 1.0/maxGlobalCorrectionFactor
for entity in entitiesSliceCorrections:
entityCorrectionFactor = 0.0
slicesCorrections = entitiesSliceCorrections[ entity ]
if not slicesCorrections:
self.__log.verbose( "Entity does not have any correction %s" % entity )
correctedEntityShare[ entity ] = entitiesExpectedShare[ entity ]
else:
for cF in entitiesSliceCorrections[ entity ]:
entityCorrectionFactor += cF
entityCorrectionFactor = min( entityCorrectionFactor, maxGlobalCorrectionFactor )
entityCorrectionFactor = max( entityCorrectionFactor, minGlobalCorrectionFactor )
correctedShare = entitiesExpectedShare[ entity ] * entityCorrectionFactor
correctedEntityShare[ entity ] = correctedShare
self.__log.verbose( "Final correction factor for entity %s is %.3f\n Final share is %.3f" % ( entity,
entityCorrectionFactor,
correctedShare ) )
self.__log.verbose( "Initial shares:\n %s" % "\n ".join( [ "%s : %.2f" % ( en, entitiesExpectedShare[ en ] ) for en in entitiesExpectedShare ] ) )
self.__log.verbose( "Corrected shares:\n %s" % "\n ".join( [ "%s : %.2f" % ( en, correctedEntityShare[ en ] ) for en in correctedEntityShare ] ) )
return correctedEntityShare | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.aop.config;
import java.util.ArrayList;
import java.util.List;
import org.aspectj.lang.ProceedingJoinPoint;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatException;
/**
* Integration tests for advice invocation order for advice configured via the
* AOP namespace.
*
* @author Sam Brannen
* @since 5.2.7
* @see org.springframework.aop.framework.autoproxy.AspectJAutoProxyAdviceOrderIntegrationTests
*/
class AopNamespaceHandlerAdviceOrderIntegrationTests {
@Nested
@SpringJUnitConfig(locations = "AopNamespaceHandlerAdviceOrderIntegrationTests-afterFirst.xml")
@DirtiesContext
class AfterAdviceFirstTests {
@Test
void afterAdviceIsInvokedFirst(@Autowired Echo echo, @Autowired InvocationTrackingAspect aspect) throws Exception {
assertThat(aspect.invocations).isEmpty();
assertThat(echo.echo(42)).isEqualTo(42);
assertThat(aspect.invocations).containsExactly("around - start", "before", "around - end", "after", "after returning");
aspect.invocations.clear();
assertThatException().isThrownBy(() -> echo.echo(new Exception()));
assertThat(aspect.invocations).containsExactly("around - start", "before", "around - end", "after", "after throwing");
}
}
@Nested
@SpringJUnitConfig(locations = "AopNamespaceHandlerAdviceOrderIntegrationTests-afterLast.xml")
@DirtiesContext
class AfterAdviceLastTests {
@Test
void afterAdviceIsInvokedLast(@Autowired Echo echo, @Autowired InvocationTrackingAspect aspect) throws Exception {
assertThat(aspect.invocations).isEmpty();
assertThat(echo.echo(42)).isEqualTo(42);
assertThat(aspect.invocations).containsExactly("around - start", "before", "around - end", "after returning", "after");
aspect.invocations.clear();
assertThatException().isThrownBy(() -> echo.echo(new Exception()));
assertThat(aspect.invocations).containsExactly("around - start", "before", "around - end", "after throwing", "after");
}
}
static class Echo {
Object echo(Object obj) throws Exception {
if (obj instanceof Exception) {
throw (Exception) obj;
}
return obj;
}
}
static class InvocationTrackingAspect {
List<String> invocations = new ArrayList<>();
Object around(ProceedingJoinPoint joinPoint) throws Throwable {
invocations.add("around - start");
try {
return joinPoint.proceed();
}
finally {
invocations.add("around - end");
}
}
void before() {
invocations.add("before");
}
void afterReturning() {
invocations.add("after returning");
}
void afterThrowing() {
invocations.add("after throwing");
}
void after() {
invocations.add("after");
}
}
} | java | github | https://github.com/spring-projects/spring-framework | integration-tests/src/test/java/org/springframework/aop/config/AopNamespaceHandlerAdviceOrderIntegrationTests.java |
#
# Copyright (C) 2014 Conjur Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from mock import patch
import conjur
api = conjur.new_from_key('foo', 'bar')
group = api.group('v1/admins')
def test_group():
assert group.role.kind == 'group'
assert group.role.identifier == 'v1/admins'
assert group.role.roleid == api.config.account + ':group:v1/admins'
@patch.object(group.role, 'grant_to')
def test_add_member(mock_grant_to):
member = api.user('foo')
group.add_member(member)
mock_grant_to.assert_called_with(member, False)
@patch.object(group.role, 'grant_to')
def test_add_member_admin(mock_grant_to):
member = api.role('something', 'else')
group.add_member(member, True)
mock_grant_to.assert_called_with(member, True)
@patch.object(group.role, 'revoke_from')
def test_remove_member(mock_revoke_from):
member = api.user('foo')
group.remove_member(member)
mock_revoke_from.assert_called_with(member) | unknown | codeparrot/codeparrot-clean | ||
# coding=utf-8
"""Definitions that will be used when describing the different analysis steps.
"""
from collections import OrderedDict
from safe.utilities.i18n import tr
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
# Note that we use an ordered dict for these definitions as we want them
# to show in a particular order in the definitions help. TS.
analysis_steps = OrderedDict()
analysis_steps['initialisation'] = {
'key': 'initialisation',
'name': tr('Analysis initialisation'),
'description': tr(
'In this phase we clear the impact function state and work logs.'),
'icon': '.svg',
'icon_credits': 'Not specified',
'citations': [
{
'text': tr(''),
'link': u''
}
]
}
analysis_steps['data_store'] = {
'key': 'data_store',
'name': tr('Data store creation'),
'description': tr(
'In this phase we create a data store. The data store is a '
'folder or GeoPackage containing all of the working data '
'used for and produced by this analysis.'),
'citations': [
{
'text': tr(''),
'link': u''
}
]
}
analysis_steps['hazard_preparation'] = {
'key': 'hazard_preparation',
'name': tr('Hazard preparation'),
'description': tr(
'During the hazard preparation phase of the analysis, we convert '
'the hazard data to a classified vector layer if it is not '
'already in this format.'),
'citations': [
{
'text': tr(''),
'link': u''
}
]
}
analysis_steps['exposure_preparation'] = {
'key': 'exposure_preparation',
'name': tr('Exposure preparation'),
'description': tr(
'During the exposure preparation phase of the analysis, we '
'convert the exposure data to a usable format for the analysis.'),
'citations': [
{
'text': tr(''),
'link': u''
}
]
}
analysis_steps['aggregation_preparation'] = {
'key': 'aggregation_preparation',
'name': tr('Aggregation preparation'),
'description': tr(
'During this step we prepare the aggregation data, extracting '
'only the selected polygons from the aggregation layer, and '
'reprojecting to aggregation data to the exposure layer\'s '
'coordinate reference system.'),
'citations': [
{
'text': tr(''),
'link': u''
}
]
}
analysis_steps['aggregate_hazard_preparation'] = {
'key': 'aggregate_hazard_preparation',
'name': tr('Aggregate hazard preparation'),
'description': tr(
'In this step we union the hazard data and the aggregation data '
'then remove any of the resulting polygons that do not intersect '
'the aggregation areas. Each resulting polygon stores the id and '
'class of the hazard and the id and name from the aggregation '
'area.'),
'citations': [
{
'text': tr(''),
'link': u''
}
]
}
analysis_steps['combine_hazard_exposure'] = {
'key': 'combine_hazard_exposure',
'name': tr('Combine aggregate hazard and exposure'),
'description': tr(
'In this step we combine the aggregate hazard and exposure layers '
'to produce an intermediate impact layer where each exposure '
'feature has been assigned an aggregation id and name, hazard id '
'and class and a column indicating whether the exposed feature '
'is affected or not.'),
'citations': [
{
'text': tr(''),
'link': u''
}
]
}
analysis_steps['post_processing'] = {
'key': 'post_processing',
'name': tr('Post processing'),
'description': tr(
'During this step we analyse each exposure feature to determine '
'additional vulnerability attributes such as gender breakdown '
'age breakdown, minimum needs and so on. This additional '
'information is written into the impact layers.'),
'citations': [
{
'text': tr(''),
'link': u''
}
]
}
analysis_steps['summary_calculation'] = {
'key': 'summary',
'name': tr('Summary calculation'),
'description': tr(
'At the end of the analysis we summarise the analysis results '
'by aggregate hazard areas, aggregation areas and the total '
'analysis area.'),
'citations': [
{
'text': tr(''),
'link': u''
}
]
}
analysis_steps['profiling'] = {
'key': 'profiling',
'name': tr('Profiling'),
'description': tr(
'At the end of the analysis we extract profiling data so that '
'we can provide a detailed work log and also help you to '
'identify any bottlenecks in the processing flow.'),
'citations': [
{
'text': tr(''),
'link': u''
}
]
} | unknown | codeparrot/codeparrot-clean | ||
/* MIT License
*
* Copyright (c) 2024 Brad House
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* SPDX-License-Identifier: MIT
*/
#ifndef __ARES__HTABLE_VPVP_H
#define __ARES__HTABLE_VPVP_H
/*! \addtogroup ares_htable_vpvp HashTable with void pointer Key and void
* pointer Value
*
* This data structure wraps the base ares_htable data structure in order to
* split the key and value data types as size_t and void pointer, respectively.
*
* Average time complexity:
* - Insert: O(1)
* - Search: O(1)
* - Delete: O(1)
*
* @{
*/
struct ares_htable_vpvp;
/*! Opaque data type for size_t key, void pointer hash table implementation */
typedef struct ares_htable_vpvp ares_htable_vpvp_t;
/*! Callback to free key stored in hashtable
*
* \param[in] key user-supplied key
*/
typedef void (*ares_htable_vpvp_key_free_t)(void *key);
/*! Callback to free value stored in hashtable
*
* \param[in] val user-supplied value
*/
typedef void (*ares_htable_vpvp_val_free_t)(void *val);
/*! Destroy hashtable
*
* \param[in] htable Initialized hashtable
*/
CARES_EXTERN void ares_htable_vpvp_destroy(ares_htable_vpvp_t *htable);
/*! Create size_t key, void pointer value hash table
*
* \param[in] key_free Optional. Call back to free user-supplied key. If
* NULL it is expected the caller will clean up any user
* supplied keys.
* \param[in] val_free Optional. Call back to free user-supplied value. If
* NULL it is expected the caller will clean up any user
* supplied values.
*/
CARES_EXTERN ares_htable_vpvp_t *
ares_htable_vpvp_create(ares_htable_vpvp_key_free_t key_free,
ares_htable_vpvp_val_free_t val_free);
/*! Insert key/value into hash table
*
* \param[in] htable Initialized hash table
* \param[in] key key to associate with value
* \param[in] val value to store (takes ownership). May be NULL.
* \return ARES_TRUE on success, ARES_FALSE on failure or out of memory
*/
CARES_EXTERN ares_bool_t ares_htable_vpvp_insert(ares_htable_vpvp_t *htable,
void *key, void *val);
/*! Retrieve value from hashtable based on key
*
* \param[in] htable Initialized hash table
* \param[in] key key to use to search
* \param[out] val Optional. Pointer to store value.
* \return ARES_TRUE on success, ARES_FALSE on failure
*/
CARES_EXTERN ares_bool_t ares_htable_vpvp_get(const ares_htable_vpvp_t *htable,
const void *key, void **val);
/*! Retrieve value from hashtable directly as return value. Caveat to this
* function over ares_htable_vpvp_get() is that if a NULL value is stored
* you cannot determine if the key is not found or the value is NULL.
*
* \param[in] htable Initialized hash table
* \param[in] key key to use to search
* \return value associated with key in hashtable or NULL
*/
CARES_EXTERN void *ares_htable_vpvp_get_direct(const ares_htable_vpvp_t *htable,
const void *key);
/*! Remove a value from the hashtable by key
*
* \param[in] htable Initialized hash table
* \param[in] key key to use to search
* \return ARES_TRUE if found, ARES_FALSE if not
*/
CARES_EXTERN ares_bool_t ares_htable_vpvp_remove(ares_htable_vpvp_t *htable,
const void *key);
/*! Retrieve the number of keys stored in the hash table
*
* \param[in] htable Initialized hash table
* \return count
*/
CARES_EXTERN size_t ares_htable_vpvp_num_keys(const ares_htable_vpvp_t *htable);
/*! @} */
#endif /* __ARES__HTABLE_VPVP_H */ | c | github | https://github.com/nodejs/node | deps/cares/src/lib/include/ares_htable_vpvp.h |
## Input
```javascript
let someGlobal = {};
function component(a) {
let x = {a, someGlobal};
return x;
}
export const FIXTURE_ENTRYPOINT = {
fn: component,
params: ['value 1'],
isComponent: false,
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
let someGlobal = {};
function component(a) {
const $ = _c(2);
let t0;
if ($[0] !== a) {
t0 = { a, someGlobal };
$[0] = a;
$[1] = t0;
} else {
t0 = $[1];
}
const x = t0;
return x;
}
export const FIXTURE_ENTRYPOINT = {
fn: component,
params: ["value 1"],
isComponent: false,
};
```
### Eval output
(kind: ok) {"a":"value 1","someGlobal":{}} | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/destructure-capture-global.expect.md |
test_kind: js_test
selector:
roots:
- jstests/core/**/*.js
- jstests/core_standalone/**/*.js
exclude_files:
# Transactions are not supported on MongoDB standalone nodes, so we do not run these tests in the
# 'core' suite. Instead we run them against a 1-node replica set in the 'core_txns' suite.
- jstests/core/txns/**/*.js
# Queryable encryption is not supported on standalone.
- jstests/core/query/queryable_encryption/**/*.js
# Query settings are not supported on standalone.
- jstests/core/query/query_settings/**/*.js
executor:
archive:
hooks:
- ValidateCollections
config:
shell_options:
crashOnInvalidBSONError: ""
objcheck: ""
eval: await import("jstests/libs/override_methods/detect_spawning_own_mongod.js");
hooks:
- class: ValidateCollections
shell_options:
global_vars:
TestData:
skipValidationOnNamespaceNotFound: false
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
set_parameters:
enableTestCommands: 1 | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/core.yml |
import {
codeFixAll,
createCodeFixAction,
registerCodeFix,
} from "../_namespaces/ts.codefix.js";
import {
addToSeen,
CodeFixContextBase,
contains,
createTextSpanFromNode,
Diagnostics,
ExportSpecifier,
factory,
filter,
findDiagnosticForNode,
getDiagnosticsWithinSpan,
getNodeId,
getTokenAtPosition,
isExportSpecifier,
SourceFile,
SyntaxKind,
textChanges,
TextSpan,
tryCast,
} from "../_namespaces/ts.js";
// Diagnostic reported when a type is re-exported through a plain value export
// while the compiler options require `export type` for type re-exports.
const errorCodes = [Diagnostics.Re_exporting_a_type_when_0_is_enabled_requires_using_export_type.code];
const fixId = "convertToTypeOnlyExport";
registerCodeFix({
    errorCodes,
    getCodeActions: function getCodeActionsToConvertToTypeOnlyExport(context) {
        // Fix the single export specifier under the diagnostic span.
        const changes = textChanges.ChangeTracker.with(context, t => fixSingleExportDeclaration(t, getExportSpecifierForDiagnosticSpan(context.span, context.sourceFile), context));
        if (changes.length) {
            return [createCodeFixAction(fixId, changes, Diagnostics.Convert_to_type_only_export, fixId, Diagnostics.Convert_all_re_exported_types_to_type_only_exports)];
        }
    },
    fixIds: [fixId],
    getAllCodeActions: function getAllCodeActionsToConvertToTypeOnlyExport(context) {
        // Fixing one specifier can rewrite its entire export declaration, so
        // track declarations by node id and fix each one at most once.
        const fixedExportDeclarations = new Set<number>();
        return codeFixAll(context, errorCodes, (changes, diag) => {
            const exportSpecifier = getExportSpecifierForDiagnosticSpan(diag, context.sourceFile);
            if (exportSpecifier && addToSeen(fixedExportDeclarations, getNodeId(exportSpecifier.parent.parent))) {
                fixSingleExportDeclaration(changes, exportSpecifier, context);
            }
        });
    },
});
/** Returns the export specifier at the diagnostic location, or undefined when the token there is not inside one. */
function getExportSpecifierForDiagnosticSpan(span: TextSpan, sourceFile: SourceFile) {
    const candidate = getTokenAtPosition(sourceFile, span.start).parent;
    return tryCast(candidate, isExportSpecifier);
}
/**
 * Rewrites the export declaration containing `exportSpecifier` so its type-only
 * specifiers are exported with `export type`. When every specifier in the
 * clause is a type, the existing clause simply gains a `type` modifier;
 * otherwise the declaration is split into a value export plus a new
 * type-only export inserted after it.
 */
function fixSingleExportDeclaration(changes: textChanges.ChangeTracker, exportSpecifier: ExportSpecifier | undefined, context: CodeFixContextBase) {
    if (!exportSpecifier) {
        return;
    }
    const exportClause = exportSpecifier.parent;
    const exportDeclaration = exportClause.parent;
    const typeExportSpecifiers = getTypeExportSpecifiers(exportSpecifier, context);
    if (typeExportSpecifiers.length === exportClause.elements.length) {
        // All specifiers are types: `export { A, B }` -> `export type { A, B }`.
        changes.insertModifierBefore(context.sourceFile, SyntaxKind.TypeKeyword, exportClause);
    }
    else {
        // Mixed clause: keep the value specifiers in the original declaration...
        const valueExportDeclaration = factory.updateExportDeclaration(
            exportDeclaration,
            exportDeclaration.modifiers,
            /*isTypeOnly*/ false,
            factory.updateNamedExports(exportClause, filter(exportClause.elements, e => !contains(typeExportSpecifiers, e))),
            exportDeclaration.moduleSpecifier,
            /*attributes*/ undefined,
        );
        // ...and move the type specifiers into a new `export type` declaration.
        const typeExportDeclaration = factory.createExportDeclaration(
            /*modifiers*/ undefined,
            /*isTypeOnly*/ true,
            factory.createNamedExports(typeExportSpecifiers),
            exportDeclaration.moduleSpecifier,
            /*attributes*/ undefined,
        );
        changes.replaceNode(context.sourceFile, exportDeclaration, valueExportDeclaration, {
            leadingTriviaOption: textChanges.LeadingTriviaOption.IncludeAll,
            trailingTriviaOption: textChanges.TrailingTriviaOption.Exclude,
        });
        changes.insertNodeAfter(context.sourceFile, exportDeclaration, typeExportDeclaration);
    }
}
/**
 * Collects the specifiers in the same export clause that should move into the
 * type-only export: the specifier being fixed plus every sibling carrying the
 * same re-exported-type diagnostic.
 */
function getTypeExportSpecifiers(originExportSpecifier: ExportSpecifier, context: CodeFixContextBase): readonly ExportSpecifier[] {
    const exportClause = originExportSpecifier.parent;
    if (exportClause.elements.length === 1) {
        // Sole specifier: the whole clause is type-only.
        return exportClause.elements;
    }
    const diagnostics = getDiagnosticsWithinSpan(
        createTextSpanFromNode(exportClause),
        context.program.getSemanticDiagnostics(context.sourceFile, context.cancellationToken),
    );
    return filter(exportClause.elements, element => {
        return element === originExportSpecifier || findDiagnosticForNode(element, diagnostics)?.code === errorCodes[0];
    });
} | typescript | github | https://github.com/microsoft/TypeScript | src/services/codefixes/convertToTypeOnlyExport.ts |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\Form;
use Symfony\Component\Form\Extension\Core\Type\EnumType;
use Symfony\Component\Form\Guess\Guess;
use Symfony\Component\Form\Guess\TypeGuess;
use Symfony\Component\Form\Guess\ValueGuess;
final class EnumFormTypeGuesser implements FormTypeGuesserInterface
{
/**
* @var array<string, array<string, string|false>>
*/
private array $cache = [];
public function guessType(string $class, string $property): ?TypeGuess
{
if (!($enum = $this->getPropertyType($class, $property))) {
return null;
}
return new TypeGuess(EnumType::class, ['class' => ltrim($enum, '?')], Guess::HIGH_CONFIDENCE);
}
public function guessRequired(string $class, string $property): ?ValueGuess
{
if (!($enum = $this->getPropertyType($class, $property))) {
return null;
}
return new ValueGuess('?' !== $enum[0], Guess::HIGH_CONFIDENCE);
}
    /**
     * Enum fields have no meaningful maximum length, so no guess is made.
     */
    public function guessMaxLength(string $class, string $property): ?ValueGuess
    {
        return null;
    }
    /**
     * Enum fields have no meaningful validation pattern, so no guess is made.
     */
    public function guessPattern(string $class, string $property): ?ValueGuess
    {
        return null;
    }
    /**
     * Resolves the enum FQCN declared for the property, prefixed with "?" when
     * the type allows null, or false when the property does not exist or is
     * not typed with an enum. Results are memoized per class/property.
     */
    private function getPropertyType(string $class, string $property): string|false
    {
        if (isset($this->cache[$class][$property])) {
            return $this->cache[$class][$property];
        }
        try {
            $propertyReflection = new \ReflectionProperty($class, $property);
        } catch (\ReflectionException) {
            // Unknown property: cache the negative result as well.
            return $this->cache[$class][$property] = false;
        }
        $type = $propertyReflection->getType();
        if (!$type instanceof \ReflectionNamedType || !enum_exists($type->getName())) {
            $enum = false;
        } else {
            $enum = $type->getName();
            if ($type->allowsNull()) {
                // Mark nullability so guessRequired() can inspect it.
                $enum = '?'.$enum;
            }
        }
        return $this->cache[$class][$property] = $enum;
    }
} | php | github | https://github.com/symfony/symfony | src/Symfony/Component/Form/EnumFormTypeGuesser.php |
#!/usr/bin/env python
#
# This script will warm up the buffer cache with the tables required to run the input
# query. This only works on a mini-dfs cluster. This is remarkably difficult to do
# since hdfs which tries to hide the details of the block locations from users.
# The only way to do this is to
# 1. use the java APIs (deprecated, of course) to extract the block ids.
# 2. find the files with those block ids on the file system and read them
#
# First run testdata/bin/generate-block-ids.sh. This will output the block locations
# to testdata/block-ids. This file is good as long as the mini-dfs cluster does not
# get new files. If the block-ids file is not there, this script will run
# generate-block-ids.sh.
#
# Run this script, passing it the query and it will go read every replica of every
# block of every table in the query.
import math
import os
import re
import sys
import subprocess
import tempfile
from optparse import OptionParser
# Options
parser = OptionParser()
parser.add_option("-q", "--query", dest="query", default = "",
help="Query to run. If none specified, runs all queries.")
(options, args) = parser.parse_args()
# Relative path (under $IMPALA_HOME) of the generated table -> block-id map.
block_ids_file = 'testdata/block-ids'
# Root directory where the mini-dfs data nodes store their block files.
data_node_root = os.environ['MINI_DFS_BASE_DATA_DIR'] + '/dfs/data'
# table name -> list of HDFS block ids, filled in by parse_block_ids().
block_ids = {}
# Parse the block ids file to all the block ids for all the tables
# the format of the file is:
# <table name>: <block_id1> <block_id2> <etc>
def parse_block_ids():
  """Populate the global block_ids dict from testdata/block-ids.

  The file maps each table to its HDFS block ids, one table per line:
      <table name>: <block_id1> <block_id2> ...
  If the file is missing it is generated by running
  testdata/bin/generate-block-ids.sh first.
  """
  full_path = os.path.join(os.environ['IMPALA_HOME'], block_ids_file)
  if not os.path.isfile(full_path):
    cmd = os.environ['IMPALA_HOME'] + '/testdata/bin/generate-block-ids.sh'
    os.system(cmd)
  if not os.path.isfile(full_path):
    raise Exception("Could not find/generate block id files: " + full_path)
  # 'with' guarantees the handle is closed (the original leaked it).
  with open(full_path) as f:
    for line in f:
      tokens = line.split(':')
      if len(tokens) < 2:
        # Skip blank or malformed lines instead of crashing on tokens[1].
        continue
      blocks = tokens[1].strip().split(' ')
      block_ids[tokens[0].strip()] = blocks
# Parse for the tables used in this query
def parse_tables(query):
  """Extract table names from a SQL query string.

  Scans whitespace-separated tokens (case-insensitively) and records the
  token following each FROM or JOIN keyword. Subqueries, aliases and
  comma-separated table lists are not handled.
  """
  table_predecessors = ('from', 'join')
  tables = []
  next_is_table = False
  # str.split() with no argument collapses runs of whitespace, so doubled
  # spaces or tabs no longer produce empty tokens; the original split(' ')
  # could record '' as a table name after e.g. "from  t".
  for token in query.split():
    token = token.lower()
    if next_is_table:
      tables.append(token)
      next_is_table = False
    if token in table_predecessors:
      next_is_table = True
  return tables
# Warm the buffer cache by cat-ing all the blocks to /dev/null
def warm_buffer_cache(table):
  """Read every replica of every block of `table` into the OS buffer cache."""
  if table not in block_ids:
    raise Exception("Table not found: " + table)
  blocks = block_ids[table]
  for block in blocks:
    # cat each matching block file to /dev/null; the read itself is what
    # pulls the data into the buffer cache.
    cmd = 'find %s -type f -name blk_%s* -exec cat {} > /dev/null \;' % \
          (data_node_root, block)
    os.system(cmd)
# Determine which tables the query touches, then read all their blocks so
# the data ends up in the OS buffer cache before the query is run.
tables = parse_tables(options.query)
parse_block_ids()
if len(tables) == 0:
  raise Exception("Could not parse tables in: " + options.query)
for table in tables:
warm_buffer_cache(table) | unknown | codeparrot/codeparrot-clean | ||
from functools import partial
from django.contrib.gis.db.models import aggregates
class BaseSpatialFeatures(object):
    """
    Feature flags describing the GIS capabilities of a database backend.

    Spatial backends mix this class into their DatabaseFeatures and override
    individual flags; has_<name>_method properties are added dynamically in
    __init__ for every entry in geoqueryset_methods.
    """
    # Assume GIS support unless a backend explicitly disables it.
    gis_enabled = True
# Does the database contain a SpatialRefSys model to store SRID information?
has_spatialrefsys_table = True
# Does the backend support the django.contrib.gis.utils.add_srs_entry() utility?
supports_add_srs_entry = True
# Does the backend introspect GeometryField to its subtypes?
supports_geometry_field_introspection = True
# Reference implementation of 3D functions is:
# http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
supports_3d_functions = False
# Does the database support SRID transform operations?
supports_transform = True
# Do geometric relationship operations operate on real shapes (or only on bounding boxes)?
supports_real_shape_operations = True
# Can geometry fields be null?
supports_null_geometries = True
# Can the `distance` GeoQuerySet method be applied on geodetic coordinate systems?
supports_distance_geodetic = True
# Is the database able to count vertices on polygons (with `num_points`)?
supports_num_points_poly = True
# The following properties indicate if the database backend support
# certain lookups (dwithin, left and right, relate, ...)
supports_distances_lookups = True
supports_left_right_lookups = False
@property
def supports_bbcontains_lookup(self):
return 'bbcontains' in self.connection.ops.gis_operators
@property
def supports_contained_lookup(self):
return 'contained' in self.connection.ops.gis_operators
@property
def supports_crosses_lookup(self):
return 'crosses' in self.connection.ops.gis_operators
@property
def supports_dwithin_lookup(self):
return 'dwithin' in self.connection.ops.gis_operators
@property
def supports_relate_lookup(self):
return 'relate' in self.connection.ops.gis_operators
# For each of those methods, the class will have a property named
# `has_<name>_method` (defined in __init__) which accesses connection.ops
# to determine GIS method availability.
geoqueryset_methods = (
'area', 'centroid', 'difference', 'distance', 'distance_spheroid',
'envelope', 'force_rhr', 'geohash', 'gml', 'intersection', 'kml',
'length', 'num_geom', 'perimeter', 'point_on_surface', 'reverse',
'scale', 'snap_to_grid', 'svg', 'sym_difference', 'transform',
'translate', 'union', 'unionagg',
)
# Specifies whether the Collect and Extent aggregates are supported by the database
@property
def supports_collect_aggr(self):
return aggregates.Collect not in self.connection.ops.disallowed_aggregates
@property
def supports_extent_aggr(self):
return aggregates.Extent not in self.connection.ops.disallowed_aggregates
@property
def supports_make_line_aggr(self):
return aggregates.MakeLine not in self.connection.ops.disallowed_aggregates
    def __init__(self, *args):
        """
        Attach a has_<name>_method property to the class for every entry in
        geoqueryset_methods; each property reports whether connection.ops
        defines the corresponding GeoQuerySet method.
        """
        super(BaseSpatialFeatures, self).__init__(*args)
        for method in self.geoqueryset_methods:
            # Add dynamically properties for each GQS method, e.g. has_force_rhr_method, etc.
            setattr(self.__class__, 'has_%s_method' % method,
                    property(partial(BaseSpatialFeatures.has_ops_method, method=method)))
    def has_ops_method(self, method):
        """Return the connection.ops attribute named `method`, or False if absent."""
return getattr(self.connection.ops, method, False) | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2024 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package featuregate
import (
"flag"
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
func TestFeatureGateFlag(t *testing.T) {
// gates for testing
const testAlphaGate Feature = "TestAlpha"
const testBetaGate Feature = "TestBeta"
tests := []struct {
arg string
expect map[Feature]bool
parseError string
}{
{
arg: "",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: false,
},
},
{
arg: "fooBarBaz=true",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: false,
},
parseError: "unrecognized feature gate: fooBarBaz",
},
{
arg: "AllAlpha=false",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: false,
},
},
{
arg: "AllAlpha=true",
expect: map[Feature]bool{
allAlphaGate: true,
allBetaGate: false,
testAlphaGate: true,
testBetaGate: false,
},
},
{
arg: "AllAlpha=banana",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: false,
},
parseError: "invalid value of AllAlpha",
},
{
arg: "AllAlpha=false,TestAlpha=true",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: true,
testBetaGate: false,
},
},
{
arg: "TestAlpha=true,AllAlpha=false",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: true,
testBetaGate: false,
},
},
{
arg: "AllAlpha=true,TestAlpha=false",
expect: map[Feature]bool{
allAlphaGate: true,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: false,
},
},
{
arg: "TestAlpha=false,AllAlpha=true",
expect: map[Feature]bool{
allAlphaGate: true,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: false,
},
},
{
arg: "TestBeta=true,AllAlpha=false",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: true,
},
},
{
arg: "AllBeta=false",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: false,
},
},
{
arg: "AllBeta=true",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: true,
testAlphaGate: false,
testBetaGate: true,
},
},
{
arg: "AllBeta=banana",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: false,
},
parseError: "invalid value of AllBeta",
},
{
arg: "AllBeta=false,TestBeta=true",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: true,
},
},
{
arg: "TestBeta=true,AllBeta=false",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: false,
testBetaGate: true,
},
},
{
arg: "AllBeta=true,TestBeta=false",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: true,
testAlphaGate: false,
testBetaGate: false,
},
},
{
arg: "TestBeta=false,AllBeta=true",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: true,
testAlphaGate: false,
testBetaGate: false,
},
},
{
arg: "TestAlpha=true,AllBeta=false",
expect: map[Feature]bool{
allAlphaGate: false,
allBetaGate: false,
testAlphaGate: true,
testBetaGate: false,
},
},
}
for i, test := range tests {
t.Run(test.arg, func(t *testing.T) {
fs := flag.NewFlagSet("testfeaturegateflag", flag.ContinueOnError)
f := New("test", zaptest.NewLogger(t))
f.Add(map[Feature]FeatureSpec{
testAlphaGate: {Default: false, PreRelease: Alpha},
testBetaGate: {Default: false, PreRelease: Beta},
})
f.AddFlag(fs, defaultFlagName)
err := fs.Parse([]string{fmt.Sprintf("--%s=%s", defaultFlagName, test.arg)})
if test.parseError != "" {
assert.Containsf(t, err.Error(), test.parseError, "%d: Parse() Expected %v, Got %v", i, test.parseError, err)
} else if err != nil {
t.Errorf("%d: Parse() Expected nil, Got %v", i, err)
}
for k, v := range test.expect {
actual := f.Enabled(k)
assert.Equalf(t, actual, v, "%d: expected %s=%v, Got %v", i, k, v, actual)
}
})
}
}
// TestFeatureGateOverride verifies that a later Set overrides only the gates
// it names, leaving previously set gates untouched.
func TestFeatureGateOverride(t *testing.T) {
	const testAlphaGate Feature = "TestAlpha"
	const testBetaGate Feature = "TestBeta"
	// Don't parse the flag, assert defaults are used.
	f := New("test", zaptest.NewLogger(t))
	// Check the returned errors so a registration or parse failure fails the
	// test instead of silently leaving the gates at their defaults (the
	// original ignored these error returns).
	require.NoError(t, f.Add(map[Feature]FeatureSpec{
		testAlphaGate: {Default: false, PreRelease: Alpha},
		testBetaGate:  {Default: false, PreRelease: Beta},
	}))
	require.NoError(t, f.Set("TestAlpha=true,TestBeta=true"))
	assert.Truef(t, f.Enabled(testAlphaGate), "Expected true")
	assert.Truef(t, f.Enabled(testBetaGate), "Expected true")
	require.NoError(t, f.Set("TestAlpha=false"))
	assert.Falsef(t, f.Enabled(testAlphaGate), "Expected false")
	assert.Truef(t, f.Enabled(testBetaGate), "Expected true")
}
// TestFeatureGateFlagDefaults verifies that, without any flag parsing or
// explicit Set, each gate reports its registered default value.
func TestFeatureGateFlagDefaults(t *testing.T) {
	// gates for testing
	const testAlphaGate Feature = "TestAlpha"
	const testBetaGate Feature = "TestBeta"
	// Don't parse the flag, assert defaults are used.
	f := New("test", zaptest.NewLogger(t))
	f.Add(map[Feature]FeatureSpec{
		testAlphaGate: {Default: false, PreRelease: Alpha},
		testBetaGate:  {Default: true, PreRelease: Beta},
	})
	assert.Falsef(t, f.Enabled(testAlphaGate), "Expected false")
	assert.Truef(t, f.Enabled(testBetaGate), "Expected true")
}
// TestFeatureGateKnownFeatures verifies that KnownFeatures lists Alpha and
// Beta gates but omits GA and Deprecated ones.
func TestFeatureGateKnownFeatures(t *testing.T) {
	// gates for testing
	const (
		testAlphaGate      Feature = "TestAlpha"
		testBetaGate       Feature = "TestBeta"
		testGAGate         Feature = "TestGA"
		testDeprecatedGate Feature = "TestDeprecated"
	)
	// Don't parse the flag, assert defaults are used.
	f := New("test", zaptest.NewLogger(t))
	f.Add(map[Feature]FeatureSpec{
		testAlphaGate:      {Default: false, PreRelease: Alpha},
		testBetaGate:       {Default: true, PreRelease: Beta},
		testGAGate:         {Default: true, PreRelease: GA},
		testDeprecatedGate: {Default: false, PreRelease: Deprecated},
	})
	known := strings.Join(f.KnownFeatures(), " ")
	assert.Contains(t, known, testAlphaGate)
	assert.Contains(t, known, testBetaGate)
	// GA and deprecated gates are intentionally hidden from the listing.
	assert.NotContains(t, known, testGAGate)
	assert.NotContains(t, known, testDeprecatedGate)
}
func TestFeatureGateSetFromMap(t *testing.T) {
// gates for testing
const testAlphaGate Feature = "TestAlpha"
const testBetaGate Feature = "TestBeta"
const testLockedTrueGate Feature = "TestLockedTrue"
const testLockedFalseGate Feature = "TestLockedFalse"
tests := []struct {
name string
setmap map[string]bool
expect map[Feature]bool
setmapError string
}{
{
name: "set TestAlpha and TestBeta true",
setmap: map[string]bool{
"TestAlpha": true,
"TestBeta": true,
},
expect: map[Feature]bool{
testAlphaGate: true,
testBetaGate: true,
},
},
{
name: "set TestBeta true",
setmap: map[string]bool{
"TestBeta": true,
},
expect: map[Feature]bool{
testAlphaGate: false,
testBetaGate: true,
},
},
{
name: "set TestAlpha false",
setmap: map[string]bool{
"TestAlpha": false,
},
expect: map[Feature]bool{
testAlphaGate: false,
testBetaGate: false,
},
},
{
name: "set TestInvaild true",
setmap: map[string]bool{
"TestInvaild": true,
},
expect: map[Feature]bool{
testAlphaGate: false,
testBetaGate: false,
},
setmapError: "unrecognized feature gate:",
},
{
name: "set locked gates",
setmap: map[string]bool{
"TestLockedTrue": true,
"TestLockedFalse": false,
},
expect: map[Feature]bool{
testAlphaGate: false,
testBetaGate: false,
},
},
{
name: "set locked gates",
setmap: map[string]bool{
"TestLockedTrue": false,
},
expect: map[Feature]bool{
testAlphaGate: false,
testBetaGate: false,
},
setmapError: "cannot set feature gate TestLockedTrue to false, feature is locked to true",
},
{
name: "set locked gates",
setmap: map[string]bool{
"TestLockedFalse": true,
},
expect: map[Feature]bool{
testAlphaGate: false,
testBetaGate: false,
},
setmapError: "cannot set feature gate TestLockedFalse to true, feature is locked to false",
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("SetFromMap %s", test.name), func(t *testing.T) {
f := New("test", zaptest.NewLogger(t))
f.Add(map[Feature]FeatureSpec{
testAlphaGate: {Default: false, PreRelease: Alpha},
testBetaGate: {Default: false, PreRelease: Beta},
testLockedTrueGate: {Default: true, PreRelease: GA, LockToDefault: true},
testLockedFalseGate: {Default: false, PreRelease: GA, LockToDefault: true},
})
err := f.SetFromMap(test.setmap)
if test.setmapError != "" {
if err == nil {
t.Errorf("expected error, got none")
} else if !strings.Contains(err.Error(), test.setmapError) {
t.Errorf("%d: SetFromMap(%#v) Expected err:%v, Got err:%v", i, test.setmap, test.setmapError, err)
}
} else if err != nil {
t.Errorf("%d: SetFromMap(%#v) Expected success, Got err:%v", i, test.setmap, err)
}
for k, v := range test.expect {
actual := f.Enabled(k)
assert.Equalf(t, actual, v, "%d: SetFromMap(%#v) Expected %s=%v, Got %s=%v", i, test.setmap, k, v, k, actual)
}
})
}
}
// TestFeatureGateMetrics is a placeholder for future feature gate metrics coverage.
func TestFeatureGateMetrics(t *testing.T) {
	// TODO(henrybear327): Add tests once feature gate metrics are added.
}
// TestFeatureGateString verifies that String() renders the explicitly set
// gates as a sorted, comma-separated "name=value" list.
func TestFeatureGateString(t *testing.T) {
	// gates for testing
	const testAlphaGate Feature = "TestAlpha"
	const testBetaGate Feature = "TestBeta"
	const testGAGate Feature = "TestGA"
	featuremap := map[Feature]FeatureSpec{
		testGAGate:    {Default: true, PreRelease: GA},
		testAlphaGate: {Default: false, PreRelease: Alpha},
		testBetaGate:  {Default: true, PreRelease: Beta},
	}
	tests := []struct {
		setmap map[string]bool
		expect string
	}{
		{
			setmap: map[string]bool{
				"TestAlpha": false,
			},
			expect: "TestAlpha=false",
		},
		{
			setmap: map[string]bool{
				"TestAlpha": false,
				"TestBeta":  true,
			},
			expect: "TestAlpha=false,TestBeta=true",
		},
		{
			setmap: map[string]bool{
				"TestGA":    true,
				"TestAlpha": false,
				"TestBeta":  true,
			},
			expect: "TestAlpha=false,TestBeta=true,TestGA=true",
		},
	}
	for i, test := range tests {
		t.Run(fmt.Sprintf("SetFromMap %s", test.expect), func(t *testing.T) {
			f := New("test", zaptest.NewLogger(t))
			f.Add(featuremap)
			// Fail fast if the map cannot be applied instead of asserting
			// against a partially configured gate (error was ignored before).
			require.NoError(t, f.SetFromMap(test.setmap))
			result := f.String()
			// testify takes (expected, actual); the original had the
			// arguments swapped, which garbles failure messages.
			assert.Equalf(t, test.expect, result, "%d: SetFromMap(%#v) Expected %s, Got %s", i, test.setmap, test.expect, result)
		})
	}
}
func TestFeatureGateOverrideDefault(t *testing.T) {
t.Run("overrides take effect", func(t *testing.T) {
f := New("test", zaptest.NewLogger(t))
err := f.Add(map[Feature]FeatureSpec{
"TestFeature1": {Default: true},
"TestFeature2": {Default: false},
})
require.NoError(t, err)
require.NoError(t, f.OverrideDefault("TestFeature1", false))
require.NoError(t, f.OverrideDefault("TestFeature2", true))
assert.Falsef(t, f.Enabled("TestFeature1"), "expected TestFeature1 to have effective default of false")
assert.Truef(t, f.Enabled("TestFeature2"), "expected TestFeature2 to have effective default of true")
})
t.Run("overrides are preserved across deep copies", func(t *testing.T) {
f := New("test", zaptest.NewLogger(t))
err := f.Add(map[Feature]FeatureSpec{"TestFeature": {Default: false}})
require.NoError(t, err)
require.NoError(t, f.OverrideDefault("TestFeature", true))
fcopy := f.DeepCopy()
assert.Truef(t, fcopy.Enabled("TestFeature"), "default override was not preserved by deep copy")
})
t.Run("reflected in known features", func(t *testing.T) {
f := New("test", zaptest.NewLogger(t))
err := f.Add(map[Feature]FeatureSpec{"TestFeature": {
Default: false,
PreRelease: Alpha,
}})
require.NoError(t, err)
require.NoError(t, f.OverrideDefault("TestFeature", true))
var found bool
for _, s := range f.KnownFeatures() {
if !strings.Contains(s, "TestFeature") {
continue
}
found = true
assert.Containsf(t, s, "default=true", "expected override of default to be reflected in known feature description %q", s)
}
assert.Truef(t, found, "found no entry for TestFeature in known features")
})
t.Run("may not change default for specs with locked defaults", func(t *testing.T) {
f := New("test", zaptest.NewLogger(t))
err := f.Add(map[Feature]FeatureSpec{
"LockedFeature": {
Default: true,
LockToDefault: true,
},
})
require.NoError(t, err)
require.Errorf(t, f.OverrideDefault("LockedFeature", false), "expected error when attempting to override the default for a feature with a locked default")
assert.Errorf(t, f.OverrideDefault("LockedFeature", true), "expected error when attempting to override the default for a feature with a locked default")
})
t.Run("does not supersede explicitly-set value", func(t *testing.T) {
f := New("test", zaptest.NewLogger(t))
err := f.Add(map[Feature]FeatureSpec{"TestFeature": {Default: true}})
require.NoError(t, err)
require.NoError(t, f.OverrideDefault("TestFeature", false))
require.NoError(t, f.SetFromMap(map[string]bool{"TestFeature": true}))
assert.Truef(t, f.Enabled("TestFeature"), "expected feature to be effectively enabled despite default override")
})
t.Run("prevents re-registration of feature spec after overriding default", func(t *testing.T) {
f := New("test", zaptest.NewLogger(t))
err := f.Add(map[Feature]FeatureSpec{
"TestFeature": {
Default: true,
PreRelease: Alpha,
},
})
require.NoError(t, err)
require.NoError(t, f.OverrideDefault("TestFeature", false))
err = f.Add(map[Feature]FeatureSpec{
"TestFeature": {
Default: true,
PreRelease: Alpha,
},
})
assert.Errorf(t, err, "expected re-registration to return a non-nil error after overriding its default")
})
t.Run("does not allow override for an unknown feature", func(t *testing.T) {
f := New("test", zaptest.NewLogger(t))
err := f.OverrideDefault("TestFeature", true)
assert.Errorf(t, err, "expected an error to be returned in attempt to override default for unregistered feature")
})
t.Run("returns error if already added to flag set", func(t *testing.T) {
f := New("test", zaptest.NewLogger(t))
fs := flag.NewFlagSet("test", flag.ContinueOnError)
f.AddFlag(fs, defaultFlagName)
err := f.OverrideDefault("TestFeature", true)
assert.Errorf(t, err, "expected a non-nil error to be returned")
})
} | go | github | https://github.com/etcd-io/etcd | pkg/featuregate/feature_gate_test.go |
"""
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
    """
    Ensure any Location header in the response holds an absolute URI, as
    required by RFC 2616, section 14.30. View code is free to set relative
    paths; they are resolved against the request here.
    """
    has_location = 'Location' in response
    if has_location and request.get_host():
        absolute = request.build_absolute_uri(response['Location'])
        response['Location'] = absolute
    return response
def conditional_content_removal(request, response):
    """
    Strip the body from responses that must not carry one: HEAD requests
    and 1xx, 204 and 304 responses. Ensures compliance with RFC 2616,
    section 4.3.
    """
    status = response.status_code
    bodyless_status = (100 <= status < 200) or status in (204, 304)
    if bodyless_status:
        response.content = ''
        response['Content-Length'] = 0
    if request.method == 'HEAD':
        response.content = ''
    return response
def fix_IE_for_attach(request, response):
    """
    This function will prevent Django from serving a Content-Disposition header
    while expecting the browser to cache it (only when the browser is IE). This
    leads to IE not allowing the client to download.
    """
    if 'MSIE' not in request.META.get('HTTP_USER_AGENT', '').upper():
        return response
    # Cache directives that break IE downloads of attachments.
    offending_headers = ('no-cache', 'no-store')
    if response.has_header('Content-Disposition'):
        try:
            del response['Pragma']
        except KeyError:
            pass
        if response.has_header('Cache-Control'):
            # Drop only the offending directives, preserving any others.
            cache_control_values = [value.strip() for value in
                                    response['Cache-Control'].split(',')
                                    if value.strip().lower() not in offending_headers]
            if not len(cache_control_values):
                del response['Cache-Control']
            else:
                response['Cache-Control'] = ', '.join(cache_control_values)
    return response
def fix_IE_for_vary(request, response):
    """
    This function will fix the bug reported at
    http://support.microsoft.com/kb/824847/en-us?spid=8722&sid=global
    by clearing the Vary header whenever the mime-type is not safe
    enough for Internet Explorer to handle. Poor thing.
    """
    if 'MSIE' not in request.META.get('HTTP_USER_AGENT', '').upper():
        return response
    # These mime-types that are decreed "Vary-safe" for IE:
    safe_mime_types = ('text/html', 'text/plain', 'text/sgml')
    # The first part of the Content-Type field will be the MIME type,
    # everything after ';', such as character-set, can be ignored.
    if response['Content-Type'].split(';')[0] not in safe_mime_types:
        try:
            del response['Vary']
        except KeyError:
            # No Vary header present; nothing to clear.
            pass
return response | unknown | codeparrot/codeparrot-clean | ||
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
/** @typedef {import("../declarations/WebpackOptions").StatsOptions} StatsOptions */
/** @typedef {import("./Compilation")} Compilation */
/** @typedef {import("./stats/DefaultStatsFactoryPlugin").StatsCompilation} StatsCompilation */
/**
 * Wraps a finished compilation and exposes convenience accessors for its
 * outcome (hash, timing, errors/warnings) plus the entry points used to
 * render stats as JSON or as a string.
 */
class Stats {
	/**
	 * @param {Compilation} compilation webpack compilation
	 */
	constructor(compilation) {
		this.compilation = compilation;
	}

	get hash() {
		return this.compilation.hash;
	}

	get startTime() {
		return this.compilation.startTime;
	}

	get endTime() {
		return this.compilation.endTime;
	}

	/**
	 * @returns {boolean} true if the compilation had a warning
	 */
	hasWarnings() {
		if (this.compilation.getWarnings().length > 0) return true;
		// A parent compilation also reports warnings from any child compilation.
		return this.compilation.children.some((child) =>
			child.getStats().hasWarnings()
		);
	}

	/**
	 * @returns {boolean} true if the compilation encountered an error
	 */
	hasErrors() {
		if (this.compilation.errors.length > 0) return true;
		// A parent compilation also reports errors from any child compilation.
		return this.compilation.children.some((child) =>
			child.getStats().hasErrors()
		);
	}

	/**
	 * @param {(string | boolean | StatsOptions)=} options stats options
	 * @returns {StatsCompilation} json output
	 */
	toJson(options) {
		const compilation = this.compilation;
		const normalizedOptions = compilation.createStatsOptions(options, {
			forToString: false
		});
		return compilation
			.createStatsFactory(normalizedOptions)
			.create("compilation", compilation, { compilation });
	}

	/**
	 * @param {(string | boolean | StatsOptions)=} options stats options
	 * @returns {string} string output
	 */
	toString(options) {
		const compilation = this.compilation;
		const normalizedOptions = compilation.createStatsOptions(options, {
			forToString: true
		});
		const factory = compilation.createStatsFactory(normalizedOptions);
		const printer = compilation.createStatsPrinter(normalizedOptions);
		const data = factory.create("compilation", compilation, { compilation });
		const printed = printer.print("compilation", data);
		return printed === undefined ? "" : printed;
	}
}
module.exports = Stats; | javascript | github | https://github.com/webpack/webpack | lib/Stats.js |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Hotel Reservation Management - Reporting",
"version" : "1.0",
"author" : "Tiny,Odoo Community Association (OCA)",
"depends" : ["hotel_reservation"],
"category" : "Generic Modules/Hotel Reservation",
"description": """
Module shows the status of room reservation
* Current status of reserved room
* List status of room as draft or done state
""",
"init_xml" : [],
"demo_xml" : [],
"update_xml" : ["security/ir.model.access.csv","report_hotel_reservation_view.xml"],
"active": False,
'installable': False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
import collections
import string

import numpy
import six

import cupy
from cupy import carray
from cupy import elementwise
from cupy import util

try:
    from collections.abc import Sequence  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import Sequence
six_range = six.moves.range
six_zip = six.moves.zip
_broadcast = elementwise._broadcast
_check_args = elementwise._check_args
_decide_params_type = elementwise._decide_params_type
_get_kernel_params = elementwise._get_kernel_params
_get_args_info = elementwise._get_args_info
_get_out_args = elementwise._get_out_args
_get_out_args_with_params = elementwise._get_out_args_with_params
_get_param_info = elementwise._get_param_info
_get_typename = elementwise._get_typename
_guess_routine = elementwise._guess_routine
_reduce_dims = elementwise._reduce_dims
def _get_simple_reduction_kernel(
        name, block_size, reduce_type, params, identity,
        pre_map_expr, reduce_expr, post_map_expr,
        type_preamble, input_expr, output_expr, preamble, options):
    """Compile and return a CUDA reduction kernel built from code fragments.

    The generated kernel has two strategies, selected at runtime on
    ``_out_clp2_size`` (the output size rounded up to a power of two):

    * When it exceeds 256, each thread serially reduces one output element
      over its strided input slice (no shared memory).
    * Otherwise, ``_out_clp2_size`` threads cooperate per output element
      through shared memory with a tree reduction, halving the active
      stride at each step (no ``__syncthreads`` below a stride of 32,
      relying on warp-synchronous execution).

    Args:
        name: Name of the generated ``__global__`` function.
        block_size: Thread-block size baked into the stride computation.
        reduce_type: C type of the reduction accumulator.
        params: C parameter list for the kernel signature.
        identity: C expression for the reduction identity (``None`` maps
            to the empty string, i.e. value-initialization).
        pre_map_expr: Expression mapping an input element to a reduce value.
        reduce_expr: Binary expression combining two reduce values.
        post_map_expr: Expression mapping the final value into the output.
        type_preamble: ``typedef`` block spliced before the kernel.
        input_expr: Fragment loading the current input element(s).
        output_expr: Fragment binding the current output reference(s).
        preamble: User-supplied CUDA-C code inserted at the top.
        options: Extra compile options.

    Returns:
        The compiled kernel function object (cached on disk by
        ``carray.compile_with_cache``).
    """
    if identity is None:
        identity = ''
    module_code = string.Template('''
${type_preamble}
${preamble}
#define REDUCE(a, b) (${reduce_expr})
#define POST_MAP(a) (${post_map_expr})
typedef ${reduce_type} _type_reduce;
extern "C" __global__ void ${name}(${params}) {
  if (_out_clp2_size > 256) {
    CUPY_FOR(_i, _out_ind.size()) {
      _type_reduce _s = _type_reduce(${identity});
      for (int _j = _i, _J = 0;
           _j < _in_ind.size();
           _j += _out_ind.size(), _J++) {
        _in_ind.set(_j);
        ${input_expr}
        _type_reduce _a = ${pre_map_expr};
        _s = REDUCE(_s, _a);
      }
      _out_ind.set(_i);
      ${output_expr}
      POST_MAP(_s);
    }
  } else {
    extern __shared__ _type_reduce _sdata_raw[];
    _type_reduce *_sdata = _sdata_raw;
    int _tid = threadIdx.x;
    _sdata[_tid] = _type_reduce(${identity});
    unsigned int _i = _tid % _out_clp2_size;
    if (_i >= _out_ind.size()) return;
    _type_reduce _s = _type_reduce(${identity});
    int _J_offset = _tid / _out_clp2_size;
    int _j_offset = _J_offset * _out_ind.size();
    int _J_stride = ${block_size} / _out_clp2_size;
    int _j_stride = _J_stride * _out_ind.size();
    for (int _j = _i + _j_offset, _J = _J_offset;
         _j < _in_ind.size();
         _j += _j_stride, _J += _J_stride) {
      _in_ind.set(_j);
      ${input_expr}
      _type_reduce _a = ${pre_map_expr};
      _s = REDUCE(_s, _a);
    }
    _sdata[_tid] = _s;
    __syncthreads();
    if (_tid >= 256) return;
    _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 256]);
    __syncthreads();
    if (_out_clp2_size <= 128) {
      _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 128]);
      __syncthreads();
      if (_out_clp2_size <= 64) {
        _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 64]);
        __syncthreads();
        if (_out_clp2_size <= 32) {
          _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 32]);
          if (_out_clp2_size <= 16) {
            _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 16]);
            if (_out_clp2_size <= 8) {
              _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 8]);
              if (_out_clp2_size <= 4) {
                _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 4]);
                if (_out_clp2_size <= 2) {
                  _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 2]);
                  if (_out_clp2_size <= 1) {
                    _sdata[_tid] = REDUCE(_sdata[_tid], _sdata[_tid + 1]);
                  }
                }
              }
            }
          }
        }
      }
    }
    _s = _sdata[_tid];
    if (_tid >= _out_ind.size()) return;
    _out_ind.set(_i);
    ${output_expr}
    POST_MAP(_s);
  }
}''').substitute(
        name=name,
        block_size=block_size,
        reduce_type=reduce_type,
        params=params,
        identity=identity,
        reduce_expr=reduce_expr,
        pre_map_expr=pre_map_expr,
        post_map_expr=post_map_expr,
        type_preamble=type_preamble,
        input_expr=input_expr,
        output_expr=output_expr,
        preamble=preamble)
    # compile_with_cache hashes the source, so repeated calls are cheap.
    module = carray.compile_with_cache(module_code, options)
    return module.get_function(name)
def _get_axis(axis, ndim):
if axis is None:
axis = tuple(six_range(ndim))
elif isinstance(axis, collections.Sequence):
axis = tuple(axis)
else:
axis = axis,
for dim in axis:
if dim < -ndim or dim >= ndim:
raise ValueError('Axis overrun')
axis = tuple(sorted([dim % ndim for dim in axis]))
raxis = tuple([dim for dim in six_range(ndim) if dim not in axis])
return axis, raxis
def _get_out_shape(shape, axis, raxis, keepdims):
if keepdims:
out_shape = list(shape)
for i in axis:
out_shape[i] = 1
return tuple(out_shape)
return tuple([shape[i] for i in raxis])
def _get_trans_args(args, trans, shape, params=None):
if trans == tuple(six_range(len(shape))):
return args, shape
if params is not None and any(p.raw for p in params):
raise NotImplementedError('Illegal conditions')
args = [cupy.transpose(a, trans) if isinstance(a, cupy.ndarray) else a
for a in args]
shape = tuple([shape[i] for i in trans])
return args, shape
def _get_inout_args(in_args, out_args, in_indexer, out_indexer, out_clp2_size,
params, reduce_dims):
if reduce_dims:
in_args, in_shape = _reduce_dims(
in_args, params, in_indexer.shape)
out_args, out_shape = _reduce_dims(
out_args, params[len(in_args):], out_indexer.shape)
in_indexer.shape = in_shape
out_indexer.shape = out_shape
args = in_args + out_args + [in_indexer, out_indexer,
numpy.int32(out_clp2_size)]
return args
@util.memoize(for_each_device=True)
def _get_simple_reduction_function(
        routine, params, args_info, in_arg_dtype, out_arg_dtype, out_types,
        name, block_size, identity, input_expr, output_expr, _preamble,
        options):
    """Memoized (per device) kernel factory for simple reductions.

    ``routine`` is a 4-tuple ``(pre_map_expr, reduce_expr, post_map_expr,
    reduce_type)``.
    """
    reduce_type = routine[3]
    if reduce_type is None:
        # Fall back to the output dtype's C name for the accumulator.
        reduce_type = _get_typename(out_types[0])
    # Expose the raw in/out dtypes to kernel fragments as fixed typedefs.
    t = (_get_typename(in_arg_dtype), _get_typename(out_arg_dtype))
    type_preamble = 'typedef %s type_in0_raw; typedef %s type_out0_raw;' % t
    params = _get_kernel_params(params, args_info)
    return _get_simple_reduction_kernel(
        name, block_size, reduce_type, params, identity,
        routine[0], routine[1], routine[2],
        type_preamble, input_expr, output_expr, _preamble, options)
class simple_reduction_function(object):
    """Reduction over a single input array driven by a dtype-routine table.

    Instances behave like NumPy-style reduction functions (``sum``,
    ``amax``, ...): they accept ``axis``/``dtype``/``out``/``keepdims``
    and pick a kernel routine by matching the input dtype against ``ops``.
    """

    def __init__(self, name, ops, identity, preamble):
        self.name = name  # kernel name, also used as the routine-cache key
        self._ops = ops  # [(in_types, out_types, routine), ...]
        self.identity = identity  # None => empty input is an error
        self._preamble = preamble
        self.nin = 1
        self.nout = 1
        in_params = _get_param_info('T in0', True)
        out_params = _get_param_info('T out0', False)
        # Hidden trailing parameters: the two indexers plus the
        # power-of-two padded output size consumed by the kernel template.
        self._params = (
            in_params + out_params +
            _get_param_info(
                'CIndexer _in_ind, CIndexer _out_ind', False) +
            _get_param_info('int32 _out_clp2_size', True))
        # Fragments that load/store one element per kernel iteration.
        self._input_expr = 'const type_in0_raw in0 = _raw_in0[_in_ind.get()];'
        self._output_expr = 'type_out0_raw &out0 = _raw_out0[_out_ind.get()];'
        self._routine_cache = {}

    def __call__(self, a, axis=None, dtype=None, out=None, keepdims=False):
        """Apply the reduction to ``a`` along ``axis``.

        Args:
            a (cupy.ndarray): Input array.
            axis: Axis or axes to reduce over (``None`` reduces all axes).
            dtype: Optional dtype hint used when selecting the routine.
            out (cupy.ndarray): Optional preallocated output array.
            keepdims (bool): Keep reduced axes as length-1 dimensions.

        Returns:
            The output array (or a tuple when there are multiple outputs).
        """
        if not isinstance(a, cupy.ndarray):
            raise TypeError('Input type must be cupy.ndarray')
        if self.identity is None:
            # Without an identity, reducing an empty array is undefined.
            assert a.size != 0
        if dtype is not None:
            dtype = numpy.dtype(dtype).type
        in_args = [a]
        if out is None:
            _check_args((a,))
            out_args = []
        else:
            _check_args((a, out))
            out_args = [out]
        in_types, out_types, routine = _guess_routine(
            self.name, self._routine_cache, self._ops, in_args, dtype)
        axis, raxis = _get_axis(axis, a.ndim)
        out_shape = _get_out_shape(a.shape, axis, raxis, keepdims)
        out_args = _get_out_args(out_args, out_types, out_shape)
        # Move the reduced axes to the front so each output element
        # reduces a strided slice of the transposed input.
        in_args, in_shape = _get_trans_args(
            in_args, axis + raxis, in_args[0].shape)
        in_indexer = carray.Indexer(in_shape)
        out_indexer = carray.Indexer(out_shape)
        # Output size rounded up to the next power of two: this drives the
        # kernel's choice between the serial and shared-memory strategies.
        out_clp2_size = 2 ** int.bit_length(int(out_indexer.size - 1))
        inout_args = _get_inout_args(
            in_args, out_args, in_indexer, out_indexer, out_clp2_size,
            self._params, True)
        args_info = _get_args_info(inout_args)
        block_size = 512
        kern = _get_simple_reduction_function(
            routine, self._params, args_info,
            in_args[0].dtype.type, out_args[0].dtype.type, out_types,
            self.name, block_size, self.identity,
            self._input_expr, self._output_expr, self._preamble, ())
        shared_mem = 32 * block_size
        if out_clp2_size > 256:
            # Large outputs take the serial kernel path: no shared memory.
            shared_mem = 0
        # TODO(okuta) set actual size
        kern.linear_launch(max(out_indexer.size, block_size), inout_args,
                           shared_mem, block_size)
        if len(out_args) == 1:
            return out_args[0]
        return tuple(out_args)
@util.memoize(for_each_device=True)
def _get_reduction_kernel(
        params, args_info, types,
        name, block_size, reduce_type, identity, map_expr, reduce_expr,
        post_map_expr, preamble, options):
    """Memoized (per device) kernel factory for user-defined reductions.

    Generates the element load/store fragments from the parameter list and
    delegates to :func:`_get_simple_reduction_kernel`.
    """
    kernel_params = _get_kernel_params(params, args_info)
    # Only non-raw ndarray parameters get automatic load/store code; raw
    # parameters are indexed explicitly by user expressions.
    arrays = [p for p, a in six_zip(params, args_info)
              if not p.raw and a[0] is cupy.ndarray]
    type_preamble = '\n'.join(
        'typedef %s %s;' % (_get_typename(v), k)
        for k, v in types)
    # const parameters are inputs (indexed by _j); the rest are outputs
    # (references indexed by _i).
    input_expr = '\n'.join(
        ['const {0} {1} = _raw_{1}[_j];'.format(p.ctype, p.name)
         for p in arrays if p.is_const])
    output_expr = '\n'.join(
        ['{0} &{1} = _raw_{1}[_i];'.format(p.ctype, p.name)
         for p in arrays if not p.is_const])
    return _get_simple_reduction_kernel(
        name, block_size, reduce_type, kernel_params, identity,
        map_expr, reduce_expr, post_map_expr,
        type_preamble, input_expr, output_expr, preamble, options)
class ReductionKernel(object):
    """User-defined reduction kernel.

    This class can be used to define a reduction kernel with or without
    broadcasting.

    The kernel is compiled at an invocation of the
    :meth:`~ReductionKernel.__call__` method, which is cached for each device.
    The compiled binary is also cached into a file under the
    ``$HOME/.cupy/kernel_cache/`` directory with a hashed file name. The cached
    binary is reused by other processes.

    Args:
        in_params (str): Input argument list.
        out_params (str): Output argument list.
        map_expr (str): Mapping expression for input values.
        reduce_expr (str): Reduction expression.
        post_map_expr (str): Mapping expression for reduced values.
        identity (str): Identity value for starting the reduction.
        name (str): Name of the kernel function. It should be set for
            readability of the performance profiling.
        reduce_type (str): Type of values to be used for reduction. This type
            is used to store the special variables ``a``.
        reduce_dims (bool): If True, input arrays are reshaped without copy to
            smaller dimensions for efficiency.
        preamble (str): Fragment of the CUDA-C/C++ code that is inserted at the
            top of the cu file.
        options (tuple of str): Additional compilation options.
    """

    def __init__(self, in_params, out_params,
                 map_expr, reduce_expr, post_map_expr,
                 identity, name='reduce_kernel', reduce_type=None,
                 reduce_dims=True, preamble='', options=()):
        self.in_params = _get_param_info(in_params, True)
        self.out_params = _get_param_info(out_params, False)
        self.nin = len(self.in_params)
        self.nout = len(self.out_params)
        self.nargs = self.nin + self.nout
        # Hidden trailing parameters consumed by the kernel template: the
        # two indexers plus the power-of-two padded output size.
        self.params = (
            self.in_params + self.out_params +
            _get_param_info('CIndexer _in_ind, CIndexer _out_ind', False) +
            _get_param_info('int32 _out_clp2_size', True))
        self.identity = identity
        self.reduce_expr = reduce_expr
        self.map_expr = map_expr
        self.name = name
        self.options = options
        self.reduce_dims = reduce_dims
        self.post_map_expr = post_map_expr
        if reduce_type is None:
            # Default the accumulator type to the first output's C type.
            self.reduce_type = self.out_params[0].ctype
        else:
            self.reduce_type = reduce_type
        self.preamble = preamble

    def __call__(self, *args, **kwargs):
        """Compiles and invokes the reduction kernel.

        The compilation runs only if the kernel is not cached. Note that the
        kernels with different argument dtypes, ndims, or axis are not
        compatible. It means that single ReductionKernel object may be compiled
        into multiple kernel binaries.

        Args:
            args: Arguments of the kernel.

        Returns:
            Arrays are returned according to the ``out_params`` argument of the
            ``__init__`` method.
        """
        out = kwargs.pop('out', None)
        axis = kwargs.pop('axis', None)
        keepdims = kwargs.pop('keepdims', False)
        if kwargs:
            raise TypeError('Wrong arguments %s' % kwargs)
        n_args = len(args)
        # Outputs may be supplied positionally (after the inputs) or via
        # the ``out`` keyword, but not both.
        if n_args != self.nin and n_args != self.nargs:
            raise TypeError('Wrong number of arguments for %s' % self.name)
        out_args = list(args[self.nin:])
        if out is not None:
            if self.nout != 1:
                raise NotImplementedError('')
            if len(out_args) != 0:
                raise ValueError("cannot specify 'out' as both "
                                 "a positional and keyword argument")
            out_args = [out]
        in_args, broad_shape = _broadcast(args, self.in_params, False)
        _check_args(in_args + out_args)
        if self.identity is None:
            # NOTE(review): this asserts the broadcast shape *contains* a
            # zero when no identity is given, which looks inverted relative
            # to simple_reduction_function's non-empty check — confirm the
            # intended invariant.
            assert 0 in broad_shape
        cp_array = cupy.ndarray
        in_ndarray_types = tuple(
            [a.dtype.type if isinstance(a, cp_array) else None
             for a in in_args])
        out_ndarray_types = tuple(
            [a.dtype.type if isinstance(a, cp_array) else None
             for a in out_args])
        in_types, out_types, types = _decide_params_type(
            self.in_params, self.out_params,
            in_ndarray_types, out_ndarray_types)
        axis, raxis = _get_axis(axis, len(broad_shape))
        out_shape = _get_out_shape(broad_shape, axis, raxis, keepdims)
        # Scalars are materialized with the decided parameter types.
        in_args = [x if isinstance(x, cp_array) else t(x)
                   for x, t in six_zip(in_args, in_types)]
        # Move the reduced axes to the front so each output element owns a
        # strided slice of the transposed input.
        in_args, in_shape = _get_trans_args(
            in_args, axis + raxis, broad_shape, self.in_params)
        out_args = _get_out_args_with_params(
            out_args, out_types, out_shape, self.out_params)
        in_indexer = carray.Indexer(in_shape)
        out_indexer = carray.Indexer(out_shape)
        # Output size rounded up to the next power of two; selects between
        # the serial and shared-memory kernel strategies.
        out_clp2_size = 2 ** int.bit_length(int(out_indexer.size - 1))
        inout_args = _get_inout_args(
            in_args, out_args, in_indexer, out_indexer, out_clp2_size,
            self.params, self.reduce_dims)
        args_info = _get_args_info(inout_args)
        block_size = 512
        kern = _get_reduction_kernel(
            self.params, args_info, types,
            self.name, block_size, self.reduce_type, self.identity,
            self.map_expr, self.reduce_expr, self.post_map_expr,
            self.preamble, self.options)
        shared_mem = 32 * block_size
        if out_clp2_size > 256:
            # Large outputs take the serial kernel path: no shared memory.
            shared_mem = 0
        # TODO(okuta) set actual size
        kern.linear_launch(max(out_indexer.size, block_size), inout_args,
                           shared_mem, block_size)
        return out_args[0]
def create_reduction_func(name, ops, routine=None, identity=None,
                          preamble=''):
    """Build a :class:`simple_reduction_function` from a type/routine table.

    Each entry of ``ops`` is either a type signature string (``'X->Y'`` in
    NumPy dtype characters) using the shared ``routine``, or a
    ``(signature, routine_override)`` pair whose falsy routine parts fall
    back to the shared default.
    """
    compiled_ops = []
    for entry in ops:
        if isinstance(entry, tuple):
            type_sig, rt = entry
            # Per-entry routine parts override the shared default routine.
            rt = tuple(part if part else default
                       for part, default in six_zip(rt, routine))
        else:
            type_sig = entry
            rt = routine
        sig_parts = type_sig.split('->')
        if len(sig_parts) == 1:
            in_chars = out_chars = tuple(sig_parts)
        else:
            in_chars, out_chars = [tuple(p) for p in sig_parts]
        in_dtypes = tuple([numpy.dtype(c).type for c in in_chars])
        out_dtypes = tuple([numpy.dtype(c).type for c in out_chars])
        compiled_ops.append((in_dtypes, out_dtypes, rt))
    return simple_reduction_function(name, compiled_ops, identity, preamble)
_min_max_preamble = '''
struct min_max_st{
type_in0_raw value;
int index;
__device__ min_max_st() : index(-1) { }
__device__ min_max_st(type_in0_raw v) : value(v), index(0) { }
__device__ min_max_st(type_in0_raw v, int i) : value(v), index(i) { }
};
__device__ min_max_st my_min(const min_max_st& a, const min_max_st& b) {
if (a.index == -1) return b;
if (b.index == -1) return a;
return min_max_st(min(a.value, b.value));
}
__device__ min_max_st my_max(const min_max_st& a, const min_max_st& b) {
if (a.index == -1) return b;
if (b.index == -1) return a;
return min_max_st(max(a.value, b.value));
}
__device__ min_max_st my_argmin(const min_max_st& a, const min_max_st& b) {
if (a.index == -1) return b;
if (b.index == -1) return a;
return (a.value <= b.value) ? a : b;
}
__device__ min_max_st my_argmax(const min_max_st& a, const min_max_st& b) {
if (a.index == -1) return b;
if (b.index == -1) return a;
return (a.value >= b.value) ? a : b;
}'''
# NumPy-style min/max reductions built from the preamble above. Signatures
# use NumPy dtype characters ('?' bool, 'B' uint8, ... 'd' float64); each
# 'X->Y' entry maps an input dtype to the corresponding output dtype. The
# routine tuples are (pre_map_expr, reduce_expr, post_map_expr, reduce_type).
amin = create_reduction_func(
    'cupy_min',
    ('?->?', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',
     'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d'),
    ('min_max_st(in0)', 'my_min(a, b)', 'out0 = a.value', 'min_max_st'),
    None, _min_max_preamble)

amax = create_reduction_func(
    'cupy_max',
    ('?->?', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L',
     'q->q', 'Q->Q', 'e->e', 'f->f', 'd->d'),
    ('min_max_st(in0)', 'my_max(a, b)', 'out0 = a.value', 'min_max_st'),
    None, _min_max_preamble)

# The arg* variants thread the running input index (_J) through the
# reduction and emit the winning index rather than the value; all dtypes
# map to 'l' (long) indices.
argmin = create_reduction_func(
    'cupy_argmin',
    ('?->l', 'B->l', 'h->l', 'H->l', 'i->l', 'I->l', 'l->l', 'L->l',
     'q->l', 'Q->l', 'e->l', 'f->l', 'd->l'),
    ('min_max_st(in0, _J)', 'my_argmin(a, b)', 'out0 = a.index', 'min_max_st'),
    None, _min_max_preamble)

argmax = create_reduction_func(
    'cupy_argmax',
    ('?->l', 'B->l', 'h->l', 'H->l', 'i->l', 'I->l', 'l->l', 'L->l',
     'q->l', 'Q->l', 'e->l', 'f->l', 'd->l'),
    ('min_max_st(in0, _J)', 'my_argmax(a, b)', 'out0 = a.index', 'min_max_st'),
    None, _min_max_preamble)
<?php
namespace Illuminate\Tests\Database;
use Illuminate\Database\Query\Processors\PostgresProcessor;
use PHPUnit\Framework\TestCase;
class DatabasePostgresProcessorTest extends TestCase
{
    /**
     * processColumns() must normalize raw PostgreSQL column metadata:
     * a default of nextval('...'::regclass) marks the column as
     * auto-incrementing, the raw "generated" flag is mapped to a null
     * "generation" entry for non-generated columns, and the remaining
     * fields pass through unchanged. The same listing is processed twice
     * — once as associative arrays and once as stdClass rows — to cover
     * both PDO fetch styles.
     */
    public function testProcessColumns()
    {
        $processor = new PostgresProcessor;

        $listing = [
            ['name' => 'id', 'type_name' => 'int4', 'type' => 'integer', 'collation' => '', 'nullable' => true, 'default' => "nextval('employee_id_seq'::regclass)", 'comment' => '', 'generated' => false],
            ['name' => 'name', 'type_name' => 'varchar', 'type' => 'character varying(100)', 'collation' => 'collate', 'nullable' => false, 'default' => '', 'comment' => 'foo', 'generated' => false],
            ['name' => 'balance', 'type_name' => 'numeric', 'type' => 'numeric(8,2)', 'collation' => '', 'nullable' => true, 'default' => '4', 'comment' => 'NULL', 'generated' => false],
            ['name' => 'birth_date', 'type_name' => 'timestamp', 'type' => 'timestamp(6) without time zone', 'collation' => '', 'nullable' => false, 'default' => '', 'comment' => '', 'generated' => false],
        ];
        $expected = [
            ['name' => 'id', 'type_name' => 'int4', 'type' => 'integer', 'collation' => '', 'nullable' => true, 'default' => "nextval('employee_id_seq'::regclass)", 'auto_increment' => true, 'comment' => '', 'generation' => null],
            ['name' => 'name', 'type_name' => 'varchar', 'type' => 'character varying(100)', 'collation' => 'collate', 'nullable' => false, 'default' => '', 'auto_increment' => false, 'comment' => 'foo', 'generation' => null],
            ['name' => 'balance', 'type_name' => 'numeric', 'type' => 'numeric(8,2)', 'collation' => '', 'nullable' => true, 'default' => '4', 'auto_increment' => false, 'comment' => 'NULL', 'generation' => null],
            ['name' => 'birth_date', 'type_name' => 'timestamp', 'type' => 'timestamp(6) without time zone', 'collation' => '', 'nullable' => false, 'default' => '', 'auto_increment' => false, 'comment' => '', 'generation' => null],
        ];

        $this->assertEquals($expected, $processor->processColumns($listing));

        // convert listing to objects to simulate PDO::FETCH_CLASS
        foreach ($listing as &$row) {
            $row = (object) $row;
        }

        $this->assertEquals($expected, $processor->processColumns($listing));
    }
}
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import logging
import sys
from django.utils.functional import wraps
from desktop.lib.django_util import render
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import RestException
from sqoop.api.exception import handle_rest_exception
from sqoop import client, conf
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
__all__ = ['get_job_or_exception']
LOG = logging.getLogger(__name__)
def get_connector_or_exception(exception_class=PopupException):
    """Decorator factory resolving ``connector_id`` into a connector object.

    The wrapped view receives a ``connector=...`` keyword in place of the raw
    id; REST failures are routed through ``handle_rest_exception``.
    NOTE(review): ``exception_class`` is unused (kept for parity with the
    sibling decorators), and ``handle_rest_exception`` is assumed to raise —
    if it ever returned normally, ``connector`` would be unbound. Confirm.
    """
    def inner(view_func):
        def decorate(request, connector_id, *args, **kwargs):
            try:
                sqoop_client = client.SqoopClient(
                    conf.SERVER_URL.get(), request.user.username,
                    request.LANGUAGE_CODE,
                    ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
                connector = sqoop_client.get_connector(int(connector_id))
            except RestException as e:
                handle_rest_exception(e, _('Could not get connector.'))
            return view_func(request, connector=connector, *args, **kwargs)
        return wraps(view_func)(decorate)
    return inner
def get_link_or_exception(exception_class=PopupException):
    """Decorator factory resolving ``link_id`` into a link object.

    The wrapped view receives a ``link=...`` keyword in place of the raw id;
    REST failures are routed through ``handle_rest_exception`` (assumed to
    raise — see the note on get_connector_or_exception).
    """
    def inner(view_func):
        def decorate(request, link_id, *args, **kwargs):
            try:
                sqoop_client = client.SqoopClient(
                    conf.SERVER_URL.get(), request.user.username,
                    request.LANGUAGE_CODE,
                    ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
                link = sqoop_client.get_link(int(link_id))
            except RestException as e:
                handle_rest_exception(e, _('Could not get link.'))
            return view_func(request, link=link, *args, **kwargs)
        return wraps(view_func)(decorate)
    return inner
def get_job_or_exception(exception_class=PopupException):
    """Decorator factory resolving ``job_id`` into a job object.

    The wrapped view receives a ``job=...`` keyword in place of the raw id;
    REST failures are routed through ``handle_rest_exception`` (assumed to
    raise).
    """
    def inner(view_func):
        def decorate(request, job_id, *args, **kwargs):
            try:
                sqoop_client = client.SqoopClient(
                    conf.SERVER_URL.get(), request.user.username,
                    request.LANGUAGE_CODE,
                    ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
                job = sqoop_client.get_job(int(job_id))
            except RestException as e:
                handle_rest_exception(e, _('Could not get job.'))
            return view_func(request, job=job, *args, **kwargs)
        return wraps(view_func)(decorate)
    return inner
def get_submission_or_exception(exception_class=PopupException):
    """Decorator factory resolving ``submission_id`` into a submission.

    The wrapped view receives a ``submission=...`` keyword in place of the
    raw id; REST failures are routed through ``handle_rest_exception``
    (assumed to raise).
    """
    def inner(view_func):
        def decorate(request, submission_id, *args, **kwargs):
            try:
                sqoop_client = client.SqoopClient(
                    conf.SERVER_URL.get(), request.user.username,
                    request.LANGUAGE_CODE,
                    ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
                submission = sqoop_client.get_submission(int(submission_id))
            except RestException as e:
                handle_rest_exception(e, _('Could not get submission.'))
            return view_func(request, submission=submission, *args, **kwargs)
        return wraps(view_func)(decorate)
    return inner
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate .h for event description.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
def begin(events):
    """Emit the generated trace-events header for the given event list."""
    # File prologue: include guard plus <stdbool.h> for the enable flags.
    out('/* This file is autogenerated by tracetool, do not edit. */',
        '',
        '#ifndef TRACE__GENERATED_EVENTS_H',
        '#define TRACE__GENERATED_EVENTS_H',
        '',
        '#include <stdbool.h>',
        ''
        )
    # One enum identifier per event, terminated by the total event count.
    out('typedef enum {')
    for event in events:
        out('    TRACE_%s,' % event.name.upper())
    out('    TRACE_EVENT_COUNT',
        '} TraceEventID;',
        )
    # Compile-time enable/disable flag per event.
    for event in events:
        enabled = 0 if 'disable' in event.properties else 1
        out('#define TRACE_%s_ENABLED %d' % (event.name.upper(), enabled))
    # Epilogue: pull in the runtime declarations and close the guard.
    out('#include "trace/event-internal.h"',
        '',
        '#endif /* TRACE__GENERATED_EVENTS_H */',
        )
"""Support for Powerview scenes from a Powerview hub."""
from typing import Any
from aiopvapi.resources.scene import Scene as PvScene
import voluptuous as vol
from homeassistant.components.scene import Scene
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_HOST, CONF_PLATFORM
import homeassistant.helpers.config_validation as cv
from .const import (
COORDINATOR,
DEVICE_INFO,
DOMAIN,
HUB_ADDRESS,
PV_API,
PV_ROOM_DATA,
PV_SCENE_DATA,
ROOM_NAME_UNICODE,
STATE_ATTRIBUTE_ROOM_NAME,
)
from .entity import HDEntity
# YAML schema for the legacy `scene:` platform entry: only the hub address
# is accepted, and it is immediately forwarded to a config-entry import.
PLATFORM_SCHEMA = vol.Schema(
    {vol.Required(CONF_PLATFORM): DOMAIN, vol.Required(HUB_ADDRESS): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Import platform from yaml.

    Legacy YAML setup: no entities are created here; the configured hub
    host is handed to the config-entry import flow instead.
    """
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_IMPORT},
            data={CONF_HOST: config[HUB_ADDRESS]},
        )
    )
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up powerview scene entries.

    Builds one PowerViewScene entity per scene reported by the hub, using
    the data the integration stored in hass.data during entry setup.
    """
    pv_data = hass.data[DOMAIN][entry.entry_id]
    room_data = pv_data[PV_ROOM_DATA]
    scene_data = pv_data[PV_SCENE_DATA]
    pv_request = pv_data[PV_API]
    coordinator = pv_data[COORDINATOR]
    device_info = pv_data[DEVICE_INFO]

    pvscenes = []
    for raw_scene in scene_data.values():
        scene = PvScene(raw_scene, pv_request)
        # The scene's room may be missing from the room data; fall back to
        # an empty room name rather than failing setup.
        room_name = room_data.get(scene.room_id, {}).get(ROOM_NAME_UNICODE, "")
        pvscenes.append(PowerViewScene(coordinator, device_info, room_name, scene))
    async_add_entities(pvscenes)
class PowerViewScene(HDEntity, Scene):
    """Representation of a Powerview scene."""

    def __init__(self, coordinator, device_info, room_name, scene):
        """Initialize the scene."""
        super().__init__(coordinator, device_info, room_name, scene.id)
        self._scene = scene

    @property
    def name(self):
        """Return the name of the scene."""
        return self._scene.name

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        # NOTE(review): self._room_name is presumably stored by
        # HDEntity.__init__ from the room_name argument — confirm.
        return {STATE_ATTRIBUTE_ROOM_NAME: self._room_name}

    @property
    def icon(self):
        """Icon to use in the frontend."""
        return "mdi:blinds"

    async def async_activate(self, **kwargs: Any) -> None:
        """Activate scene. Try to get entities into requested state."""
        await self._scene.activate()
# Copyright (c) 2012, the Mozilla Foundation. All rights reserved.
# Use of this source code is governed by the Simplified BSD License which can
# be found in the LICENSE file.
# Python module to create, delete and get the target of junctions on
# Windows.
__all__ = ["create", "readlink", "unlink", "isjunction"]
import os
import fs
from fs import CreateFile, GetFileAttributes, DeviceIoControl, CloseHandle
import ctypes
from ctypes import WinError, sizeof, byref
from ctypes.wintypes import DWORD
IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
FSCTL_SET_REPARSE_POINT = 0x000900A4
FSCTL_GET_REPARSE_POINT = 0x000900A8
FSCTL_DELETE_REPARSE_POINT = 0x000900AC
def new_junction_reparse_buffer(path=None):
    """
    Given a path, return a pair containing a new REPARSE_DATA_BUFFER and the
    length of the buffer (not necessarily the same as sizeof due to packing
    issues).

    If no path is provided, the maximum length is assumed (useful when the
    buffer is filled in by FSCTL_GET_REPARSE_POINT).
    """
    if path is None:
        # The maximum reparse point data buffer length is 16384 bytes. We are a
        # bit conservative here and set a length of 16000 bytes (8000
        # characters) + a few more for the header.
        substnamebufferchars = 8000
    else:
        # 1 more character for the null terminator. Python 2.x calculates
        # len(surrogate pair) = 2, so multiplying this by 2 is the right thing
        # to do.
        substnamebufferchars = len(path) + 1
    # It is amazing how ugly MSDN's version of REPARSE_DATA_BUFFER is:
    # <http://msdn.microsoft.com/en-us/library/windows/hardware/ff552012>. It
    # is a variable-length struct with two strings in the wchar[] buffer at
    # the end. Both are supposed to be null-terminated, and the individual
    # lengths do not include that of the null character, but the total
    # ReparseDataLength does.
    #
    # In our case, only the SubstituteName part of the mount point/junction-
    # specific part is relevant. So we set PrintNameLength to 0, but we still
    # need to allow for one null character, so PrintNameBuffer has length 1.
    class REPARSE_DATA_BUFFER(ctypes.Structure):
        # Header (ReparseTag..Reserved) followed by the mount-point reparse
        # layout; the two wchar buffers are flattened into the field list.
        _fields_ = [("ReparseTag", ctypes.c_ulong),
                    ("ReparseDataLength", ctypes.c_ushort),
                    ("Reserved", ctypes.c_ushort),
                    ("SubstituteNameOffset", ctypes.c_ushort),
                    ("SubstituteNameLength", ctypes.c_ushort),
                    ("PrintNameOffset", ctypes.c_ushort),
                    ("PrintNameLength", ctypes.c_ushort),
                    ("SubstituteNameBuffer", ctypes.c_wchar * substnamebufferchars),
                    ("PrintNameBuffer", ctypes.c_wchar * 1)]

    # Path length in bytes, excluding the null terminator.
    numpathbytes = (substnamebufferchars - 1) * sizeof(ctypes.c_wchar)
    # We can't really use sizeof on the struct because of packing issues.
    # Instead, calculate the size manually
    buffersize = (numpathbytes + (sizeof(ctypes.c_wchar) * 2) +
                  (sizeof(ctypes.c_ushort) * 4))
    if path is None:
        buffer = REPARSE_DATA_BUFFER()
        buffer.ReparseTag = IO_REPARSE_TAG_MOUNT_POINT
    else:
        # Positional initialization follows _fields_ order: the substitute
        # name occupies offset 0 with length numpathbytes, then the (empty)
        # print name. The labels below previously had these two pairs (and
        # the two buffers) swapped; the values themselves are correct.
        buffer = REPARSE_DATA_BUFFER(
            IO_REPARSE_TAG_MOUNT_POINT,
            buffersize,
            0,
            # substitute name offset, length
            0, numpathbytes,
            # print name offset, length
            numpathbytes + 2, 0,
            # substitute name
            path,
            # print name
            "")
    # The returned length adds the header bytes up to SubstituteNameOffset.
    return (buffer, buffersize + REPARSE_DATA_BUFFER.SubstituteNameOffset.offset)
def unparsed_convert(path):
path = os.path.abspath(path)
# Remove the trailing slash for root drives
if path[-2:] == ":\\":
path = path[:-1]
# This magic prefix disables parsing. Note that we do not want to use
# \\?\, since that doesn't tolerate a different case.
return "\\??\\" + path
def unparsed_unconvert(path):
    """Strip the NT-namespace prefix added by ``unparsed_convert``.

    Args:
        path: A path string, possibly prefixed with "\\??\\".

    Returns:
        The path without the "\\??\\" prefix; paths lacking the prefix are
        returned unchanged.
    """
    # str.startswith is clearer than the slice-compare and avoids the magic
    # 0:4 bounds tied to the prefix length.
    prefix = "\\??\\"
    if path.startswith(prefix):
        return path[len(prefix):]
    return path
def isjunction(path):
    """Return True if ``path`` exists and is an NTFS junction (mount point).

    A junction is a directory carrying a reparse point, so both attribute
    bits must be set; volumes without reparse support are rejected early.
    """
    if not os.path.exists(path) or not fs.junctions_supported(path):
        return False
    attrs = GetFileAttributes(path)
    return bool((attrs & fs.FILE_ATTRIBUTE_DIRECTORY) and
                (attrs & fs.FILE_ATTRIBUTE_REPARSE_POINT))
def create(source, link_name):
    """
    Create a junction at link_name pointing to source.

    The junction directory is created first and then converted into a
    mount point via FSCTL_SET_REPARSE_POINT; on any failure the directory
    is removed again so no plain, half-made directory is left behind.

    Raises if source is not a directory or link_name already exists.
    """
    success = False
    if not os.path.isdir(source):
        raise Exception("%s is not a directory" % source)
    if os.path.exists(link_name):
        raise Exception("%s: junction link name already exists" % link_name)
    link_name = os.path.abspath(link_name)
    os.mkdir(link_name)
    # Get a handle to the directory. FILE_FLAG_BACKUP_SEMANTICS is required
    # to open a directory handle; FILE_FLAG_OPEN_REPARSE_POINT operates on
    # the reparse point itself rather than its target.
    hlink = CreateFile(link_name, fs.GENERIC_WRITE,
        fs.FILE_SHARE_READ | fs.FILE_SHARE_WRITE, None, fs.OPEN_EXISTING,
        fs.FILE_FLAG_OPEN_REPARSE_POINT | fs.FILE_FLAG_BACKUP_SEMANTICS,
        None)
    try:
        if hlink == fs.INVALID_HANDLE_VALUE:
            raise WinError(descr="Couldn't open directory to create junction")
        # Build the mount-point reparse data naming the target in the
        # NT namespace ("\??\" prefix).
        srcvolpath = unparsed_convert(source)
        (junctioninfo, infolen) = new_junction_reparse_buffer(srcvolpath)
        dummy = DWORD(0)
        res = DeviceIoControl(
            hlink,
            FSCTL_SET_REPARSE_POINT,
            byref(junctioninfo),
            infolen,
            None,
            0,
            byref(dummy),
            None)
        if res == 0:
            raise WinError(descr="Setting directory as junction failed")
        success = True
    finally:
        if hlink != fs.INVALID_HANDLE_VALUE:
            CloseHandle(hlink)
        if not success:
            # Roll back the directory we created above.
            os.rmdir(link_name)
def readlink(path):
    """Return the target path of the junction at ``path``.

    Raises if ``path`` does not exist or is not a junction, or if the
    reparse data cannot be read.
    """
    # Make sure the path exists and is actually a junction
    if not isjunction(path):
        raise Exception("%s does not exist or is not a junction" % path)
    hlink = CreateFile(path, fs.GENERIC_READ, fs.FILE_SHARE_READ, None,
        fs.OPEN_EXISTING,
        fs.FILE_FLAG_OPEN_REPARSE_POINT | fs.FILE_FLAG_BACKUP_SEMANTICS,
        None)
    if hlink == fs.INVALID_HANDLE_VALUE:
        raise WinError(descr=("%s: couldn't open directory to read junction" % path))
    try:
        # Ask for a maximum-size buffer since the target length is unknown.
        (junctioninfo, infolen) = new_junction_reparse_buffer()
        dummy = DWORD(0)
        res = DeviceIoControl(
            hlink,
            FSCTL_GET_REPARSE_POINT,
            None,
            0,
            byref(junctioninfo),
            infolen,
            byref(dummy),
            None)
        if res == 0:
            raise WinError(descr="Getting junction info failed")
        # Strip the NT "\??\" prefix before handing the path back.
        return unparsed_unconvert(junctioninfo.SubstituteNameBuffer)
    finally:
        CloseHandle(hlink)
def unlink(path):
    """Remove the junction at ``path``; its target directory is untouched."""
    if isjunction(path):
        # A junction is just a directory carrying a reparse point, so
        # removing the directory removes the junction itself.
        os.rmdir(path)
    else:
        raise Exception("%s does not exist or is not a junction" % path)
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import "errors"
const (
	// NetworkNotReadyErrorMsg is used to describe the error that network is not ready.
	// NOTE(review): this text is presumably matched or surfaced elsewhere —
	// confirm before rewording it.
	NetworkNotReadyErrorMsg = "network is not ready"
)

var (
	// ErrNetworkUnknown indicates the network state is unknown.
	ErrNetworkUnknown = errors.New("network state unknown")
)
//// [tests/cases/compiler/arrayAssignmentTest5.ts] ////
//// [arrayAssignmentTest5.ts]
namespace Test {
interface IState {
}
interface IToken {
startIndex: number;
}
interface IStateToken extends IToken {
state: IState;
}
interface ILineTokens {
tokens: IToken[];
endState: IState;
}
interface IAction {
}
interface IMode {
onEnter(line:string, state:IState, offset:number):IAction;
tokenize(line:string, state:IState, includeStates:boolean):ILineTokens;
}
export class Bug implements IMode {
public onEnter(line:string, state:IState, offset:number):IAction {
var lineTokens:ILineTokens= this.tokenize(line, state, true);
var tokens:IStateToken[]= lineTokens.tokens;
if (tokens.length === 0) {
return this.onEnter(line, tokens, offset); // <== this should produce an error since onEnter can not be called with (string, IStateToken[], offset)
}
}
public tokenize(line:string, state:IState, includeStates:boolean):ILineTokens {
return null;
}
}
}
//// [arrayAssignmentTest5.js]
"use strict";
var Test;
(function (Test) {
class Bug {
onEnter(line, state, offset) {
var lineTokens = this.tokenize(line, state, true);
var tokens = lineTokens.tokens;
if (tokens.length === 0) {
return this.onEnter(line, tokens, offset); // <== this should produce an error since onEnter can not be called with (string, IStateToken[], offset)
}
}
tokenize(line, state, includeStates) {
return null;
}
}
Test.Bug = Bug;
})(Test || (Test = {})); | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/arrayAssignmentTest5.js |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_bd
short_description: Manage Bridge Domains (BD) objects (fv:BD)
description:
- Manages Bridge Domains (BD) on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(fv:BD) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
arp_flooding:
description:
- Determines if the Bridge Domain should flood ARP traffic.
- The APIC defaults to C(no) when unset during creation.
type: bool
bd:
description:
- The name of the Bridge Domain.
aliases: [ bd_name, name ]
bd_type:
description:
- The type of traffic on the Bridge Domain.
- The APIC defaults to C(ethernet) when unset during creation.
choices: [ ethernet, fc ]
description:
description:
- Description for the Bridge Domain.
enable_multicast:
description:
- Determines if PIM is enabled.
- The APIC defaults to C(no) when unset during creation.
type: bool
enable_routing:
description:
- Determines if IP forwarding should be allowed.
- The APIC defaults to C(yes) when unset during creation.
type: bool
endpoint_clear:
description:
- Clears all End Points in all Leaves when C(yes).
- The value is not reset to disabled once End Points have been cleared; that requires a second task.
- The APIC defaults to C(no) when unset during creation.
type: bool
endpoint_move_detect:
description:
- Determines if GARP should be enabled to detect when End Points move.
- The APIC defaults to C(garp) when unset during creation.
choices: [ default, garp ]
endpoint_retention_action:
description:
- Determines if the Bridge Domain should inherit or resolve the End Point Retention Policy.
- The APIC defaults to C(resolve) when unset during creation.
choices: [ inherit, resolve ]
endpoint_retention_policy:
description:
- The name of the End Point Retention Policy the Bridge Domain should use when
overriding the default End Point Retention Policy.
igmp_snoop_policy:
description:
- The name of the IGMP Snooping Policy the Bridge Domain should use when
overriding the default IGMP Snooping Policy.
ip_learning:
description:
- Determines if the Bridge Domain should learn End Point IPs.
- The APIC defaults to C(yes) when unset during creation.
type: bool
ipv6_nd_policy:
description:
- The name of the IPv6 Neighbor Discovery Policy the Bridge Domain should use when
overridding the default IPV6 ND Policy.
l2_unknown_unicast:
description:
- Determines what forwarding method to use for unknown l2 destinations.
- The APIC defaults to C(proxy) when unset during creation.
choices: [ proxy, flood ]
l3_unknown_multicast:
description:
- Determines the forwarding method to use for unknown multicast destinations.
- The APIC defaults to C(flood) when unset during creation.
choices: [ flood, opt-flood ]
limit_ip_learn:
description:
- Determines if the BD should limit IP learning to only subnets owned by the Bridge Domain.
- The APIC defaults to C(yes) when unset during creation.
type: bool
mac_address:
description:
- The MAC Address to assign to the C(bd) instead of using the default.
- The APIC defaults to C(00:22:BD:F8:19:FF) when unset during creation.
aliases: [ mac ]
version_added: '2.5'
multi_dest:
description:
- Determines the forwarding method for L2 multicast, broadcast, and link layer traffic.
- The APIC defaults to C(bd-flood) when unset during creation.
choices: [ bd-flood, drop, encap-flood ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the Tenant.
aliases: [ tenant_name ]
vrf:
description:
- The name of the VRF.
aliases: [ vrf_name ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: no
tenant: prod
bd: web_servers
mac_address: 00:22:BD:F8:19:FE
vrf: prod_vrf
state: present
delegate_to: localhost
- name: Add an FC Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: no
tenant: prod
bd: storage
bd_type: fc
vrf: fc_vrf
enable_routing: no
state: present
delegate_to: localhost
- name: Modify a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: yes
tenant: prod
bd: web_servers
arp_flooding: yes
l2_unknown_unicast: flood
state: present
delegate_to: localhost
- name: Query All Bridge Domains
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: yes
state: query
delegate_to: localhost
register: query_result
- name: Query a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: yes
tenant: prod
bd: web_servers
state: query
delegate_to: localhost
register: query_result
- name: Delete a Bridge Domain
aci_bd:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
validate_certs: yes
tenant: prod
bd: web_servers
state: absent
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
arp_flooding=dict(type='bool'),
bd=dict(type='str', aliases=['bd_name', 'name']), # Not required for querying all objects
bd_type=dict(type='str', choices=['ethernet', 'fc']),
description=dict(type='str'),
enable_multicast=dict(type='bool'),
enable_routing=dict(type='bool'),
endpoint_clear=dict(type='bool'),
endpoint_move_detect=dict(type='str', choices=['default', 'garp']),
endpoint_retention_action=dict(type='str', choices=['inherit', 'resolve']),
endpoint_retention_policy=dict(type='str'),
igmp_snoop_policy=dict(type='str'),
ip_learning=dict(type='bool'),
ipv6_nd_policy=dict(type='str'),
l2_unknown_unicast=dict(type='str', choices=['proxy', 'flood']),
l3_unknown_multicast=dict(type='str', choices=['flood', 'opt-flood']),
limit_ip_learn=dict(type='bool'),
mac_address=dict(type='str', aliases=['mac']),
multi_dest=dict(type='str', choices=['bd-flood', 'drop', 'encap-flood']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
vrf=dict(type='str', aliases=['vrf_name']),
gateway_ip=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4
scope=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4
subnet_mask=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['bd', 'tenant']],
['state', 'present', ['bd', 'tenant']],
],
)
aci = ACIModule(module)
arp_flooding = aci.boolean(module.params['arp_flooding'])
bd = module.params['bd']
bd_type = module.params['bd_type']
if bd_type == 'ethernet':
# ethernet type is represented as regular, but that is not clear to the users
bd_type = 'regular'
description = module.params['description']
enable_multicast = aci.boolean(module.params['enable_multicast'])
enable_routing = aci.boolean(module.params['enable_routing'])
endpoint_clear = aci.boolean(module.params['endpoint_clear'])
endpoint_move_detect = module.params['endpoint_move_detect']
if endpoint_move_detect == 'default':
# the ACI default setting is an empty string, but that is not a good input value
endpoint_move_detect = ''
endpoint_retention_action = module.params['endpoint_retention_action']
endpoint_retention_policy = module.params['endpoint_retention_policy']
igmp_snoop_policy = module.params['igmp_snoop_policy']
ip_learning = aci.boolean(module.params['ip_learning'])
ipv6_nd_policy = module.params['ipv6_nd_policy']
l2_unknown_unicast = module.params['l2_unknown_unicast']
l3_unknown_multicast = module.params['l3_unknown_multicast']
limit_ip_learn = aci.boolean(module.params['limit_ip_learn'])
mac_address = module.params['mac_address']
multi_dest = module.params['multi_dest']
state = module.params['state']
tenant = module.params['tenant']
vrf = module.params['vrf']
# Give warning when fvSubnet parameters are passed as those have been moved to the aci_subnet module
if module.params['gateway_ip'] or module.params['subnet_mask'] or module.params['scope']:
module._warnings = ["The support for managing Subnets has been moved to its own module, aci_subnet. \
The new modules still supports 'gateway_ip' and 'subnet_mask' along with more features"]
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='fvBD',
aci_rn='BD-{0}'.format(bd),
module_object=bd,
target_filter={'name': bd},
),
child_classes=['fvRsCtx', 'fvRsIgmpsn', 'fvRsBDToNdP', 'fvRsBdToEpRet'],
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fvBD',
class_config=dict(
arpFlood=arp_flooding,
descr=description,
epClear=endpoint_clear,
epMoveDetectMode=endpoint_move_detect,
ipLearning=ip_learning,
limitIpLearnToSubnets=limit_ip_learn,
mac=mac_address,
mcastAllow=enable_multicast,
multiDstPktAct=multi_dest,
name=bd,
type=bd_type,
unicastRoute=enable_routing,
unkMacUcastAct=l2_unknown_unicast,
unkMcastAct=l3_unknown_multicast,
),
child_configs=[
{'fvRsCtx': {'attributes': {'tnFvCtxName': vrf}}},
{'fvRsIgmpsn': {'attributes': {'tnIgmpSnoopPolName': igmp_snoop_policy}}},
{'fvRsBDToNdP': {'attributes': {'tnNdIfPolName': ipv6_nd_policy}}},
{'fvRsBdToEpRet': {'attributes': {'resolveAct': endpoint_retention_action, 'tnFvEpRetPolName': endpoint_retention_policy}}},
],
)
aci.get_diff(aci_class='fvBD')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
#
# License: BSD Style.
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
import re
import unicodedata
from operator import itemgetter
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import normalize
from ..utils.fixes import Counter
from .stop_words import ENGLISH_STOP_WORDS
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return u''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(ur"<([^>]+)>", flags=re.UNICODE).sub(u" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, str) or isinstance(stop, unicode):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class CountVectorizer(BaseEstimator):
"""Convert a collection of raw documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analysing the data. The default
analyzer does simple stop word filtering for English.
Parameters
----------
input: string {'filename', 'file', 'content'}
If filename, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have 'read' method (file-like
object) it is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
charset: string, 'utf-8' by default.
If bytes or files are given to analyze, this charset is used to
decode.
charset_error: {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `charset`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
min_n: integer
The lower boundary of the range of n-values for different n-grams to be
extracted.
max_n: integer
The upper boundary of the range of n-values for different n-grams to be
extracted. All values of n such that min_n <= n <= max_n will be used.
stop_words: string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned is currently the only
supported string value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `tokenize == 'word'`. The default regexp select tokens of 2
or more letters characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0], optional, 1.0 by default
When building the vocabulary ignore terms that have a term frequency
strictly higher than the given threshold (corpus specific stop words).
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
"""
_white_spaces = re.compile(ur"\s\s+")
def __init__(self, input='content', charset='utf-8',
charset_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=ur"\b\w\w+\b",
min_n=1, max_n=1, analyzer='word',
max_df=1.0, max_features=None,
vocabulary=None, binary=False, dtype=long):
self.input = input
self.charset = charset
self.charset_error = charset_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.min_n = min_n
self.max_n = max_n
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.max_features = max_features
if vocabulary is not None:
self.fixed_vocabulary = True
if not hasattr(vocabulary, 'get'):
vocabulary = dict((t, i) for i, t in enumerate(vocabulary))
self.vocabulary_ = vocabulary
else:
self.fixed_vocabulary = False
self.binary = binary
self.dtype = dtype
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
doc = open(doc, 'rb').read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.charset, self.charset_error)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
if self.min_n != 1 or self.max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(self.min_n,
min(self.max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(u" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(u" ", text_document)
text_len = len(text_document)
ngrams = []
for n in xrange(self.min_n, min(self.max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the however of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif hasattr(self.strip_accents, '__call__'):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that split a string in sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if hasattr(self.analyzer, '__call__'):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme' %
self.tokenize)
def _term_count_dicts_to_matrix(self, term_count_dicts):
i_indices = []
j_indices = []
values = []
vocabulary = self.vocabulary_
for i, term_count_dict in enumerate(term_count_dicts):
for term, count in term_count_dict.iteritems():
j = vocabulary.get(term)
if j is not None:
i_indices.append(i)
j_indices.append(j)
values.append(count)
# free memory as we go
term_count_dict.clear()
shape = (len(term_count_dicts), max(vocabulary.itervalues()) + 1)
spmatrix = sp.coo_matrix((values, (i_indices, j_indices)),
shape=shape, dtype=self.dtype)
if self.binary:
spmatrix.data[:] = 1
return spmatrix
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return the count vectors
This is more efficient than calling fit followed by transform.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: array, [n_samples, n_features]
"""
if self.fixed_vocabulary:
# No need to fit anything, directly perform the transformation.
# We intentionally don't call the transform method to make it
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer
analyze = self.build_analyzer()
term_counts_per_doc = [Counter(analyze(doc))
for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
self.vocabulary_ = {}
# result of document conversion to term count dicts
term_counts_per_doc = []
term_counts = Counter()
# term counts across entire corpus (count each term maximum once per
# document)
document_counts = Counter()
max_df = self.max_df
max_features = self.max_features
analyze = self.build_analyzer()
# TODO: parallelize the following loop with joblib?
# (see XXX up ahead)
for doc in raw_documents:
term_count_current = Counter(analyze(doc))
term_counts.update(term_count_current)
if max_df < 1.0:
document_counts.update(term_count_current.iterkeys())
term_counts_per_doc.append(term_count_current)
n_doc = len(term_counts_per_doc)
# filter out stop words: terms that occur in almost all documents
if max_df < 1.0:
max_document_count = max_df * n_doc
stop_words = set(t for t, dc in document_counts.iteritems()
if dc > max_document_count)
else:
stop_words = set()
# list the terms that should be part of the vocabulary
if max_features is None:
terms = set(term_counts) - stop_words
else:
# extract the most frequent terms for the vocabulary
terms = set()
for t, tc in term_counts.most_common():
if t not in stop_words:
terms.add(t)
if len(terms) >= max_features:
break
# store the learned stop words to make it easier to debug the value of
# max_df
self.max_df_stop_words_ = stop_words
# store map from term name to feature integer index: we sort the term
# to have reproducible outcome for the vocabulary structure: otherwise
# the mapping from feature name to indices might depend on the memory
# layout of the machine. Furthermore sorted terms might make it
# possible to perform binary search in the feature names array.
self.vocabulary_ = dict(((t, i) for i, t in enumerate(sorted(terms))))
# the term_counts and document_counts might be useful statistics, are
# we really sure want we want to drop them? They take some memory but
# can be useful for corpus introspection
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def transform(self, raw_documents):
"""Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided in the constructor.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# raw_documents can be an iterable so we don't know its size in
# advance
# XXX @larsmans tried to parallelize the following loop with joblib.
# The result was some 20% slower than the serial version.
analyze = self.build_analyzer()
term_counts_per_doc = [Counter(analyze(doc)) for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
if sp.isspmatrix_coo(X): # COO matrix is not indexable
X = X.tocsr()
elif not sp.issparse(X):
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(self.vocabulary_.keys())
indices = np.array(self.vocabulary_.values())
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in xrange(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(self.vocabulary_.iteritems(),
key=itemgetter(1))]
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf–idf representation
Tf means term-frequency while tf–idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf–idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
In the SMART notation used in IR, this class implements several tf–idf
variants. Tf is always "n" (natural), idf is "t" iff use_idf is given,
"n" otherwise, and normalization is "c" iff norm='l2', "n" iff norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68–74.`
.. [MSR2008] `C.D. Manning, H. Schütze and P. Raghavan (2008). Introduction
to Information Retrieval. Cambridge University Press,
pp. 121–125.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
self.idf_ = None
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X: sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if self.use_idf:
if not hasattr(X, 'nonzero'):
X = sp.csr_matrix(X)
n_samples, n_features = X.shape
df = np.bincount(X.nonzero()[1])
if df.shape[0] < n_features:
# bincount might return fewer bins than there are features
df = np.concatenate([df, np.zeros(n_features - df.shape[0])])
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# avoid division by zeros for features that occur in all documents
self.idf_ = np.log(float(n_samples) / df) + 1.0
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf–idf representation
Parameters
----------
X: sparse matrix, [n_samples, n_features]
a matrix of term/token counts
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
expected_n_features = self.idf_.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
d = sp.lil_matrix((n_features, n_features))
d.setdiag(self.idf_)
# *= doesn't work
X = X * d
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
class TfidfVectorizer(CountVectorizer):
    """Convert a collection of raw documents to a matrix of TF-IDF features.

    Equivalent to CountVectorizer followed by TfidfTransformer.

    See also
    --------
    CountVectorizer
        Tokenize the documents and count the occurrences of token and return
        them as a sparse matrix

    TfidfTransformer
        Apply Term Frequency Inverse Document Frequency normalization to a
        sparse matrix of occurrence counts.
    """

    def __init__(self, input='content', charset='utf-8',
                 charset_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 analyzer='word', stop_words=None, token_pattern=ur"\b\w\w+\b",
                 min_n=1, max_n=1, max_df=1.0, max_features=None,
                 vocabulary=None, binary=False, dtype=long, norm='l2',
                 use_idf=True, smooth_idf=True, sublinear_tf=False):
        # Counting is delegated to CountVectorizer.  Note that ``binary``
        # is deliberately forced to False in the super() call: binarized
        # occurrences would defeat tf weighting.
        super(TfidfVectorizer, self).__init__(
            input=input, charset=charset, charset_error=charset_error,
            strip_accents=strip_accents, lowercase=lowercase,
            preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
            stop_words=stop_words, token_pattern=token_pattern, min_n=min_n,
            max_n=max_n, max_df=max_df, max_features=max_features,
            vocabulary=vocabulary, binary=False, dtype=dtype)

        # The actual TF-IDF weighting is delegated to a nested transformer.
        self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
                                       smooth_idf=smooth_idf,
                                       sublinear_tf=sublinear_tf)

    # Broadcast the TF-IDF parameters to the underlying transformer instance
    # for easy grid search and repr

    @property
    def norm(self):
        return self._tfidf.norm

    @norm.setter
    def norm(self, value):
        self._tfidf.norm = value

    @property
    def use_idf(self):
        return self._tfidf.use_idf

    @use_idf.setter
    def use_idf(self, value):
        self._tfidf.use_idf = value

    @property
    def smooth_idf(self):
        return self._tfidf.smooth_idf

    @smooth_idf.setter
    def smooth_idf(self, value):
        self._tfidf.smooth_idf = value

    @property
    def sublinear_tf(self):
        return self._tfidf.sublinear_tf

    @sublinear_tf.setter
    def sublinear_tf(self, value):
        self._tfidf.sublinear_tf = value

    def fit(self, raw_documents):
        """Learn a conversion law from documents to array data"""
        # fit_transform (not fit) is used on the parent so the resulting
        # count matrix can be fed straight into the idf estimation.
        X = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(X)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn the representation and return the vectors.

        Parameters
        ----------
        raw_documents: iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        vectors: array, [n_samples, n_features]
        """
        X = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(X)
        # X is already a transformed view of raw_documents so
        # we set copy to False
        return self._tfidf.transform(X, copy=False)

    def transform(self, raw_documents, copy=True):
        """Transform raw text documents to tf-idf vectors.

        Parameters
        ----------
        raw_documents: iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        vectors: sparse matrix, [n_samples, n_features]
        """
        X = super(TfidfVectorizer, self).transform(raw_documents)
        return self._tfidf.transform(X, copy)
class Vectorizer(TfidfVectorizer):
    """Vectorizer is deprecated in 0.11, use TfidfVectorizer instead"""

    def __init__(self, input='content', charset='utf-8',
                 charset_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 analyzer='word', stop_words=None, token_pattern=ur"\b\w\w+\b",
                 min_n=1, max_n=1, max_df=1.0, max_features=None,
                 vocabulary=None, binary=False, dtype=long, norm='l2',
                 use_idf=True, smooth_idf=True, sublinear_tf=False):
        # Emit a DeprecationWarning, then forward every argument unchanged
        # to the TfidfVectorizer constructor.
        warnings.warn("Vectorizer is deprecated in 0.11 and will be removed"
                      " in 0.13. Please use TfidfVectorizer instead.",
                      category=DeprecationWarning)
        super(Vectorizer, self).__init__(
            input=input, charset=charset, charset_error=charset_error,
            strip_accents=strip_accents, lowercase=lowercase,
            preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
            stop_words=stop_words, token_pattern=token_pattern, min_n=min_n,
            max_n=max_n, max_df=max_df, max_features=max_features,
            vocabulary=vocabulary, binary=False, dtype=dtype,
            norm=norm, use_idf=use_idf, smooth_idf=smooth_idf,
            sublinear_tf=sublinear_tf)
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.41
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Employees(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Container model pairing a page of Employee records ("data") with its
    pagination metadata ("pagination").
    """

    def __init__(self, pagination=None, data=None):
        """
        Employees - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Maps attribute name -> declared swagger type (used by to_dict
        # and the API client's serializer).
        self.swagger_types = {
            'pagination': 'Pagination',
            'data': 'list[Employee]'
        }

        # Maps attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'pagination': 'pagination',
            'data': 'data'
        }

        self._pagination = pagination
        self._data = data

    @property
    def pagination(self):
        """
        Gets the pagination of this Employees.

        :return: The pagination of this Employees.
        :rtype: Pagination
        """
        return self._pagination

    @pagination.setter
    def pagination(self, pagination):
        """
        Sets the pagination of this Employees.

        :param pagination: The pagination of this Employees.
        :type: Pagination
        """
        self._pagination = pagination

    @property
    def data(self):
        """
        Gets the data of this Employees.

        :return: The data of this Employees.
        :rtype: list[Employee]
        """
        return self._data

    @data.setter
    def data(self, data):
        """
        Sets the data of this Employees.

        :param data: The data of this Employees.
        :type: list[Employee]
        """
        self._data = data

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting any
        nested model objects (anything exposing to_dict) along the way.
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Fix: the previous implementation dereferenced ``other.__dict__``
        unconditionally, which raised AttributeError when comparing against
        unrelated types (e.g. ``employees == 5``); comparisons with
        non-Employees values now return False instead.
        """
        if not isinstance(other, Employees):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
# (c) 2016, James Cammarata <jimi@sngx.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.executor.task_result import TaskResult
class TestTaskResult(unittest.TestCase):
    """Tests for TaskResult's state predicates (is_changed/is_skipped/
    is_unreachable/is_failed) and for no_log scrubbing via clean_copy()."""

    def test_task_result_basic(self):
        mock_host = MagicMock()
        mock_task = MagicMock()

        # test loading a result with a dict
        tr = TaskResult(mock_host, mock_task, dict())

        # test loading a result with a JSON string
        with patch('ansible.parsing.dataloader.DataLoader.load') as p:
            tr = TaskResult(mock_host, mock_task, '{}')

    def test_task_result_is_changed(self):
        mock_host = MagicMock()
        mock_task = MagicMock()

        # test with no changed in result
        tr = TaskResult(mock_host, mock_task, dict())
        self.assertFalse(tr.is_changed())

        # test with changed in the result
        tr = TaskResult(mock_host, mock_task, dict(changed=True))
        self.assertTrue(tr.is_changed())

        # test with multiple results but none changed
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
        self.assertFalse(tr.is_changed())

        # test with multiple results and one changed
        # (a single changed loop item marks the whole result changed)
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)]))
        self.assertTrue(tr.is_changed())

    def test_task_result_is_skipped(self):
        mock_host = MagicMock()
        mock_task = MagicMock()

        # test with no skipped in result
        tr = TaskResult(mock_host, mock_task, dict())
        self.assertFalse(tr.is_skipped())

        # test with skipped in the result
        tr = TaskResult(mock_host, mock_task, dict(skipped=True))
        self.assertTrue(tr.is_skipped())

        # test with multiple results but none skipped
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
        self.assertFalse(tr.is_skipped())

        # test with multiple results and one skipped
        # (unlike is_changed, is_skipped requires ALL loop items skipped)
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)]))
        self.assertFalse(tr.is_skipped())

        # test with multiple results and all skipped
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)]))
        self.assertTrue(tr.is_skipped())

        # test with multiple squashed results (list of strings)
        # first with the main result having skipped=False
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=False))
        self.assertFalse(tr.is_skipped())

        # then with the main result having skipped=True
        tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=True))
        self.assertTrue(tr.is_skipped())

    def test_task_result_is_unreachable(self):
        mock_host = MagicMock()
        mock_task = MagicMock()

        # test with no unreachable in result
        tr = TaskResult(mock_host, mock_task, dict())
        self.assertFalse(tr.is_unreachable())

        # test with unreachable in the result
        tr = TaskResult(mock_host, mock_task, dict(unreachable=True))
        self.assertTrue(tr.is_unreachable())

        # test with multiple results but none unreachable
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
        self.assertFalse(tr.is_unreachable())

        # test with multiple results and one unreachable
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)]))
        self.assertTrue(tr.is_unreachable())

    def test_task_result_is_failed(self):
        mock_host = MagicMock()
        mock_task = MagicMock()

        # test with no failed in result
        tr = TaskResult(mock_host, mock_task, dict())
        self.assertFalse(tr.is_failed())

        # test failed result with rc values (should not matter)
        tr = TaskResult(mock_host, mock_task, dict(rc=0))
        self.assertFalse(tr.is_failed())
        tr = TaskResult(mock_host, mock_task, dict(rc=1))
        self.assertFalse(tr.is_failed())

        # test with failed in result
        tr = TaskResult(mock_host, mock_task, dict(failed=True))
        self.assertTrue(tr.is_failed())

        # test with failed_when in result
        tr = TaskResult(mock_host, mock_task, dict(failed_when_result=True))
        self.assertTrue(tr.is_failed())

    def test_task_result_no_log(self):
        mock_host = MagicMock()
        mock_task = MagicMock()

        # no_log should remove secrets
        tr = TaskResult(mock_host, mock_task, dict(_ansible_no_log=True, secret='DONTSHOWME'))
        clean = tr.clean_copy()
        self.assertTrue('secret' not in clean._result)

    def test_task_result_no_log_preserve(self):
        mock_host = MagicMock()
        mock_task = MagicMock()

        # no_log should not remove preserved keys
        tr = TaskResult(
            mock_host,
            mock_task,
            dict(
                _ansible_no_log=True,
                retries=5,
                attempts=5,
                changed=False,
                foo='bar',
            )
        )
        clean = tr.clean_copy()
        self.assertTrue('retries' in clean._result)
        self.assertTrue('attempts' in clean._result)
        self.assertTrue('changed' in clean._result)
        self.assertTrue('foo' not in clean._result)
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"fmt"
"strings"
"github.com/hashicorp/cli"
"github.com/posener/complete"
)
// Compile-time assertions that SecretsDisableCommand implements the
// CLI interfaces it is registered with.
var (
	_ cli.Command             = (*SecretsDisableCommand)(nil)
	_ cli.CommandAutocomplete = (*SecretsDisableCommand)(nil)
)
// SecretsDisableCommand implements "vault secrets disable"; it embeds
// BaseCommand for the shared client/UI/flag plumbing.
type SecretsDisableCommand struct {
	*BaseCommand
}
// Synopsis returns the short, one-line description shown in command lists.
func (c *SecretsDisableCommand) Synopsis() string {
	return "Disable a secret engine"
}
// Help returns the long-form usage text, followed by the generated flag help.
func (c *SecretsDisableCommand) Help() string {
	helpText := `
Usage: vault secrets disable [options] PATH

  Disables a secrets engine at the given PATH. The argument corresponds to
  the enabled PATH of the engine, not the TYPE! All secrets created by this
  engine are revoked and its Vault data is removed.

  Disable the secrets engine enabled at aws/:

      $ vault secrets disable aws/

` + c.Flags().Help()

	return strings.TrimSpace(helpText)
}
// Flags returns this command's flag sets (HTTP/client flags only).
func (c *SecretsDisableCommand) Flags() *FlagSets {
	return c.flagSet(FlagSetHTTP)
}
// AutocompleteArgs predicts existing Vault mount paths for the PATH argument.
func (c *SecretsDisableCommand) AutocompleteArgs() complete.Predictor {
	return c.PredictVaultMounts()
}
// AutocompleteFlags returns shell completions derived from Flags().
func (c *SecretsDisableCommand) AutocompleteFlags() complete.Flags {
	return c.Flags().Completions()
}
func (c *SecretsDisableCommand) Run(args []string) int {
f := c.Flags()
if err := f.Parse(args); err != nil {
c.UI.Error(err.Error())
return 1
}
args = f.Args()
switch {
case len(args) < 1:
c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args)))
return 1
case len(args) > 1:
c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args)))
return 1
}
client, err := c.Client()
if err != nil {
c.UI.Error(err.Error())
return 2
}
path := ensureTrailingSlash(sanitizePath(args[0]))
if err := client.Sys().Unmount(path); err != nil {
c.UI.Error(fmt.Sprintf("Error disabling secrets engine at %s: %s", path, err))
return 2
}
c.UI.Output(fmt.Sprintf("Success! Disabled the secrets engine (if it existed) at: %s", path))
return 0
} | go | github | https://github.com/hashicorp/vault | command/secrets_disable.go |
/*-------------------------------------------------------------------------
*
* xlogdesc.c
* rmgr descriptor routines for access/transam/xlog.c
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/access/rmgrdesc/xlogdesc.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "catalog/pg_control.h"
#include "utils/guc.h"
#include "utils/timestamp.h"
/*
 * GUC support
 *
 * Maps wal_level GUC string values to WAL_LEVEL_* enum values.  The
 * "archive" and "hot_standby" spellings are hidden aliases of "replica",
 * kept for backward compatibility.  The list is NULL-terminated.
 */
const struct config_enum_entry wal_level_options[] = {
	{"minimal", WAL_LEVEL_MINIMAL, false},
	{"replica", WAL_LEVEL_REPLICA, false},
	{"archive", WAL_LEVEL_REPLICA, true},	/* deprecated */
	{"hot_standby", WAL_LEVEL_REPLICA, true},	/* deprecated */
	{"logical", WAL_LEVEL_LOGICAL, false},
	{NULL, 0, false}
};
/*
* Find a string representation for wal_level
*/
static const char *
get_wal_level_string(int wal_level)
{
const struct config_enum_entry *entry;
const char *wal_level_str = "?";
for (entry = wal_level_options; entry->name; entry++)
{
if (entry->val == wal_level)
{
wal_level_str = entry->name;
break;
}
}
return wal_level_str;
}
/*
 * Append a human-readable description of an XLOG resource-manager record
 * to "buf", branching on the record's info code.
 */
void
xlog_desc(StringInfo buf, XLogReaderState *record)
{
	char	   *rec = XLogRecGetData(record);
	uint8		info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;

	if (info == XLOG_CHECKPOINT_SHUTDOWN ||
		info == XLOG_CHECKPOINT_ONLINE)
	{
		/* Record payload is a full CheckPoint struct. */
		CheckPoint *checkpoint = (CheckPoint *) rec;

		appendStringInfo(buf, "redo %X/%08X; "
						 "tli %u; prev tli %u; fpw %s; wal_level %s; logical decoding %s; xid %u:%u; oid %u; multi %u; offset %" PRIu64 "; "
						 "oldest xid %u in DB %u; oldest multi %u in DB %u; "
						 "oldest/newest commit timestamp xid: %u/%u; "
						 "oldest running xid %u; %s",
						 LSN_FORMAT_ARGS(checkpoint->redo),
						 checkpoint->ThisTimeLineID,
						 checkpoint->PrevTimeLineID,
						 checkpoint->fullPageWrites ? "true" : "false",
						 get_wal_level_string(checkpoint->wal_level),
						 checkpoint->logicalDecodingEnabled ? "true" : "false",
						 EpochFromFullTransactionId(checkpoint->nextXid),
						 XidFromFullTransactionId(checkpoint->nextXid),
						 checkpoint->nextOid,
						 checkpoint->nextMulti,
						 checkpoint->nextMultiOffset,
						 checkpoint->oldestXid,
						 checkpoint->oldestXidDB,
						 checkpoint->oldestMulti,
						 checkpoint->oldestMultiDB,
						 checkpoint->oldestCommitTsXid,
						 checkpoint->newestCommitTsXid,
						 checkpoint->oldestActiveXid,
						 (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
	}
	else if (info == XLOG_NEXTOID)
	{
		Oid			nextOid;

		/* memcpy: payload may not be suitably aligned for direct access */
		memcpy(&nextOid, rec, sizeof(Oid));
		appendStringInfo(buf, "%u", nextOid);
	}
	else if (info == XLOG_RESTORE_POINT)
	{
		xl_restore_point *xlrec = (xl_restore_point *) rec;

		appendStringInfoString(buf, xlrec->rp_name);
	}
	else if (info == XLOG_FPI || info == XLOG_FPI_FOR_HINT)
	{
		/* no further information to print */
	}
	else if (info == XLOG_BACKUP_END)
	{
		XLogRecPtr	startpoint;

		memcpy(&startpoint, rec, sizeof(XLogRecPtr));
		appendStringInfo(buf, "%X/%08X", LSN_FORMAT_ARGS(startpoint));
	}
	else if (info == XLOG_PARAMETER_CHANGE)
	{
		xl_parameter_change xlrec;
		const char *wal_level_str;

		memcpy(&xlrec, rec, sizeof(xl_parameter_change));
		wal_level_str = get_wal_level_string(xlrec.wal_level);

		appendStringInfo(buf, "max_connections=%d max_worker_processes=%d "
						 "max_wal_senders=%d max_prepared_xacts=%d "
						 "max_locks_per_xact=%d wal_level=%s "
						 "wal_log_hints=%s track_commit_timestamp=%s",
						 xlrec.MaxConnections,
						 xlrec.max_worker_processes,
						 xlrec.max_wal_senders,
						 xlrec.max_prepared_xacts,
						 xlrec.max_locks_per_xact,
						 wal_level_str,
						 xlrec.wal_log_hints ? "on" : "off",
						 xlrec.track_commit_timestamp ? "on" : "off");
	}
	else if (info == XLOG_FPW_CHANGE)
	{
		bool		fpw;

		memcpy(&fpw, rec, sizeof(bool));
		appendStringInfoString(buf, fpw ? "true" : "false");
	}
	else if (info == XLOG_END_OF_RECOVERY)
	{
		xl_end_of_recovery xlrec;

		memcpy(&xlrec, rec, sizeof(xl_end_of_recovery));
		appendStringInfo(buf, "tli %u; prev tli %u; time %s; wal_level %s",
						 xlrec.ThisTimeLineID, xlrec.PrevTimeLineID,
						 timestamptz_to_str(xlrec.end_time),
						 get_wal_level_string(xlrec.wal_level));
	}
	else if (info == XLOG_OVERWRITE_CONTRECORD)
	{
		xl_overwrite_contrecord xlrec;

		memcpy(&xlrec, rec, sizeof(xl_overwrite_contrecord));
		appendStringInfo(buf, "lsn %X/%08X; time %s",
						 LSN_FORMAT_ARGS(xlrec.overwritten_lsn),
						 timestamptz_to_str(xlrec.overwrite_time));
	}
	else if (info == XLOG_CHECKPOINT_REDO)
	{
		int			wal_level;

		memcpy(&wal_level, rec, sizeof(int));
		appendStringInfo(buf, "wal_level %s", get_wal_level_string(wal_level));
	}
	else if (info == XLOG_LOGICAL_DECODING_STATUS_CHANGE)
	{
		bool		enabled;

		memcpy(&enabled, rec, sizeof(bool));
		appendStringInfoString(buf, enabled ? "true" : "false");
	}
}
/*
 * Return the symbolic name of an xlog record type, or NULL if the info
 * code is not recognized.
 */
const char *
xlog_identify(uint8 info)
{
	static const struct
	{
		uint8		info;
		const char *name;
	}			record_names[] = {
		{XLOG_CHECKPOINT_SHUTDOWN, "CHECKPOINT_SHUTDOWN"},
		{XLOG_CHECKPOINT_ONLINE, "CHECKPOINT_ONLINE"},
		{XLOG_NOOP, "NOOP"},
		{XLOG_NEXTOID, "NEXTOID"},
		{XLOG_SWITCH, "SWITCH"},
		{XLOG_BACKUP_END, "BACKUP_END"},
		{XLOG_PARAMETER_CHANGE, "PARAMETER_CHANGE"},
		{XLOG_RESTORE_POINT, "RESTORE_POINT"},
		{XLOG_FPW_CHANGE, "FPW_CHANGE"},
		{XLOG_END_OF_RECOVERY, "END_OF_RECOVERY"},
		{XLOG_OVERWRITE_CONTRECORD, "OVERWRITE_CONTRECORD"},
		{XLOG_FPI, "FPI"},
		{XLOG_FPI_FOR_HINT, "FPI_FOR_HINT"},
		{XLOG_CHECKPOINT_REDO, "CHECKPOINT_REDO"},
		{XLOG_LOGICAL_DECODING_STATUS_CHANGE, "LOGICAL_DECODING_STATUS_CHANGE"},
	};
	uint8		masked = info & ~XLR_INFO_MASK;
	size_t		i;

	for (i = 0; i < sizeof(record_names) / sizeof(record_names[0]); i++)
	{
		if (record_names[i].info == masked)
			return record_names[i].name;
	}

	return NULL;
}
/*
 * Returns a string giving information about all the blocks in an
 * XLogRecord.
 *
 * If "fpi_len" is non-NULL, the total length of all full-page images in
 * the record is added to it.  With "detailed_format", each block reference
 * gets its own fully-described entry; otherwise a compact single-line form
 * is used.  "pretty" controls whitespace (tabs/newlines) in the output.
 */
void
XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
					   bool detailed_format, StringInfo buf,
					   uint32 *fpi_len)
{
	int			block_id;

	Assert(record != NULL);

	if (detailed_format && pretty)
		appendStringInfoChar(buf, '\n');

	for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
	{
		RelFileLocator rlocator;
		ForkNumber	forknum;
		BlockNumber blk;

		/* Skip block IDs not used by this record. */
		if (!XLogRecGetBlockTagExtended(record, block_id,
										&rlocator, &forknum, &blk, NULL))
			continue;

		if (detailed_format)
		{
			/* Get block references in detailed format. */

			if (pretty)
				appendStringInfoChar(buf, '\t');
			else if (block_id > 0)
				appendStringInfoChar(buf, ' ');

			appendStringInfo(buf,
							 "blkref #%d: rel %u/%u/%u fork %s blk %u",
							 block_id,
							 rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
							 forkNames[forknum],
							 blk);

			if (XLogRecHasBlockImage(record, block_id))
			{
				uint8		bimg_info = XLogRecGetBlock(record, block_id)->bimg_info;

				/* Calculate the amount of FPI data in the record. */
				if (fpi_len)
					*fpi_len += XLogRecGetBlock(record, block_id)->bimg_len;

				if (BKPIMAGE_COMPRESSED(bimg_info))
				{
					const char *method;

					if ((bimg_info & BKPIMAGE_COMPRESS_PGLZ) != 0)
						method = "pglz";
					else if ((bimg_info & BKPIMAGE_COMPRESS_LZ4) != 0)
						method = "lz4";
					else if ((bimg_info & BKPIMAGE_COMPRESS_ZSTD) != 0)
						method = "zstd";
					else
						method = "unknown";

					appendStringInfo(buf,
									 " (FPW%s); hole: offset: %u, length: %u, "
									 "compression saved: %u, method: %s",
									 XLogRecBlockImageApply(record, block_id) ?
									 "" : " for WAL verification",
									 XLogRecGetBlock(record, block_id)->hole_offset,
									 XLogRecGetBlock(record, block_id)->hole_length,
									 BLCKSZ -
									 XLogRecGetBlock(record, block_id)->hole_length -
									 XLogRecGetBlock(record, block_id)->bimg_len,
									 method);
				}
				else
				{
					appendStringInfo(buf,
									 " (FPW%s); hole: offset: %u, length: %u",
									 XLogRecBlockImageApply(record, block_id) ?
									 "" : " for WAL verification",
									 XLogRecGetBlock(record, block_id)->hole_offset,
									 XLogRecGetBlock(record, block_id)->hole_length);
				}
			}

			if (pretty)
				appendStringInfoChar(buf, '\n');
		}
		else
		{
			/* Get block references in short format. */

			if (forknum != MAIN_FORKNUM)
			{
				appendStringInfo(buf,
								 ", blkref #%d: rel %u/%u/%u fork %s blk %u",
								 block_id,
								 rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
								 forkNames[forknum],
								 blk);
			}
			else
			{
				/* MAIN_FORKNUM is the common case; the fork name is omitted. */
				appendStringInfo(buf,
								 ", blkref #%d: rel %u/%u/%u blk %u",
								 block_id,
								 rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
								 blk);
			}

			if (XLogRecHasBlockImage(record, block_id))
			{
				/* Calculate the amount of FPI data in the record. */
				if (fpi_len)
					*fpi_len += XLogRecGetBlock(record, block_id)->bimg_len;

				if (XLogRecBlockImageApply(record, block_id))
					appendStringInfoString(buf, " FPW");
				else
					appendStringInfoString(buf, " FPW for WAL verification");
			}
		}
	}

	if (!detailed_format && pretty)
		appendStringInfoChar(buf, '\n');
}
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``scatter3d.marker`` compound property.

    NOTE: auto-generated by the plotly code generator; the ``data_docs``
    string below is runtime data passed to the CompoundValidator base class
    and documents the Marker sub-properties.
    """

    def __init__(self, plotly_name="marker", parent_name="scatter3d", **kwargs):
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.colorscale`. Has an
                effect only if in `marker.color`is set to a
                numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.color`) or the bounds set in
                `marker.cmin` and `marker.cmax` Has an effect
                only if in `marker.color`is set to a numerical
                array. Defaults to `false` when `marker.cmin`
                and `marker.cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.cmin` and/or `marker.cmax` to
                be equidistant to this point. Has an effect
                only if in `marker.color`is set to a numerical
                array. Value should have the same units as in
                `marker.color`. Has no effect when
                `marker.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmax` must be set as well.
            color
                Sets themarkercolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.cmin` and `marker.cmax` if set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`plotly.graph_objects.scatter3d.marker.C
                olorBar` instance or dict with compatible
                properties
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.color`is set to a numerical array. The
                colorscale must be an array containing arrays
                mapping a normalized value to an rgb, rgba,
                hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use`marker.cmin` and
                `marker.cmax`. Alternatively, `colorscale` may
                be a palette name string of the following list:
                Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
                ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
                arth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for  color .
            line
                :class:`plotly.graph_objects.scatter3d.marker.L
                ine` instance or dict with compatible
                properties
            opacity
                Sets the marker opacity. Note that the marker
                opacity for scatter3d traces must be a scalar
                value for performance reasons. To set a
                blending opacity value (i.e. which is not
                transparent), set "marker.color" to an rgba
                color and use its alpha channel.
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.color`is set to a
                numerical array. If true, `marker.cmin` will
                correspond to the last color in the array and
                `marker.cmax` will correspond to the first
                color.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace. Has an effect only if
                in `marker.color`is set to a numerical array.
            size
                Sets the marker size (in px).
            sizemin
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the minimum size (in px)
                of the rendered marker points.
            sizemode
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the rule for which the
                data in `size` is converted to pixels.
            sizeref
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the scale factor used to
                determine the rendered size of marker points.
                Use with `sizemin` and `sizemode`.
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for  size .
            symbol
                Sets the marker symbol type.
            symbolsrc
                Sets the source reference on Chart Studio Cloud
                for  symbol .
""",
            ),
            **kwargs
        )
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v0alpha1.color_modes.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "",
"version": "v0",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "Stats",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"datasource": {
"name": "testdata-type-uid"
},
"spec": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"labels": "",
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"scenarioId": "random_walk",
"seriesCount": 30,
"stream": {
"bands": 1,
"noise": 2.2,
"speed": 250,
"spread": 3.5,
"type": "signal"
},
"stringInput": ""
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "stat",
"version": "7.4.0-pre",
"spec": {
"options": {
"colorMode": "background",
"graphMode": "none",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"textMode": "value"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 20,
"color": "red"
}
]
},
"color": {
"mode": "continuous-blues"
}
},
"overrides": []
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Gradient color schemes",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"scenarioId": "random_walk",
"seriesCount": 15,
"stream": {
"bands": 1,
"noise": 2.2,
"speed": 250,
"spread": 3.5,
"type": "signal"
},
"stringInput": ""
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [
{
"kind": "reduce",
"spec": {
"id": "reduce",
"options": {
"reducers": [
"max",
"mean",
"last",
"min"
]
}
}
},
{
"kind": "organize",
"spec": {
"id": "organize",
"options": {
"excludeByName": {
"Field": false
},
"indexByName": {},
"renameByName": {}
}
}
}
],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "table",
"version": "7.4.0-pre",
"spec": {
"options": {
"showHeader": true,
"sortBy": [
{
"desc": true,
"displayName": "Last"
}
]
},
"fieldConfig": {
"defaults": {
"unit": "degree",
"thresholds": {
"mode": "percentage",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 20,
"color": "blue"
},
{
"value": 60,
"color": "orange"
},
{
"value": 70,
"color": "red"
}
]
},
"color": {
"mode": "continuous-BlYlRd"
},
"custom": {
"align": "center",
"cellOptions": {
"mode": "gradient",
"type": "color-background"
},
"filterable": false
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Field"
},
"properties": [
{
"id": "custom.cellOptions",
"value": {}
}
]
}
]
}
}
}
}
},
"panel-5": {
"kind": "Panel",
"spec": {
"id": 5,
"title": "Bar Gauge LCD",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"alias": "",
"csvWave": {
"timeStep": 60,
"valuesCSV": "0,0,2,2,1,1"
},
"lines": 10,
"points": [],
"pulseWave": {
"offCount": 3,
"offValue": 1,
"onCount": 3,
"onValue": 2,
"timeStep": 60
},
"scenarioId": "random_walk",
"seriesCount": 15,
"stream": {
"bands": 1,
"noise": 2.2,
"speed": 250,
"spread": 3.5,
"type": "signal"
},
"stringInput": ""
}
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "bargauge",
"version": "7.4.0-pre",
"spec": {
"options": {
"displayMode": "lcd",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"showUnfilled": true
},
"fieldConfig": {
"defaults": {
"unit": "degree",
"thresholds": {
"mode": "percentage",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 20,
"color": "blue"
},
{
"value": 60,
"color": "orange"
},
{
"value": 70,
"color": "red"
}
]
},
"color": {
"mode": "continuous-GrYlRd"
},
"custom": {
"align": "center",
"displayMode": "color-background",
"filterable": false
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Field"
},
"properties": [
{
"id": "custom.displayMode"
}
]
}
]
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 19,
"height": 16,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 19,
"y": 0,
"width": 5,
"height": 26,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 16,
"width": 19,
"height": 10,
"element": {
"kind": "ElementReference",
"name": "panel-5"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"demo"
],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Gradient Color modes",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-common/v0alpha1.color_modes.v42.v2beta1.json |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) Ansible Inc, 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import glob
import os
import pickle
import platform
import select
import shlex
import subprocess
import traceback
from ansible.module_utils.six import PY2, b
from ansible.module_utils._text import to_bytes, to_text
def sysv_is_enabled(name):
    '''
    Check whether the supplied service name is enabled in any SysV runlevel.

    :arg name: name of the service to test for
    '''
    # A service counts as enabled when at least one start symlink
    # (S??<name>) exists in any /etc/rc?.d runlevel directory.
    start_links = glob.glob('/etc/rc?.d/S??%s' % name)
    return len(start_links) > 0
def get_sysv_script(name):
    '''
    Return the expected init-script path for a service.

    :arg name: name or path of the service to test for
    '''
    # Absolute paths are taken verbatim; bare names map into /etc/init.d.
    return name if name.startswith('/') else '/etc/init.d/%s' % name
def sysv_exists(name):
    '''
    Check whether an init script exists for the supplied service name.

    :arg name: name of the service to test for
    '''
    script_path = get_sysv_script(name)
    return os.path.exists(script_path)
def fail_if_missing(module, found, service, msg=''):
    '''
    Error out or exit gracefully when a service is missing, depending on
    check mode.

    :arg module: an AnsibleModule object, used for its utility methods
    :arg found: boolean indicating whether the service was found
    :arg service: name of the service
    :kw msg: extra info to append to the error/success msg when missing
    '''
    # Nothing to do when the service was found.
    if found:
        return
    if module.check_mode:
        # In check mode assume the service will be created by the full run.
        module.exit_json(msg="Service %s not found on %s, assuming it will exist on full run" % (service, msg), changed=True)
    else:
        module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg))
def daemonize(module, cmd):
    '''
    Execute a command while detaching as a daemon, returns rc, stdout, and stderr.

    :arg module: is an AnsibleModule object, used for it's utility methods
    :arg cmd: is a list or string representing the command and options to run

    This is complex because daemonization is hard for people.
    What we do is daemonize a part of this module, the daemon runs the command,
    picks up the return code and output, and returns it to the main process.
    '''

    # init some vars
    chunk = 4096  # FIXME: pass in as arg?
    errors = 'surrogate_or_strict'

    # start it!
    try:
        pipe = os.pipe()
        pid = os.fork()
    except OSError:
        # NOTE(review): the '%s' in this message is never filled in; the
        # traceback is carried in the 'exception' field instead.
        module.fail_json(msg="Error while attempting to fork: %s", exception=traceback.format_exc())

    # we don't do any locking as this should be a unique module/process
    if pid == 0:
        os.close(pipe[0])

        # Set stdin/stdout/stderr to /dev/null
        fd = os.open(os.devnull, os.O_RDWR)

        # clone stdin/out/err
        for num in range(3):
            if fd != num:
                os.dup2(fd, num)

        # close otherwise
        if fd not in range(3):
            os.close(fd)

        # Make us a daemon
        pid = os.fork()

        # end if not in child
        if pid > 0:
            os._exit(0)

        # get new process session and detach
        sid = os.setsid()
        if sid == -1:
            module.fail_json(msg="Unable to detach session while daemonizing")

        # avoid possible problems with cwd being removed
        os.chdir("/")

        pid = os.fork()
        if pid > 0:
            os._exit(0)

        # if command is string deal with py2 vs py3 conversions for shlex
        if not isinstance(cmd, list):
            if PY2:
                cmd = shlex.split(to_bytes(cmd, errors=errors))
            else:
                cmd = shlex.split(to_text(cmd, errors=errors))

        # make sure we always use byte strings
        run_cmd = []
        for c in cmd:
            run_cmd.append(to_bytes(c, errors=errors))

        # execute the command in forked process
        p = subprocess.Popen(run_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
        fds = [p.stdout, p.stderr]

        # loop reading output till its done
        # BUGFIX: was {p.stdout: b(""), p.sterr: b("")} -- the 'sterr' typo
        # raised AttributeError before any output could be collected.
        output = {p.stdout: b(""), p.stderr: b("")}
        while fds:
            rfd, wfd, efd = select.select(fds, [], fds, 1)
            if (rfd + wfd + efd) or p.poll():
                for out in fds:
                    if out in rfd:
                        data = os.read(out.fileno(), chunk)
                        if not data:
                            fds.remove(out)
                        # BUGFIX: os.read() already returns bytes; wrapping it
                        # in six.b() breaks on Python 3 (bytes has no .encode).
                        output[out] += data

        # even after fds close, we might want to wait for pid to die
        p.wait()

        # Return a pickled data of parent
        return_data = pickle.dumps([p.returncode, to_text(output[p.stdout]), to_text(output[p.stderr])], protocol=pickle.HIGHEST_PROTOCOL)
        os.write(pipe[1], to_bytes(return_data, errors=errors))

        # clean up
        os.close(pipe[1])
        os._exit(0)

    elif pid == -1:
        module.fail_json(msg="Unable to fork, no exception thrown, probably due to lack of resources, check logs.")

    else:
        # in parent
        os.close(pipe[1])
        os.waitpid(pid, 0)

        # Grab response data after child finishes
        return_data = b("")
        while True:
            rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
            if pipe[0] in rfd:
                data = os.read(pipe[0], chunk)
                if not data:
                    break
                # BUGFIX: data is already bytes; do not re-wrap with b().
                return_data += data

        # Note: no need to specify encoding on py3 as this module sends the
        # pickle to itself (thus same python interpreter so we aren't mixing
        # py2 and py3)
        return pickle.loads(to_bytes(return_data, errors=errors))
def check_ps(module, pattern):
    '''
    Return True when `pattern` appears in the output of ps, else False.

    :arg module: an AnsibleModule object, used for its utility methods
    :arg pattern: substring to search for in each line of ps output
    '''
    # SunOS ps does not understand the BSD-style 'auxww' flags.
    psflags = '-ef' if platform.system() == 'SunOS' else 'auxww'

    # Find ps binary and run it.
    psbin = module.get_bin_path('ps', True)
    rc, out, dummy = module.run_command('%s %s' % (psbin, psflags))

    # Only trust the output when ps succeeded.
    if rc != 0:
        return False
    return any(pattern in line for line in out.split('\n'))
# -*- coding: utf-8 -*-
import glob
import os
import numpy as np
import pandas as pd
from sklearn_evaluation.plot import confusion_matrix as plot_cm
# from data_load import load_data
import numpy
from keras import callbacks
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import StratifiedShuffleSplit, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.preprocessing import Imputer
from sklearn.metrics import confusion_matrix
from utils import print_cm_summary
import matplotlib.pyplot as plt
def shift_log_transform(df, name, shift):
    """Replace column `name` of `df` with log(values + shift), in place."""
    shifted = df[name] + shift
    df[name] = np.log(shifted)
def load_data(fnames, names, names_to_delete):
    """
    Function that loads data from series of files where first file contains
    class of zeros and other files - classes of ones.

    :param fnames:
        Iterable of file names.
    :param names:
        Names of columns in files.
    :param names_to_delete:
        Column names to delete.

    :return:
        X, y - ``sklearn`` arrays of features & responces.
    """
    # Load data
    dfs = list()
    for fn in fnames:
        # Only the first 30 whitespace-separated columns are read;
        # '+inf' entries become NaN.
        dfs.append(pd.read_table(fn, names=names, engine='python',
                                 na_values='+inf', sep=r"\s*",
                                 usecols=range(30)))

    # Remove meaningless features
    # delta = smallest finite CSSD across all files; used to shift CSSD so
    # the subsequent log-transform is well defined.
    delta = list()
    for df in dfs:
        delta.append(df['CSSD'].min())
    delta = np.min([d for d in delta if not np.isinf(d)])
    print "delta = {}".format(delta)

    for df in dfs:
        for name in names_to_delete:
            del df[name]
        try:
            shift_log_transform(df, 'CSSD', -delta + 0.1)
        except KeyError:
            # CSSD itself may be listed in names_to_delete.
            pass

    # List of feature names
    features_names = list(dfs[0])
    # Count number of NaN for each feature
    for i, df in enumerate(dfs):
        print("File {}".format(i))
        for feature in features_names:
            print("Feature {} has {} NaNs".format(feature,
                                                  df[feature].isnull().sum()))
        print("=======================")

    # Convert to numpy arrays
    # Features
    X = list()
    for df in dfs:
        X.append(np.array(df[list(features_names)].values, dtype=float))
    X = np.vstack(X)

    # Responses: zeros for rows from the first file, ones for the rest.
    y = np.zeros(len(X))
    y[len(dfs[0]):] = np.ones(len(X) - len(dfs[0]))

    df = pd.concat(dfs)
    df['variable'] = y

    return X, y, df, features_names, delta
def load_data_tgt(fname, names, names_to_delete, delta):
    """
    Function that loads target data for classification.

    :param fname:
        Target data file.
    :param names:
        Names of columns in files.
    :param names_to_delete:
        Column names to delete.
    :param delta:
        Shift (minimum finite CSSD of the training data) used for the
        log-transform of the CSSD column; must match the training transform.

    :return:
        X, ``sklearn`` array of features, list of feature names
    """
    # Load data
    df = pd.read_table(fname, names=names, engine='python', na_values='+inf',
                       sep=r"\s*", usecols=range(30))

    for name in names_to_delete:
        del df[name]
    try:
        shift_log_transform(df, 'CSSD', -delta + 0.1)
    except KeyError:
        # CSSD itself may be listed in names_to_delete.
        pass

    # List of feature names
    features_names = list(df)
    # Count number of NaN for each feature
    for feature in features_names:
        print("Feature {} has {} NaNs".format(feature,
                                              df[feature].isnull().sum()))
    print("=======================")

    # Convert to numpy arrays
    # Features
    X = np.array(df[list(features_names)].values, dtype=float)

    # Original data, re-read so the returned frame keeps every column.
    df = pd.read_table(fname, names=names, engine='python', na_values='+inf',
                       sep=r"\s*", usecols=range(30))

    return X, features_names, df
# Push live training metrics to a locally running Keras remote monitor.
remote = callbacks.RemoteMonitor(root='http://localhost:9000')

# fix random seed for reproducibility
seed = 1
numpy.random.seed(seed)

# load dataset: file_0 holds constant stars (class 0),
# file_1 holds variable stars (class 1).
data_dir = '/home/ilya/code/ml4vs/data/dataset_OGLE/indexes_normalized'
file_1 = 'vast_lightcurve_statistics_normalized_variables_only.log'
file_0 = 'vast_lightcurve_statistics_normalized_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)

# Column names of the light-curve statistics tables.
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
         'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
         'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
         'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
# Non-informative columns dropped before training.
names_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID',
                   'Npts']
X, y, df, feature_names, delta = load_data([file_0, file_1], names, names_to_delete)
n_cv_iter = 5
def create_baseline():
    """Build and compile the baseline Keras MLP (25-25-13-1, sigmoid output).

    NOTE(review): reads the module-level global ``epochs`` (assigned AFTER
    this definition) to derive the SGD decay rate. This works only because
    KerasClassifier calls build_fn lazily, after ``epochs`` is set - confirm
    before reordering the script.
    """
    # create model
    model = Sequential()
    model.add(Dense(25, input_dim=25, init='normal', activation='relu',
                    W_constraint=maxnorm(3)))
    model.add(Dropout(0.1))
    model.add(Dense(25, init='normal', activation='relu',
                    W_constraint=maxnorm(3)))
    model.add(Dropout(0.1))
    model.add(Dense(13, init='normal', activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(1, init='normal', activation='sigmoid'))
    # Compile model
    learning_rate = 0.1
    decay_rate = learning_rate / epochs
    momentum = 0.90
    sgd = SGD(lr=learning_rate, decay=decay_rate, momentum=momentum,
              nesterov=False)
    model.compile(loss='binary_crossentropy', optimizer=sgd,
                  metrics=['accuracy'])
    return model
epochs = 50
# epochs = 125
batch_size = 12

# Pipeline: median imputation -> standardization -> Keras MLP classifier.
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
                                      axis=0, verbose=2)))
estimators.append(('scaler', StandardScaler()))
estimators.append(('mlp', KerasClassifier(build_fn=create_baseline,
                                          nb_epoch=epochs,
                                          batch_size=batch_size,
                                          verbose=0)))
skf = StratifiedKFold(y, n_folds=4, shuffle=True, random_state=seed)
pipeline = Pipeline(estimators)

# 4-fold cross-validated F1 score...
results = cross_val_score(pipeline, X, y, cv=skf, scoring='f1', n_jobs=3)
print("\n")
print(results)
print("\n")
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
print("\n")
# ...and ROC AUC over the same folds.
results = cross_val_score(pipeline, X, y, cv=skf, scoring='roc_auc', n_jobs=3)
print("\n")
print(results)
print("\n")
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
print("\n")
# Load blind test data
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics_normalized.log'
file_tgt = os.path.join(data_dir, file_tgt)
X_tgt, feature_names, df = load_data_tgt(file_tgt, names, names_to_delete,
                                         delta)

# Refit on the full training set and predict the blind sample.
pipeline.fit(X, y, mlp__batch_size=batch_size, mlp__nb_epoch=epochs)
model = pipeline.named_steps['mlp']
# NOTE(review): predicting with the bare 'mlp' step skips the imputer and
# scaler fitted inside the pipeline - confirm this is intentional.
y_pred = model.predict(X_tgt)
y_probs = model.predict_proba(X_tgt)

# idx: predicted variables, idx_: predicted constants.
idx = y_probs[:, 1] > 0.5
idx_ = y_probs[:, 1] < 0.5
nns_no = list(df['star_ID'][idx_])
print("Found {} variables".format(np.count_nonzero(idx)))

with open('nn_results.txt', 'w') as fo:
    for line in list(df['star_ID'][idx]):
        fo.write(line + '\n')

# Found negatives: reduce each star_ID to its bare numeric identifier.
nns_no = set([line.strip().split('_')[4].split('.')[0] for line in nns_no])

with open('clean_list_of_new_variables.txt', 'r') as fo:
    news = fo.readlines()
news = [line.strip().split(' ')[1] for line in news]

with open('nn_results.txt', 'r') as fo:
    nns = fo.readlines()
nns = [line.strip().split('_')[4].split('.')[0] for line in nns]
nns = set(nns)

# New variables discovered by GBC
news = set(news)
# 11 new variables are found
len(news.intersection(nns))
# It was found
'181193' in nns

with open('candidates_50perc_threshold.txt', 'r') as fo:
    c50 = fo.readlines()
c50 = [line.strip("\", ', \", \n, }, {") for line in c50]

with open('variables_not_in_catalogs.txt', 'r') as fo:
    not_in_cat = fo.readlines()
nic = [line.strip().split(' ')[1] for line in not_in_cat]

# Catalogue variables
cat_vars = set(c50).difference(set(nic))
# Non-catalogue variable
noncat_vars = set([line.strip().split(' ')[1] for line in not_in_cat if 'CST' not in line])
# All variables
all_vars = news.union(cat_vars).union(noncat_vars)
# Number of true positives
# 145
len(all_vars.intersection(nns))
# Number of false negatives
# 43
len(nns_no.intersection(all_vars))
# # Check overfitting
# sss = StratifiedShuffleSplit(y, n_iter=1, test_size=1. / n_cv_iter,
# random_state=seed)
# for train_index, test_index in sss:
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
#
# import keras
# history = keras.callbacks.History()
# print("Fitting...")
# X_test_ = X_test.copy()
# X_train_ = X_train.copy()
# for name, transform in pipeline.steps[:-1]:
# print(name, transform)
# transform.fit(X_train_)
# X_test_ = transform.transform(X_test_)
# X_train_ = transform.transform(X_train_)
# pipeline.fit(X_train, y_train, mlp__validation_data=(X_test_, y_test),
# mlp__batch_size=batch_size, mlp__nb_epoch=epochs,
# mlp__callbacks=[history])
# model = pipeline.named_steps['mlp']
#
# y_pred = model.predict(X_test_)
# y_pred[y_pred < 0.5] = 0.
# y_pred[y_pred >= 0.5] = 1.
# y_probs = model.predict_proba(X_test_)
# cm = confusion_matrix(y_test, y_pred)
# print_cm_summary(cm)
#
# plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# # summarize history for loss
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# # Build several cm
# skf = StratifiedKFold(y, n_folds=4, shuffle=True, random_state=seed)
# for train_index, test_index in skf:
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
#
# import keras
# history = keras.callbacks.History()
# print("Fitting...")
# X_test_ = X_test.copy()
# X_train_ = X_train.copy()
# estimators = list()
# estimators.append(('imputer', Imputer(missing_values='NaN', strategy='mean',
# axis=0, verbose=2)))
# estimators.append(('scaler', StandardScaler()))
# estimators.append(('mlp', KerasClassifier(build_fn=create_baseline,
# nb_epoch=epochs,
# batch_size=batch_size,
# verbose=0)))
# pipeline = Pipeline(estimators)
# for name, transform in pipeline.steps[:-1]:
# print(name, transform)
# transform.fit(X_train_)
# X_test_ = transform.transform(X_test_)
# X_train_ = transform.transform(X_train_)
# pipeline.fit(X_train, y_train, mlp__validation_data=(X_test_, y_test),
# mlp__batch_size=batch_size, mlp__nb_epoch=epochs,
# mlp__callbacks=[history])
# model = pipeline.named_steps['mlp']
#
# y_pred = model.predict(X_test_)
# y_pred[y_pred < 0.5] = 0.
# y_pred[y_pred >= 0.5] = 1.
# y_probs = model.predict_proba(X_test_)
# cm = confusion_matrix(y_test, y_pred)
# print_cm_summary(cm)
#
# # summarize history for loss
# plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show() | unknown | codeparrot/codeparrot-clean | ||
// NOTE: auto-generated TypeScript compiler baseline (test output) - do not edit by hand.
//// [tests/cases/conformance/statements/for-ofStatements/ES5For-ofTypeCheck13.ts] ////

//// [ES5For-ofTypeCheck13.ts]
const strSet: Set<string> = new Set()
strSet.add('Hello')
strSet.add('World')

for (const str of strSet) { }

//// [ES5For-ofTypeCheck13.js]
"use strict";
const strSet = new Set();
strSet.add('Hello');
strSet.add('World');
for (const str of strSet) { }
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = '_'.join(['test', op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError('Test %s defined more than once' % test_name)
setattr(test, test_name, fn)
class ShapeTest(test_lib.TestCase):
  """Gradient tests where shapes are graph-time values, not static."""

  def testBatchGradientUnknownSize(self):
    with self.test_session():
      # Batch/matrix sizes are constant *tensors*, so shapes are dynamic.
      batch_size = constant_op.constant(3)
      matrix_size = constant_op.constant(4)
      # Batch of 3 identity matrices of size 4x4.
      batch_identity = array_ops.tile(
          array_ops.expand_dims(
              array_ops.diag(array_ops.ones([matrix_size])), 0),
          [batch_size, 1, 1])
      determinants = linalg_ops.matrix_determinant(batch_identity)
      reduced = math_ops.reduce_sum(determinants)
      sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
      # For identity matrices the determinant gradient equals the identity
      # itself, so the gradient should reproduce the input batch.
      self.assertAllClose(batch_identity.eval(), sum_grad.eval())
class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
  # Placeholder: test methods are attached dynamically via _AddTest below.
  pass  # Filled in below
def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
  """Return a test method comparing theoretical vs numerical gradients of
  the unary matrix op `functor_` on a random input of `shape_`/`dtype_`."""

  def Test(self):
    with self.test_session():
      np.random.seed(1)
      a_np = np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      a = constant_op.constant(a_np)
      b = functor_(a, **kwargs_)

      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      epsilon = np.finfo(dtype_).eps
      delta = epsilon**(1.0 / 3.0)
      # tolerance obtained by looking at actual differences using
      # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
      tol = 1e-6 if dtype_ == np.float64 else 0.05
      theoretical, numerical = gradient_checker.compute_gradient(
          a,
          a.get_shape().as_list(),
          b,
          b.get_shape().as_list(),
          x_init_value=a_np,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test
class MatrixBinaryFunctorGradientTest(test_lib.TestCase):
  # Placeholder: test methods are attached dynamically via _AddTest below.
  pass  # Filled in below
def _GetMatrixBinaryFunctorGradientTest(functor_,
                                        dtype_,
                                        shape_,
                                        float32_tol_fudge=1.0,
                                        **kwargs_):
  """Return a test method checking gradients of the binary matrix op
  `functor_` w.r.t. both operands.

  float32_tol_fudge loosens the float32 tolerance for numerically less
  stable ops.
  """

  def Test(self):
    with self.test_session():
      np.random.seed(1)
      a_np = np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      a = constant_op.constant(a_np)

      b_np = np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
      b = constant_op.constant(b_np)
      c = functor_(a, b, **kwargs_)

      # Optimal stepsize for central difference is O(epsilon^{1/3}).
      epsilon = np.finfo(dtype_).eps
      delta = epsilon**(1.0 / 3.0)
      # tolerance obtained by looking at actual differences using
      # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
      tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.04
      # The gradients for a and b may be of very different magnitudes,
      # so to not get spurious failures we test them separately.
      for factor, factor_init in [a, a_np], [b, b_np]:
        theoretical, numerical = gradient_checker.compute_gradient(
            factor,
            factor.get_shape().as_list(),
            c,
            c.get_shape().as_list(),
            x_init_value=factor_init,
            delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test
if __name__ == '__main__':
  # Tests for gradients of binary matrix operations.
  for dtype in np.float32, np.float64:
    for size in 2, 5, 10:
      # We skip the rank 4, size 10 case: it is slow and conceptually covered
      # by the other cases.
      for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
        for adjoint in False, True:
          shape = extra + (size, size)
          name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(str, shape)),
                                   str(adjoint))
          _AddTest(
              MatrixBinaryFunctorGradientTest,
              'MatrixSolveGradient',
              name,
              _GetMatrixBinaryFunctorGradientTest(
                  linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))

          for lower in True, False:
            # BUGFIX: the original reassigned `name` inside this loop
            # (name = '%s_low_%s' % (name, lower)), so the second iteration
            # produced names like '..._low_True_low_False'. Use a separate
            # variable so each test name carries exactly one _low_ suffix.
            tri_name = '%s_low_%s' % (name, lower)
            _AddTest(
                MatrixBinaryFunctorGradientTest,
                'MatrixTriangularSolveGradient',
                tri_name,
                _GetMatrixBinaryFunctorGradientTest(
                    linalg_ops.matrix_triangular_solve,
                    dtype,
                    shape,
                    float32_tol_fudge=4.0,
                    adjoint=adjoint,
                    lower=lower))

  # Tests for gradients of unary matrix operations.
  for dtype in np.float32, np.float64:
    for size in 2, 5, 10:
      # We skip the rank 4, size 10 case: it is slow and conceptually covered
      # by the other cases.
      for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
        shape = extra + (size, size)
        name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
        _AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
                 _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
                                                    dtype, shape))
        _AddTest(
            MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient', name,
            _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,
                                               dtype, shape))

  # Tests for gradients of matrix_solve_ls.
  for dtype in np.float32, np.float64:
    for rows in 2, 5, 10:
      for cols in 2, 5, 10:
        for l2_regularization in 1e-6, 0.001, 1.0:
          shape = (rows, cols)
          name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),
                               l2_regularization)
          _AddTest(
              MatrixBinaryFunctorGradientTest,
              'MatrixSolveLsGradient',
              name,
              _GetMatrixBinaryFunctorGradientTest(
                  # The default argument pins l2_regularization for this
                  # iteration (avoids the late-binding closure pitfall).
                  lambda a, b, l=l2_regularization: linalg_ops.matrix_solve_ls(a, b, l),
                  dtype,
                  shape,
                  float32_tol_fudge=4.0))

  test_lib.main()
# (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
from utility import to_seq
def difference (b, a):
    """ Returns the elements of B that are not in A, preserving order
        (and duplicates) from B.
    """
    return [element for element in b if element not in a]
def intersection (set1, set2):
    """ Returns the items of set1 that also appear in set2, preserving
        set1's order.
    """
    return [v for v in set1 if v in set2]
def contains (small, large):
    """ Returns true iff all elements of 'small' exist in 'large'.
    """
    # Normalize both arguments to sequences first.
    small = to_seq (small)
    large = to_seq (large)
    return all(s in large for s in small)
def equal (a, b):
    """ Returns True iff 'a' contains the same elements as 'b',
        irrespective of their order.
    """
    # TODO: Python 2.4 has a proper set class.
    # Mutual containment == same element sets.
    return contains (a, b) and contains (b, a)
from pluggy import HookspecMarker, HookimplMarker
# pluggy markers for declaring (hookspec) and implementing (hookimpl)
# hooks in the "allure" plugin namespace.
hookspec = HookspecMarker("allure")
hookimpl = HookimplMarker("allure")
class AllureUserHooks(object):
    """Hook specifications for user-facing allure operations:
    labels, links, steps and attachments."""

    @hookspec
    def decorate_as_label(self, label_type, labels):
        """Decorate a test with labels of the given label_type."""

    @hookspec
    def add_label(self, label_type, labels):
        """Add labels of the given label_type to the current test."""

    @hookspec
    def decorate_as_link(self, url, link_type, name):
        """Decorate a test with a link (url, link_type, display name)."""

    @hookspec
    def add_link(self, url, link_type, name):
        """Add a link (url, link_type, display name) to the current test."""

    @hookspec
    def start_step(self, uuid, title, params):
        """Start a test step identified by uuid, with title and params."""

    @hookspec
    def stop_step(self, uuid, exc_type, exc_val, exc_tb):
        """Stop the step identified by uuid; exc_* describe a failure, if any."""

    @hookspec
    def attach_data(self, body, name, attachment_type, extension):
        """Attach in-memory data (body) to the report."""

    @hookspec
    def attach_file(self, source, name, attachment_type, extension):
        """Attach an existing file (source path) to the report."""
class AllureDeveloperHooks(object):
    """Hook specifications for allure internals: fixture/test lifecycle
    and report emission."""

    @hookspec
    def start_fixture(self, parent_uuid, uuid, name, parameters):
        """Start a fixture identified by uuid under parent_uuid."""

    @hookspec
    def stop_fixture(self, parent_uuid, uuid, name, exc_type, exc_val, exc_tb):
        """Stop a fixture; exc_* describe a failure, if any."""

    @hookspec
    def start_test(self, parent_uuid, uuid, name, parameters, context):
        """Start a test identified by uuid under parent_uuid."""

    @hookspec
    def stop_test(self, parent_uuid, uuid, name, context, exc_type, exc_val, exc_tb):
        """Stop a test; exc_* describe a failure, if any."""

    @hookspec
    def report_result(self, result):
        """Report a finished test result."""

    @hookspec
    def report_container(self, container):
        """Report a result container."""

    @hookspec
    def report_attached_file(self, source, file_name):
        """Report an attachment backed by a file on disk."""

    @hookspec
    def report_attached_data(self, body, file_name):
        """Report an attachment held in memory (body)."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""create_def_file.py - tool to create a windows def file.
The def file can be used to export symbols from the tensorflow dll to enable
tf.load_library().
Because the linker allows only 64K symbols to be exported per dll
we filter the symbols down to the essentials. The regular expressions
we use for this are specific to tensorflow.
TODO: this works fine but there is an issue with exporting
'const char * const' and importing it from a user_ops. The problem is
on the importing end and using __declspec(dllimport) works around it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import io
import os
import re
import subprocess
import sys
import tempfile
# External tools we use that come with visual studio sdk and
# we assume that the caller has the correct PATH to the sdk
UNDNAME = "undname.exe"
DUMPBIN = "dumpbin.exe"
# Exclude if matched
EXCLUDE_RE = re.compile(r"deleting destructor|::internal::")
# Include if matched before exclude
INCLUDEPRE_RE = re.compile(r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::CheckOpMessageBuilder")
# Include if matched after exclude
INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
r"tensorflow::|"
r"functor::|"
r"perftools::gputools")
def get_args():
    """Parse and return the command line arguments (--input, --output)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", help="input library", required=True)
    parser.add_argument("--output", help="output deffile", required=True)
    return parser.parse_args()
def main():
    """Extract linkable symbols from the input .lib via dumpbin, filter them
    with the module regexes (comparing on the undname-demangled form), and
    write a linker .def file exporting the survivors.

    Returns 0 on success, or the failing tool's exit code.
    """
    args = get_args()

    # Pipe dumpbin to extract all linkable symbols from a lib.
    # Good symbols are collected in candidates and also written to
    # a temp file.
    candidates = []
    tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
    proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", args.input],
                            stdout=subprocess.PIPE)
    for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
        cols = line.split()
        if len(cols) < 2:
            continue
        sym = cols[1]
        tmpfile.file.write(sym + "\n")
        candidates.append(sym)
    tmpfile.file.close()
    exit_code = proc.wait()
    if exit_code != 0:
        print("{} failed, exit={}".format(DUMPBIN, exit_code))
        return exit_code

    # Run the symbols through undname to get their undecorated name
    # so we can filter on something readable.
    with open(args.output, "w") as def_fp:
        # track dupes
        taken = set()

        # Header for the def file. Since the tensorflow.dll is actually called
        # _pywrap_tensorflow.pyd in the python wheel, hint that in the def file.
        def_fp.write("LIBRARY _pywrap_tensorflow_internal.pyd\n")
        def_fp.write("EXPORTS\n")
        def_fp.write("\t ??1OpDef@tensorflow@@UEAA@XZ\n")

        # Each symbols returned by undname matches the same position in candidates.
        # We compare on undname but use the decorated name from candidates.
        dupes = 0
        proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
        for idx, line in enumerate(io.TextIOWrapper(proc.stdout, encoding="utf-8")):
            decorated = candidates[idx]
            if decorated in taken:
                # Symbol is already in output, done.
                dupes += 1
                continue
            # Pre-include overrides exclusion; otherwise apply exclude
            # then include filters on the demangled name.
            if not INCLUDEPRE_RE.search(line):
                if EXCLUDE_RE.search(line):
                    continue
                if not INCLUDE_RE.search(line):
                    continue
            def_fp.write("\t" + decorated + "\n")
            taken.add(decorated)

        exit_code = proc.wait()
        if exit_code != 0:
            print("{} failed, exit={}".format(UNDNAME, exit_code))
            return exit_code

    os.unlink(tmpfile.name)
    print("symbols={}, taken={}, dupes={}"
          .format(len(candidates), len(taken), dupes))
    return 0


if __name__ == "__main__":
    sys.exit(main())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import re
import os
from disabler import cut, ratio, better, worse_filter
import urlparse
import string
from pcnile import langconv
from pcnile.langconv import Converter
from django.utils.encoding import force_unicode
import pymongo
def get_online():
    """Collect distinct logos, names and link hosts of online resources from
    the local MongoDB 'server' collection and dump each list to a text file
    (logo.txt, name.txt, netloc.txt)."""
    logo = list()
    name = list()
    netloc = list()
    db=pymongo.Connection().server
    for d in db.server.find():
        for o in d['resource']['online']:
            logo.append(o['logo'])
            name.append(o['name'])
            t = urlparse.urlparse(o['link']).netloc
            if t == 'video.baidu.com':
                # Debug output for baidu-hosted entries.
                print o
            netloc.append(t)
    # De-duplicate (note: order is not preserved).
    logo=list(set(logo))
    name=list(set(name))
    netloc=list(set(netloc))
    with codecs.open('logo.txt', 'wb', 'utf-8') as f:
        f.write('\n'.join(logo))
    with codecs.open('name.txt', 'wb', 'utf-8') as f:
        f.write('\n'.join(name))
    with codecs.open('netloc.txt', 'wb', 'utf-8') as f:
        f.write('\n'.join(netloc))
def get_name():
    """Extract titles enclosed in Chinese book-title marks (《...》) from
    m.txt and write them, one per line, to out.txt."""
    lists = list()
    with codecs.open('m.txt', 'rb') as f:
        for l in f:
            # Skip blank lines.
            if l.rstrip().lstrip():
                lists.append(l)
    out = list()
    for l in lists:
        l = force_unicode(l)
        try:
            out.append( re.findall(ur"《(\S+)》", l)[0] )
        except IndexError:
            # Line has no 《...》 title - skip it.
            pass
    with codecs.open('out.txt', 'wb', 'utf-8') as f:
        f.write('\n'.join(out))
def merge_dict():
    """Merge the word lists found under two dictionary directories into
    words1.txt and words2.txt respectively, dropping duplicates while
    keeping first-seen order."""
    words = list()
    df = set()  # already-seen words (duplicate filter)
    for fil in os.listdir("D:\\obtainfo\\obtainfostatic\\dict\\"):
        with codecs.open(os.path.join("D:\\obtainfo\\obtainfostatic\\dict", fil), 'rb', 'utf-8') as f:
            for w in f:
                w = w.lstrip().rstrip()
                if w:
                    if w not in df:
                        df.add(w)
                        words.append(w)
    with codecs.open('words1.txt', 'wb', 'utf-8') as f:
        f.write("\n".join(words))
    # Second directory, written to a separate file.
    words = list()
    df = set()
    for fil in os.listdir("D:\\obtainfo\\obtainfostatic\\1\\tools\\dict\\"):
        print fil
        with codecs.open(os.path.join("D:\\obtainfo\\obtainfostatic\\1\\tools\\dict\\", fil), 'rb', 'utf-8') as f:
            for w in f:
                w = w.lstrip().rstrip()
                if w:
                    if w not in df:
                        df.add(w)
                        words.append(w)
    with codecs.open('words2.txt', 'wb', 'utf-8') as f:
        f.write("\n".join(words))
def make_jieba_dict():
    """Build a jieba user dictionary (userdict.txt) from words2.txt:
    unique upper-cased words, longest first, each written as
    '<word> <freq> n' with freq proportional to word length."""
    df = set()
    maybe = list()
    with codecs.open('words2.txt', 'rb', 'utf-8') as f:
        for l in f:
            w = l.strip().upper()
            if w and w not in df:
                df.add(w)
                maybe.append(w)
    # Longest words first so multi-character entries take precedence.
    maybe = sorted(maybe, reverse = True, key = lambda w : len(w) )
    dicts = [u"%s %d n" % ( m, len(m) * 100 ) for m in maybe]
    """
    maybe = list()
    with codecs.open('words1.txt', 'rb', 'utf-8') as f:
        for l in f:
            w = l.strip()
            if w and len(w) > 1 and w not in df:
                df.add(w)
                maybe.append(w)
    maybe = sorted(maybe, reverse = True, key = lambda w : len(w) )
    dicts = dicts + [u"%s %d n" % ( m, len(m) * 50 ) for m in maybe]
    """
    with codecs.open('userdict.txt', 'wb', 'utf-8') as f:
        f.write('\n'.join(dicts))
# Normalize a title: replace bracket characters and slashes with spaces,
# then upper-case. NOTE(review): the '_.' alternative matches an underscore
# followed by ANY character (the dot is unescaped) - confirm intended.
format_title = lambda title : re.sub(ur"【|】|\[|\]|/|_.", u' ', title).upper()
def get_server_name():
db = pymongo.Connection().server
conv = Converter('zh-hans')
re_han = re.compile(ur"([\u4E00-\u9FA5a-zA-Z0-9+#&\_]+)", re.U)
re_pure_han = re.compile(ur"([\u4E00-\u9FA5]+)", re.U)
lines = list()
for d in db.server.find():
lines += d['title'].split()
for aka in d['aka']:
lines += aka.split()
outs = list()
df = set()
for l in lines:
for w in re_han.findall(l):
if len(w) == 1 or re.match(r'\w+', w):
continue
w = conv.convert(w)
if w not in df:
df.add(w)
outs.append(w)
for w in re_pure_han.findall(l):
if len(w) == 1:
continue
w = conv.convert(w)
if w not in df:
df.add(w)
outs.append(w)
with codecs.open('server_title.txt', 'wb', 'utf-8') as f:
f.write('\n'.join(sorted(outs, reverse = True, key = lambda w : len(w) )))
def get_server_genre():
db = pymongo.Connection().server
conv = Converter('zh-hans')
lines = list()
for d in db.server.find({}, {'genre':1}):
for genre in d['genre']:
lines += genre.split()
outs = list()
df = set()
re_han = re.compile(ur"([\u4E00-\u9FA5a-zA-Z0-9+#&\._]+)", re.U)
for l in lines:
for w in re_han.findall(l):
w = conv.convert(w)
if w not in df:
df.add(w)
outs.append(w)
with codecs.open('server_genre.txt', 'wb', 'utf-8') as f:
f.write('\n'.join( sorted(outs, reverse = True, key = lambda w : len(w) )))
def Q2B(uchar):
    """全角转半角 -- map one full-width character to its half-width form.

    Returns *uchar* unchanged when the shifted code point is not a
    printable ASCII character.
    """
    code = ord(uchar)
    if code == 0x3000:
        # Full-width space maps directly to the ASCII space.
        code = 0x0020
    else:
        code -= 0xfee0
    if 0x0020 <= code <= 0x7e:
        return unichr(code)
    # No half-width equivalent: keep the original character.
    return uchar
def stringQ2B(ustring):
    """把字符串全角转半角 -- convert every full-width character in *ustring*
    to its half-width form."""
    return "".join(Q2B(ch) for ch in ustring)
def uniform(ustring):
    """格式化字符串 -- normalize: full-width to half-width, then lower-case."""
    half_width = stringQ2B(ustring)
    return half_width.lower()
append = [u'加长版', u'重压', u'3D版', u'全集']
conv = Converter('zh-hans')
re_han = re.compile(ur"([\u4E00-\u9FA5a-zA-Z0-9+#&\-:·]+)", re.U)
re_split = re.compile(ur'[^《》」「】【\[\]\)\(/\+_ ]+', re.U)
re_size = re.compile(r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?(g|G|m|M|mb|Mb|MB|Mb)')
re_num = re.compile(ur"(^\d{2}\-\d{2}$)|(^[0-9.]+$)")
re_eng = re.compile(ur'[a-zA-Z0-9+#&\.\-_:]+$')
re_end = re.compile(ur'(%s)$' % "|".join(append))
re_title = re.compile(ur"[\u4E00-\u9FA5a-zA-Z0-9+#&\-:·]+[\u4E00-\u9FA5]?(|(i|ii|iii|iv)|\d{1, 4})")
def get_title(rawtitle):
    """Heuristically pick the best display title out of a raw release title.

    Splits the normalized title into blocks, drops size/number noise, and
    ranks the remaining candidates with ratio() (defined elsewhere in this
    file -- presumably a "badness" score; lower appears to be better).
    worse_filter() is also defined elsewhere -- TODO confirm its contract.
    """
    blocks = list()
    # Normalize: full-width -> half-width, lower-case, then to simplified Chinese.
    title = conv.convert(uniform( rawtitle ))
    for b in re_split.findall(title):
        block = b.strip()
        if block and not worse_filter( block ):
            blocks.append(block)
    block_size = len(blocks)
    if block_size == 0:
        print 'zero block size in %s' % rawtitle
    elif block_size == 1:
        title = blocks[0]
    else:
        # Drop blocks that are pure numbers/episode ranges or file sizes.
        words = list()
        for block in blocks:
            if not re_num.match(block) and not re_size.match(block):
                words.append(block)
        word_size = len(words)
        if word_size == 0:
            title = " ".join(blocks)
        elif word_size == 1:
            title = words[0]
        else:
            # Keep the two lowest-ratio candidates.
            result = [{'w':w, 'r':ratio(w)} for w in words]
            result = [ r for r in sorted(result, key=lambda w : w['r'])[:2] ]
            if len(result) == 1:
                title = result[0]['w']
            else:
                # Join both only when the runner-up also scores well.
                if result[1]['r'] < 0.2:
                    title = " ".join([r['w'] for r in result])
                else:
                    title = result[0]['w']
    # For non-ascii titles, keep only the first non-empty dot-separated part.
    if not re_eng.match(title):
        for t in title.split('.'):
            if t.strip():
                title = t
                break
    # Strip trailing edition markers such as 全集/3D版.
    title = re_end.sub('', title)
    try:
        return re_title.match(title).group()
    except AttributeError:
        # re_title did not match at the start; return the cleaned title as-is.
        return title
def get_scrapy_name():
    """Write "<cleaned title> | <raw title>" for every scrapy document
    to title.txt (UTF-8, one pair per line)."""
    db = pymongo.Connection().scrapy
    titles = []
    for doc in db.scrapy.find():
        raw = doc['title']
        titles.append(get_title(raw) + ' | ' + raw)
    with codecs.open('title.txt', 'wb', 'utf-8') as f:
        f.write("\n".join(titles))
if __name__ == '__main__':
    # Only the jieba user-dictionary build runs by default; the other
    # helpers in this module are invoked manually.
    make_jieba_dict()
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator, AutoMLDeleteDatasetOperator, AutoMLDeleteModelOperator,
AutoMLImportDataOperator, AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago
# Project, region and training-data CSV; all overridable via environment.
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_VIDEO_BUCKET = os.environ.get(
    "GCP_AUTOML_VIDEO_BUCKET", "gs://automl-video-demo-data/hmdb_split1.csv"
)
# Example values
DATASET_ID = "VCN123455678"
# Example model
MODEL = {
    "display_name": "auto_model_1",
    "dataset_id": DATASET_ID,
    "video_classification_model_metadata": {},
}
# Example dataset
DATASET = {
    "display_name": "test_video_dataset",
    "video_classification_dataset_metadata": {},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_VIDEO_BUCKET]}}
default_args = {"start_date": days_ago(1)}
# Jinja macro used in templates to pull object ids out of XCom payloads.
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Video Intelligence Classification
with models.DAG(
    "example_automl_video",
    default_args=default_args,
    schedule_interval=None,  # Override to match your needs
    user_defined_macros={"extract_object_id": extract_object_id},
    tags=['example'],
) as example_dag:
    create_dataset_task = AutoMLCreateDatasetOperator(
        task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
    )
    # Jinja template resolved at runtime from the create task's XCom.
    dataset_id = (
        '{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
    )
    import_dataset_task = AutoMLImportDataOperator(
        task_id="import_dataset_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        input_config=IMPORT_INPUT_CONFIG,
    )
    # Mutates the module-level MODEL dict at parse time so training
    # targets the freshly created dataset.
    MODEL["dataset_id"] = dataset_id
    create_model = AutoMLTrainModelOperator(
        task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION
    )
    model_id = "{{ task_instance.xcom_pull('create_model', key='model_id') }}"
    delete_model_task = AutoMLDeleteModelOperator(
        task_id="delete_model_task",
        model_id=model_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    delete_datasets_task = AutoMLDeleteDatasetOperator(
        task_id="delete_datasets_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # Linear pipeline: create -> import -> train -> delete model -> delete dataset.
    create_dataset_task >> import_dataset_task >> create_model >> \
        delete_model_task >> delete_datasets_task
import hachoir_core
import hachoir_core.cmd_line
import hachoir_core.error
import hachoir_core.i18n
import hachoir_core.tools
import hachoir_metadata
import hachoir_parser
import sys
# TODO: add TrID
# I changed this
# https://bitbucket.org/haypo/hachoir/wiki/hachoir-metadata/code
def getMetaData(filename):
text = ""
filename, realname = hachoir_core.cmd_line.unicodeFilename(filename), filename
parser = hachoir_parser.createParser(filename, realname)
if not parser:
print >>sys.stderr, "Unable to parse file"
return text
try:
metadata = hachoir_metadata.extractMetadata(parser)
except HachoirError, err:
print "Metadata extraction error: %s" % unicode(err)
metadata = None
if not metadata:
print >>sys.stderr, "Unable to extract metadata"
return text
text = metadata.exportPlaintext()
return text
if __name__ == "__main__":
    # Default sample file; an explicit path on the command line wins.
    filename = "../temp/diskcheckup.exe"
    if 1 < len(sys.argv):
        filename = sys.argv[1]
    meta_data_text = getMetaData(filename)
    #print meta_data_text
    # Render each metadata line safely in the terminal's charset.
    for line in meta_data_text:
        print hachoir_core.tools.makePrintable(line, hachoir_core.i18n.getTerminalCharset() )
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""LBaaS add status description
Revision ID: 2032abe8edac
Revises: 477a4488d3f4
Create Date: 2013-06-24 06:51:47.308545
"""
# revision identifiers, used by Alembic.
revision = '2032abe8edac'
down_revision = '477a4488d3f4'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# LBaaS tables that gain/lose the status_description column.
ENTITIES = ['vips', 'pools', 'members', 'healthmonitors']
def upgrade(active_plugins=None, options=None):
    """Add a nullable status_description varchar(255) to each LBaaS table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    for table in ENTITIES:
        op.add_column(table, sa.Column('status_description', sa.String(255)))
def downgrade(active_plugins=None, options=None):
    """Drop the status_description column from each LBaaS table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    for table in ENTITIES:
        op.drop_column(table, 'status_description')
# -*- coding: utf-8 -*-
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words, include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
# Lexer classes exported by this module.
__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer',
           'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer']
class ProtoBufLexer(RegexLexer):
    """
    Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
    definition files.
    .. versionadded:: 1.4
    """
    name = 'Protocol Buffer'
    aliases = ['protobuf', 'proto']
    filenames = ['*.proto']
    tokens = {
        'root': [
            (r'[ \t]+', Text),
            (r'[,;{}\[\]()]', Punctuation),
            # // line comments and /* ... */ block comments.
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
            (words((
                'import', 'option', 'optional', 'required', 'repeated', 'default',
                'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns',
                'oneof'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words((
                'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
                'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
                'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
             Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            # Declarations push a state that consumes the declared name.
            (r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'),
            (r'(message|extend)(\s+)',
             bygroups(Keyword.Declaration, Text), 'message'),
            (r'(enum|group|service)(\s+)',
             bygroups(Keyword.Declaration, Text), 'type'),
            (r'\".*?\"', String),
            (r'\'.*?\'', String),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'(\-?(inf|nan))\b', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            (r'[+-=]', Operator),
            (r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
             bygroups(Name.Attribute, Text, Operator)),
            ('[a-zA-Z_][\w.]*', Name),
        ],
        'package': [
            (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        'message': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            default('#pop'),
        ],
        'type': [
            (r'[a-zA-Z_]\w*', Name, '#pop'),
            default('#pop'),
        ],
    }
class BroLexer(RegexLexer):
    """
    For `Bro <http://bro-ids.org/>`_ scripts.
    .. versionadded:: 1.5
    """
    name = 'Bro'
    aliases = ['bro']
    filenames = ['*.bro']
    # Regex fragments reused below in the numeric/address rules.
    _hex = r'[0-9a-fA-F_]'
    _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
    _h = r'[A-Za-z0-9][-A-Za-z0-9]*'
    tokens = {
        'root': [
            # Whitespace
            (r'^@.*?\n', Comment.Preproc),
            (r'#.*?\n', Comment.Single),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),
            # Keywords
            (r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
             r'|export|for|function|if|global|hook|local|module|next'
             r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
            (r'(addr|any|bool|count|counter|double|file|int|interval|net'
             r'|pattern|port|record|set|string|subnet|table|time|timer'
             r'|vector)\b', Keyword.Type),
            (r'(T|F)\b', Keyword.Constant),
            (r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
             r'|default|disable_print_hook|raw_output|encrypt|group|log'
             r'|mergeable|optional|persistent|priority|redef'
             r'|rotate_(?:interval|size)|synchronized)\b',
             bygroups(Punctuation, Keyword)),
            (r'\s+module\b', Keyword.Namespace),
            # Addresses, ports and networks
            (r'\d+/(tcp|udp|icmp|unknown)\b', Number),
            (r'(\d+\.){3}\d+', Number),
            (r'(' + _hex + r'){7}' + _hex, Number),
            (r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
            (r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
            (r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
            # Hostnames
            (_h + r'(\.' + _h + r')+', String),
            # Numeric
            (_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
            (r'0[xX]' + _hex, Number.Hex),
            (_float, Number.Float),
            (r'\d+', Number.Integer),
            (r'/', String.Regex, 'regex'),
            (r'"', String, 'string'),
            # Operators
            (r'[!%*/+:<=>?~|-]', Operator),
            (r'([-+=&|]{2}|[+=!><-]=)', Operator),
            (r'(in|match)\b', Operator.Word),
            (r'[{}()\[\]$.,;]', Punctuation),
            # Identifier
            (r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
            (r'[a-zA-Z_]\w*', Name)
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),
            (r'\\\n', String),
            (r'\\', String)
        ],
        'regex': [
            (r'/', String.Regex, '#pop'),
            (r'\\[\\nt/]', String.Regex),  # String.Escape is too intense here.
            (r'[^\\/\n]+', String.Regex),
            (r'\\\n', String.Regex),
            (r'\\', String.Regex)
        ]
    }
class PuppetLexer(RegexLexer):
    """
    For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
    .. versionadded:: 1.6
    """
    name = 'Puppet'
    aliases = ['puppet']
    filenames = ['*.pp']
    tokens = {
        # 'root' just dispatches into the specialised sub-states below.
        'root': [
            include('comments'),
            include('keywords'),
            include('names'),
            include('numbers'),
            include('operators'),
            include('strings'),
            (r'[]{}:(),;[]', Punctuation),
            (r'[^\S\n]+', Text),
        ],
        'comments': [
            (r'\s*#.*$', Comment),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'operators': [
            (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
            (r'(in|and|or|not)\b', Operator.Word),
        ],
        'names': [
            ('[a-zA-Z_]\w*', Name.Attribute),
            (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
                                               String, Punctuation)),
            (r'\$\S+', Name.Variable),
        ],
        'numbers': [
            # Copypasta from the Python lexer
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+j?', Number.Integer)
        ],
        'keywords': [
            # Left out 'group' and 'require'
            # Since they're often used as attributes
            (words((
                'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
                'check', 'class', 'computer', 'configured', 'contained',
                'create_resources', 'crit', 'cron', 'debug', 'default',
                'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
                'err', 'exec', 'extlookup', 'fail', 'false', 'file',
                'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
                'include', 'info', 'inherits', 'inline_template', 'installed',
                'interface', 'k5login', 'latest', 'link', 'loglevel',
                'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
                'mount', 'mounted', 'nagios_command', 'nagios_contact',
                'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
                'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
                'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
                'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
                'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
                'realize', 'regsubst', 'resources', 'role', 'router', 'running',
                'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
                'service', 'sha1', 'shellquote', 'split', 'sprintf',
                'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
                'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
                'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
                'zpool'), prefix='(?i)', suffix=r'\b'),
             Keyword),
        ],
        'strings': [
            (r'"([^"])*"', String),
            (r"'(\\'|[^'])*'", String),
        ],
    }
class RslLexer(RegexLexer):
    """
    `RSL <http://en.wikipedia.org/wiki/RAISE>`_ is the formal specification
    language used in RAISE (Rigorous Approach to Industrial Software Engineering)
    method.
    .. versionadded:: 2.0
    """
    name = 'RSL'
    aliases = ['rsl']
    filenames = ['*.rsl']
    mimetypes = ['text/rsl']
    flags = re.MULTILINE | re.DOTALL
    tokens = {
        'root': [
            (words((
                'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
                'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
                'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
                'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
                'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
                'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
                'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
                'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
                'type', 'union', 'until', 'use', 'value', 'variable', 'while',
                'with', 'write', '~isin', '-inflist', '-infset', '-list',
                '-set'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (r'(variable|value)\b', Keyword.Declaration),
            # RSL comment styles: --, <: ... :>, {! ... !} and /* ... */.
            (r'--.*?\n', Comment),
            (r'<:.*?:>', Comment),
            (r'\{!.*?!\}', Comment),
            (r'/\*.*?\*/', Comment),
            (r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function),
            (r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)',
             bygroups(Text, Name.Function, Text, Keyword)),
            (r'\b[A-Z]\w*\b', Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            (r'".*"', String),
            (r'\'.\'', String.Char),
            (r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
             r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
             Operator),
            (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'.', Text),
        ],
    }
    def analyse_text(text):
        """
        Check for the most common text in the beginning of a RSL file.
        """
        if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
            return 1.0
class MscgenLexer(RegexLexer):
    """
    For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
    .. versionadded:: 1.6
    """
    name = 'Mscgen'
    aliases = ['mscgen', 'msc']
    filenames = ['*.msc']
    # An identifier or a double-quoted string; reused for names and attrs.
    _var = r'(\w+|"(?:\\"|[^"])*")'
    tokens = {
        'root': [
            (r'msc\b', Keyword.Type),
            # Options
            (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
             r'|arcgradient|ARCGRADIENT)\b', Name.Property),
            # Operators
            (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
            (r'(\.|-|\|){3}', Keyword),
            (r'(?:-|=|\.|:){2}'
             r'|<<=>>|<->|<=>|<<>>|<:>'
             r'|->|=>>|>>|=>|:>|-x|-X'
             r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
            # Names
            (r'\*', Name.Builtin),
            (_var, Name.Variable),
            # Other
            (r'\[', Punctuation, 'attrs'),
            (r'\{|\}|,|;', Punctuation),
            include('comments')
        ],
        'attrs': [
            (r'\]', Punctuation, '#pop'),
            (_var + r'(\s*)(=)(\s*)' + _var,
             bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace,
                      String)),
            (r',', Punctuation),
            include('comments')
        ],
        'comments': [
            (r'(?://|#).*?\n', Comment.Single),
            (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
            (r'[ \t\r\n]+', Text.Whitespace)
        ]
    }
class VGLLexer(RegexLexer):
    """
    For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
    source code.
    .. versionadded:: 1.6
    """
    name = 'VGL'
    aliases = ['vgl']
    filenames = ['*.rpf']
    flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
    tokens = {
        'root': [
            # { ... } is VGL's comment syntax.
            (r'\{[^}]*\}', Comment.Multiline),
            (r'declare', Keyword.Constant),
            (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
             r'|create|on|line|with|global|routine|value|endroutine|constant'
             r'|global|set|join|library|compile_option|file|exists|create|copy'
             r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
             Keyword),
            (r'(true|false|null|empty|error|locked)', Keyword.Constant),
            (r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
            (r'"[^"]*"', String),
            (r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
            (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
            (r'[a-z_$][\w$]*', Name),
            (r'[\r\n]+', Text),
            (r'\s+', Text)
        ]
    }
class AlloyLexer(RegexLexer):
    """
    For `Alloy <http://alloy.mit.edu>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'Alloy'
    aliases = ['alloy']
    filenames = ['*.als']
    mimetypes = ['text/x-alloy']
    flags = re.MULTILINE | re.DOTALL
    # Alloy identifiers may contain primes (').
    iden_rex = r'[a-zA-Z_][\w\']*'
    text_tuple = (r'[^\S\n]+', Text)
    tokens = {
        'sig': [
            (r'(extends)\b', Keyword, '#pop'),
            (iden_rex, Name),
            text_tuple,
            (r',', Punctuation),
            (r'\{', Operator, '#pop'),
        ],
        'module': [
            text_tuple,
            (iden_rex, Name, '#pop'),
        ],
        'fun': [
            text_tuple,
            (r'\{', Operator, '#pop'),
            (iden_rex, Name, '#pop'),
        ],
        'root': [
            (r'--.*?$', Comment.Single),
            (r'//.*?$', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            text_tuple,
            (r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text),
             'module'),
            (r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'),
            (r'(iden|univ|none)\b', Keyword.Constant),
            (r'(int|Int)\b', Keyword.Type),
            (r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
            (r'(all|some|no|sum|disj|when|else)\b', Keyword),
            (r'(run|check|for|but|exactly|expect|as)\b', Keyword),
            (r'(and|or|implies|iff|in)\b', Operator.Word),
            (r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'),
            (r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator),
            (r'[-+/*%=<>&!^|~{}\[\]().]', Operator),
            (iden_rex, Name),
            (r'[:,]', Punctuation),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r'\n', Text),
        ]
    }
class PanLexer(RegexLexer):
    """
    Lexer for `pan <http://github.com/quattor/pan/>`_ source files.
    Based on tcsh lexer.
    .. versionadded:: 2.0
    """
    name = 'Pan'
    aliases = ['pan']
    filenames = ['*.pan']
    tokens = {
        'root': [
            include('basic'),
            (r'\(', Keyword, 'paren'),
            (r'\{', Keyword, 'curly'),
            include('data'),
        ],
        'basic': [
            # Language keywords.
            (words((
                'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final', 'prefix',
                'unique', 'object', 'foreach', 'include', 'template', 'function', 'variable',
                'structure', 'extensible', 'declaration'), prefix=r'\b', suffix=r'\s*\b'),
             Keyword),
            # Built-in functions.
            (words((
                'file_contents', 'format', 'index', 'length', 'match', 'matches', 'replace',
                'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase', 'debug', 'error',
                'traceback', 'deprecated', 'base64_decode', 'base64_encode', 'digest', 'escape',
                'unescape', 'append', 'create', 'first', 'nlist', 'key', 'list', 'merge', 'next',
                'prepend', 'is_boolean', 'is_defined', 'is_double', 'is_list', 'is_long',
                'is_nlist', 'is_null', 'is_number', 'is_property', 'is_resource', 'is_string',
                'to_boolean', 'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
                'path_exists', 'if_exists', 'return', 'value'), prefix=r'\b', suffix=r'\s*\b'),
             Name.Builtin),
            (r'#.*', Comment),
            (r'\\[\w\W]', String.Escape),
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
            (r'[\[\]{}()=]+', Operator),
            (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
            (r';', Punctuation),
        ],
        'data': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r'\s+', Text),
            (r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
            (r'\d+(?= |\Z)', Number),
        ],
        'curly': [
            (r'\}', Keyword, '#pop'),
            (r':-', Keyword),
            (r'\w+', Name.Variable),
            (r'[^}:"\'`$]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
    }
# -*- coding: utf-8 -*-
"""
sphinx.util.jsdump
~~~~~~~~~~~~~~~~~~
This module implements a simple JavaScript serializer.
Uses the basestring encode function from simplejson by Bob Ippolito.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from sphinx.util.pycompat import u
# Token regexes used by loads(): quoted string, integer, bare word.
_str_re = re.compile(r'"(\\\\|\\"|[^"])*"')
_int_re = re.compile(r'\d+')
_name_re = re.compile(r'[a-zA-Z]\w*')
# Like _name_re but anchored: the whole string must be an identifier.
_nameonly_re = re.compile(r'[a-zA-Z]\w*$')
# escape \, ", control characters and everything outside ASCII
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
ESCAPE_DICT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# A single \uXXXX or \<char> escape inside an encoded string.
ESCAPED = re.compile(r'\\u.{4}|\\.')
def encode_string(s):
    """Serialize *s* as a double-quoted JS string literal, escaping
    backslash, quote, control characters and all non-ASCII characters."""
    def escape_char(match):
        ch = match.group(0)
        if ch in ESCAPE_DICT:
            return ESCAPE_DICT[ch]
        code = ord(ch)
        if code < 0x10000:
            return '\\u%04x' % (code,)
        # Characters beyond the BMP become a surrogate pair.
        code -= 0x10000
        high = 0xd800 | ((code >> 10) & 0x3ff)
        low = 0xdc00 | (code & 0x3ff)
        return '\\u%04x\\u%04x' % (high, low)
    return '"' + str(ESCAPE_ASCII.sub(escape_char, s)) + '"'
def decode_string(s):
    """Decode a JS-escaped string by evaluating each escape sequence
    through a string literal (``u`` is the py2/py3 literal prefix imported
    from sphinx.util.pycompat)."""
    return ESCAPED.sub(lambda m: eval(u + '"' + m.group() + '"'), s)
# JavaScript reserved words that must never be emitted as bare object keys.
reswords = set("""\
abstract else instanceof switch
boolean enum int synchronized
break export interface this
byte extends long throw
case false native throws
catch final new transient
char finally null true
class float package try
const for private typeof
continue function protected var
debugger goto public void
default if return volatile
delete implements short while
do import static with
double in super""".split())
def dumps(obj, key=False):
    """Serialize *obj* to the JavaScript subset used by the search index.

    With key=True, *obj* is rendered as an object key: identifier-like
    strings that are not reserved words stay bare, everything else is
    quoted.
    """
    if key:
        if not isinstance(obj, basestring):
            obj = str(obj)
        # Bare word only when it is a pure identifier and not reserved.
        if _nameonly_re.match(obj) and obj not in reswords:
            return obj
        return encode_string(obj)
    if obj is None:
        return 'null'
    # bool check must precede the int check: True/False are ints.
    if obj is True or obj is False:
        return obj and 'true' or 'false'
    if isinstance(obj, (int, long, float)):
        return str(obj)
    if isinstance(obj, dict):
        items = ('%s:%s' % (dumps(k, True), dumps(v))
                 for k, v in obj.iteritems())
        return '{%s}' % ','.join(items)
    if isinstance(obj, (tuple, list, set)):
        return '[%s]' % ','.join(dumps(item) for item in obj)
    if isinstance(obj, basestring):
        return encode_string(obj)
    raise TypeError(type(obj))
def dump(obj, f):
    """Serialize *obj* and write the result to the file-like object *f*."""
    serialized = dumps(obj)
    f.write(serialized)
def loads(x):
    """Loader that can read the JS subset the indexer produces."""
    nothing = object()  # sentinel meaning "no key/value seen yet"
    i = 0
    n = len(x)
    stack = []   # currently-open containers, innermost last
    obj = nothing
    key = False  # True while a dict key is expected next
    keys = []    # pending key for each open container (nothing for lists)
    while i < n:
        c = x[i]
        if c == '{':
            obj = {}
            stack.append(obj)
            key = True
            keys.append(nothing)
            i += 1
        elif c == '[':
            obj = []
            stack.append(obj)
            key = False
            keys.append(nothing)
            i += 1
        elif c in '}]':
            # Close the innermost container and attach it to its parent.
            if key:
                if keys[-1] is not nothing:
                    raise ValueError("unfinished dict")
                # empty dict
                key = False
            oldobj = stack.pop()
            keys.pop()
            if stack:
                obj = stack[-1]
                if isinstance(obj, dict):
                    if keys[-1] is nothing:
                        raise ValueError("invalid key object", oldobj)
                    obj[keys[-1]] = oldobj
                else:
                    obj.append(oldobj)
            else:
                # Top-level container closed: stop parsing.
                break
            i += 1
        elif c == ',':
            if key:
                raise ValueError("multiple keys")
            if isinstance(obj, dict):
                key = True
            i += 1
        elif c == ':':
            if not isinstance(obj, dict):
                raise ValueError("colon in list")
            i += 1
            if not key:
                raise ValueError("multiple values")
            key = False
        else:
            # Scalar token: string, integer, or bare word.
            m = _str_re.match(x, i)
            if m:
                y = decode_string(m.group()[1:-1])
            else:
                m = _int_re.match(x, i)
                if m:
                    y = int(m.group())
                else:
                    m = _name_re.match(x, i)
                    if m:
                        y = m.group()
                        if y == 'true':
                            y = True
                        elif y == 'false':
                            y = False
                        elif y == 'null':
                            y = None
                        elif not key:
                            # Bare words are only legal as object keys.
                            raise ValueError("bareword as value")
                    else:
                        raise ValueError("read error at pos %d" % i)
            i = m.end()
            if isinstance(obj, dict):
                if key:
                    keys[-1] = y
                else:
                    obj[keys[-1]] = y
                key = False
            else:
                obj.append(y)
    if obj is nothing:
        raise ValueError("nothing loaded from string")
    return obj
def load(f):
    """Read the whole file-like object *f* and parse it with loads()."""
    content = f.read()
    return loads(content)
test_kind: js_test
selector:
roots:
- jstests/query_golden/plan_stability*.js
executor:
config:
shell_options:
crashOnInvalidBSONError: ""
objcheck: ""
eval: |
// Keep in sync with query_golden_*.yml.
await import("jstests/libs/override_methods/detect_spawning_own_mongod.js");
await import("jstests/libs/override_methods/golden_overrides.js");
import {beginGoldenTest} from "jstests/libs/begin_golden_test.js";
beginGoldenTest("jstests/query_golden/expected_output");
// Enforce plain log format until json format is the default.
TestData.logFormat = "plain";
fixture:
class: MongoDFixture
mongod_options:
set_parameters:
enableTestCommands: 1
featureFlagCostBasedRanker: true
internalQueryCBRCEMode: "automaticCE"
automaticCEPlanRankingStrategy: "CBRCostBasedRankerChoice" | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/query_golden_cbr_automatic_cost_choice.yml |
name: Feature Request
description: Request a new feature we haven't thought of
labels: [">enhancement", "needs:triage"]
body:
- type: markdown
attributes:
value: |
Please first search existing issues for the feature you are requesting;
it may already exist, even as a closed issue.
- type: textarea
id: description
attributes:
label: Description
description: |
Please give us as much context as possible about the feature. For example,
you could include a story about a time when you wanted to use the feature,
and also tell us what you had to do instead. The last part is helpful
because it gives us an idea of how much harder your life is without the
feature. | unknown | github | https://github.com/elastic/elasticsearch | .github/ISSUE_TEMPLATE/feature-request.yml |
% This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
**Description**
The minimum value of a field. | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/query-languages/esql/_snippets/functions/description/min.md |
"""Revert VM from a vmsnapshot."""
from baseCmd import *
from baseResponse import *
class revertToVMSnapshotCmd (baseCmd):
    """Request parameters for the asynchronous revertToVMSnapshot API call."""
    typeInfo = {}
    def __init__(self):
        # BUG FIX: give each instance its own type map. The original mutated
        # the class-level `typeInfo` dict through `self`, so every command
        # instance (and the class itself) shared one dict.
        self.typeInfo = {}
        self.isAsync = "true"
        # The ID of the vm snapshot (required, uuid).
        self.vmsnapshotid = None
        self.typeInfo['vmsnapshotid'] = 'uuid'
        self.required = ["vmsnapshotid", ]
class revertToVMSnapshotResponse (baseResponse):
    """Typed response object for the ``revertToVMSnapshot`` API call.

    Every scalar field starts as ``None`` with its wire type recorded in
    ``typeInfo``; list-valued sub-objects start as empty lists.  The field
    order matches the original generated declaration order.
    """
    typeInfo = {}

    # (attribute name, wire type) for every scalar field of the response.
    _SCALAR_FIELDS = (
        # identity / ownership
        ('id', 'string'),
        ('account', 'string'),
        # cpu usage
        ('cpunumber', 'integer'),
        ('cpuspeed', 'integer'),
        ('cpuused', 'string'),
        ('created', 'date'),
        ('details', 'map'),
        # disk i/o counters
        ('diskioread', 'long'),
        ('diskiowrite', 'long'),
        ('diskkbsread', 'long'),
        ('diskkbswrite', 'long'),
        ('diskofferingid', 'string'),
        ('diskofferingname', 'string'),
        ('displayname', 'string'),
        ('displayvm', 'boolean'),
        ('domain', 'string'),
        ('domainid', 'string'),
        ('forvirtualnetwork', 'boolean'),
        ('group', 'string'),
        ('groupid', 'string'),
        ('guestosid', 'string'),
        ('haenable', 'boolean'),
        # placement
        ('hostid', 'string'),
        ('hostname', 'string'),
        ('hypervisor', 'string'),
        ('instancename', 'string'),
        ('isdynamicallyscalable', 'boolean'),
        # attached ISO
        ('isodisplaytext', 'string'),
        ('isoid', 'string'),
        ('isoname', 'string'),
        ('keypair', 'string'),
        ('memory', 'integer'),
        ('name', 'string'),
        # network counters
        ('networkkbsread', 'long'),
        ('networkkbswrite', 'long'),
        ('ostypeid', 'long'),
        ('password', 'string'),
        ('passwordenabled', 'boolean'),
        ('project', 'string'),
        ('projectid', 'string'),
        ('publicip', 'string'),
        ('publicipid', 'string'),
        ('rootdeviceid', 'long'),
        ('rootdevicetype', 'string'),
        # offering / lifecycle
        ('serviceofferingid', 'string'),
        ('serviceofferingname', 'string'),
        ('servicestate', 'string'),
        ('state', 'string'),
        ('templatedisplaytext', 'string'),
        ('templateid', 'string'),
        ('templatename', 'string'),
        ('userid', 'string'),
        ('username', 'string'),
        ('vgpu', 'string'),
        ('zoneid', 'string'),
        ('zonename', 'string'),
    )

    def __init__(self):
        for field, wire_type in self._SCALAR_FIELDS:
            setattr(self, field, None)
            self.typeInfo[field] = wire_type
        # List-valued sub-objects; the generated code records no typeInfo
        # entry for these.
        self.affinitygroup = []
        self.nic = []
        self.securitygroup = []
        self.tags = []
        # Async-job bookkeeping; the generator records an empty type string.
        for field in ('jobid', 'jobstatus'):
            setattr(self, field, None)
            self.typeInfo[field] = ''
class affinitygroup:
    """Affinity-group details embedded in a VM API response."""

    # Field names in the order declared by the original generated code.
    _FIELDS = ('id', 'account', 'description', 'domain', 'domainid',
               'name', 'project', 'projectid', 'type', 'virtualmachineIds')

    def __init__(self):
        # Every field is unset until populated from a deserialised response.
        for field in self._FIELDS:
            setattr(self, field, None)
class nic:
    """One network interface attached to the virtual machine."""

    # Field names in the order declared by the original generated code.
    _FIELDS = ('id', 'broadcasturi', 'deviceid', 'gateway', 'ip6address',
               'ip6cidr', 'ip6gateway', 'ipaddress', 'isdefault',
               'isolationuri', 'macaddress', 'netmask', 'networkid',
               'networkname', 'secondaryip', 'traffictype', 'type',
               'virtualmachineid')

    def __init__(self):
        # Every field is unset until populated from a deserialised response.
        for field in self._FIELDS:
            setattr(self, field, None)
class tags:
    """A resource tag: key/value pair plus ownership metadata."""

    # Field names in the order declared by the original generated code.
    _FIELDS = ('account', 'customer', 'domain', 'domainid', 'key',
               'project', 'projectid', 'resourceid', 'resourcetype', 'value')

    def __init__(self):
        # Every field is unset until populated from a deserialised response.
        for field in self._FIELDS:
            setattr(self, field, None)
class egressrule:
    """One egress security-group rule.

    The upstream code generator emitted the tag sub-fields inline after
    ``self.tags`` and assigned ``self.account`` twice; the duplicates are
    collapsed here while preserving the exact final attribute set, so the
    instance is backward compatible with the original generated class.
    """
    def __init__(self):
        # Rule fields.
        self.account = None
        self.cidr = None
        self.endport = None
        self.icmpcode = None
        self.icmptype = None
        self.protocol = None
        self.ruleid = None
        self.securitygroupname = None
        self.startport = None
        # Resource tags attached to the rule.
        self.tags = []
        # Flattened tag sub-fields (generator quirk, kept for compatibility).
        self.customer = None
        self.domain = None
        self.domainid = None
        self.key = None
        self.project = None
        self.projectid = None
        self.resourceid = None
        self.resourcetype = None
        self.value = None
class tags:
    """Resource tag metadata (repeated generated definition)."""

    # Field names in the order declared by the original generated code.
    _FIELDS = ('account', 'customer', 'domain', 'domainid', 'key',
               'project', 'projectid', 'resourceid', 'resourcetype', 'value')

    def __init__(self):
        # All fields default to None until filled from an API response.
        for field in self._FIELDS:
            setattr(self, field, None)
class tags:
    """Resource tag metadata (repeated generated definition)."""

    # Field names in the order declared by the original generated code.
    _FIELDS = ('account', 'customer', 'domain', 'domainid', 'key',
               'project', 'projectid', 'resourceid', 'resourcetype', 'value')

    def __init__(self):
        # All fields default to None until filled from an API response.
        for field in self._FIELDS:
            setattr(self, field, None)
class ingressrule:
    """One ingress security-group rule.

    As with ``egressrule``, the upstream code generator emitted the tag
    sub-fields inline after ``self.tags`` and assigned ``self.account``
    twice; the duplicates are collapsed here while preserving the exact
    final attribute set for backward compatibility.
    """
    def __init__(self):
        # Rule fields.
        self.account = None
        self.cidr = None
        self.endport = None
        self.icmpcode = None
        self.icmptype = None
        self.protocol = None
        self.ruleid = None
        self.securitygroupname = None
        self.startport = None
        # Resource tags attached to the rule.
        self.tags = []
        # Flattened tag sub-fields (generator quirk, kept for compatibility).
        self.customer = None
        self.domain = None
        self.domainid = None
        self.key = None
        self.project = None
        self.projectid = None
        self.resourceid = None
        self.resourcetype = None
        self.value = None
class tags:
    """Resource tag metadata (repeated generated definition)."""

    # Field names in the order declared by the original generated code.
    _FIELDS = ('account', 'customer', 'domain', 'domainid', 'key',
               'project', 'projectid', 'resourceid', 'resourcetype', 'value')

    def __init__(self):
        # All fields default to None until filled from an API response.
        for field in self._FIELDS:
            setattr(self, field, None)
class tags:
    """Resource tag metadata (repeated generated definition)."""

    # Field names in the order declared by the original generated code.
    _FIELDS = ('account', 'customer', 'domain', 'domainid', 'key',
               'project', 'projectid', 'resourceid', 'resourcetype', 'value')

    def __init__(self):
        # All fields default to None until filled from an API response.
        for field in self._FIELDS:
            setattr(self, field, None)
class securitygroup:
    """Security group associated with the virtual machine.

    The upstream code generator inlined the rule and tag sub-fields
    directly into this class — several of them two or three times over
    (``self.account`` three times, ``self.tags`` reset twice, the tag
    fields repeated after each rule list).  The duplicates are collapsed
    here; the final attribute set and default values are identical to the
    original generated class, so instances remain backward compatible.
    """
    def __init__(self):
        # Group identity / ownership.
        self.id = None
        self.account = None
        self.description = None
        self.domain = None
        self.domainid = None
        self.name = None
        self.project = None
        self.projectid = None
        self.virtualmachinecount = None
        self.virtualmachineids = None
        # Egress and ingress rule lists.
        self.egressrule = []
        self.ingressrule = []
        # Flattened rule sub-fields (generator quirk, kept for compatibility).
        self.cidr = None
        self.endport = None
        self.icmpcode = None
        self.icmptype = None
        self.protocol = None
        self.ruleid = None
        self.securitygroupname = None
        self.startport = None
        # Resource tags plus flattened tag sub-fields (same quirk).
        self.tags = []
        self.customer = None
        self.key = None
        self.resourceid = None
        self.resourcetype = None
        self.value = None
        # Latest async job acting on this object.
        self.jobid = None
        self.jobstatus = None
class tags:
    """Resource tag metadata (repeated generated definition)."""

    # Field names in the order declared by the original generated code.
    _FIELDS = ('account', 'customer', 'domain', 'domainid', 'key',
               'project', 'projectid', 'resourceid', 'resourcetype', 'value')

    def __init__(self):
        # All fields default to None until filled from an API response.
        for field in self._FIELDS:
            setattr(self, field, None)
// Expected Turbopack (dev) transform output for next/dynamic calls that use
// template-literal import specifiers.
import { __turbopack_module_id__ as id } from "../components/hello" with {
    "turbopack-transition": "next-client-dynamic",
    "turbopack-chunking-type": "none"
};
import dynamic from 'next/dynamic';
// Constant template literal: statically resolvable, so the module id is
// collected into loadableGenerated.
const DynamicComponent = dynamic(()=>import(`../components/hello`, {
    with: {
        "turbopack-transition": "next-dynamic"
    }
}), {
    loadableGenerated: {
        modules: [
            id
        ]
    }
});
// Template literals with a runtime prefix cannot be statically resolved, so
// no loadableGenerated block is attached to these two.
const componentRoot = '@/some-components';
const Component1 = dynamic(()=>import(`${componentRoot}/component1`));
const Component2 = dynamic(()=>import(`${componentRoot}/component2`));
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package statemgr
import (
"fmt"
uuid "github.com/hashicorp/go-uuid"
)
// NewLineage generates a new lineage identifier string. A lineage identifier
// is an opaque string that is intended to be unique in space and time, chosen
// when state is recorded at a location for the first time and then preserved
// afterwards to allow Terraform to recognize when one state snapshot is a
// predecessor or successor of another.
func NewLineage() string {
lineage, err := uuid.GenerateUUID()
if err != nil {
panic(fmt.Errorf("Failed to generate lineage: %v", err))
}
return lineage
} | go | github | https://github.com/hashicorp/terraform | internal/states/statemgr/lineage.go |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import tempfile
import fixtures
from testtools import matchers
from tests.software_config import common
class HeatConfigKubeletORCTest(common.RunScriptTest):
    """Integration test for the 50-heat-config-kubelet ORC script.

    Feeds the script a JSON deployment document (via ``HEAT_SHELL_CONFIG``)
    and verifies that it writes one kubelet manifest file per config entry
    into the directory named by ``HEAT_KUBELET_MANIFESTS``.
    """

    # Hook group names for which a fake hook executable is installed in setUp.
    fake_hooks = ['kubelet']

    # Three sample software-config entries in the "kubelet" group; the
    # script under test is expected to emit an "<id>.json" manifest for each.
    data = [{
        "id": "abcdef001",
        "group": "kubelet",
        "name": "mysql",
        "config": {
            "version": "v1beta2",
            "volumes": [{
                "name": "mariadb-data"
            }],
            "containers": [{
                "image": "mariadb_image",
                "volumeMounts": [{
                    "mountPath": "/var/lib/mysql",
                    "name": "mariadb-data"
                }],
                "name": "mariadb",
                "env": [{
                    "name": "DB_ROOT_PASSWORD",
                    "value": "mariadb_password"
                }],
                "ports": [{
                    "containerPort": 3306
                }]
            }]}
    }, {
        "id": "abcdef002",
        "group": "kubelet",
        "name": "rabbitmq",
        "config": {
            "version": "v1beta2",
            "containers": [{
                "image": "rabbitmq_image",
                "name": "rabbitmq",
                "ports": [{
                    "containerPort": 5672
                }]
            }]
        }
    }, {
        "id": "abcdef003",
        "group": "kubelet",
        "name": "heat_api_engine",
        "config": {
            "version": "v1beta2",
            "containers": [{
                "image": "heat_engine_image",
                "name": "heat-engine",
                "env": [{
                    "name": "DB_ROOT_PASSWORD",
                    "value": "mariadb_password"
                }, {
                    "name": "HEAT_DB_PASSWORD",
                    "value": "heatdb_password"
                }, {
                    "name": "HEAT_KEYSTONE_PASSWORD",
                    "value": "password"
                }]
            }, {
                "image": "heat_api_image",
                "name": "heat-api",
                "ports": [{
                    "containerPort": 8004
                }]
            }]
        }
    }]

    def setUp(self):
        """Locate the script under test and install executable fake hooks."""
        super(HeatConfigKubeletORCTest, self).setUp()
        self.fake_hook_path = self.relative_path(__file__, 'hook-fake.py')
        self.heat_config_kubelet_path = self.relative_path(
            __file__,
            '../..',
            'hot/software-config/elements',
            'heat-config-kubelet/os-refresh-config/configure.d/'
            '50-heat-config-kubelet')
        # Temp directory that doubles as the manifest output location.
        self.manifests_dir = self.useFixture(fixtures.TempDir())
        with open(self.fake_hook_path) as f:
            fake_hook = f.read()
        # Install an executable copy of the fake hook for every hook group.
        for hook in self.fake_hooks:
            hook_name = self.manifests_dir.join(hook)
            with open(hook_name, 'w') as f:
                os.utime(hook_name, None)
                f.write(fake_hook)
                f.flush()
            os.chmod(hook_name, 0o755)

    def write_config_file(self, data):
        """Serialize ``data`` as JSON into a named temp file and return it.

        The caller must keep the returned file object alive: closing it
        deletes the underlying file.
        """
        # Open in text mode explicitly: json.dumps() returns str, while
        # NamedTemporaryFile defaults to binary mode ('w+b'), which would
        # raise TypeError on Python 3.
        config_file = tempfile.NamedTemporaryFile(mode='w')
        config_file.write(json.dumps(data))
        config_file.flush()
        return config_file

    def test_run_heat_config(self):
        """Run the ORC script and check one manifest per config entry."""
        with self.write_config_file(self.data) as config_file:
            env = os.environ.copy()
            env.update({
                'HEAT_KUBELET_MANIFESTS': self.manifests_dir.join(),
                'HEAT_SHELL_CONFIG': config_file.name
            })
            returncode, stdout, stderr = self.run_cmd(
                [self.heat_config_kubelet_path], env)
            self.assertEqual(0, returncode, stderr)
            for config in self.data:
                manifest_name = '%s.json' % config['id']
                manifest_path = self.manifests_dir.join(manifest_name)
                self.assertThat(manifest_path, matchers.FileExists())
                # manifest file should match manifest config
                self.assertEqual(config['config'],
                                 self.json_from_file(manifest_path))
# -*- coding: utf-8 -*-
#
# NAV documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 8 10:54:59 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# '../python' exposes the NAV package for autodoc; 'exts' holds the
# project-local Sphinx extensions (e.g. 'xref', enabled below).
sys.path.insert(0, os.path.join(os.path.abspath('..'), 'python'))
sys.path.insert(0, os.path.abspath("exts"))

from nav import buildconf
from nav import bootstrap
# Configure the Django environment before autodoc imports NAV modules that
# expect it to be bootstrapped.
bootstrap.bootstrap_django('doc')
# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# 'xref' is the project-local extension from the exts/ directory added to
# sys.path above; it consumes the xref_links mapping defined at the bottom.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'xref']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'contents'

# General information about the project.
project = u'NAV'
copyright = u'2012-2019, Uninett AS'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'dev'
#version = '.'.join(buildconf.VERSION.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = buildconf.VERSION

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
# NOTE(review): exclude_trees is long-deprecated upstream in favour of
# exclude_patterns — confirm the Sphinx version in use still honours it.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# 'bootstrap' is supplied locally via html_theme_path below.
html_theme = 'bootstrap'

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [os.path.join(os.path.abspath(os.path.dirname(__file__)), "templates")]
html_logo = "templates/bootstrap/static/nav-logo.svg"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    # Navigation bar title. (Default: ``project`` value)
    'navbar_title': " ",
    # Tab name for entire site. (Default: "Site")
    'navbar_site_name': "Contents",
    # A list of tuples containing pages or urls to link to.
    # Valid tuples should be in the following forms:
    #    (name, page)                 # a link to a page
    #    (name, "/aa/bb", 1)          # a link to an arbitrary relative url
    #    (name, "http://example.com", True) # arbitrary absolute url
    # Note the "1" or "True" value above as the third argument to indicate
    # an arbitrary url.
    #'navbar_links': [
    #    ("Examples", "examples"),
    #    ("Link", "http://example.com", True),
    #],
    # Render the next and previous page links in navbar. (Default: true)
    'navbar_sidebarrel': True,
    # Render the current pages TOC in the navbar. (Default: true)
    'navbar_pagenav': True,
    # Global TOC depth for "site" navbar tab. (Default: 1)
    # Switching to -1 shows all levels.
    'globaltoc_depth': 2,
    # Include hidden TOCs in Site navbar?
    #
    # Note: If this is "false", you cannot have mixed ``:hidden:`` and
    # non-hidden ``toctree`` directives in the same page, or else the build
    # will break.
    #
    # Values: "true" (default) or "false"
    'globaltoc_includehidden': "true",
    # HTML navbar class (Default: "navbar") to attach to <div> element.
    # For black navbar, do "navbar navbar-inverse"
    'navbar_class': "navbar",
    # Fix navigation bar to top of page?
    # Values: "true" (default) or "false"
    'navbar_fixed_top': "true",
    # Location of link to source.
    # Options are "nav" (default), "footer" or anything else to exclude.
    'source_link_position': "navbar-brand",
    # Bootswatch (http://bootswatch.com/) theme.
    #
    # Options are nothing with "" (default) or the name of a valid theme
    # such as "amelia" or "cosmo".
    'bootswatch_theme': "flatly",
    # Choose Bootstrap version.
    # Values: "3" (default) or "2" (in quotes)
    'bootstrap_version': "3",
}

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'NAVdoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'NAV.tex', u'NAV Documentation',
     u'Uninett AS', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}

# External links definitions
# Consumed by the local 'xref' extension enabled in `extensions` above.
xref_links = {
    "Graphite": ("Graphite", "https://graphiteapp.org"),
    "PostgreSQL": ("PostgreSQL", "https://www.postgresql.org"),
}
def setup(app):
app.add_stylesheet("custom.css") | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""
Parse, stream, create, sign and verify Bitcoin transactions as Tx structures.
The MIT License (MIT)
Copyright (c) 2015 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from ... import ecdsa
from ...encoding import sec_to_public_pair, EncodingError
from ...intbytes import int_from_bytes
from . import der
from . import ScriptError
from .microcode import VCH_TRUE, VCH_FALSE
from .tools import bin_script, delete_subscript
def parse_signature_blob(sig_blob):
    """Split a signature blob into its DER-decoded (r, s) pair and the
    trailing hash-type byte.

    Returns a ``(sig_pair, signature_type)`` tuple, where ``sig_pair`` is
    the decoded DER signature and ``signature_type`` is the integer value
    of the blob's final byte.
    """
    # The last byte of the blob is the hash type; everything before it is
    # the DER-encoded signature proper.
    hash_type = ord(sig_blob[-1:])
    r_s_pair = der.sigdecode_der(sig_blob[:-1], use_broken_open_ssl_mechanism=True)
    return r_s_pair, hash_type
def op_checksig(stack, signature_for_hash_type_f, expected_hash_type, tmp_script):
    """Implement OP_CHECKSIG.
    Pops the public key (top of stack) and the signature blob, then pushes
    VCH_TRUE if the signature verifies against the hash of tmp_script,
    VCH_FALSE otherwise.
    Raises ScriptError if the signature's hash type byte does not match
    expected_hash_type (when expected_hash_type is not None).
    """
    public_pair = sec_to_public_pair(stack.pop())
    sig_blob = stack.pop()
    try:
        sig_pair, signature_type = parse_signature_blob(sig_blob)
    except der.UnexpectedDER:
        # An undecodable signature simply fails the check rather than
        # aborting the script.
        stack.append(VCH_FALSE)
        return
    if expected_hash_type not in (None, signature_type):
        raise ScriptError("wrong hash type")
    # Drop the signature, since there's no way for a signature to sign itself
    # see: Bitcoin Core/script/interpreter.cpp::EvalScript()
    tmp_script = delete_subscript(tmp_script, bin_script([sig_blob]))
    signature_hash = signature_for_hash_type_f(signature_type, script=tmp_script)
    if ecdsa.verify(ecdsa.generator_secp256k1, public_pair, signature_hash, sig_pair):
        stack.append(VCH_TRUE)
    else:
        stack.append(VCH_FALSE)
def sig_blob_matches(sig_blobs, public_pairs, tmp_script, signature_for_hash_type_f, strict_checks=False):
    """Match each signature blob to the public key that could have produced it.
    sig_blobs: signature blobs
    public_pairs: a list of public pairs that might be valid
    tmp_script: the script as of the last code separator
    signature_for_hash_type_f: signature_for_hash_type_f
    strict_checks: if True, we may exit early if one of the sig_blobs is
        incorrect or misplaced. Used for checking a supposedly validated
        transaction.
    Returns a list of indices into public_pairs, one per matched sig_blob;
    a -1 entry indicates no match. If strict_checks is True, the function
    may return early, so a result that is shorter than sig_blobs or that
    contains a -1 means the signatures are not valid.
    """
    # Drop the signatures, since there's no way for a signature to sign itself
    for sig_blob in sig_blobs:
        tmp_script = delete_subscript(tmp_script, bin_script([sig_blob]))
    sig_cache = {}
    sig_blob_indices = []
    for sig_blob in sig_blobs:
        try:
            sig_pair, signature_type = parse_signature_blob(sig_blob)
        except der.UnexpectedDER:
            if strict_checks:
                return sig_blob_indices
            # BUG FIX: the original code fell through here, reusing
            # sig_pair/signature_type from the previous iteration (a
            # NameError on the first one). Record the undecodable blob
            # as a non-match and move on to the next signature.
            sig_blob_indices.append(-1)
            continue
        # The signature hash depends only on the hash type (and the script),
        # so cache it per hash type to avoid recomputing.
        if signature_type not in sig_cache:
            sig_cache[signature_type] = signature_for_hash_type_f(signature_type, script=tmp_script)
        try:
            ppp = ecdsa.possible_public_pairs_for_signature(
                ecdsa.generator_secp256k1, sig_cache[signature_type], sig_pair)
        except ecdsa.NoSuchPointError:
            ppp = []
        if len(ppp) > 0:
            for idx, pp in enumerate(public_pairs):
                # each public key may be consumed by at most one signature
                if idx in sig_blob_indices:
                    continue
                if pp in ppp:
                    sig_blob_indices.append(idx)
                    break
            else:
                # no remaining public key could have produced this signature
                if strict_checks:
                    return sig_blob_indices
                sig_blob_indices.append(-1)
            if len(sig_blob_indices) > 1 and strict_checks:
                # look for signatures in the wrong order
                if sig_blob_indices[-1] <= sig_blob_indices[-2]:
                    return sig_blob_indices
        else:
            if strict_checks:
                return sig_blob_indices
            sig_blob_indices.append(-1)
    return sig_blob_indices
def op_checkmultisig(stack, signature_for_hash_type_f, expected_hash_type, tmp_script):
    """Implement OP_CHECKMULTISIG.
    Pops the key count, that many SEC-encoded public keys, the signature
    count, that many signature blobs, and the extra 0x00 byte required by
    the original Bitcoin off-by-one bug, then pushes VCH_TRUE if every
    signature matches a distinct public key in order, VCH_FALSE otherwise.
    NOTE(review): expected_hash_type is accepted for signature parity with
    op_checksig but is never consulted in this function.
    """
    key_count = int_from_bytes(stack.pop())
    public_pairs = []
    for i in range(key_count):
        the_sec = stack.pop()
        try:
            public_pairs.append(sec_to_public_pair(the_sec))
        except EncodingError:
            # we must ignore badly encoded public pairs
            # the transaction 70c4e749f2b8b907875d1483ae43e8a6790b0c8397bbb33682e3602617f9a77a
            # is in a block and requires this hack
            pass
    signature_count = int_from_bytes(stack.pop())
    sig_blobs = []
    for i in range(signature_count):
        sig_blobs.append(stack.pop())
    # check that we have the required hack 00 byte
    if stack != [b'\x00']:
        stack.append(VCH_FALSE)
        return
    # remove the 0 byte hack for pay to script hash
    stack.pop()
    sig_blob_indices = sig_blob_matches(
        sig_blobs, public_pairs, tmp_script, signature_for_hash_type_f, strict_checks=True)
    sig_ok = VCH_FALSE
    if -1 not in sig_blob_indices and len(sig_blob_indices) == len(sig_blobs):
        # bitcoin requires the signatures to be in the same order as the public keys
        # so let's make sure the indices are strictly increasing
        for i in range(len(sig_blob_indices) - 1):
            if sig_blob_indices[i] >= sig_blob_indices[i+1]:
                break
        else:
            sig_ok = VCH_TRUE
    stack.append(sig_ok)
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
    """Raised by `IOLoop.run_sync` when its ``timeout`` expires."""
class IOLoop(Configurable):
    """A level-triggered I/O loop.
    We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
    are available, or else we fall back on select(). If you are
    implementing a system that needs to handle thousands of
    simultaneous connections, you should use a system that supports
    either ``epoll`` or ``kqueue``.
    Example usage for a simple TCP server:
    .. testcode::
        import errno
        import functools
        import tornado.ioloop
        import socket
        def connection_ready(sock, fd, events):
            while True:
                try:
                    connection, address = sock.accept()
                except socket.error as e:
                    if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
                        raise
                    return
                connection.setblocking(0)
                handle_connection(connection, address)
        if __name__ == '__main__':
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setblocking(0)
            sock.bind(("", port))
            sock.listen(128)
            io_loop = tornado.ioloop.IOLoop.current()
            callback = functools.partial(connection_ready, sock)
            io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
            io_loop.start()
    .. testoutput::
       :hide:
    By default, a newly-constructed `IOLoop` becomes the thread's current
    `IOLoop`, unless there already is a current `IOLoop`. This behavior
    can be controlled with the ``make_current`` argument to the `IOLoop`
    constructor: if ``make_current=True``, the new `IOLoop` will always
    try to become current and it raises an error if there is already a
    current instance. If ``make_current=False``, the new `IOLoop` will
    not try to become current.
    .. versionchanged:: 4.2
       Added the ``make_current`` keyword argument to the `IOLoop`
       constructor.
    """
    # Constants from the epoll module
    _EPOLLIN = 0x001
    _EPOLLPRI = 0x002
    _EPOLLOUT = 0x004
    _EPOLLERR = 0x008
    _EPOLLHUP = 0x010
    _EPOLLRDHUP = 0x2000
    _EPOLLONESHOT = (1 << 30)
    _EPOLLET = (1 << 31)
    # Our events map exactly to the epoll events
    NONE = 0
    READ = _EPOLLIN
    WRITE = _EPOLLOUT
    ERROR = _EPOLLERR | _EPOLLHUP
    # Global lock for creating global IOLoop instance
    _instance_lock = threading.Lock()
    # Thread-local storage; current()/make_current() read and write
    # _current.instance to track the per-thread "current" IOLoop.
    _current = threading.local()
    @staticmethod
    def instance():
        """Returns a global `IOLoop` instance.
        Most applications have a single, global `IOLoop` running on the
        main thread. Use this method to get this instance from
        another thread. In most other cases, it is better to use `current()`
        to get the current thread's `IOLoop`.
        """
        # Double-checked locking: the unlocked hasattr check is the fast
        # path; the lock makes first-time creation race-free.
        if not hasattr(IOLoop, "_instance"):
            with IOLoop._instance_lock:
                if not hasattr(IOLoop, "_instance"):
                    # New instance after double check
                    IOLoop._instance = IOLoop()
        return IOLoop._instance
    @staticmethod
    def initialized():
        """Returns true if the singleton instance has been created."""
        return hasattr(IOLoop, "_instance")
    def install(self):
        """Installs this `IOLoop` object as the singleton instance.
        This is normally not necessary as `instance()` will create
        an `IOLoop` on demand, but you may want to call `install` to use
        a custom subclass of `IOLoop`.
        """
        assert not IOLoop.initialized()
        IOLoop._instance = self
    @staticmethod
    def clear_instance():
        """Clear the global `IOLoop` instance.
        .. versionadded:: 4.0
        """
        if hasattr(IOLoop, "_instance"):
            del IOLoop._instance
    @staticmethod
    def current(instance=True):
        """Returns the current thread's `IOLoop`.
        If an `IOLoop` is currently running or has been marked as
        current by `make_current`, returns that instance. If there is
        no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
        main thread's `IOLoop`, creating one if necessary) if ``instance``
        is true.
        In general you should use `IOLoop.current` as the default when
        constructing an asynchronous object, and use `IOLoop.instance`
        when you mean to communicate to the main thread from a different
        one.
        .. versionchanged:: 4.1
           Added ``instance`` argument to control the fallback to
           `IOLoop.instance()`.
        """
        current = getattr(IOLoop._current, "instance", None)
        if current is None and instance:
            return IOLoop.instance()
        return current
    def make_current(self):
        """Makes this the `IOLoop` for the current thread.
        An `IOLoop` automatically becomes current for its thread
        when it is started, but it is sometimes useful to call
        `make_current` explicitly before starting the `IOLoop`,
        so that code run at startup time can find the right
        instance.
        .. versionchanged:: 4.1
           An `IOLoop` created while there is no current `IOLoop`
           will automatically become current.
        """
        IOLoop._current.instance = self
    @staticmethod
    def clear_current():
        IOLoop._current.instance = None
    @classmethod
    def configurable_base(cls):
        return IOLoop
    @classmethod
    def configurable_default(cls):
        # Pick the best poller implementation available on this platform.
        if hasattr(select, "epoll"):
            from tornado.platform.epoll import EPollIOLoop
            return EPollIOLoop
        if hasattr(select, "kqueue"):
            # Python 2.6+ on BSD or Mac
            from tornado.platform.kqueue import KQueueIOLoop
            return KQueueIOLoop
        from tornado.platform.select import SelectIOLoop
        return SelectIOLoop
    def initialize(self, make_current=None):
        # make_current=None: become current only if no IOLoop is current yet.
        # make_current=True: insist on becoming current (error if taken).
        # make_current=False: never touch the thread-local current slot.
        if make_current is None:
            if IOLoop.current(instance=False) is None:
                self.make_current()
        elif make_current:
            if IOLoop.current(instance=False) is not None:
                raise RuntimeError("current IOLoop already exists")
            self.make_current()
    def close(self, all_fds=False):
        """Closes the `IOLoop`, freeing any resources used.
        If ``all_fds`` is true, all file descriptors registered on the
        IOLoop will be closed (not just the ones created by the
        `IOLoop` itself).
        Many applications will only use a single `IOLoop` that runs for the
        entire lifetime of the process. In that case closing the `IOLoop`
        is not necessary since everything will be cleaned up when the
        process exits. `IOLoop.close` is provided mainly for scenarios
        such as unit tests, which create and destroy a large number of
        ``IOLoops``.
        An `IOLoop` must be completely stopped before it can be closed. This
        means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
        be allowed to return before attempting to call `IOLoop.close()`.
        Therefore the call to `close` will usually appear just after
        the call to `start` rather than near the call to `stop`.
        .. versionchanged:: 3.1
           If the `IOLoop` implementation supports non-integer objects
           for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
        """
        raise NotImplementedError()
    def add_handler(self, fd, handler, events):
        """Registers the given handler to receive the given events for ``fd``.
        The ``fd`` argument may either be an integer file descriptor or
        a file-like object with a ``fileno()`` method (and optionally a
        ``close()`` method, which may be called when the `IOLoop` is shut
        down).
        The ``events`` argument is a bitwise or of the constants
        ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
        When an event occurs, ``handler(fd, events)`` will be run.
        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()
    def update_handler(self, fd, events):
        """Changes the events we listen for ``fd``.
        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()
    def remove_handler(self, fd):
        """Stop listening for events on ``fd``.
        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()
    def set_blocking_signal_threshold(self, seconds, action):
        """Sends a signal if the `IOLoop` is blocked for more than
        ``s`` seconds.
        Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
        platform.
        The action parameter is a Python signal handler. Read the
        documentation for the `signal` module for more information.
        If ``action`` is None, the process will be killed if it is
        blocked for too long.
        """
        raise NotImplementedError()
    def set_blocking_log_threshold(self, seconds):
        """Logs a stack trace if the `IOLoop` is blocked for more than
        ``s`` seconds.
        Equivalent to ``set_blocking_signal_threshold(seconds,
        self.log_stack)``
        """
        self.set_blocking_signal_threshold(seconds, self.log_stack)
    def log_stack(self, signal, frame):
        """Signal handler to log the stack trace of the current thread.
        For use with `set_blocking_signal_threshold`.
        """
        gen_log.warning('IOLoop blocked for %f seconds in\n%s',
                        self._blocking_signal_threshold,
                        ''.join(traceback.format_stack(frame)))
    def start(self):
        """Starts the I/O loop.
        The loop will run until one of the callbacks calls `stop()`, which
        will make the loop stop after the current event iteration completes.
        """
        raise NotImplementedError()
    def _setup_logging(self):
        """The IOLoop catches and logs exceptions, so it's
        important that log output be visible. However, python's
        default behavior for non-root loggers (prior to python
        3.2) is to print an unhelpful "no handlers could be
        found" message rather than the actual log entry, so we
        must explicitly configure logging if we've made it this
        far without anything.
        This method should be called from start() in subclasses.
        """
        if not any([logging.getLogger().handlers,
                    logging.getLogger('tornado').handlers,
                    logging.getLogger('tornado.application').handlers]):
            logging.basicConfig()
    def stop(self):
        """Stop the I/O loop.
        If the event loop is not currently running, the next call to `start()`
        will return immediately.
        To use asynchronous methods from otherwise-synchronous code (such as
        unit tests), you can start and stop the event loop like this::
          ioloop = IOLoop()
          async_method(ioloop=ioloop, callback=ioloop.stop)
          ioloop.start()
        ``ioloop.start()`` will return after ``async_method`` has run
        its callback, whether that callback was invoked before or
        after ``ioloop.start``.
        Note that even after `stop` has been called, the `IOLoop` is not
        completely stopped until `IOLoop.start` has also returned.
        Some work that was scheduled before the call to `stop` may still
        be run before the `IOLoop` shuts down.
        """
        raise NotImplementedError()
    def run_sync(self, func, timeout=None):
        """Starts the `IOLoop`, runs the given function, and stops the loop.
        If the function returns a `.Future`, the `IOLoop` will run
        until the future is resolved. If it raises an exception, the
        `IOLoop` will stop and the exception will be re-raised to the
        caller.
        The keyword-only argument ``timeout`` may be used to set
        a maximum duration for the function. If the timeout expires,
        a `TimeoutError` is raised.
        This method is useful in conjunction with `tornado.gen.coroutine`
        to allow asynchronous calls in a ``main()`` function::
            @gen.coroutine
            def main():
                # do stuff...
            if __name__ == '__main__':
                IOLoop.current().run_sync(main)
        """
        future_cell = [None]
        def run():
            # Invoke func on the IOLoop; capture its result (or exception)
            # as a Future in future_cell, and stop the loop when it resolves.
            try:
                result = func()
            except Exception:
                future_cell[0] = TracebackFuture()
                future_cell[0].set_exc_info(sys.exc_info())
            else:
                if is_future(result):
                    future_cell[0] = result
                else:
                    future_cell[0] = TracebackFuture()
                    future_cell[0].set_result(result)
            self.add_future(future_cell[0], lambda future: self.stop())
        self.add_callback(run)
        if timeout is not None:
            timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
        self.start()
        if timeout is not None:
            self.remove_timeout(timeout_handle)
        if not future_cell[0].done():
            raise TimeoutError('Operation timed out after %s seconds' % timeout)
        return future_cell[0].result()
    def time(self):
        """Returns the current time according to the `IOLoop`'s clock.
        The return value is a floating-point number relative to an
        unspecified time in the past.
        By default, the `IOLoop`'s time function is `time.time`. However,
        it may be configured to use e.g. `time.monotonic` instead.
        Calls to `add_timeout` that pass a number instead of a
        `datetime.timedelta` should use this function to compute the
        appropriate time, so they can work no matter what time function
        is chosen.
        """
        return time.time()
    def add_timeout(self, deadline, callback, *args, **kwargs):
        """Runs the ``callback`` at the time ``deadline`` from the I/O loop.
        Returns an opaque handle that may be passed to
        `remove_timeout` to cancel.
        ``deadline`` may be a number denoting a time (on the same
        scale as `IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time. Since Tornado 4.0, `call_later` is a more
        convenient alternative for the relative case since it does not
        require a timedelta object.
        Note that it is not safe to call `add_timeout` from other threads.
        Instead, you must use `add_callback` to transfer control to the
        `IOLoop`'s thread, and then call `add_timeout` from there.
        Subclasses of IOLoop must implement either `add_timeout` or
        `call_at`; the default implementations of each will call
        the other. `call_at` is usually easier to implement, but
        subclasses that wish to maintain compatibility with Tornado
        versions prior to 4.0 must use `add_timeout` instead.
        .. versionchanged:: 4.0
           Now passes through ``*args`` and ``**kwargs`` to the callback.
        """
        if isinstance(deadline, numbers.Real):
            return self.call_at(deadline, callback, *args, **kwargs)
        elif isinstance(deadline, datetime.timedelta):
            return self.call_at(self.time() + timedelta_to_seconds(deadline),
                                callback, *args, **kwargs)
        else:
            raise TypeError("Unsupported deadline %r" % deadline)
    def call_later(self, delay, callback, *args, **kwargs):
        """Runs the ``callback`` after ``delay`` seconds have passed.
        Returns an opaque handle that may be passed to `remove_timeout`
        to cancel. Note that unlike the `asyncio` method of the same
        name, the returned object does not have a ``cancel()`` method.
        See `add_timeout` for comments on thread-safety and subclassing.
        .. versionadded:: 4.0
        """
        return self.call_at(self.time() + delay, callback, *args, **kwargs)
    def call_at(self, when, callback, *args, **kwargs):
        """Runs the ``callback`` at the absolute time designated by ``when``.
        ``when`` must be a number using the same reference point as
        `IOLoop.time`.
        Returns an opaque handle that may be passed to `remove_timeout`
        to cancel. Note that unlike the `asyncio` method of the same
        name, the returned object does not have a ``cancel()`` method.
        See `add_timeout` for comments on thread-safety and subclassing.
        .. versionadded:: 4.0
        """
        return self.add_timeout(when, callback, *args, **kwargs)
    def remove_timeout(self, timeout):
        """Cancels a pending timeout.
        The argument is a handle as returned by `add_timeout`. It is
        safe to call `remove_timeout` even if the callback has already
        been run.
        """
        raise NotImplementedError()
    def add_callback(self, callback, *args, **kwargs):
        """Calls the given callback on the next I/O loop iteration.
        It is safe to call this method from any thread at any time,
        except from a signal handler. Note that this is the **only**
        method in `IOLoop` that makes this thread-safety guarantee; all
        other interaction with the `IOLoop` must be done from that
        `IOLoop`'s thread. `add_callback()` may be used to transfer
        control from other threads to the `IOLoop`'s thread.
        To add a callback from a signal handler, see
        `add_callback_from_signal`.
        """
        raise NotImplementedError()
    def add_callback_from_signal(self, callback, *args, **kwargs):
        """Calls the given callback on the next I/O loop iteration.
        Safe for use from a Python signal handler; should not be used
        otherwise.
        Callbacks added with this method will be run without any
        `.stack_context`, to avoid picking up the context of the function
        that was interrupted by the signal.
        """
        raise NotImplementedError()
    def spawn_callback(self, callback, *args, **kwargs):
        """Calls the given callback on the next IOLoop iteration.
        Unlike all other callback-related methods on IOLoop,
        ``spawn_callback`` does not associate the callback with its caller's
        ``stack_context``, so it is suitable for fire-and-forget callbacks
        that should not interfere with the caller.
        .. versionadded:: 4.0
        """
        with stack_context.NullContext():
            self.add_callback(callback, *args, **kwargs)
    def add_future(self, future, callback):
        """Schedules a callback on the ``IOLoop`` when the given
        `.Future` is finished.
        The callback is invoked with one argument, the
        `.Future`.
        """
        assert is_future(future)
        callback = stack_context.wrap(callback)
        future.add_done_callback(
            lambda future: self.add_callback(callback, future))
    def _run_callback(self, callback):
        """Runs a callback with error handling.
        For use in subclasses.
        """
        try:
            ret = callback()
            if ret is not None and is_future(ret):
                # Functions that return Futures typically swallow all
                # exceptions and store them in the Future. If a Future
                # makes it out to the IOLoop, ensure its exception (if any)
                # gets logged too.
                self.add_future(ret, lambda f: f.result())
        except Exception:
            self.handle_callback_exception(callback)
    def handle_callback_exception(self, callback):
        """This method is called whenever a callback run by the `IOLoop`
        throws an exception.
        By default simply logs the exception as an error. Subclasses
        may override this method to customize reporting of exceptions.
        The exception itself is not passed explicitly, but is available
        in `sys.exc_info`.
        """
        app_log.error("Exception in callback %r", callback, exc_info=True)
    def split_fd(self, fd):
        """Returns an (fd, obj) pair from an ``fd`` parameter.
        We accept both raw file descriptors and file-like objects as
        input to `add_handler` and related methods. When a file-like
        object is passed, we must retain the object itself so we can
        close it correctly when the `IOLoop` shuts down, but the
        poller interfaces favor file descriptors (they will accept
        file-like objects and call ``fileno()`` for you, but they
        always return the descriptor itself).
        This method is provided for use by `IOLoop` subclasses and should
        not generally be used by application code.
        .. versionadded:: 4.0
        """
        try:
            return fd.fileno(), fd
        except AttributeError:
            # Not file-like: assume fd is already a raw descriptor.
            return fd, fd
    def close_fd(self, fd):
        """Utility method to close an ``fd``.
        If ``fd`` is a file-like object, we close it directly; otherwise
        we use `os.close`.
        This method is provided for use by `IOLoop` subclasses (in
        implementations of ``IOLoop.close(all_fds=True)`` and should
        not generally be used by application code.
        .. versionadded:: 4.0
        """
        try:
            try:
                fd.close()
            except AttributeError:
                os.close(fd)
        except OSError:
            # Best-effort close: the fd may already be closed or invalid.
            pass
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
    def initialize(self, impl, time_func=None, **kwargs):
        """Set up the poller-based loop.
        ``impl`` is the select-like poller object (epoll/kqueue/select
        wrapper); ``time_func`` overrides the loop's clock and defaults
        to `time.time`.
        """
        super(PollIOLoop, self).initialize(**kwargs)
        self._impl = impl
        if hasattr(self._impl, 'fileno'):
            # Don't leak the poller's fd into child processes.
            set_close_exec(self._impl.fileno())
        self.time_func = time_func or time.time
        self._handlers = {}
        self._events = {}
        self._callbacks = []
        self._callback_lock = threading.Lock()
        self._timeouts = []
        self._cancellations = 0
        self._running = False
        self._stopped = False
        self._closing = False
        self._thread_ident = None
        self._blocking_signal_threshold = None
        self._timeout_counter = itertools.count()
        # Create a pipe that we send bogus data to when we want to wake
        # the I/O loop when it is idle
        self._waker = Waker()
        self.add_handler(self._waker.fileno(),
                         lambda fd, events: self._waker.consume(),
                         self.READ)
    def close(self, all_fds=False):
        # Take the callback lock while setting _closing so no concurrent
        # add_callback can slip in; add_callback raises once _closing is set.
        with self._callback_lock:
            self._closing = True
        self.remove_handler(self._waker.fileno())
        if all_fds:
            # _handlers values are (fileobj, handler) pairs as stored by
            # add_handler; close_fd accepts either a raw fd or a file object.
            for fd, handler in self._handlers.values():
                self.close_fd(fd)
        self._waker.close()
        self._impl.close()
        self._callbacks = None
        self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
    def set_blocking_signal_threshold(self, seconds, action):
        """Install a SIGALRM-based watchdog: ``action`` runs if a single
        callback/handler keeps the loop busy longer than ``seconds``.
        Requires ``signal.setitimer`` (unix); on platforms without it this
        logs an error and does nothing.
        """
        if not hasattr(signal, "setitimer"):
            gen_log.error("set_blocking_signal_threshold requires a signal module "
                          "with the setitimer method")
            return
        self._blocking_signal_threshold = seconds
        if seconds is not None:
            # SIG_DFL for SIGALRM terminates the process, which implements
            # the documented "killed if blocked too long" default.
            signal.signal(signal.SIGALRM,
                          action if action is not None else signal.SIG_DFL)
    def start(self):
        """Run the event loop until `stop()` is called."""
        if self._running:
            raise RuntimeError("IOLoop is already running")
        self._setup_logging()
        if self._stopped:
            # stop() was called before start(); honor it and return at once.
            self._stopped = False
            return
        old_current = getattr(IOLoop._current, "instance", None)
        IOLoop._current.instance = self
        self._thread_ident = thread.get_ident()
        self._running = True
        # signal.set_wakeup_fd closes a race condition in event loops:
        # a signal may arrive at the beginning of select/poll/etc
        # before it goes into its interruptible sleep, so the signal
        # will be consumed without waking the select. The solution is
        # for the (C, synchronous) signal handler to write to a pipe,
        # which will then be seen by select.
        #
        # In python's signal handling semantics, this only matters on the
        # main thread (fortunately, set_wakeup_fd only works on the main
        # thread and will raise a ValueError otherwise).
        #
        # If someone has already set a wakeup fd, we don't want to
        # disturb it. This is an issue for twisted, which does its
        # SIGCHLD processing in response to its own wakeup fd being
        # written to. As long as the wakeup fd is registered on the IOLoop,
        # the loop will still wake up and everything should work.
        old_wakeup_fd = None
        if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
            # requires python 2.6+, unix. set_wakeup_fd exists but crashes
            # the python process on windows.
            try:
                old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
                if old_wakeup_fd != -1:
                    # Already set, restore previous value. This is a little racy,
                    # but there's no clean get_wakeup_fd and in real use the
                    # IOLoop is just started once at the beginning.
                    signal.set_wakeup_fd(old_wakeup_fd)
                    old_wakeup_fd = None
            except ValueError:
                # Non-main thread, or the previous value of wakeup_fd
                # is no longer valid.
                old_wakeup_fd = None
        try:
            while True:
                # Prevent IO event starvation by delaying new callbacks
                # to the next iteration of the event loop.
                with self._callback_lock:
                    callbacks = self._callbacks
                    self._callbacks = []
                # Add any timeouts that have come due to the callback list.
                # Do not run anything until we have determined which ones
                # are ready, so timeouts that call add_timeout cannot
                # schedule anything in this iteration.
                due_timeouts = []
                if self._timeouts:
                    now = self.time()
                    while self._timeouts:
                        if self._timeouts[0].callback is None:
                            # The timeout was cancelled. Note that the
                            # cancellation check is repeated below for timeouts
                            # that are cancelled by another timeout or callback.
                            heapq.heappop(self._timeouts)
                            self._cancellations -= 1
                        elif self._timeouts[0].deadline <= now:
                            due_timeouts.append(heapq.heappop(self._timeouts))
                        else:
                            break
                    if (self._cancellations > 512
                            and self._cancellations > (len(self._timeouts) >> 1)):
                        # Clean up the timeout queue when it gets large and it's
                        # more than half cancellations.
                        self._cancellations = 0
                        self._timeouts = [x for x in self._timeouts
                                          if x.callback is not None]
                        heapq.heapify(self._timeouts)
                for callback in callbacks:
                    self._run_callback(callback)
                for timeout in due_timeouts:
                    if timeout.callback is not None:
                        self._run_callback(timeout.callback)
                # Closures may be holding on to a lot of memory, so allow
                # them to be freed before we go into our poll wait.
                callbacks = callback = due_timeouts = timeout = None
                if self._callbacks:
                    # If any callbacks or timeouts called add_callback,
                    # we don't want to wait in poll() before we run them.
                    poll_timeout = 0.0
                elif self._timeouts:
                    # If there are any timeouts, schedule the first one.
                    # Use self.time() instead of 'now' to account for time
                    # spent running callbacks.
                    poll_timeout = self._timeouts[0].deadline - self.time()
                    poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
                else:
                    # No timeouts and no callbacks, so use the default.
                    poll_timeout = _POLL_TIMEOUT
                if not self._running:
                    break
                if self._blocking_signal_threshold is not None:
                    # clear alarm so it doesn't fire while poll is waiting for
                    # events.
                    signal.setitimer(signal.ITIMER_REAL, 0, 0)
                try:
                    event_pairs = self._impl.poll(poll_timeout)
                except Exception as e:
                    # Depending on python version and IOLoop implementation,
                    # different exception types may be thrown and there are
                    # two ways EINTR might be signaled:
                    # * e.errno == errno.EINTR
                    # * e.args is like (errno.EINTR, 'Interrupted system call')
                    if errno_from_exception(e) == errno.EINTR:
                        continue
                    else:
                        raise
                if self._blocking_signal_threshold is not None:
                    signal.setitimer(signal.ITIMER_REAL,
                                     self._blocking_signal_threshold, 0)
                # Pop one fd at a time from the set of pending fds and run
                # its handler. Since that handler may perform actions on
                # other file descriptors, there may be reentrant calls to
                # this IOLoop that update self._events
                self._events.update(event_pairs)
                while self._events:
                    fd, events = self._events.popitem()
                    try:
                        fd_obj, handler_func = self._handlers[fd]
                        handler_func(fd_obj, events)
                    except (OSError, IOError) as e:
                        if errno_from_exception(e) == errno.EPIPE:
                            # Happens when the client closes the connection
                            pass
                        else:
                            self.handle_callback_exception(self._handlers.get(fd))
                    except Exception:
                        self.handle_callback_exception(self._handlers.get(fd))
                # Release references so handler closures can be collected.
                fd_obj = handler_func = None
        finally:
            # reset the stopped flag so another start/stop pair can be issued
            self._stopped = False
            if self._blocking_signal_threshold is not None:
                signal.setitimer(signal.ITIMER_REAL, 0, 0)
            IOLoop._current.instance = old_current
            if old_wakeup_fd is not None:
                signal.set_wakeup_fd(old_wakeup_fd)
    def stop(self):
        """Request that the running loop exit after its current iteration.

        May be called from any thread.  The flag writes are observed by
        the polling loop, and ``self._waker.wake()`` unblocks a loop that
        is currently parked in poll().  The write order (_running before
        _stopped) matters to callers racing with start(), so these
        statements should not be reordered.
        """
        self._running = False
        self._stopped = True
        self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
    def add_callback(self, callback, *args, **kwargs):
        """Schedule ``callback`` to run on this loop's thread.

        Safe to call from any thread *except* a signal handler (that
        would risk deadlock on ``_callback_lock``; use
        `add_callback_from_signal` there).  Raises ``RuntimeError`` if
        the loop is shutting down.
        """
        with self._callback_lock:
            if self._closing:
                raise RuntimeError("IOLoop is closing")
            # Capture emptiness *before* appending: only the transition
            # from empty to non-empty can require waking the poller.
            list_empty = not self._callbacks
            self._callbacks.append(functools.partial(
                stack_context.wrap(callback), *args, **kwargs))
            if list_empty and thread.get_ident() != self._thread_ident:
                # If we're in the IOLoop's thread, we know it's not currently
                # polling.  If we're not, and we added the first callback to an
                # empty list, we may need to wake it up (it may wake up on its
                # own, but an occasional extra wake is harmless).  Waking
                # up a polling IOLoop is relatively expensive, so we try to
                # avoid it when we can.
                self._waker.wake()
    def add_callback_from_signal(self, callback, *args, **kwargs):
        """Schedule ``callback`` from a Unix signal handler.

        Unlike `add_callback`, this never acquires ``_callback_lock``
        when running on the loop's own thread, so it cannot deadlock if
        the signal interrupted code that already holds that lock.
        """
        with stack_context.NullContext():
            if thread.get_ident() != self._thread_ident:
                # if the signal is handled on another thread, we can add
                # it normally (modulo the NullContext)
                self.add_callback(callback, *args, **kwargs)
            else:
                # If we're on the IOLoop's thread, we cannot use
                # the regular add_callback because it may deadlock on
                # _callback_lock.  Blindly insert into self._callbacks.
                # This is safe because the GIL makes list.append atomic.
                # One subtlety is that if the signal interrupted the
                # _callback_lock block in IOLoop.start, we may modify
                # either the old or new version of self._callbacks,
                # but either way will work.
                self._callbacks.append(functools.partial(
                    stack_context.wrap(callback), *args, **kwargs))
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tiebreaker']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tiebreaker = next(io_loop._timeout_counter)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, self.tiebreaker) <
(other.deadline, other.tiebreaker))
def __le__(self, other):
return ((self.deadline, self.tiebreaker) <=
(other.deadline, other.tiebreaker))
class PeriodicCallback(object):
    """Runs a callback repeatedly on a fixed schedule.

    ``callback_time`` is the period in milliseconds (note: most other
    time-related functions in Tornado use seconds).  If an invocation
    overruns the period, later invocations are skipped so the schedule
    catches up instead of drifting.

    `start` must be called after the `PeriodicCallback` is created.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """

    def __init__(self, callback, callback_time, io_loop=None):
        self.callback = callback
        if callback_time <= 0:
            raise ValueError("Periodic callback must have a positive callback_time")
        self.callback_time = callback_time
        self.io_loop = io_loop or IOLoop.current()
        self._running = False
        self._timeout = None

    def start(self):
        """Starts the timer, anchoring the schedule at the current time."""
        self._running = True
        self._next_timeout = self.io_loop.time()
        self._schedule_next()

    def stop(self):
        """Stops the timer and cancels any pending invocation."""
        self._running = False
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

    def is_running(self):
        """Return True if `start` has been called (and `stop` has not).

        .. versionadded:: 4.1
        """
        return self._running

    def _run(self):
        # Fires on the loop; reschedules itself even when the callback raises.
        if not self._running:
            return
        try:
            return self.callback()
        except Exception:
            self.io_loop.handle_callback_exception(self.callback)
        finally:
            self._schedule_next()

    def _schedule_next(self):
        if not self._running:
            return
        now = self.io_loop.time()
        if self._next_timeout <= now:
            # We are at (or past) the deadline: jump forward by however
            # many whole periods were missed, plus one, so the next
            # deadline lands strictly in the future.
            period = self.callback_time / 1000.0
            missed = math.floor((now - self._next_timeout) / period) + 1
            self._next_timeout += missed * period
        self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.bigquery_storage_v1.types import arrow
from google.cloud.bigquery_storage_v1.types import avro
from google.cloud.bigquery_storage_v1.types import storage
from google.cloud.bigquery_storage_v1.types import stream
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import BigQueryReadTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import BigQueryReadGrpcAsyncIOTransport
from .client import BigQueryReadClient
class BigQueryReadAsyncClient:
    """BigQuery Read API.
    The Read API can be used to read data from BigQuery.
    """
    # NOTE: this class is GAPIC-generated.  It wraps the synchronous
    # BigQueryReadClient and reuses its endpoints, resource-path helpers
    # and transport machinery; only the RPC methods are async.
    _client: BigQueryReadClient
    DEFAULT_ENDPOINT = BigQueryReadClient.DEFAULT_ENDPOINT
    DEFAULT_MTLS_ENDPOINT = BigQueryReadClient.DEFAULT_MTLS_ENDPOINT
    # Resource-path helpers are borrowed verbatim from the sync client.
    read_session_path = staticmethod(BigQueryReadClient.read_session_path)
    parse_read_session_path = staticmethod(BigQueryReadClient.parse_read_session_path)
    read_stream_path = staticmethod(BigQueryReadClient.read_stream_path)
    parse_read_stream_path = staticmethod(BigQueryReadClient.parse_read_stream_path)
    table_path = staticmethod(BigQueryReadClient.table_path)
    parse_table_path = staticmethod(BigQueryReadClient.parse_table_path)
    common_billing_account_path = staticmethod(
        BigQueryReadClient.common_billing_account_path
    )
    parse_common_billing_account_path = staticmethod(
        BigQueryReadClient.parse_common_billing_account_path
    )
    common_folder_path = staticmethod(BigQueryReadClient.common_folder_path)
    parse_common_folder_path = staticmethod(BigQueryReadClient.parse_common_folder_path)
    common_organization_path = staticmethod(BigQueryReadClient.common_organization_path)
    parse_common_organization_path = staticmethod(
        BigQueryReadClient.parse_common_organization_path
    )
    common_project_path = staticmethod(BigQueryReadClient.common_project_path)
    parse_common_project_path = staticmethod(
        BigQueryReadClient.parse_common_project_path
    )
    common_location_path = staticmethod(BigQueryReadClient.common_location_path)
    parse_common_location_path = staticmethod(
        BigQueryReadClient.parse_common_location_path
    )
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.
        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            BigQueryReadAsyncClient: The constructed client.
        """
        return BigQueryReadClient.from_service_account_info.__func__(BigQueryReadAsyncClient, info, *args, **kwargs) # type: ignore
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            BigQueryReadAsyncClient: The constructed client.
        """
        return BigQueryReadClient.from_service_account_file.__func__(BigQueryReadAsyncClient, filename, *args, **kwargs) # type: ignore
    # Backwards-compatible alias for from_service_account_file.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> BigQueryReadTransport:
        """Returns the transport used by the client instance.
        Returns:
            BigQueryReadTransport: The transport used by the client instance.
        """
        return self._client.transport
    # Transport classes are resolved through the sync client's registry.
    get_transport_class = functools.partial(
        type(BigQueryReadClient).get_transport_class, type(BigQueryReadClient)
    )
    def __init__(
        self,
        *,
        credentials: ga_credentials.Credentials = None,
        transport: Union[str, BigQueryReadTransport] = "grpc_asyncio",
        client_options: ClientOptions = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the big query read client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.BigQueryReadTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # All real work is delegated to the wrapped synchronous client.
        self._client = BigQueryReadClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )
    async def create_read_session(
        self,
        request: storage.CreateReadSessionRequest = None,
        *,
        parent: str = None,
        read_session: stream.ReadSession = None,
        max_stream_count: int = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> stream.ReadSession:
        r"""Creates a new read session. A read session divides
        the contents of a BigQuery table into one or more
        streams, which can then be used to read data from the
        table. The read session also specifies properties of the
        data to be read, such as a list of columns or a push-
        down filter describing the rows to be returned.
        A particular row can be read by at most one stream. When
        the caller has reached the end of each stream in the
        session, then all the data in the table has been read.
        Data is assigned to each stream such that roughly the
        same number of rows can be read from each stream.
        Because the server-side unit for assigning data is
        collections of rows, the API does not guarantee that
        each stream will return the same number or rows.
        Additionally, the limits are enforced based on the
        number of pre-filtered rows, so some filters can lead to
        lopsided assignments.
        Read sessions automatically expire 24 hours after they
        are created and do not require manual clean-up by the
        caller.
        Args:
            request (:class:`google.cloud.bigquery_storage_v1.types.CreateReadSessionRequest`):
                The request object. Request message for
                `CreateReadSession`.
            parent (:class:`str`):
                Required. The request project that owns the session, in
                the form of ``projects/{project_id}``.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            read_session (:class:`google.cloud.bigquery_storage_v1.types.ReadSession`):
                Required. Session to be created.
                This corresponds to the ``read_session`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            max_stream_count (:class:`int`):
                Max initial number of streams. If
                unset or zero, the server will provide a
                value of streams so as to produce
                reasonable throughput. Must be non-
                negative. The number of streams may be
                lower than the requested number,
                depending on the amount parallelism that
                is reasonable for the table. Error will
                be returned if the max count is greater
                than the current system max limit of
                1,000.
                Streams must be read starting from
                offset 0.
                This corresponds to the ``max_stream_count`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.bigquery_storage_v1.types.ReadSession:
                Information about the ReadSession.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, read_session, max_stream_count])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = storage.CreateReadSessionRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if read_session is not None:
            request.read_session = read_session
        if max_stream_count is not None:
            request.max_stream_count = max_stream_count
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_read_session,
            default_retry=retries.Retry(
                initial=0.1,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=600.0,
            ),
            default_timeout=600.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.  The routing header lets the backend route the
        # request based on the target table.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("read_session.table", request.read_session.table),)
            ),
        )
        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
    def read_rows(
        self,
        request: storage.ReadRowsRequest = None,
        *,
        read_stream: str = None,
        offset: int = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> Awaitable[AsyncIterable[storage.ReadRowsResponse]]:
        r"""Reads rows from the stream in the format prescribed
        by the ReadSession. Each response contains one or more
        table rows, up to a maximum of 100 MiB per response;
        read requests which attempt to read individual rows
        larger than 100 MiB will fail.
        Each request also returns a set of stream statistics
        reflecting the current state of the stream.
        Args:
            request (:class:`google.cloud.bigquery_storage_v1.types.ReadRowsRequest`):
                The request object. Request message for `ReadRows`.
            read_stream (:class:`str`):
                Required. Stream to read rows from.
                This corresponds to the ``read_stream`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            offset (:class:`int`):
                The offset requested must be less
                than the last row read from Read.
                Requesting a larger offset is undefined.
                If not specified, start reading from
                offset zero.
                This corresponds to the ``offset`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            AsyncIterable[google.cloud.bigquery_storage_v1.types.ReadRowsResponse]:
                Response from calling ReadRows may include row data, progress and
                throttling information.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([read_stream, offset])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = storage.ReadRowsRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if read_stream is not None:
            request.read_stream = read_stream
        if offset is not None:
            request.offset = offset
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.read_rows,
            default_retry=retries.Retry(
                initial=0.1,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=86400.0,
            ),
            default_timeout=86400.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("read_stream", request.read_stream),)
            ),
        )
        # Send the request.
        # Note: this method is deliberately not ``async`` and the call is
        # not awaited -- the returned object is the awaitable stream.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
    async def split_read_stream(
        self,
        request: storage.SplitReadStreamRequest = None,
        *,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> storage.SplitReadStreamResponse:
        r"""Splits a given ``ReadStream`` into two ``ReadStream`` objects.
        These ``ReadStream`` objects are referred to as the primary and
        the residual streams of the split. The original ``ReadStream``
        can still be read from in the same manner as before. Both of the
        returned ``ReadStream`` objects can also be read from, and the
        rows returned by both child streams will be the same as the rows
        read from the original stream.
        Moreover, the two child streams will be allocated back-to-back
        in the original ``ReadStream``. Concretely, it is guaranteed
        that for streams original, primary, and residual, that
        original[0-j] = primary[0-j] and original[j-n] = residual[0-m]
        once the streams have been read to completion.
        Args:
            request (:class:`google.cloud.bigquery_storage_v1.types.SplitReadStreamRequest`):
                The request object. Request message for
                `SplitReadStream`.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.bigquery_storage_v1.types.SplitReadStreamResponse:
                Response message for SplitReadStream.
        """
        # Create or coerce a protobuf request object.
        request = storage.SplitReadStreamRequest(request)
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.split_read_stream,
            default_retry=retries.Retry(
                initial=0.1,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=600.0,
            ),
            default_timeout=600.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
# Rebind DEFAULT_CLIENT_INFO with the installed package's version attached.
# Because the RPC methods reference DEFAULT_CLIENT_INFO at call time, this
# module-level rebinding takes effect even though it appears after the
# class definition.  Falls back to a generic ClientInfo when the
# distribution is not installed (e.g. a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-bigquery-storage",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("BigQueryReadAsyncClient",)
import time
from .constants import *
class Message(object):
    """A single LLAP message addressed to a (two-character) device id.

    Tracks retry count and creation time so a sender can manage
    retransmission; ``requires_ack`` marks messages that expect an ACK.
    """

    def __init__(self, device, body):
        self.device = device
        self.body = body
        self.retries = 0
        self.time = time.time()
        self.requires_ack = True

    def is_response(self, message):
        """By default a message is answered by an identical message."""
        return self == message

    def to_llap(self):
        """Render as an on-air LLAP frame, padded to the fixed length."""
        frame = LLAP_START + self.device + self.body
        return frame + LLAP_FILL * (LLAP_LEN - len(frame))

    def __eq__(self, other):
        return (isinstance(other, self.__class__)
                and self.device == other.device
                and self.body == other.body)
class Ack(Message):
    """An 'ACK' reply; acknowledgements are never themselves re-ACKed."""

    def __init__(self, device):
        super(Ack, self).__init__(device, 'ACK')
        self.requires_ack = False
class Awake(Message):
    """Sent by a device to announce it has woken up; needs no ACK."""

    def __init__(self, device):
        super(Awake, self).__init__(device, 'AWAKE')
        self.requires_ack = False
class Battery(Message):
    """'BATT' voltage query/report; the voltage string is appended when known."""

    def __init__(self, device, voltage=None):
        self.voltage = voltage
        payload = 'BATT'
        if voltage:
            payload = payload + voltage
        super(Battery, self).__init__(device, payload)
        self.requires_ack = False

    def is_response(self, message):
        """Any Battery message from the same device answers this one."""
        return self.device == message.device and isinstance(message, self.__class__)
class BatteryLow(Message):
    """'BATTLOW' warning emitted by a device whose battery is running out."""

    def __init__(self, device):
        super(BatteryLow, self).__init__(device, 'BATTLOW')
class ButtonDoor(Message):
    """Door-contact event; body is ``<message><input>ON`` or ``...OFF``."""

    def __init__(self, device, message, input, state):
        self.message = message
        self.input = input
        self.state = state
        suffix = 'ON' if state else 'OFF'
        super(ButtonDoor, self).__init__(device, message + input + suffix)
class ButtonPress(Message):
    """Momentary button-press event; body is ``<message><input>``."""

    def __init__(self, device, message, input):
        self.message = message
        self.input = input
        super(ButtonPress, self).__init__(device, message + input)
class ButtonSwitch(Message):
    """Toggle-switch event; body is ``<message>ON`` or ``<message>OFF``."""

    def __init__(self, device, message, state):
        self.message = message
        self.state = state
        suffix = 'ON' if state else 'OFF'
        super(ButtonSwitch, self).__init__(device, message + suffix)
class FirmwareVersion(Message):
    """'FVER' firmware-version query/report; version appended when known."""

    def __init__(self, device, version=None):
        self.version = version
        payload = 'FVER'
        if version:
            payload = payload + version
        super(FirmwareVersion, self).__init__(device, payload)
        self.requires_ack = False

    def is_response(self, message):
        """Any FirmwareVersion message from the same device answers this one."""
        return self.device == message.device and isinstance(message, self.__class__)
class Hello(Message):
    """'HELLO' ping used to check a device is alive; needs no ACK."""

    def __init__(self, device):
        super(Hello, self).__init__(device, 'HELLO')
        self.requires_ack = False
class ProtocolVersion(Message):
    """'APVER' protocol-version query/report; version appended when known."""

    def __init__(self, device, version=None):
        self.version = version
        payload = 'APVER'
        if version:
            payload = payload + version
        super(ProtocolVersion, self).__init__(device, payload)
        self.requires_ack = False

    def is_response(self, message):
        """Any ProtocolVersion message from the same device answers this one."""
        return self.device == message.device and isinstance(message, self.__class__)
class Reboot(Message):
    """'REBOOT' command asking a device to restart; needs no ACK."""

    def __init__(self, device):
        super(Reboot, self).__init__(device, 'REBOOT')
        self.requires_ack = False
class Sleep(Message):
    """'SLEEP' command; the expected reply is a `Sleeping` message."""

    def __init__(self, device):
        super(Sleep, self).__init__(device, 'SLEEP')

    def is_response(self, message):
        """A `Sleeping` message from the same device answers this one."""
        return self.device == message.device and isinstance(message, Sleeping)
class Sleeping(Message):
    """'SLEEPING' confirmation that a device is going to sleep; no ACK."""

    def __init__(self, device):
        super(Sleeping, self).__init__(device, 'SLEEPING')
        self.requires_ack = False
class Started(Message):
    """'STARTED' announcement sent by a device after boot."""

    def __init__(self, device):
        super(Started, self).__init__(device, 'STARTED')
class Wake(Message):
    """'WAKE' command asking a sleeping device to wake; needs no ACK."""

    def __init__(self, device):
        super(Wake, self).__init__(device, 'WAKE')
        self.requires_ack = False
class WakeCount(Message):
    """Wake-count report; body is 'WAKEC' plus the zero-padded 3-digit count."""

    def __init__(self, device, count):
        super(WakeCount, self).__init__(device, 'WAKEC' + '{:03d}'.format(count))
        self.count = count
        self.requires_ack = False
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_range op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedRangeOpTest(test_util.TensorFlowTestCase):
  """Tests for `ragged_math_ops.range`, the ragged analogue of tf.range."""
  def testDocStringExamples(self):
    """Examples from ragged_range.__doc__."""
    rt1 = ragged_math_ops.range([3, 5, 2])
    self.assertAllEqual(rt1, [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]])
    rt2 = ragged_math_ops.range([0, 5, 8], [3, 3, 12])
    self.assertAllEqual(rt2, [[0, 1, 2], [], [8, 9, 10, 11]])
    rt3 = ragged_math_ops.range([0, 5, 8], [3, 3, 12], 2)
    self.assertAllEqual(rt3, [[0, 2], [], [8, 10]])
  def testBasicRanges(self):
    # Each expected row mirrors Python's built-in range() semantics.
    # Specify limits only.
    self.assertAllEqual(
        ragged_math_ops.range([0, 3, 5]),
        [list(range(0)), list(range(3)),
         list(range(5))])
    # Specify starts and limits.
    self.assertAllEqual(
        ragged_math_ops.range([0, 3, 5], [2, 3, 10]),
        [list(range(0, 2)),
         list(range(3, 3)),
         list(range(5, 10))])
    # Specify starts, limits, and deltas.
    self.assertAllEqual(
        ragged_math_ops.range([0, 3, 5], [4, 4, 15], [2, 3, 4]),
        [list(range(0, 4, 2)),
         list(range(3, 4, 3)),
         list(range(5, 15, 4))])
  def testFloatRanges(self):
    # Float steps accumulate rounding error, so use assertAllClose.
    expected = [[0.0, 0.4, 0.8, 1.2, 1.6, 2.0, 2.4, 2.8, 3.2, 3.6], [3.0],
                [5.0, 7.2, 9.4, 11.6, 13.8]]
    actual = ragged_math_ops.range([0.0, 3.0, 5.0], [3.9, 4.0, 15.0],
                                   [0.4, 1.5, 2.2])
    self.assertAllClose(actual, expected)
  def testNegativeDeltas(self):
    self.assertAllEqual(
        ragged_math_ops.range([0, 3, 5], limits=0, deltas=-1),
        [list(range(0, 0, -1)),
         list(range(3, 0, -1)),
         list(range(5, 0, -1))])
    self.assertAllEqual(
        ragged_math_ops.range([0, -3, 5], limits=0, deltas=[-1, 1, -2]),
        [list(range(0, 0, -1)),
         list(range(-3, 0, 1)),
         list(range(5, 0, -2))])
  def testBroadcast(self):
    # Specify starts and limits, broadcast deltas.
    self.assertAllEqual(
        ragged_math_ops.range([0, 3, 5], [4, 4, 15], 3),
        [list(range(0, 4, 3)),
         list(range(3, 4, 3)),
         list(range(5, 15, 3))])
    # Broadcast all arguments.
    self.assertAllEqual(
        ragged_math_ops.range(0, 5, 1), [list(range(0, 5, 1))])
  def testEmptyRanges(self):
    # Rows whose start already passes the limit must come back empty.
    rt1 = ragged_math_ops.range([0, 5, 3], [0, 3, 5])
    rt2 = ragged_math_ops.range([0, 5, 5], [0, 3, 5], -1)
    self.assertAllEqual(rt1, [[], [], [3, 4]])
    self.assertAllEqual(rt2, [[], [5, 4], []])
  def testShapeFnErrors(self):
    # Non-scalar/non-vector args and mismatched lengths are rejected.
    self.assertRaises((ValueError, errors.InvalidArgumentError),
                      ragged_math_ops.range, [[0]], 5)
    self.assertRaises((ValueError, errors.InvalidArgumentError),
                      ragged_math_ops.range, 0, [[5]])
    self.assertRaises((ValueError, errors.InvalidArgumentError),
                      ragged_math_ops.range, 0, 5, [[0]])
    self.assertRaises((ValueError, errors.InvalidArgumentError),
                      ragged_math_ops.range, [0], [1, 2])
  def testKernelErrors(self):
    # assertRaisesRegexp (deprecated alias) kept for Python 2 compatibility.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r'Requires delta != 0'):
      self.evaluate(ragged_math_ops.range(0, 0, 0))
  def testShape(self):
    # The static shape is always [nrows, None]: ragged in the row dim.
    self.assertAllEqual(
        ragged_math_ops.range(0, 0, 1).shape.as_list(), [1, None])
    self.assertAllEqual(
        ragged_math_ops.range([1, 2, 3]).shape.as_list(), [3, None])
    self.assertAllEqual(
        ragged_math_ops.range([1, 2, 3], [4, 5, 6]).shape.as_list(), [3, None])
# Standard TensorFlow test entry point: runs all tests in this module.
if __name__ == '__main__':
  googletest.main()
# -*- coding: utf-8 -*-
'''
importer libs
~~~~~~~~~~~~~
pre-installed libraries (for override).
:author: Sam Gammon <sam@keen.io>
:license: This software follows the MIT (OSI-approved)
license for open source software. A truncated
version is included here; for full licensing
details, see ``LICENSE.md`` in the root directory
of the project.
Copyright (c) 2013, Keen IO
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
''' | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Creating reStructuredText Directives
# @see http://docutils.sourceforge.net/docs/howto/rst-directives.html
from docutils.parsers.rst import directives, Directive
from docutils import nodes
from pelican import signals
# Absolute path of Pelican's content directory; filled in lazily by
# init_github_file_plugin once the Pelican settings are available.
_CONTENT_PATH = None
# Set True for verbose diagnostics (lexer guessing / file lookup failures).
_DEBUG = False
from os.path import basename
from os.path import join
# http://pygments.org/docs/quickstart/
from pygments import highlight
from pygments.lexers import guess_lexer_for_filename
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
from pygments.util import ClassNotFound
class show_github_file(Directive):
    """reST directive that embeds a file from a GitHub repository.

    Usage: ``.. show_github_file:: <username> <repo> <relative_path>``.
    Renders the local copy with Pygments when the file can be read from
    disk; otherwise falls back to a gist-it ``<script>`` embed that
    fetches the file client-side from GitHub.
    """
    required_arguments = 3
    has_content = False
    def run(self):
        username = self.arguments[0].strip()
        repo = self.arguments[1].strip()
        relative_path = self.arguments[2].strip()
        filename = basename(relative_path)
        repo_url = 'https://github.com/{}/{}'.format(username, repo)
        github_url = '{}/blob/master/{}'.format(repo_url, relative_path)
        raw_url = 'https://raw.githubusercontent.com/{}/{}/master/{}'.format(username, repo, relative_path)
        # FIXME: do not assume PATH='content'
        # relative_path[8:] strips a presumed leading 'content/' (8 chars);
        # this breaks if PATH is configured to anything else -- TODO confirm.
        abs_path = join(_CONTENT_PATH, relative_path[8:])
        try:
            with open(abs_path, 'r') as f:
                code = f.read()
            try:
                lexer = guess_lexer_for_filename(filename, code)
            except ClassNotFound:
                # Unknown extension/content: render as plain text.
                if _DEBUG: print("guess fail: {}".format(filename))
                lexer = get_lexer_by_name("text")
            html = """<figure class="github-file">
    <figcaption>
        <a href="{}">{}</a> |
        <a href="{}">repository</a> |
        <a href="{}">view raw</a>
    </figcaption><div class="code-file">
""".format(github_url, filename, repo_url, raw_url)
            html += highlight(code, lexer, HtmlFormatter(linenos='table'))
            html += '</div></figure>'
        except IOError:
            # Local file missing: fall back to a client-side embed.
            if _DEBUG: print("IO fail: {}".format(filename))
            # use Gistfy, https://github.com/alexandrevicenzi/gistfy
            #html = '<script type="text/javascript" src="//gistfy-app.herokuapp.com/github/{}/{}/{}"></script>'.format(username, repo, relative_path)
            # use gist-it, https://github.com/robertkrimen/gist-it
            html = '<script src="https://gist-it.appspot.com/github/{}/{}/blob/master/{}"></script>'.format(username, repo, relative_path)
            html += "<noscript>You need to enable JavaScript to see {}!</noscript>".format(relative_path)
        return [nodes.raw('', html, format='html')]
def init_github_file_plugin(pelican_obj):
    """Cache Pelican's content directory (the ``PATH`` setting) on first use."""
    global _CONTENT_PATH
    if _CONTENT_PATH is not None:
        return
    _CONTENT_PATH = pelican_obj.settings['PATH']
def register():
    """Pelican plugin entry point.

    Hooks content-path discovery into the generator-creation signal and
    registers the ``show_github_file`` directive with docutils.
    """
    signals.get_generators.connect(init_github_file_plugin)
    directives.register_directive('show_github_file', show_github_file)
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\Console;
use Symfony\Component\Console\Application as BaseApplication;
use Symfony\Component\Console\Command\Command;
use Symfony\Component\Console\Command\ListCommand;
use Symfony\Component\Console\Command\TraceableCommand;
use Symfony\Component\Console\Debug\CliRequest;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Input\InputOption;
use Symfony\Component\Console\Output\ConsoleOutputInterface;
use Symfony\Component\Console\Output\OutputInterface;
use Symfony\Component\Console\Style\SymfonyStyle;
use Symfony\Component\HttpKernel\Bundle\Bundle;
use Symfony\Component\HttpKernel\Kernel;
use Symfony\Component\HttpKernel\KernelInterface;
/**
* @author Fabien Potencier <fabien@symfony.com>
*/
class Application extends BaseApplication
{
    // Lazily flipped to true the first time registerCommands() runs.
    private bool $commandsRegistered = false;
    // Throwables collected while registering commands; rendered as warnings.
    private array $registrationErrors = [];
    public function __construct(
        private KernelInterface $kernel,
    ) {
        parent::__construct('Symfony', Kernel::VERSION);
        // Expose kernel-level options on every command of this application.
        $inputDefinition = $this->getDefinition();
        $inputDefinition->addOption(new InputOption('--env', '-e', InputOption::VALUE_REQUIRED, 'The Environment name.', $kernel->getEnvironment()));
        $inputDefinition->addOption(new InputOption('--no-debug', null, InputOption::VALUE_NONE, 'Switch off debug mode.'));
        $inputDefinition->addOption(new InputOption('--profile', null, InputOption::VALUE_NONE, 'Enables profiling (requires debug).'));
    }
    /**
     * Gets the Kernel associated with this Console.
     */
    public function getKernel(): KernelInterface
    {
        return $this->kernel;
    }
    /**
     * Resets stateful container services between command runs, if available.
     */
    public function reset(): void
    {
        if ($this->kernel->getContainer()->has('services_resetter')) {
            $this->kernel->getContainer()->get('services_resetter')->reset();
        }
    }
    /**
     * Runs the current application.
     *
     * Registers bundle/container commands first and wires the kernel's
     * event dispatcher (and argument resolver, when configured) into the
     * console before delegating to the parent implementation.
     *
     * @return int 0 if everything went fine, or an error code
     */
    public function doRun(InputInterface $input, OutputInterface $output): int
    {
        $this->registerCommands();
        if ($this->registrationErrors) {
            $this->renderRegistrationErrors($input, $output);
        }
        $container = $this->kernel->getContainer();
        $this->setDispatcher($container->get('event_dispatcher'));
        if ($container->has('console.argument_resolver')) {
            $this->setArgumentResolver($container->get('console.argument_resolver'));
        }
        return parent::doRun($input, $output);
    }
    /**
     * Runs a single command, optionally wrapping it for profiling.
     *
     * Registration errors are rendered at most once: either before the
     * command (for non-"list" commands) or after it finishes.
     */
    protected function doRunCommand(Command $command, InputInterface $input, OutputInterface $output): int
    {
        $requestStack = null;
        $renderRegistrationErrors = true;
        if (!$command instanceof ListCommand) {
            if ($this->registrationErrors) {
                $this->renderRegistrationErrors($input, $output);
                $this->registrationErrors = [];
                $renderRegistrationErrors = false;
            }
        }
        if ($input->hasParameterOption('--profile')) {
            // Profiling requires debug mode, the Stopwatch component and the
            // profiler integration; otherwise only warn on stderr.
            $container = $this->kernel->getContainer();
            if (!$this->kernel->isDebug()) {
                if ($output instanceof ConsoleOutputInterface) {
                    $output = $output->getErrorOutput();
                }
                (new SymfonyStyle($input, $output))->warning('Debug mode should be enabled when the "--profile" option is used.');
            } elseif (!$container->has('debug.stopwatch')) {
                if ($output instanceof ConsoleOutputInterface) {
                    $output = $output->getErrorOutput();
                }
                (new SymfonyStyle($input, $output))->warning('The "--profile" option needs the Stopwatch component. Try running "composer require symfony/stopwatch".');
            } elseif (!$container->has('.virtual_request_stack')) {
                if ($output instanceof ConsoleOutputInterface) {
                    $output = $output->getErrorOutput();
                }
                (new SymfonyStyle($input, $output))->warning('The "--profile" option needs the profiler integration. Try enabling the "framework.profiler" option.');
            } else {
                // Wrap the command so the profiler sees it as a CLI "request".
                $command = new TraceableCommand($command, $container->get('debug.stopwatch'));
                $requestStack = $container->get('.virtual_request_stack');
                $requestStack->push(new CliRequest($command));
            }
        }
        try {
            $returnCode = parent::doRunCommand($command, $input, $output);
        } finally {
            // Always pop the virtual request, even when the command throws.
            $requestStack?->pop();
        }
        if ($renderRegistrationErrors && $this->registrationErrors) {
            $this->renderRegistrationErrors($input, $output);
            $this->registrationErrors = [];
        }
        return $returnCode;
    }
    public function find(string $name): Command
    {
        $this->registerCommands();
        return parent::find($name);
    }
    public function get(string $name): Command
    {
        $this->registerCommands();
        return parent::get($name);
    }
    public function all(?string $namespace = null): array
    {
        $this->registerCommands();
        return parent::all($namespace);
    }
    public function getLongVersion(): string
    {
        return parent::getLongVersion().\sprintf(' (env: <comment>%s</>, debug: <comment>%s</>)', $this->kernel->getEnvironment(), $this->kernel->isDebug() ? 'true' : 'false');
    }
    public function addCommand(callable|Command $command): ?Command
    {
        $this->registerCommands();
        return parent::addCommand($command);
    }
    /**
     * Boots the kernel and registers commands from bundles and the container.
     *
     * Runs once per Application instance; failures are collected in
     * $registrationErrors rather than aborting, so other commands stay usable.
     */
    protected function registerCommands(): void
    {
        if ($this->commandsRegistered) {
            return;
        }
        $this->commandsRegistered = true;
        $this->kernel->boot();
        $container = $this->kernel->getContainer();
        foreach ($this->kernel->getBundles() as $bundle) {
            if ($bundle instanceof Bundle) {
                try {
                    $bundle->registerCommands($this);
                } catch (\Throwable $e) {
                    $this->registrationErrors[] = $e;
                }
            }
        }
        if ($container->has('console.command_loader')) {
            $this->setCommandLoader($container->get('console.command_loader'));
        }
        if ($container->hasParameter('console.command.ids')) {
            // Lazy commands are handled by the command loader above; only
            // eagerly instantiate the non-lazy service ids.
            $lazyCommandIds = $container->hasParameter('console.lazy_command.ids') ? $container->getParameter('console.lazy_command.ids') : [];
            foreach ($container->getParameter('console.command.ids') as $id) {
                if (!isset($lazyCommandIds[$id])) {
                    try {
                        $this->addCommand($container->get($id));
                    } catch (\Throwable $e) {
                        $this->registrationErrors[] = $e;
                    }
                }
            }
        }
    }
    /**
     * Renders collected registration errors as warnings on stderr.
     */
    private function renderRegistrationErrors(InputInterface $input, OutputInterface $output): void
    {
        if ($output instanceof ConsoleOutputInterface) {
            $output = $output->getErrorOutput();
        }
        (new SymfonyStyle($input, $output))->warning('Some commands could not be registered:');
        foreach ($this->registrationErrors as $error) {
            $this->doRenderThrowable($error, $output);
        }
    }
}
from __future__ import with_statement
import os, sys
class TiLogger:
    """Simple leveled logger writing to a stream and, optionally, a file.

    The log file path is stored in the module-level global ``_logfile``,
    so all instances append to the same file.  Python 2 only (uses print
    statements).
    """
    # Severity levels in increasing verbosity; a message is emitted when
    # the logger's configured level is >= the message's level.
    ERROR = 0
    WARN = 1
    INFO = 2
    DEBUG = 3
    TRACE = 4
    def __init__(self, logfile, level=TRACE, output_stream=sys.stdout):
        # level: most-verbose message level to emit (defaults to TRACE).
        # output_stream: stream for console output (defaults to stdout).
        self.level = level
        self.output_stream = output_stream
        global _logfile
        _logfile = logfile
        if _logfile is not None:
            # Create the log directory and truncate/initialize the file;
            # failures are printed but do not abort construction.
            logfolder = os.path.dirname(_logfile)
            try:
                if not os.path.exists(logfolder):
                    os.mkdir(logfolder)
            except:
                print "[ERROR] error creating log folder %s: %s" % (logfolder, sys.exc_info()[0])
            try:
                with open(_logfile, 'w') as f:
                    f.write('Logfile initialized\n')
            except:
                print "[ERROR] error initializing (writing to) log file %s: %s" % (_logfile, sys.exc_info()[0])
        # NOTE(review): if logfile is None this concatenation raises
        # TypeError -- confirm callers always pass a string here.
        self.info("logfile = " + logfile)
    def _level_prefix(self, level):
        # Map a numeric level to its bracketed tag text.
        return {
            TiLogger.ERROR: "ERROR",
            TiLogger.WARN: "WARN",
            TiLogger.INFO: "INFO",
            TiLogger.DEBUG: "DEBUG",
            TiLogger.TRACE: "TRACE"
        }[level];
    def _log(self, msg, level):
        # Emit "[LEVEL] msg" to the stream (and the log file, if any)
        # when the logger is verbose enough for this message.
        global _logfile
        if self.level >= level:
            prefix = self._level_prefix(level)
            line = "[%s] %s" % (prefix, msg)
            print >> self.output_stream, line
            self.output_stream.flush()
            sys.stdout.flush()
            if _logfile is not None:
                # Reopen per message: slower, but robust across crashes.
                try:
                    with open(_logfile, 'a') as f:
                        f.write("%s\n" % line)
                except:
                    print "[ERROR] error writing to log %s: %s" % (_logfile, sys.exc_info()[0])
    def info(self, msg):
        self._log(msg, TiLogger.INFO)
    def debug(self, msg):
        self._log(msg, TiLogger.DEBUG)
    def warn(self, msg):
        self._log(msg, TiLogger.WARN)
    def trace(self, msg):
        self._log(msg, TiLogger.TRACE)
# if __name__ == "__main__":
# _logfile = ''
# print "[DEBUG] TiLogger initialized" | unknown | codeparrot/codeparrot-clean | ||
"""
Course Textbooks page.
"""
import requests
from path import Path as path
from common.test.acceptance.pages.common.utils import click_css
from common.test.acceptance.pages.studio.course_page import CoursePage
class TextbookUploadPage(CoursePage):
    """
    Studio "Textbooks" page object: add a textbook, upload its PDF and
    verify the published book is reachable.
    """
    url_path = "textbooks"

    def is_browser_on_page(self):
        """The page counts as loaded once the textbook list is shown."""
        textbook_list = self.q(css='.textbooks-list')
        return textbook_list.visible

    def open_add_textbook_form(self):
        """Reveal the new-textbook form via the "new" button."""
        new_button = self.q(css='.nav-item .new-button')
        new_button.click()

    def get_element_text(self, selector):
        """Text content of the first element matched by ``selector``."""
        first_match = self.q(css=selector)[0]
        return first_match.text

    def set_input_field_value(self, selector, value):
        """Type ``value`` into the first input matched by ``selector``."""
        target_input = self.q(css=selector)[0]
        target_input.send_keys(value)

    def upload_pdf_file(self, file_name):
        """Attach a PDF fixture through the upload modal and confirm it."""
        fixtures_root = path(__file__).abspath().dirname().dirname().dirname().dirname()  # pylint:disable=no-value-for-parameter
        pdf_path = fixtures_root + '/data/uploads/' + file_name
        # Open the dialog, hand it the fixture path, then confirm the upload.
        click_css(self, ".edit-textbook .action-upload", require_notification=False)
        self.wait_for_element_visibility(".upload-dialog input", "Upload modal opened")
        self.q(css=".upload-dialog input").results[0].send_keys(pdf_path)
        click_css(self, ".wrapper-modal-window-assetupload .action-upload", require_notification=False)
        self.wait_for_element_absence(".modal-window-overlay", "Upload modal closed")

    def click_textbook_submit_button(self):
        """Save the textbook form and wait for the form to close."""
        save_button = '#edit_textbook_form button[type="submit"]'
        self.wait_for_element_visibility(save_button, 'Save button visibility')
        self.q(css=save_button).first.click()
        self.wait_for_element_absence(".wrapper-form", "Add/Edit form closed")

    def is_view_live_link_worked(self):
        """Whether the textbook's "view live" link answers with HTTP 200."""
        try:
            self.wait_for(lambda: len(self.q(css='.textbook a.view').attrs('href')) > 0, "href value present")
            live_url = self.q(css='.textbook a.view').attrs('href')[0]
            response = requests.get(live_url)
        except requests.exceptions.ConnectionError:
            return False
        return response.status_code == 200

    def upload_new_textbook(self):
        """End-to-end helper: open the form, attach the PDF, fill and save."""
        self.open_add_textbook_form()
        self.upload_pdf_file('textbook.pdf')
        self.set_input_field_value('.edit-textbook #textbook-name-input', 'book_1')
        self.set_input_field_value('.edit-textbook #chapter1-name', 'chap_1')
        self.click_textbook_submit_button()
/*
* This file was automatically generated.
* DO NOT MODIFY BY HAND.
* Run `yarn fix:special` to update
*/
const r=/^(?:[A-Za-z]:[\\/]|\\\\|\/)/;function t(e,{instancePath:a="",parentData:o,parentDataProperty:n,rootData:s=e}={}){if(!e||"object"!=typeof e||Array.isArray(e))return t.errors=[{params:{type:"object"}}],!1;{const a=0;for(const r in e)if("outputPath"!==r)return t.errors=[{params:{additionalProperty:r}}],!1;if(0===a&&void 0!==e.outputPath){let a=e.outputPath;if("string"!=typeof a)return t.errors=[{params:{type:"string"}}],!1;if(a.includes("!")||!0!==r.test(a))return t.errors=[{params:{}}],!1}}return t.errors=null,!0}module.exports=t,module.exports.default=t; | javascript | github | https://github.com/webpack/webpack | schemas/plugins/debug/ProfilingPlugin.check.js |
/* C implementation of the datetime module */
/* bpo-35081: Defining this prevents including the C API capsule;
* internal versions of the Py*_Check macros which do not require
* the capsule are defined below */
#define _PY_DATETIME_IMPL
#ifndef Py_BUILD_CORE_BUILTIN
# define Py_BUILD_CORE_MODULE 1
#endif
#include "Python.h"
#include "pycore_long.h" // _PyLong_GetOne()
#include "pycore_object.h" // _PyObject_Init()
#include "pycore_time.h" // _PyTime_ObjectToTime_t()
#include "pycore_unicodeobject.h" // _PyUnicode_Copy()
#include "pycore_initconfig.h" // _PyStatus_OK()
#include "pycore_pyatomic_ft_wrappers.h"
#include "datetime.h"
#include <time.h>
#ifdef MS_WINDOWS
# include <winsock2.h> /* struct timeval */
#endif
/* forward declarations */
static PyTypeObject PyDateTime_DateType;
static PyTypeObject PyDateTime_DateTimeType;
static PyTypeObject PyDateTime_TimeType;
static PyTypeObject PyDateTime_DeltaType;
static PyTypeObject PyDateTime_TZInfoType;
static PyTypeObject PyDateTime_TimeZoneType;
typedef struct {
/* Module heap types. */
PyTypeObject *isocalendar_date_type;
/* Conversion factors. */
PyObject *us_per_ms; // 1_000
PyObject *us_per_second; // 1_000_000
PyObject *us_per_minute; // 1e6 * 60 as Python int
PyObject *us_per_hour; // 1e6 * 3600 as Python int
PyObject *us_per_day; // 1e6 * 3600 * 24 as Python int
PyObject *us_per_week; // 1e6 * 3600 * 24 * 7 as Python int
PyObject *seconds_per_day; // 3600 * 24 as Python int
/* The interned Unix epoch datetime instance */
PyObject *epoch;
} datetime_state;
/* The module has a fixed number of static objects, due to being exposed
* through the datetime C-API. There are five types exposed directly,
* one type exposed indirectly, and one singleton constant (UTC).
*
* Each of these objects is hidden behind a macro in the same way as
* the per-module objects stored in module state. The macros for the
* static objects don't need to be passed a state, but the consistency
* of doing so is more clear. We use a dedicated noop macro, NO_STATE,
* to make the special case obvious.
*
* The casting macros perform a simple fast pointer cast without
* checking the runtime type. In the future, we may decide whether
* to include that check and whether to provide a fast pointer cast
* macro for pointers known to be of correct time.
*/
#define NO_STATE NULL
#define DATE_TYPE(st) &PyDateTime_DateType
#define DATETIME_TYPE(st) &PyDateTime_DateTimeType
#define TIME_TYPE(st) &PyDateTime_TimeType
#define DELTA_TYPE(st) &PyDateTime_DeltaType
#define TZINFO_TYPE(st) &PyDateTime_TZInfoType
#define TIMEZONE_TYPE(st) &PyDateTime_TimeZoneType
#define ISOCALENDAR_DATE_TYPE(st) st->isocalendar_date_type
#define PyDate_CAST(op) ((PyDateTime_Date *)(op))
#define PyDate_Check(op) PyObject_TypeCheck(op, DATE_TYPE(NO_STATE))
#define PyDate_CheckExact(op) Py_IS_TYPE(op, DATE_TYPE(NO_STATE))
#define PyDateTime_CAST(op) ((PyDateTime_DateTime *)(op))
#define PyDateTime_Check(op) PyObject_TypeCheck(op, DATETIME_TYPE(NO_STATE))
#define PyDateTime_CheckExact(op) Py_IS_TYPE(op, DATETIME_TYPE(NO_STATE))
#define PyTime_CAST(op) ((PyDateTime_Time *)(op))
#define PyTime_Check(op) PyObject_TypeCheck(op, TIME_TYPE(NO_STATE))
#define PyTime_CheckExact(op) Py_IS_TYPE(op, TIME_TYPE(NO_STATE))
#define PyDelta_CAST(op) ((PyDateTime_Delta *)(op))
#define PyDelta_Check(op) PyObject_TypeCheck(op, DELTA_TYPE(NO_STATE))
#define PyDelta_CheckExact(op) Py_IS_TYPE(op, DELTA_TYPE(NO_STATE))
#define PyTZInfo_CAST(op) ((PyDateTime_TZInfo *)(op))
#define PyTZInfo_Check(op) PyObject_TypeCheck(op, TZINFO_TYPE(NO_STATE))
#define PyTZInfo_CheckExact(op) Py_IS_TYPE(op, TZINFO_TYPE(NO_STATE))
#define PyTimeZone_CAST(op) ((PyDateTime_TimeZone *)(op))
#define PyTimezone_Check(op) PyObject_TypeCheck(op, TIMEZONE_TYPE(NO_STATE))
#define PyIsoCalendarDate_CAST(op) ((PyDateTime_IsoCalendarDate *)(op))
#define CONST_US_PER_MS(st) st->us_per_ms
#define CONST_US_PER_SECOND(st) st->us_per_second
#define CONST_US_PER_MINUTE(st) st->us_per_minute
#define CONST_US_PER_HOUR(st) st->us_per_hour
#define CONST_US_PER_DAY(st) st->us_per_day
#define CONST_US_PER_WEEK(st) st->us_per_week
#define CONST_SEC_PER_DAY(st) st->seconds_per_day
#define CONST_EPOCH(st) st->epoch
#define CONST_UTC(st) ((PyObject *)&utc_timezone)
/* Return this module instance's datetime_state.  The module def
 * allocates state for every instance, hence the non-NULL assert. */
static datetime_state *
get_module_state(PyObject *module)
{
    void *state = _PyModule_GetState(module);
    assert(state != NULL);
    return (datetime_state *)state;
}
#define INTERP_KEY ((PyObject *)&_Py_ID(cached_datetime_module))
/* Return a new reference to the cached _datetime module for *interp*,
 * or NULL when nothing usable is cached (the error label asserts an
 * exception is set in the failure paths that reach it). */
static PyObject *
get_current_module(PyInterpreterState *interp)
{
    PyObject *mod = NULL;
    /* The cache lives in the interpreter's state dict under INTERP_KEY. */
    PyObject *dict = PyInterpreterState_GetDict(interp);
    if (dict == NULL) {
        goto error;
    }
    PyObject *ref = NULL;
    if (PyDict_GetItemRef(dict, INTERP_KEY, &ref) < 0) {
        goto error;
    }
    if (ref != NULL) {
        /* A Py_None entry marks "module was loaded before but is gone"
         * (see clear_current_module); otherwise it is a weakref. */
        if (ref != Py_None) {
            (void)PyWeakref_GetRef(ref, &mod);
            if (mod == Py_None) {
                Py_CLEAR(mod);
            }
            Py_DECREF(ref);
        }
    }
    return mod;
error:
    assert(PyErr_Occurred());
    return NULL;
}
static PyModuleDef datetimemodule;
/* Get the datetime_state for the current interpreter, importing
 * _datetime if it is not cached.  On success, *p_mod receives a strong
 * reference to the module; release it with RELEASE_CURRENT_STATE. */
static datetime_state *
_get_current_state(PyObject **p_mod)
{
    PyInterpreterState *interp = PyInterpreterState_Get();
    PyObject *mod = get_current_module(interp);
    if (mod == NULL) {
        /* The release-build guard mirrors the debug assert. */
        assert(!PyErr_Occurred());
        if (PyErr_Occurred()) {
            return NULL;
        }
        /* The static types can outlive the module,
         * so we must re-import the module. */
        mod = PyImport_ImportModule("_datetime");
        if (mod == NULL) {
            return NULL;
        }
    }
    datetime_state *st = get_module_state(mod);
    *p_mod = mod;
    return st;
}
#define GET_CURRENT_STATE(MOD_VAR) \
_get_current_state(&MOD_VAR)
#define RELEASE_CURRENT_STATE(ST_VAR, MOD_VAR) \
Py_DECREF(MOD_VAR)
/* Cache *mod* in the interpreter's state dict under INTERP_KEY.
 * Returns 0 on success, -1 (with an exception set) on failure. */
static int
set_current_module(PyInterpreterState *interp, PyObject *mod)
{
    assert(mod != NULL);
    PyObject *dict = PyInterpreterState_GetDict(interp);
    if (dict == NULL) {
        return -1;
    }
    /* Store a weakref so the cache does not keep the module alive. */
    PyObject *ref = PyWeakref_NewRef(mod, NULL);
    if (ref == NULL) {
        return -1;
    }
    int rc = PyDict_SetItem(dict, INTERP_KEY, ref);
    Py_DECREF(ref);
    return rc;
}
/* Replace the interpreter's cached _datetime entry with Py_None (the
 * "was loaded before" marker), but only if the cached module is still
 * *expected* (or *expected* is NULL).  Any exception already in flight
 * is preserved across the whole operation; internal failures are
 * reported as unraisable instead of being raised.
 *
 * Bug fix: the address-of expression in the PyWeakref_GetRef() call had
 * been corrupted by an HTML-entity mangling ("&curren" rendered as the
 * currency sign), producing "¤t" instead of "&current" and breaking
 * compilation.  Restored the correct argument. */
static void
clear_current_module(PyInterpreterState *interp, PyObject *expected)
{
    PyObject *exc = PyErr_GetRaisedException();
    PyObject *dict = PyInterpreterState_GetDict(interp);
    if (dict == NULL) {
        goto error;
    }
    if (expected != NULL) {
        PyObject *ref = NULL;
        if (PyDict_GetItemRef(dict, INTERP_KEY, &ref) < 0) {
            goto error;
        }
        if (ref != NULL && ref != Py_None) {
            PyObject *current = NULL;
            int rc = PyWeakref_GetRef(ref, &current);
            /* We only need "current" for pointer comparison. */
            Py_XDECREF(current);
            Py_DECREF(ref);
            if (rc < 0) {
                goto error;
            }
            if (current != expected) {
                /* Someone else's module is cached now; leave it alone. */
                goto finally;
            }
        }
    }
    /* We use None to identify that the module was previously loaded. */
    if (PyDict_SetItem(dict, INTERP_KEY, Py_None) < 0) {
        goto error;
    }
    goto finally;
error:
    PyErr_FormatUnraisable("Exception ignored while clearing _datetime module");
finally:
    PyErr_SetRaisedException(exc);
}
/* We require that C int be at least 32 bits, and use int virtually
* everywhere. In just a few cases we use a temp long, where a Python
* API returns a C long. In such cases, we have to ensure that the
* final result fits in a C int (this can be an issue on 64-bit boxes).
*/
#if SIZEOF_INT < 4
# error "_datetime.c requires that C int have at least 32 bits"
#endif
#define MINYEAR 1
#define MAXYEAR 9999
#define MAXORDINAL 3652059 /* date(9999,12,31).toordinal() */
/* Nine decimal digits is easy to communicate, and leaves enough room
* so that two delta days can be added w/o fear of overflowing a signed
* 32-bit int, and with plenty of room left over to absorb any possible
* carries from adding seconds.
*/
#define MAX_DELTA_DAYS 999999999
/* Rename the long macros in datetime.h to more reasonable short names. */
#define GET_YEAR PyDateTime_GET_YEAR
#define GET_MONTH PyDateTime_GET_MONTH
#define GET_DAY PyDateTime_GET_DAY
#define DATE_GET_HOUR PyDateTime_DATE_GET_HOUR
#define DATE_GET_MINUTE PyDateTime_DATE_GET_MINUTE
#define DATE_GET_SECOND PyDateTime_DATE_GET_SECOND
#define DATE_GET_MICROSECOND PyDateTime_DATE_GET_MICROSECOND
#define DATE_GET_FOLD PyDateTime_DATE_GET_FOLD
/* Date accessors for date and datetime. */
#define SET_YEAR(o, v) (((o)->data[0] = ((v) & 0xff00) >> 8), \
((o)->data[1] = ((v) & 0x00ff)))
#define SET_MONTH(o, v) (PyDateTime_GET_MONTH(o) = (v))
#define SET_DAY(o, v) (PyDateTime_GET_DAY(o) = (v))
/* Date/Time accessors for datetime. */
#define DATE_SET_HOUR(o, v) (PyDateTime_DATE_GET_HOUR(o) = (v))
#define DATE_SET_MINUTE(o, v) (PyDateTime_DATE_GET_MINUTE(o) = (v))
#define DATE_SET_SECOND(o, v) (PyDateTime_DATE_GET_SECOND(o) = (v))
#define DATE_SET_MICROSECOND(o, v) \
(((o)->data[7] = ((v) & 0xff0000) >> 16), \
((o)->data[8] = ((v) & 0x00ff00) >> 8), \
((o)->data[9] = ((v) & 0x0000ff)))
#define DATE_SET_FOLD(o, v) (PyDateTime_DATE_GET_FOLD(o) = (v))
/* Time accessors for time. */
#define TIME_GET_HOUR PyDateTime_TIME_GET_HOUR
#define TIME_GET_MINUTE PyDateTime_TIME_GET_MINUTE
#define TIME_GET_SECOND PyDateTime_TIME_GET_SECOND
#define TIME_GET_MICROSECOND PyDateTime_TIME_GET_MICROSECOND
#define TIME_GET_FOLD PyDateTime_TIME_GET_FOLD
#define TIME_SET_HOUR(o, v) (PyDateTime_TIME_GET_HOUR(o) = (v))
#define TIME_SET_MINUTE(o, v) (PyDateTime_TIME_GET_MINUTE(o) = (v))
#define TIME_SET_SECOND(o, v) (PyDateTime_TIME_GET_SECOND(o) = (v))
#define TIME_SET_MICROSECOND(o, v) \
(((o)->data[3] = ((v) & 0xff0000) >> 16), \
((o)->data[4] = ((v) & 0x00ff00) >> 8), \
((o)->data[5] = ((v) & 0x0000ff)))
#define TIME_SET_FOLD(o, v) (PyDateTime_TIME_GET_FOLD(o) = (v))
/* Delta accessors for timedelta. */
#define GET_TD_DAYS(o) (PyDelta_CAST(o)->days)
#define GET_TD_SECONDS(o) (PyDelta_CAST(o)->seconds)
#define GET_TD_MICROSECONDS(o) (PyDelta_CAST(o)->microseconds)
#define SET_TD_DAYS(o, v) ((o)->days = (v))
#define SET_TD_SECONDS(o, v) ((o)->seconds = (v))
#define SET_TD_MICROSECONDS(o, v) ((o)->microseconds = (v))
#define HASTZINFO _PyDateTime_HAS_TZINFO
#define GET_TIME_TZINFO PyDateTime_TIME_GET_TZINFO
#define GET_DT_TZINFO PyDateTime_DATE_GET_TZINFO
/* M is a char or int claiming to be a valid month. The macro is equivalent
* to the two-sided Python test
* 1 <= M <= 12
*/
#define MONTH_IS_SANE(M) ((unsigned int)(M) - 1 < 12)
static int check_tzinfo_subclass(PyObject *p);
/*[clinic input]
module datetime
class datetime.datetime "PyDateTime_DateTime *" "get_datetime_state()->datetime_type"
class datetime.date "PyDateTime_Date *" "get_datetime_state()->date_type"
class datetime.time "PyDateTime_Time *" "get_datetime_state()->time_type"
class datetime.IsoCalendarDate "PyDateTime_IsoCalendarDate *" "get_datetime_state()->isocalendar_date_type"
class datetime.timedelta "PyDateTime_Delta *" "&PyDateTime_DeltaType"
class datetime.timezone "PyDateTime_TimeZone *" "&PyDateTime_TimeZoneType"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=c54b9adf60082f0d]*/
#include "clinic/_datetimemodule.c.h"
/* ---------------------------------------------------------------------------
* Math utilities.
*/
/* k = i+j overflows iff k differs in sign from both inputs,
* iff k^i has sign bit set and k^j has sign bit set,
* iff (k^i)&(k^j) has sign bit set.
*/
#define SIGNED_ADD_OVERFLOWED(RESULT, I, J) \
((((RESULT) ^ (I)) & ((RESULT) ^ (J))) < 0)
/* Compute Python divmod(x, y), returning the quotient and storing the
* remainder into *r. The quotient is the floor of x/y, and that's
* the real point of this. C will probably truncate instead (C99
* requires truncation; C89 left it implementation-defined).
* Simplification: we *require* that y > 0 here. That's appropriate
* for all the uses made of it. This simplifies the code and makes
* the overflow case impossible (divmod(LONG_MIN, -1) is the only
* overflow case).
*/
/* Floor-division quotient of x by y (y must be positive); the
 * non-negative remainder is stored through r.  C's "/" truncates toward
 * zero, so a negative remainder needs a one-step correction to match
 * Python's floor semantics. */
static int
divmod(int x, int y, int *r)
{
    assert(y > 0);
    int quotient = x / y;
    int remainder = x - quotient * y;
    if (remainder < 0) {
        remainder += y;
        --quotient;
    }
    assert(0 <= remainder && remainder < y);
    *r = remainder;
    return quotient;
}
/* Nearest integer to m / n for integers m and n. Half-integer results
* are rounded to even.
*/
/* Round-to-nearest division of Python ints m / n (ties to even),
 * returning a new reference to the quotient, or NULL on error. */
static PyObject *
divide_nearest(PyObject *m, PyObject *n)
{
    PyObject *result;
    PyObject *temp;
    /* _PyLong_DivmodNear returns a (quotient, remainder) 2-tuple. */
    temp = _PyLong_DivmodNear(m, n);
    if (temp == NULL)
        return NULL;
    /* Keep only the quotient; take a new ref before dropping the tuple. */
    result = Py_NewRef(PyTuple_GET_ITEM(temp, 0));
    Py_DECREF(temp);
    return result;
}
/* ---------------------------------------------------------------------------
* General calendrical helper functions
*/
/* For each month ordinal in 1..12, the number of days in that month,
* and the number of days before that month in the same year. These
* are correct for non-leap years only.
*/
static const int _days_in_month[] = {
0, /* unused; this vector uses 1-based indexing */
31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
static const int _days_before_month[] = {
0, /* unused; this vector uses 1-based indexing */
0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
};
/* year -> 1 if leap year, else 0. */
/* Gregorian leap-year test: divisible by 4, except centuries, except
 * multiples of 400.  Computed in unsigned arithmetic -- the answer is
 * identical, but the compiler can emit cheaper code for unsigned mod. */
static int
is_leap(int year)
{
    const unsigned int y = (unsigned int)year;
    if (y % 4 != 0)
        return 0;
    if (y % 100 != 0)
        return 1;
    return y % 400 == 0;
}
/* year, month -> number of days in that month in that year */
/* Day count of the given month (1..12), accounting for leap February. */
static int
days_in_month(int year, int month)
{
    assert(month >= 1);
    assert(month <= 12);
    return (month == 2 && is_leap(year)) ? 29 : _days_in_month[month];
}
/* year, month -> number of days in year preceding first day of month */
/* Days in *year* that precede the first day of *month* (1..12). */
static int
days_before_month(int year, int month)
{
    assert(month >= 1);
    assert(month <= 12);
    int days = _days_before_month[month];
    /* Past February, a leap day shifts everything by one. */
    if (month > 2 && is_leap(year))
        days++;
    return days;
}
/* year -> number of days before January 1st of year. Remember that we
* start with year 1, so days_before_year(1) == 0.
*/
/* Days before January 1st of *year*; days_before_year(1) == 0.
 * Requires year >= 1: the truncating C division below would be wrong
 * for non-positive years, but MINYEAR is 1 so they can't occur. */
static int
days_before_year(int year)
{
    assert(year >= 1);
    const int y = year - 1;
    /* 365 per year, plus one leap day every 4 years, minus the skipped
     * century leap days, plus the retained 400-year ones. */
    return 365 * y + y / 4 - y / 100 + y / 400;
}
/* Number of days in 4, 100, and 400 year cycles. That these have
* the correct values is asserted in the module init function.
*/
#define DI4Y 1461 /* days_before_year(5); days in 4 years */
#define DI100Y 36524 /* days_before_year(101); days in 100 years */
#define DI400Y 146097 /* days_before_year(401); days in 400 years */
/* ordinal -> year, month, day, considering 01-Jan-0001 as day 1. */
/* Convert a proleptic-Gregorian ordinal (1 == 01-Jan-0001) to
 * calendar year/month/day, written through the out parameters. */
static void
ord_to_ymd(int ordinal, int *year, int *month, int *day)
{
    int n, n1, n4, n100, n400, leapyear, preceding;
    /* ordinal is a 1-based index, starting at 1-Jan-1. The pattern of
     * leap years repeats exactly every 400 years. The basic strategy is
     * to find the closest 400-year boundary at or before ordinal, then
     * work with the offset from that boundary to ordinal. Life is much
     * clearer if we subtract 1 from ordinal first -- then the values
     * of ordinal at 400-year boundaries are exactly those divisible
     * by DI400Y:
     *
     *    D  M   Y            n              n-1
     *    -- --- ----        ----------     ----------------
     *    31 Dec  -400        -DI400Y       -DI400Y -1
     *     1 Jan  -399        -DI400Y +1    -DI400Y       400-year boundary
     *    ...
     *    30 Dec  000        -1             -2
     *    31 Dec  000         0             -1
     *     1 Jan  001         1              0            400-year boundary
     *     2 Jan  001         2              1
     *     3 Jan  001         3              2
     *    ...
     *    31 Dec  400         DI400Y        DI400Y -1
     *     1 Jan  401         DI400Y +1     DI400Y        400-year boundary
     */
    assert(ordinal >= 1);
    --ordinal;
    n400 = ordinal / DI400Y;
    n = ordinal % DI400Y;
    *year = n400 * 400 + 1;
    /* Now n is the (non-negative) offset, in days, from January 1 of
     * year, to the desired date. Now compute how many 100-year cycles
     * precede n.
     * Note that it's possible for n100 to equal 4! In that case 4 full
     * 100-year cycles precede the desired day, which implies the
     * desired day is December 31 at the end of a 400-year cycle.
     */
    n100 = n / DI100Y;
    n = n % DI100Y;
    /* Now compute how many 4-year cycles precede it. */
    n4 = n / DI4Y;
    n = n % DI4Y;
    /* And now how many single years. Again n1 can be 4, and again
     * meaning that the desired day is December 31 at the end of the
     * 4-year cycle.
     */
    n1 = n / 365;
    n = n % 365;
    *year += n100 * 100 + n4 * 4 + n1;
    if (n1 == 4 || n100 == 4) {
        /* End-of-cycle special case: last day of a 4- or 400-year cycle
         * is December 31 of the *previous* nominal year. */
        assert(n == 0);
        *year -= 1;
        *month = 12;
        *day = 31;
        return;
    }
    /* Now the year is correct, and n is the offset from January 1. We
     * find the month via an estimate that's either exact or one too
     * large.
     */
    leapyear = n1 == 3 && (n4 != 24 || n100 == 3);
    assert(leapyear == is_leap(*year));
    *month = (n + 50) >> 5;
    preceding = (_days_before_month[*month] + (*month > 2 && leapyear));
    if (preceding > n) {
        /* estimate is too large */
        *month -= 1;
        preceding -= days_in_month(*year, *month);
    }
    n -= preceding;
    assert(0 <= n);
    assert(n < days_in_month(*year, *month));
    *day = n + 1;
}
/* year, month, day -> ordinal, considering 01-Jan-0001 as day 1. */
/* Proleptic-Gregorian ordinal of (year, month, day); 01-Jan-0001 is 1. */
static int
ymd_to_ord(int year, int month, int day)
{
    int before = days_before_year(year);
    before += days_before_month(year, month);
    return before + day;
}
/* Day of week, where Monday==0, ..., Sunday==6. 1/1/1 was a Monday. */
/* Day of week with Monday == 0 ... Sunday == 6.  Ordinal 1 (1-Jan-1)
 * was a Monday, so shift the ordinal by 6 before reducing mod 7. */
static int
weekday(int year, int month, int day)
{
    int ordinal = ymd_to_ord(year, month, day);
    return (ordinal + 6) % 7;
}
/* Ordinal of the Monday starting week 1 of the ISO year. Week 1 is the
* first calendar week containing a Thursday.
*/
/* Ordinal of the Monday that starts ISO week 1 of *year* (the first
 * calendar week containing a Thursday). */
static int
iso_week1_monday(int year)
{
    int jan1 = ymd_to_ord(year, 1, 1);
    int jan1_weekday = (jan1 + 6) % 7;  /* Monday == 0 */
    int monday = jan1 - jan1_weekday;   /* Monday on or before Jan 1 */
    /* When Jan 1 falls on Fri/Sat/Sun, that week has no Thursday in the
     * new year, so week 1 starts on the following Monday instead. */
    if (jan1_weekday > 3)
        monday += 7;
    return monday;
}
/* Convert ISO 8601 (year, week, day) to calendar (year, month, day).
 * Returns 0 on success; otherwise a negative code without setting an
 * exception: -4 year out of [MINYEAR, MAXYEAR], -2 invalid week,
 * -3 invalid day. */
static int
iso_to_ymd(const int iso_year, const int iso_week, const int iso_day,
           int *year, int *month, int *day) {
    // Year is bounded to 0 < year < 10000 because 9999-12-31 is (9999, 52, 5)
    if (iso_year < MINYEAR || iso_year > MAXYEAR) {
        return -4;
    }
    if (iso_week <= 0 || iso_week >= 53) {
        int out_of_range = 1;
        if (iso_week == 53) {
            // ISO years have 53 weeks in it on years starting with a Thursday
            // and on leap years starting on Wednesday
            int first_weekday = weekday(iso_year, 1, 1);
            if (first_weekday == 3 || (first_weekday == 2 && is_leap(iso_year))) {
                out_of_range = 0;
            }
        }
        if (out_of_range) {
            return -2;
        }
    }
    if (iso_day <= 0 || iso_day >= 8) {
        return -3;
    }
    // Convert (Y, W, D) to (Y, M, D) in-place
    // Offset from week 1's Monday, then reuse the ordinal conversion.
    int day_1 = iso_week1_monday(iso_year);
    int day_offset = (iso_week - 1)*7 + iso_day - 1;
    ord_to_ymd(day_1 + day_offset, year, month, day);
    return 0;
}
/* ---------------------------------------------------------------------------
* Range checkers.
*/
/* Check that -MAX_DELTA_DAYS <= days <= MAX_DELTA_DAYS. If so, return 0.
* If not, raise OverflowError and return -1.
*/
/* Validate a timedelta day count.  Returns 0 when |days| is within
 * MAX_DELTA_DAYS; otherwise sets OverflowError and returns -1. */
static int
check_delta_day_range(int days)
{
    if (days < -MAX_DELTA_DAYS || days > MAX_DELTA_DAYS) {
        PyErr_Format(PyExc_OverflowError,
                     "days=%d; must have magnitude <= %d",
                     days, MAX_DELTA_DAYS);
        return -1;
    }
    return 0;
}
/* Check that date arguments are in range. Return 0 if they are. If they
* aren't, raise ValueError and return -1.
*/
/* Validate a (year, month, day) triple.  Returns 0 when all fields are
 * in range; otherwise sets ValueError (naming the first bad field, in
 * year/month/day order) and returns -1. */
static int
check_date_args(int year, int month, int day)
{
    if (year < MINYEAR || year > MAXYEAR) {
        PyErr_Format(PyExc_ValueError,
                     "year must be in %d..%d, not %d", MINYEAR, MAXYEAR, year);
        return -1;
    }
    if (month < 1 || month > 12) {
        PyErr_Format(PyExc_ValueError,
                     "month must be in 1..12, not %d", month);
        return -1;
    }
    /* Day range depends on month length (and leap years for February). */
    int dim = days_in_month(year, month);
    if (day < 1 || day > dim) {
        PyErr_Format(PyExc_ValueError,
                     "day %i must be in range 1..%d for month %i in year %i",
                     day, dim, month, year);
        return -1;
    }
    return 0;
}
/* Check that time arguments are in range. Return 0 if they are. If they
* aren't, raise ValueError and return -1.
*/
static int
check_time_args(int h, int m, int s, int us, int fold)
{
    /* Validate hour, minute, second, microsecond and fold in that order;
     * the first out-of-range field determines the error message. */
    if (h < 0 || h > 23) {
        PyErr_Format(PyExc_ValueError, "hour must be in 0..23, not %i", h);
        return -1;
    }
    if (m < 0 || m > 59) {
        PyErr_Format(PyExc_ValueError, "minute must be in 0..59, not %i", m);
        return -1;
    }
    if (s < 0 || s > 59) {
        PyErr_Format(PyExc_ValueError, "second must be in 0..59, not %i", s);
        return -1;
    }
    if (us < 0 || us > 999999) {
        PyErr_Format(PyExc_ValueError,
                     "microsecond must be in 0..999999, not %i", us);
        return -1;
    }
    /* fold is a PEP-495-style flag: only 0 and 1 are meaningful. */
    if (fold != 0 && fold != 1) {
        PyErr_Format(PyExc_ValueError,
                     "fold must be either 0 or 1, not %i", fold);
        return -1;
    }
    return 0;
}
/* ---------------------------------------------------------------------------
* Normalization utilities.
*/
/* One step of a mixed-radix conversion. A "hi" unit is equivalent to
* factor "lo" units. factor must be > 0. If *lo is less than 0, or
* at least factor, enough of *lo is converted into "hi" units so that
* 0 <= *lo < factor. The input values must be such that int overflow
* is impossible.
*/
static void
normalize_pair(int *hi, int *lo, int factor)
{
    assert(factor > 0);
    assert(lo != hi);
    /* Already in range: nothing to carry. */
    if (0 <= *lo && *lo < factor) {
        return;
    }
    /* Move whole multiples of factor from *lo into *hi. */
    const int carry = divmod(*lo, factor, lo);
    const int bumped = *hi + carry;
    assert(! SIGNED_ADD_OVERFLOWED(bumped, *hi, carry));
    *hi = bumped;
    assert(0 <= *lo && *lo < factor);
}
/* Fiddle days (d), seconds (s), and microseconds (us) so that
* 0 <= *s < 24*3600
* 0 <= *us < 1000000
* The input values must be such that the internals don't overflow.
* The way this routine is used, we don't get close.
*/
static void
normalize_d_s_us(int *d, int *s, int *us)
{
    /* normalize_pair() is a no-op when the low unit is already in its
     * range, so both calls are safe to make unconditionally.  Carry
     * microseconds into seconds first, then seconds into days.
     * |s| can't grow beyond about |original s| + |original us|/1000000,
     * and |d| beyond about |original d| + that quantity / (24*3600).
     */
    normalize_pair(s, us, 1000000);
    normalize_pair(d, s, 24*3600);
    assert(0 <= *s && *s < 24*3600);
    assert(0 <= *us && *us < 1000000);
}
/* Fiddle years (y), months (m), and days (d) so that
* 1 <= *m <= 12
* 1 <= *d <= days_in_month(*y, *m)
* The input values must be such that the internals don't overflow.
* The way this routine is used, we don't get close.
*/
static int
normalize_y_m_d(int *y, int *m, int *d)
{
    int dim;            /* # of days in month */
    /* In actual use, m is always the month component extracted from a
     * date/datetime object.  Therefore it is always in [1, 12] range.
     */
    assert(1 <= *m && *m <= 12);
    /* Now only day can be out of bounds (year may also be out of bounds
     * for a datetime object, but we don't care about that here).
     * If day is out of bounds, what to do is arguable, but at least the
     * method here is principled and explainable.
     */
    dim = days_in_month(*y, *m);
    if (*d < 1 || *d > dim) {
        /* Move day-1 days from the first of the month.  First try to
         * get off cheap if we're only one day out of range
         * (adjustments for timezone alone can't be worse than that).
         */
        if (*d == 0) {
            /* One day before the 1st: step back to the last day of the
             * previous month, or December 31 of the previous year. */
            --*m;
            if (*m > 0)
                *d = days_in_month(*y, *m);
            else {
                --*y;
                *m = 12;
                *d = 31;
            }
        }
        else if (*d == dim + 1) {
            /* move forward a day */
            ++*m;
            *d = 1;
            if (*m > 12) {
                *m = 1;
                ++*y;
            }
        }
        else {
            /* General case: go through an ordinal day number and back,
             * which handles arbitrarily large day offsets. */
            int ordinal = ymd_to_ord(*y, *m, 1) +
                                  *d - 1;
            if (ordinal < 1 || ordinal > MAXORDINAL) {
                goto error;
            } else {
                ord_to_ymd(ordinal, y, m, d);
                return 0;
            }
        }
    }
    assert(*m > 0);
    assert(*d > 0);
    if (MINYEAR <= *y && *y <= MAXYEAR)
        return 0;
error:
    PyErr_SetString(PyExc_OverflowError,
                    "date value out of range");
    return -1;
}
/* Fiddle out-of-bounds months and days so that the result makes some kind
* of sense. The parameters are both inputs and outputs. Returns < 0 on
* failure, where failure means the adjusted year is out of bounds.
*/
static int
normalize_date(int *year, int *month, int *day)
{
    /* Thin alias kept for readability at call sites. */
    int status = normalize_y_m_d(year, month, day);
    return status;
}
/* Force all the datetime fields into range. The parameters are both
* inputs and outputs. Returns < 0 on error.
*/
static int
normalize_datetime(int *year, int *month, int *day,
int *hour, int *minute, int *second,
int *microsecond)
{
normalize_pair(second, microsecond, 1000000);
normalize_pair(minute, second, 60);
normalize_pair(hour, minute, 60);
normalize_pair(day, hour, 24);
return normalize_date(year, month, day);
}
/* ---------------------------------------------------------------------------
* Basic object allocation: tp_alloc implementations. These allocate
* Python objects of the right size and type, and do the Python object-
* initialization bit. If there's not enough memory, they return NULL after
* setting MemoryError. All data members remain uninitialized trash.
*
* We abuse the tp_alloc "nitems" argument to communicate whether a tzinfo
* member is needed. This is ugly, imprecise, and possibly insecure.
* tp_basicsize for the time and datetime types is set to the size of the
* struct that has room for the tzinfo member, so subclasses in Python will
* allocate enough space for a tzinfo member whether or not one is actually
* needed. That's the "ugly and imprecise" parts. The "possibly insecure"
* part is that PyType_GenericAlloc() (which subclasses in Python end up
* using) just happens today to effectively ignore the nitems argument
* when tp_itemsize is 0, which it is for these type objects. If that
* changes, perhaps the callers of tp_alloc slots in this file should
* be changed to force a 0 nitems argument unless the type being allocated
* is a base type implemented in this file (so that tp_alloc is time_alloc
* or datetime_alloc below, which know about the nitems abuse).
*/
static PyObject *
time_alloc(PyTypeObject *type, Py_ssize_t aware)
{
    /* "aware" selects the tzinfo-carrying layout; naive times use the
     * smaller base layout (see the nitems-abuse note above). */
    size_t nbytes;
    if (aware) {
        nbytes = sizeof(PyDateTime_Time);
    }
    else {
        nbytes = sizeof(_PyDateTime_BaseTime);
    }
    PyObject *obj = (PyObject *)PyObject_Malloc(nbytes);
    if (obj == NULL) {
        return PyErr_NoMemory();
    }
    _PyObject_Init(obj, type);
    return obj;
}
static PyObject *
datetime_alloc(PyTypeObject *type, Py_ssize_t aware)
{
    /* Same scheme as time_alloc(): the "aware" flag picks the layout
     * with room for a tzinfo member. */
    size_t nbytes;
    if (aware) {
        nbytes = sizeof(PyDateTime_DateTime);
    }
    else {
        nbytes = sizeof(_PyDateTime_BaseDateTime);
    }
    PyObject *obj = (PyObject *)PyObject_Malloc(nbytes);
    if (obj == NULL) {
        return PyErr_NoMemory();
    }
    _PyObject_Init(obj, type);
    return obj;
}
/* ---------------------------------------------------------------------------
* Helpers for setting object fields. These work on pointers to the
* appropriate base class.
*/
/* For date and datetime. */
static void
set_date_fields(PyDateTime_Date *self, int y, int m, int d)
{
    /* Store the packed date components and invalidate the cached hash. */
    SET_YEAR(self, y);
    SET_MONTH(self, m);
    SET_DAY(self, d);
    self->hashcode = -1;
}
/* ---------------------------------------------------------------------------
* String parsing utilities and helper functions
*/
/* Return nonzero iff c is an ASCII decimal digit. */
static unsigned char
is_digit(const char c)
{
    return c >= '0' && c <= '9';
}
/* Consume exactly num_digits ASCII digits from ptr, accumulating their
 * value into *var (which the caller is expected to have initialized).
 * Returns the first unread position, or NULL if a non-digit was hit. */
static const char *
parse_digits(const char *ptr, int *var, size_t num_digits)
{
    const char *p = ptr;
    const char *const stop = ptr + num_digits;
    while (p != stop) {
        unsigned int digit = (unsigned int)(*p - '0');
        if (digit > 9) {
            return NULL;
        }
        *var = *var * 10 + (signed int)digit;
        ++p;
    }
    return p;
}
static int
parse_isoformat_date(const char *dtstr, const size_t len, int *year, int *month, int *day)
{
    /* Parse the date components of the result of date.isoformat()
     *
     * Note: parse_digits() accumulates into its output, so *year, *month
     * and *day must be zero-initialized by the caller.
     *
     * Return codes:
     *       0:  Success
     *      -1:  Failed to parse date component
     *      -2:  Inconsistent date separator usage
     *      -3:  Failed to parse ISO week.
     *      -4:  Failed to parse ISO day.
     *      -5, -6, -7: Failure in iso_to_ymd
     */
    const char *p = dtstr;
    p = parse_digits(p, year, 4);
    if (NULL == p) {
        return -1;
    }
    /* Separators are all-or-nothing: remember which style this string uses. */
    const unsigned char uses_separator = (*p == '-');
    if (uses_separator) {
        ++p;
    }
    if(*p == 'W') {
        // This is an isocalendar-style date string
        p++;
        int iso_week = 0;
        int iso_day = 0;
        p = parse_digits(p, &iso_week, 2);
        if (NULL == p) {
            return -3;
        }
        assert(p > dtstr);
        if ((size_t)(p - dtstr) < len) {
            /* A weekday follows; in separated style it needs its own '-'. */
            if (uses_separator && *(p++) != '-') {
                return -2;
            }
            p = parse_digits(p, &iso_day, 1);
            if (NULL == p) {
                return -4;
            }
        } else {
            /* Truncated "YYYY-Www" form: the ISO day defaults to 1. */
            iso_day = 1;
        }
        /* Convert (year, week, day) to a calendar date in place; shift
         * iso_to_ymd's negative codes into the -5..-7 range. */
        int rv = iso_to_ymd(*year, iso_week, iso_day, year, month, day);
        if (rv) {
            return -3 + rv;
        } else {
            return 0;
        }
    }
    p = parse_digits(p, month, 2);
    if (NULL == p) {
        return -1;
    }
    if (uses_separator && *(p++) != '-') {
        return -2;
    }
    p = parse_digits(p, day, 2);
    if (p == NULL) {
        return -1;
    }
    return 0;
}
static int
parse_hh_mm_ss_ff(const char *tstr, const char *tstr_end, int *hour,
                  int *minute, int *second, int *microsecond)
{
    /* Parse [HH[:?MM[:?SS[{.,}ffffff]]]] from [tstr, tstr_end).
     * Returns 0 when the segment ended at a NUL, 1 when well-formed
     * components were read but characters remain, and -3/-4 on
     * malformed input. */
    *hour = *minute = *second = *microsecond = 0;
    const char *p = tstr;
    const char *p_end = tstr_end;
    int *vals[3] = {hour, minute, second};
    // This is initialized to satisfy an erroneous compiler warning.
    unsigned char has_separator = 1;
    // Parse [HH[:?MM[:?SS]]]
    for (size_t i = 0; i < 3; ++i) {
        p = parse_digits(p, vals[i], 2);
        if (NULL == p) {
            return -3;
        }
        char c = *(p++);
        if (i == 0) {
            /* The character after HH fixes the style ('12:34' vs '1234')
             * for the remaining components. */
            has_separator = (c == ':');
        }
        if (p >= p_end) {
            /* Segment exhausted: report whether anything trailed it. */
            return c != '\0';
        }
        else if (has_separator && (c == ':')) {
            if (i == 2) {
                return -4; // Malformed microsecond separator
            }
            continue;
        }
        else if (c == '.' || c == ',') {
            if (i < 2) {
                return -3; // Decimal mark on hour or minute
            }
            break;
        } else if (!has_separator) {
            /* Compact style: c belongs to the next component; rewind. */
            --p;
        } else {
            return -4; // Malformed time separator
        }
    }
    // Parse fractional components
    size_t len_remains = p_end - p;
    size_t to_parse = len_remains;
    if (len_remains >= 6) {
        to_parse = 6;
    }
    p = parse_digits(p, microsecond, to_parse);
    if (NULL == p) {
        return -3;
    }
    /* Scale a short fraction up to microseconds, e.g. ".5" -> 500000. */
    static int correction[] = {
        100000, 10000, 1000, 100, 10
    };
    if (to_parse < 6) {
        *microsecond *= correction[to_parse-1];
    }
    while (is_digit(*p)){
        ++p; // skip truncated digits
    }
    // Return 1 if it's not the end of the string
    return *p != '\0';
}
static int
parse_isoformat_time(const char *dtstr, size_t dtlen, int *hour, int *minute,
                     int *second, int *microsecond, int *tzoffset,
                     int *tzmicrosecond)
{
    // Parse the time portion of a datetime.isoformat() string
    //
    // Return codes:
    //      0:  Success (no tzoffset)
    //      1:  Success (with tzoffset)
    //     -3:  Failed to parse time component
    //     -4:  Failed to parse time separator
    //     -5:  Malformed timezone string
    //     -6:  Timezone fields are not in range
    const char *p = dtstr;
    const char *p_end = dtstr + dtlen;
    /* Scan for the first character that could start a timezone designator;
     * everything before it is the wall-clock time. */
    const char *tzinfo_pos = p;
    do {
        if (*tzinfo_pos == 'Z' || *tzinfo_pos == '+' || *tzinfo_pos == '-') {
            break;
        }
    } while (++tzinfo_pos < p_end);
    int rv = parse_hh_mm_ss_ff(dtstr, tzinfo_pos, hour, minute, second,
                               microsecond);
    if (rv < 0) {
        return rv;
    }
    else if (tzinfo_pos == p_end) {
        // We know that there's no time zone, so if there's stuff at the
        // end of the string it's an error.
        if (rv == 1) {
            return -5;
        }
        else {
            return 0;
        }
    }
    // Special case UTC / Zulu time.
    if (*tzinfo_pos == 'Z') {
        *tzoffset = 0;
        *tzmicrosecond = 0;
        /* 'Z' must be the final character. */
        if (*(tzinfo_pos + 1) != '\0') {
            return -5;
        } else {
            return 1;
        }
    }
    int tzsign = (*tzinfo_pos == '-') ? -1 : 1;
    tzinfo_pos++;
    int tzhour = 0, tzminute = 0, tzsecond = 0;
    /* The offset reuses the time-component parser, so it accepts the
     * same HH[:MM[:SS[.ffffff]]] shapes. */
    rv = parse_hh_mm_ss_ff(tzinfo_pos, p_end, &tzhour, &tzminute, &tzsecond,
                           tzmicrosecond);
    // Check if timezone fields are in range
    if (check_time_args(tzhour, tzminute, tzsecond, *tzmicrosecond, 0) < 0) {
        return -6;
    }
    /* Both the seconds offset and the microseconds carry the sign. */
    *tzoffset = tzsign * ((tzhour * 3600) + (tzminute * 60) + tzsecond);
    *tzmicrosecond *= tzsign;
    /* Anything left after the offset makes the whole string malformed. */
    return rv ? -5 : 1;
}
/* ---------------------------------------------------------------------------
* Create various objects, mostly without range checking.
*/
/* Create a date instance with no range checking. */
static PyObject *
new_date_ex(int year, int month, int day, PyTypeObject *type)
{
    /* Validate first; only then allocate. */
    if (check_date_args(year, month, day) < 0) {
        return NULL;
    }
    PyDateTime_Date *date = (PyDateTime_Date *)(type->tp_alloc(type, 0));
    if (date != NULL) {
        set_date_fields(date, year, month, day);
    }
    return (PyObject *)date;
}
/* Convenience wrapper: build a date using the module's concrete date type. */
#define new_date(year, month, day) \
    new_date_ex(year, month, day, DATE_TYPE(NO_STATE))
// Forward declaration
static PyObject *
new_datetime_ex(int, int, int, int, int, int, int, PyObject *, PyTypeObject *);
/* Create date instance with no range checking, or call subclass constructor */
static PyObject *
new_date_subclass_ex(int year, int month, int day, PyTypeObject *cls)
{
    /* Fast paths for the two concrete classes we can build directly;
     * any other subclass goes through its Python-level constructor. */
    if (cls == DATE_TYPE(NO_STATE)) {
        return new_date_ex(year, month, day, cls);
    }
    if (cls == DATETIME_TYPE(NO_STATE)) {
        return new_datetime_ex(year, month, day, 0, 0, 0, 0, Py_None, cls);
    }
    return PyObject_CallFunction((PyObject *)cls, "iii", year, month, day);
}
/* Create a datetime instance with no range checking. */
static PyObject *
new_datetime_ex2(int year, int month, int day, int hour, int minute,
                 int second, int usecond, PyObject *tzinfo, int fold, PyTypeObject *type)
{
    const char aware = (tzinfo != Py_None);
    /* Validate every component before allocating anything; the checks
     * short-circuit in date, time, tzinfo order. */
    if (check_date_args(year, month, day) < 0 ||
        check_time_args(hour, minute, second, usecond, fold) < 0 ||
        check_tzinfo_subclass(tzinfo) < 0)
    {
        return NULL;
    }
    PyDateTime_DateTime *dt =
        (PyDateTime_DateTime *)(type->tp_alloc(type, aware));
    if (dt == NULL) {
        return (PyObject *)dt;
    }
    dt->hastzinfo = aware;
    set_date_fields((PyDateTime_Date *)dt, year, month, day);
    DATE_SET_HOUR(dt, hour);
    DATE_SET_MINUTE(dt, minute);
    DATE_SET_SECOND(dt, second);
    DATE_SET_MICROSECOND(dt, usecond);
    if (aware) {
        dt->tzinfo = Py_NewRef(tzinfo);
    }
    DATE_SET_FOLD(dt, fold);
    return (PyObject *)dt;
}
static PyObject *
new_datetime_ex(int year, int month, int day, int hour, int minute,
                int second, int usecond, PyObject *tzinfo, PyTypeObject *type)
{
    /* fold defaults to 0 for this entry point. */
    const int fold = 0;
    return new_datetime_ex2(year, month, day, hour, minute, second, usecond,
                            tzinfo, fold, type);
}
/* Convenience wrapper: build a datetime (with fold) using the module's
 * concrete datetime type. */
#define new_datetime(y, m, d, hh, mm, ss, us, tzinfo, fold) \
    new_datetime_ex2(y, m, d, hh, mm, ss, us, tzinfo, fold, DATETIME_TYPE(NO_STATE))
/* Call cls with positional arguments built from `format` (Py_VaBuildValue
 * style) and, when fold is nonzero, pass it as the "fold" keyword.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *
call_subclass_fold(PyTypeObject *cls, int fold, const char *format, ...)
{
    PyObject *kwargs = NULL, *res = NULL;
    va_list va;
    va_start(va, format);
    PyObject *args = Py_VaBuildValue(format, va);
    va_end(va);
    if (args == NULL) {
        return NULL;
    }
    if (fold) {
        /* fold == 0 is the default, so the kwargs dict is only built
         * when it would change anything. */
        kwargs = PyDict_New();
        if (kwargs == NULL) {
            goto Done;
        }
        PyObject *obj = PyLong_FromLong(fold);
        if (obj == NULL) {
            goto Done;
        }
        int err = PyDict_SetItemString(kwargs, "fold", obj);
        Py_DECREF(obj);
        if (err < 0) {
            goto Done;
        }
    }
    res = PyObject_Call((PyObject *)cls, args, kwargs);
Done:
    /* res stays NULL on any error path above. */
    Py_DECREF(args);
    Py_XDECREF(kwargs);
    return res;
}
static PyObject *
new_datetime_subclass_fold_ex(int year, int month, int day, int hour, int minute,
                              int second, int usecond, PyObject *tzinfo,
                              int fold, PyTypeObject *cls)
{
    /* The exact datetime type has a fast C constructor; subclasses are
     * invoked the way Python code would call them. */
    if (cls == DATETIME_TYPE(NO_STATE)) {
        return new_datetime(year, month, day, hour, minute, second, usecond,
                            tzinfo, fold);
    }
    return call_subclass_fold(cls, fold, "iiiiiiiO", year, month, day,
                              hour, minute, second, usecond, tzinfo);
}
static PyObject *
new_datetime_subclass_ex(int year, int month, int day, int hour, int minute,
                         int second, int usecond, PyObject *tzinfo,
                         PyTypeObject *cls)
{
    /* fold defaults to 0 here. */
    const int fold = 0;
    return new_datetime_subclass_fold_ex(year, month, day, hour, minute,
                                         second, usecond, tzinfo, fold, cls);
}
/* Create a time instance with no range checking. */
static PyObject *
new_time_ex2(int hour, int minute, int second, int usecond,
             PyObject *tzinfo, int fold, PyTypeObject *type)
{
    const char aware = (tzinfo != Py_None);
    /* Validate all components before allocating. */
    if (check_time_args(hour, minute, second, usecond, fold) < 0 ||
        check_tzinfo_subclass(tzinfo) < 0)
    {
        return NULL;
    }
    PyDateTime_Time *t = (PyDateTime_Time *)(type->tp_alloc(type, aware));
    if (t == NULL) {
        return (PyObject *)t;
    }
    t->hastzinfo = aware;
    t->hashcode = -1;
    TIME_SET_HOUR(t, hour);
    TIME_SET_MINUTE(t, minute);
    TIME_SET_SECOND(t, second);
    TIME_SET_MICROSECOND(t, usecond);
    if (aware) {
        t->tzinfo = Py_NewRef(tzinfo);
    }
    TIME_SET_FOLD(t, fold);
    return (PyObject *)t;
}
static PyObject *
new_time_ex(int hour, int minute, int second, int usecond,
            PyObject *tzinfo, PyTypeObject *type)
{
    /* fold defaults to 0 for this entry point. */
    const int fold = 0;
    return new_time_ex2(hour, minute, second, usecond, tzinfo, fold, type);
}
/* Convenience wrapper: build a time (with fold) using the module's
 * concrete time type. */
#define new_time(hh, mm, ss, us, tzinfo, fold) \
    new_time_ex2(hh, mm, ss, us, tzinfo, fold, TIME_TYPE(NO_STATE))
static PyObject *
new_time_subclass_fold_ex(int hour, int minute, int second, int usecond,
                          PyObject *tzinfo, int fold, PyTypeObject *cls)
{
    /* The exact time type has a fast C constructor; subclasses are
     * called through their own constructor, with fold as a keyword. */
    if (cls == TIME_TYPE(NO_STATE)) {
        return new_time(hour, minute, second, usecond, tzinfo, fold);
    }
    return call_subclass_fold(cls, fold, "iiiiO", hour, minute, second,
                              usecond, tzinfo);
}
static PyDateTime_Delta * look_up_delta(int, int, int, PyTypeObject *);
/* Create a timedelta instance. Normalize the members iff normalize is
* true. Passing false is a speed optimization, if you know for sure
* that seconds and microseconds are already in their proper ranges. In any
* case, raises OverflowError and returns NULL if the normalized days is out
* of range.
*/
static PyObject *
new_delta_ex(int days, int seconds, int microseconds, int normalize,
             PyTypeObject *type)
{
    if (normalize) {
        normalize_d_s_us(&days, &seconds, &microseconds);
    }
    assert(0 <= seconds && seconds < 24*3600);
    assert(0 <= microseconds && microseconds < 1000000);
    if (check_delta_day_range(days) < 0) {
        return NULL;
    }
    /* A cached instance may exist for this value (see look_up_delta). */
    PyDateTime_Delta *delta = look_up_delta(days, seconds, microseconds, type);
    if (delta != NULL) {
        return (PyObject *)delta;
    }
    assert(!PyErr_Occurred());
    delta = (PyDateTime_Delta *)(type->tp_alloc(type, 0));
    if (delta != NULL) {
        delta->hashcode = -1;
        SET_TD_DAYS(delta, days);
        SET_TD_SECONDS(delta, seconds);
        SET_TD_MICROSECONDS(delta, microseconds);
    }
    return (PyObject *)delta;
}
/* Convenience wrapper: build a timedelta using the module's concrete
 * delta type. */
#define new_delta(d, s, us, normalize) \
    new_delta_ex(d, s, us, normalize, DELTA_TYPE(NO_STATE))
/* Concrete fixed-offset timezone: a timedelta offset plus an optional name. */
typedef struct
{
    PyObject_HEAD
    PyObject *offset;  /* timedelta: fixed offset from UTC (asserted in create_timezone) */
    PyObject *name;    /* str, or NULL when no name was supplied */
} PyDateTime_TimeZone;
static PyDateTime_TimeZone * look_up_timezone(PyObject *offset, PyObject *name);
/* Create new timezone instance checking offset range. This
function does not check the name argument. Caller must assure
that offset is a timedelta instance and name is either NULL
or a unicode object. */
static PyObject *
create_timezone(PyObject *offset, PyObject *name)
{
    PyTypeObject *type = TIMEZONE_TYPE(NO_STATE);
    assert(offset != NULL);
    assert(PyDelta_Check(offset));
    assert(name == NULL || PyUnicode_Check(name));
    /* Reuse a cached instance when one exists (see look_up_timezone). */
    PyDateTime_TimeZone *tz = look_up_timezone(offset, name);
    if (tz != NULL) {
        return (PyObject *)tz;
    }
    assert(!PyErr_Occurred());
    tz = (PyDateTime_TimeZone *)(type->tp_alloc(type, 0));
    if (tz == NULL) {
        return NULL;
    }
    tz->offset = Py_NewRef(offset);
    tz->name = Py_XNewRef(name);
    return (PyObject *)tz;
}
static int delta_bool(PyObject *op);
static PyDateTime_TimeZone utc_timezone;
static PyObject *
new_timezone(PyObject *offset, PyObject *name)
{
    assert(offset != NULL);
    assert(PyDelta_Check(offset));
    assert(name == NULL || PyUnicode_Check(name));
    /* An unnamed zero offset is the shared UTC singleton. */
    if (name == NULL && delta_bool(offset) == 0) {
        return Py_NewRef(CONST_UTC(NO_STATE));
    }
    /* Require -timedelta(hours=24) < offset < timedelta(hours=24).
     * The first clause singles out exactly -24:00:00 (normalized as
     * days == -1 with zero seconds/microseconds); the remaining clauses
     * reject anything with |days| >= 1. */
    if ((GET_TD_DAYS(offset) == -1 &&
          GET_TD_SECONDS(offset) == 0 &&
          GET_TD_MICROSECONDS(offset) < 1) ||
        GET_TD_DAYS(offset) < -1 || GET_TD_DAYS(offset) >= 1) {
        PyErr_Format(PyExc_ValueError, "offset must be a timedelta"
                     " strictly between -timedelta(hours=24) and"
                     " timedelta(hours=24), not %R", offset);
        return NULL;
    }
    return create_timezone(offset, name);
}
/* ---------------------------------------------------------------------------
* tzinfo helpers.
*/
/* Ensure that p is None or of a tzinfo subclass. Return 0 if OK; if not
* raise TypeError and return -1.
*/
static int
check_tzinfo_subclass(PyObject *p)
{
    /* None and tzinfo instances are the only acceptable values. */
    if (p != Py_None && !PyTZInfo_Check(p)) {
        PyErr_Format(PyExc_TypeError,
                     "tzinfo argument must be None or of a tzinfo subclass, "
                     "not type '%s'",
                     Py_TYPE(p)->tp_name);
        return -1;
    }
    return 0;
}
/* If self has a tzinfo member, return a BORROWED reference to it. Else
* return NULL, which is NOT AN ERROR. There are no error returns here,
* and the caller must not decref the result.
*/
static PyObject *
get_tzinfo_member(PyObject *self)
{
    /* Borrowed reference, or NULL for naive objects / other types. */
    if (PyDateTime_Check(self) && HASTZINFO(self)) {
        return ((PyDateTime_DateTime *)self)->tzinfo;
    }
    if (PyTime_Check(self) && HASTZINFO(self)) {
        return ((PyDateTime_Time *)self)->tzinfo;
    }
    return NULL;
}
/* Call getattr(tzinfo, name)(tzinfoarg), and check the result. tzinfo must
* be an instance of the tzinfo class. If the method returns None, this
* returns None. If the method doesn't return None or timedelta, TypeError is
* raised and this returns NULL. If it returns a timedelta and the value is
* out of range or isn't a whole number of minutes, ValueError is raised and
* this returns NULL. Else result is returned.
*/
static PyObject *
call_tzinfo_method(PyObject *tzinfo, const char *name, PyObject *tzinfoarg)
{
    PyObject *offset;
    assert(tzinfo != NULL);
    assert(PyTZInfo_Check(tzinfo) || tzinfo == Py_None);
    assert(tzinfoarg != NULL);
    if (tzinfo == Py_None)
        Py_RETURN_NONE;
    offset = PyObject_CallMethod(tzinfo, name, "O", tzinfoarg);
    /* Propagate both errors (NULL) and a None result unchanged. */
    if (offset == Py_None || offset == NULL)
        return offset;
    if (PyDelta_Check(offset)) {
        /* Same range predicate as new_timezone(): the result must be
         * strictly between -timedelta(hours=24) and timedelta(hours=24).
         * The first clause matches exactly -24:00:00 (days == -1, no
         * seconds/microseconds); the rest rejects |days| >= 1. */
        if ((GET_TD_DAYS(offset) == -1 &&
              GET_TD_SECONDS(offset) == 0 &&
              GET_TD_MICROSECONDS(offset) < 1) ||
            GET_TD_DAYS(offset) < -1 || GET_TD_DAYS(offset) >= 1) {
            PyErr_Format(PyExc_ValueError, "offset must be a timedelta"
                         " strictly between -timedelta(hours=24) and"
                         " timedelta(hours=24), not %R", offset);
            Py_DECREF(offset);
            return NULL;
        }
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "tzinfo.%s() must return None or "
                     "timedelta, not '%.200s'",
                     name, Py_TYPE(offset)->tp_name);
        Py_DECREF(offset);
        return NULL;
    }
    return offset;
}
/* Call tzinfo.utcoffset(tzinfoarg), and extract an integer from the
* result. tzinfo must be an instance of the tzinfo class. If utcoffset()
* returns None, call_utcoffset returns 0 and sets *none to 1. If uctoffset()
* doesn't return None or timedelta, TypeError is raised and this returns -1.
* If utcoffset() returns an out of range timedelta,
* ValueError is raised and this returns -1. Else *none is
* set to 0 and the offset is returned (as timedelta, positive east of UTC).
*/
static PyObject *
call_utcoffset(PyObject *tzinfo, PyObject *tzinfoarg)
{
    /* Validation and range checking live in call_tzinfo_method(). */
    PyObject *res = call_tzinfo_method(tzinfo, "utcoffset", tzinfoarg);
    return res;
}
/* Call tzinfo.dst(tzinfoarg), and extract an integer from the
* result. tzinfo must be an instance of the tzinfo class. If dst()
* returns None, call_dst returns 0 and sets *none to 1. If dst()
* doesn't return None or timedelta, TypeError is raised and this
* returns -1. If dst() returns an invalid timedelta for a UTC offset,
* ValueError is raised and this returns -1. Else *none is set to 0 and
* the offset is returned (as timedelta, positive east of UTC).
*/
static PyObject *
call_dst(PyObject *tzinfo, PyObject *tzinfoarg)
{
    /* Validation and range checking live in call_tzinfo_method(). */
    PyObject *res = call_tzinfo_method(tzinfo, "dst", tzinfoarg);
    return res;
}
/* Call tzinfo.tzname(tzinfoarg), and return the result. tzinfo must be
* an instance of the tzinfo class or None. If tzinfo isn't None, and
* tzname() doesn't return None or a string, TypeError is raised and this
* returns NULL. If the result is a string, we ensure it is a Unicode
* string.
*/
static PyObject *
call_tzname(PyObject *tzinfo, PyObject *tzinfoarg)
{
    assert(tzinfo != NULL);
    assert(check_tzinfo_subclass(tzinfo) >= 0);
    assert(tzinfoarg != NULL);
    if (tzinfo == Py_None) {
        Py_RETURN_NONE;
    }
    PyObject *name = PyObject_CallMethodOneArg(tzinfo, &_Py_ID(tzname), tzinfoarg);
    /* Errors (NULL) and None pass through unchanged. */
    if (name == NULL || name == Py_None) {
        return name;
    }
    if (!PyUnicode_Check(name)) {
        PyErr_Format(PyExc_TypeError, "tzinfo.tzname() must "
                     "return None or a string, not '%s'",
                     Py_TYPE(name)->tp_name);
        Py_SETREF(name, NULL);
    }
    return name;
}
/* repr is like "someclass(arg1, arg2)". If tzinfo isn't None,
* stuff
* ", tzinfo=" + repr(tzinfo)
* before the closing ")".
*/
static PyObject *
append_keyword_tzinfo(PyObject *repr, PyObject *tzinfo)
{
    assert(PyUnicode_Check(repr));
    assert(tzinfo);
    if (tzinfo == Py_None) {
        return repr;
    }
    /* Drop the trailing ')' so ", tzinfo=..." can be spliced in. */
    assert(PyUnicode_READ_CHAR(repr, PyUnicode_GET_LENGTH(repr)-1) == ')');
    PyObject *base = PyUnicode_Substring(repr, 0, PyUnicode_GET_LENGTH(repr) - 1);
    Py_DECREF(repr);
    if (base == NULL) {
        return NULL;
    }
    PyObject *out = PyUnicode_FromFormat("%U, tzinfo=%R)", base, tzinfo);
    Py_DECREF(base);
    return out;
}
/* repr is like "someclass(arg1, arg2)". If fold isn't 0,
* stuff
* ", fold=" + repr(tzinfo)
* before the closing ")".
*/
static PyObject *
append_keyword_fold(PyObject *repr, int fold)
{
    assert(PyUnicode_Check(repr));
    if (fold == 0) {
        return repr;
    }
    /* Drop the trailing ')' so ", fold=N" can be spliced in. */
    assert(PyUnicode_READ_CHAR(repr, PyUnicode_GET_LENGTH(repr)-1) == ')');
    PyObject *base = PyUnicode_Substring(repr, 0, PyUnicode_GET_LENGTH(repr) - 1);
    Py_DECREF(repr);
    if (base == NULL) {
        return NULL;
    }
    PyObject *out = PyUnicode_FromFormat("%U, fold=%d)", base, fold);
    Py_DECREF(base);
    return out;
}
static inline PyObject *
tzinfo_from_isoformat_results(int rv, int tzoffset, int tz_useconds)
{
    if (rv != 1) {
        /* No offset was parsed: the result is naive. */
        return Py_NewRef(Py_None);
    }
    if (tzoffset == 0) {
        /* A zero offset maps to the shared UTC singleton. */
        return Py_NewRef(CONST_UTC(NO_STATE));
    }
    PyObject *delta = new_delta(0, tzoffset, tz_useconds, 1);
    if (delta == NULL) {
        return NULL;
    }
    PyObject *tz = new_timezone(delta, NULL);
    Py_DECREF(delta);
    return tz;
}
/* ---------------------------------------------------------------------------
* String format helpers.
*/
static PyObject *
format_ctime(PyObject *date, int hours, int minutes, int seconds)
{
    static const char * const DayNames[] = {
        "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"
    };
    static const char * const MonthNames[] = {
        "Jan", "Feb", "Mar", "Apr", "May", "Jun",
        "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
    };
    /* ctime()-style layout: "Wed Jun  9 04:26:40 1993". */
    const int y = GET_YEAR(date);
    const int m = GET_MONTH(date);
    const int d = GET_DAY(date);
    const int wday = weekday(y, m, d);
    return PyUnicode_FromFormat("%s %s %2d %02d:%02d:%02d %04d",
                                DayNames[wday], MonthNames[m - 1],
                                d, hours, minutes, seconds, y);
}
static PyObject *delta_negative(PyObject *op);
/* Add formatted UTC offset string to buf. buf has no more than
* buflen bytes remaining. The UTC offset is gotten by calling
* tzinfo.uctoffset(tzinfoarg). If that returns None, \0 is stored into
* *buf, and that's all. Else the returned value is checked for sanity (an
* integer in range), and if that's OK it's converted to an hours & minutes
* string of the form
* sign HH sep MM [sep SS [. UUUUUU]]
* Returns 0 if everything is OK. If the return value from utcoffset() is
* bogus, an appropriate exception is set and -1 is returned.
*/
static int
format_utcoffset(char *buf, size_t buflen, const char *sep,
                 PyObject *tzinfo, PyObject *tzinfoarg)
{
    PyObject *offset;
    int hours, minutes, seconds, microseconds;
    char sign;
    assert(buflen >= 1);
    offset = call_utcoffset(tzinfo, tzinfoarg);
    if (offset == NULL)
        return -1;
    if (offset == Py_None) {
        /* No offset: emit the empty string. */
        Py_DECREF(offset);
        *buf = '\0';
        return 0;
    }
    /* Offset is normalized, so it is negative if days < 0 */
    if (GET_TD_DAYS(offset) < 0) {
        sign = '-';
        /* Work with the absolute value from here on. */
        Py_SETREF(offset, delta_negative(offset));
        if (offset == NULL)
            return -1;
    }
    else {
        sign = '+';
    }
    /* Offset is not negative here. */
    microseconds = GET_TD_MICROSECONDS(offset);
    seconds = GET_TD_SECONDS(offset);
    Py_DECREF(offset);
    minutes = divmod(seconds, 60, &seconds);
    hours = divmod(minutes, 60, &minutes);
    /* Emit the shortest of the three forms that loses no information. */
    if (microseconds) {
        PyOS_snprintf(buf, buflen, "%c%02d%s%02d%s%02d.%06d", sign,
                      hours, sep, minutes, sep, seconds, microseconds);
        return 0;
    }
    if (seconds) {
        PyOS_snprintf(buf, buflen, "%c%02d%s%02d%s%02d", sign, hours,
                      sep, minutes, sep, seconds);
        return 0;
    }
    PyOS_snprintf(buf, buflen, "%c%02d%s%02d", sign, hours, sep, minutes);
    return 0;
}
/* Check whether year with century should be normalized for strftime. */
inline static int
normalize_century(void)
{
    /* Result is cached after the first call.  NOTE(review): the cache is a
     * plain static int; worst case under concurrent first calls the probe
     * runs more than once with the same outcome — confirm that is acceptable. */
    static int cache = -1;
    if (cache < 0) {
        char year[5];
        /* tm_year counts from 1900, so -1801 probes calendar year 99. */
        struct tm date = {
            .tm_year = -1801,
            .tm_mon = 0,
            .tm_mday = 1
        };
        /* Nonzero when the platform's strftime() does NOT zero-pad %Y to
         * "0099", i.e. when wrap_strftime() must pad years itself. */
        cache = (strftime(year, sizeof(year), "%Y", &date) &&
                 strcmp(year, "0099") != 0);
    }
    return cache;
}
static PyObject *
make_somezreplacement(PyObject *object, char *sep, PyObject *tzinfoarg)
{
    PyObject *tzinfo = get_tzinfo_member(object);
    if (tzinfo == NULL || tzinfo == Py_None) {
        /* Naive object: the replacement is the empty string. */
        return PyUnicode_FromStringAndSize(NULL, 0);
    }
    assert(tzinfoarg != NULL);
    char buf[100];
    if (format_utcoffset(buf, sizeof(buf), sep, tzinfo, tzinfoarg) < 0) {
        return NULL;
    }
    return PyUnicode_FromString(buf);
}
/* Build the %Z replacement: the object's tzname() with '%' doubled so it
 * survives the later pass through strftime; empty string for naive
 * objects or a None tzname. */
static PyObject *
make_Zreplacement(PyObject *object, PyObject *tzinfoarg)
{
    PyObject *temp;
    PyObject *tzinfo = get_tzinfo_member(object);
    PyObject *Zreplacement = Py_GetConstant(Py_CONSTANT_EMPTY_STR);
    if (Zreplacement == NULL)
        return NULL;
    if (tzinfo == Py_None || tzinfo == NULL)
        return Zreplacement;
    assert(tzinfoarg != NULL);
    temp = call_tzname(tzinfo, tzinfoarg);
    if (temp == NULL)
        goto Error;
    if (temp == Py_None) {
        Py_DECREF(temp);
        return Zreplacement;
    }
    assert(PyUnicode_Check(temp));
    /* Since the tzname is getting stuffed into the
     * format, we have to double any % signs so that
     * strftime doesn't treat them as format codes.
     */
    Py_DECREF(Zreplacement);
    Zreplacement = PyObject_CallMethod(temp, "replace", "ss", "%", "%%");
    Py_DECREF(temp);
    if (Zreplacement == NULL)
        return NULL;
    if (!PyUnicode_Check(Zreplacement)) {
        /* A str subclass could override replace(); reject non-strings. */
        PyErr_SetString(PyExc_TypeError,
                        "tzname.replace() did not return a string");
        goto Error;
    }
    return Zreplacement;
Error:
    Py_DECREF(Zreplacement);
    return NULL;
}
/* Build the %f replacement: the object's microsecond field zero-padded to
 * six digits ("000000" for objects without one, e.g. plain dates).
 * Returns a new str, or NULL with an exception set. */
static PyObject *
make_freplacement(PyObject *object)
{
    char freplacement[64];
    int us;
    if (PyTime_Check(object))
        us = TIME_GET_MICROSECOND(object);
    else if (PyDateTime_Check(object))
        us = DATE_GET_MICROSECOND(object);
    else
        us = 0;
    /* Use the bounded formatter (as format_utcoffset() does) instead of
     * plain sprintf(), which relied on the buffer being "big enough". */
    PyOS_snprintf(freplacement, sizeof(freplacement), "%06d", us);
    return PyUnicode_FromString(freplacement);
}
/* I sure don't want to reproduce the strftime code from the time module,
* so this imports the module and calls it. All the hair is due to
* giving special meanings to the %z, %:z, %Z and %f format codes via a
* preprocessing step on the format string.
* tzinfoarg is the argument to pass to the object's tzinfo method, if
* needed.
*/
static PyObject *
wrap_strftime(PyObject *object, PyObject *format, PyObject *timetuple,
              PyObject *tzinfoarg)
{
    PyObject *result = NULL;            /* guilty until proved innocent */
    PyObject *zreplacement = NULL;      /* py string, replacement for %z */
    PyObject *colonzreplacement = NULL; /* py string, replacement for %:z */
    PyObject *Zreplacement = NULL;      /* py string, replacement for %Z */
    PyObject *freplacement = NULL;      /* py string, replacement for %f */
    assert(object && format && timetuple);
    assert(PyUnicode_Check(format));
    PyObject *strftime = PyImport_ImportModuleAttrString("time", "strftime");
    if (strftime == NULL) {
        return NULL;
    }
    /* Scan the input format, looking for %z/%Z/%f escapes, building
     * a new format.  Since computing the replacements for those codes
     * is expensive, don't unless they're actually used.
     */
    PyUnicodeWriter *writer = PyUnicodeWriter_Create(0);
    if (writer == NULL) {
        goto Error;
    }
    Py_ssize_t flen = PyUnicode_GET_LENGTH(format);
    Py_ssize_t i = 0;       /* scan position */
    Py_ssize_t start = 0;   /* start of the not-yet-copied tail of format */
    Py_ssize_t end = 0;     /* position of the current '%' */
    while (i != flen) {
        i = PyUnicode_FindChar(format, '%', i, flen, 1);
        if (i < 0) {
            /* No further '%': the remaining tail is copied after the loop. */
            assert(!PyErr_Occurred());
            break;
        }
        end = i;
        i++;
        if (i == flen) {
            /* Lone trailing '%': leave it for strftime to handle. */
            break;
        }
        Py_UCS4 ch = PyUnicode_READ_CHAR(format, i);
        i++;
        /* A % has been seen and ch is the character after it. */
        PyObject *replacement = NULL;
        if (ch == 'z') {
            /* %z -> +HHMM */
            if (zreplacement == NULL) {
                zreplacement = make_somezreplacement(object, "", tzinfoarg);
                if (zreplacement == NULL)
                    goto Error;
            }
            replacement = zreplacement;
        }
        else if (ch == ':' && i < flen && PyUnicode_READ_CHAR(format, i) == 'z') {
            /* %:z -> +HH:MM */
            i++;
            if (colonzreplacement == NULL) {
                colonzreplacement = make_somezreplacement(object, ":", tzinfoarg);
                if (colonzreplacement == NULL)
                    goto Error;
            }
            replacement = colonzreplacement;
        }
        else if (ch == 'Z') {
            /* format tzname */
            if (Zreplacement == NULL) {
                Zreplacement = make_Zreplacement(object,
                                                 tzinfoarg);
                if (Zreplacement == NULL)
                    goto Error;
            }
            replacement = Zreplacement;
        }
        else if (ch == 'f') {
            /* format microseconds */
            if (freplacement == NULL) {
                freplacement = make_freplacement(object);
                if (freplacement == NULL)
                    goto Error;
            }
            replacement = freplacement;
        }
        else if (normalize_century()
                 && (ch == 'Y' || ch == 'G' || ch == 'F' || ch == 'C'))
        {
            /* 0-pad year with century as necessary */
            PyObject *item = PySequence_GetItem(timetuple, 0);
            if (item == NULL) {
                goto Error;
            }
            long year_long = PyLong_AsLong(item);
            Py_DECREF(item);
            if (year_long == -1 && PyErr_Occurred()) {
                goto Error;
            }
            /* Note that datetime(1000, 1, 1).strftime('%G') == '1000' so year
               1000 for %G can go on the fast path. */
            if (year_long >= 1000) {
                continue;
            }
            if (ch == 'G') {
                /* The ISO year can differ from the calendar year; ask
                 * strftime itself for the %G value before padding it. */
                PyObject *year_str = PyObject_CallFunction(strftime, "sO",
                                                           "%G", timetuple);
                if (year_str == NULL) {
                    goto Error;
                }
                PyObject *year = PyNumber_Long(year_str);
                Py_DECREF(year_str);
                if (year == NULL) {
                    goto Error;
                }
                year_long = PyLong_AsLong(year);
                Py_DECREF(year);
                if (year_long == -1 && PyErr_Occurred()) {
                    goto Error;
                }
            }
            /* Buffer of maximum size of formatted year permitted by long.
             * +6 to accommodate dashes, 2-digit month and day for %F. */
            char buf[SIZEOF_LONG * 5 / 2 + 2 + 6];
            Py_ssize_t n = PyOS_snprintf(buf, sizeof(buf),
                                         ch == 'F' ? "%04ld-%%m-%%d" :
                                         "%04ld", year_long);
            if (ch == 'C') {
                /* %C wants only the century: drop the last two digits. */
                n -= 2;
            }
            if (PyUnicodeWriter_WriteSubstring(writer, format, start, end) < 0) {
                goto Error;
            }
            start = i;
            if (PyUnicodeWriter_WriteUTF8(writer, buf, n) < 0) {
                goto Error;
            }
            continue;
        }
        else {
            /* percent followed by something else */
            continue;
        }
        assert(replacement != NULL);
        assert(PyUnicode_Check(replacement));
        /* Copy the untouched span before the escape, then the replacement. */
        if (PyUnicodeWriter_WriteSubstring(writer, format, start, end) < 0) {
            goto Error;
        }
        start = i;
        if (PyUnicodeWriter_WriteStr(writer, replacement) < 0) {
            goto Error;
        }
    }  /* end while() */
    PyObject *newformat;
    if (start == 0) {
        /* Nothing was rewritten: reuse the original format unchanged. */
        PyUnicodeWriter_Discard(writer);
        newformat = Py_NewRef(format);
    }
    else {
        if (PyUnicodeWriter_WriteSubstring(writer, format, start, flen) < 0) {
            goto Error;
        }
        newformat = PyUnicodeWriter_Finish(writer);
        if (newformat == NULL) {
            goto Done;
        }
    }
    result = PyObject_CallFunctionObjArgs(strftime,
                                          newformat, timetuple, NULL);
    Py_DECREF(newformat);
Done:
    Py_XDECREF(freplacement);
    Py_XDECREF(zreplacement);
    Py_XDECREF(colonzreplacement);
    Py_XDECREF(Zreplacement);
    Py_XDECREF(strftime);
    return result;
Error:
    PyUnicodeWriter_Discard(writer);
    goto Done;
}
/* ---------------------------------------------------------------------------
* Wrap functions from the time module. These aren't directly available
* from C. Perhaps they should be.
*/
/* Call time.time() and return its result (a Python float). */
static PyObject *
time_time(void)
{
    /* Import time.time lazily; propagate NULL on import failure. */
    PyObject *time_func = PyImport_ImportModuleAttrString("time", "time");
    if (time_func == NULL) {
        return NULL;
    }
    PyObject *stamp = PyObject_CallNoArgs(time_func);
    Py_DECREF(time_func);
    return stamp;
}
/* Build a time.struct_time. The weekday and day number are automatically
* computed from the y,m,d args.
*/
static PyObject *
build_struct_time(int y, int m, int d, int hh, int mm, int ss, int dstflag)
{
    PyObject *struct_time = PyImport_ImportModuleAttrString("time", "struct_time");
    if (struct_time == NULL) {
        return NULL;
    }
    /* tm_wday and tm_yday are derived here from y/m/d; dstflag is passed
     * through from the caller. */
    PyObject *result = PyObject_CallFunction(struct_time, "((iiiiiiiii))",
                                             y, m, d, hh, mm, ss,
                                             weekday(y, m, d),
                                             days_before_month(y, m) + d,
                                             dstflag);
    Py_DECREF(struct_time);
    return result;
}
/* ---------------------------------------------------------------------------
* Miscellaneous helpers.
*/
/* The comparisons here all most naturally compute a cmp()-like result.
* This little helper turns that into a bool result for rich comparisons.
*/
static PyObject *
diff_to_bool(int diff, int op)
{
Py_RETURN_RICHCOMPARE(diff, 0, op);
}
/* ---------------------------------------------------------------------------
* Class implementations.
*/
/*
* PyDateTime_Delta implementation.
*/
/* Convert a timedelta to a number of us,
* (24*3600*self.days + self.seconds)*1000000 + self.microseconds
* as a Python int.
* Doing mixed-radix arithmetic by hand instead is excruciating in C,
* due to ubiquitous overflow possibilities.
*/
static PyObject *
delta_to_microseconds(PyDateTime_Delta *self)
{
    /* All arithmetic is done on Python ints to avoid C overflow; x1..x3
     * hold intermediate results and are released on the Done path. */
    PyObject *x1 = NULL;
    PyObject *x2 = NULL;
    PyObject *x3 = NULL;
    PyObject *result = NULL;
    PyObject *current_mod = NULL;
    datetime_state *st = GET_CURRENT_STATE(current_mod);
    x1 = PyLong_FromLong(GET_TD_DAYS(self));
    if (x1 == NULL)
        goto Done;
    x2 = PyNumber_Multiply(x1, CONST_SEC_PER_DAY(st)); /* days in seconds */
    if (x2 == NULL)
        goto Done;
    Py_SETREF(x1, NULL);
    /* x2 has days in seconds */
    x1 = PyLong_FromLong(GET_TD_SECONDS(self)); /* seconds */
    if (x1 == NULL)
        goto Done;
    x3 = PyNumber_Add(x1, x2); /* days and seconds in seconds */
    if (x3 == NULL)
        goto Done;
    Py_DECREF(x1);
    Py_DECREF(x2);
    /* x1 = */ x2 = NULL;
    /* x3 has days+seconds in seconds */
    x1 = PyNumber_Multiply(x3, CONST_US_PER_SECOND(st)); /* us */
    if (x1 == NULL)
        goto Done;
    Py_SETREF(x3, NULL);
    /* x1 has days+seconds in us */
    x2 = PyLong_FromLong(GET_TD_MICROSECONDS(self));
    if (x2 == NULL)
        goto Done;
    result = PyNumber_Add(x1, x2);
    /* PyNumber_Add on two exact ints yields an exact int. */
    assert(result == NULL || PyLong_CheckExact(result));
Done:
    Py_XDECREF(x1);
    Py_XDECREF(x2);
    Py_XDECREF(x3);
    RELEASE_CURRENT_STATE(st, current_mod);
    return result;
}
/* divmod() with result-shape validation: the operands' type may override
 * __divmod__, so verify we really got a 2-tuple before callers index it. */
static PyObject *
checked_divmod(PyObject *a, PyObject *b)
{
    PyObject *result = PyNumber_Divmod(a, b);
    if (result == NULL) {
        return NULL;
    }
    if (!PyTuple_Check(result)) {
        PyErr_Format(PyExc_TypeError,
                     "divmod() returned non-tuple (type %.200s)",
                     Py_TYPE(result)->tp_name);
        Py_DECREF(result);
        return NULL;
    }
    if (PyTuple_GET_SIZE(result) != 2) {
        PyErr_Format(PyExc_TypeError,
                     "divmod() returned a tuple of size %zd",
                     PyTuple_GET_SIZE(result));
        Py_DECREF(result);
        return NULL;
    }
    return result;
}
/* Convert a number of us (as a Python int) to a timedelta.
*/
static PyObject *
microseconds_to_delta_ex(PyObject *pyus, PyTypeObject *type)
{
    int us;
    int s;
    int d;
    PyObject *tuple = NULL;
    PyObject *num = NULL;
    PyObject *result = NULL;
    PyObject *current_mod = NULL;
    datetime_state *st = GET_CURRENT_STATE(current_mod);
    /* Split off the microsecond remainder; the quotient (total seconds)
     * stays in tuple[0]. */
    tuple = checked_divmod(pyus, CONST_US_PER_SECOND(st));
    if (tuple == NULL) {
        goto Done;
    }
    num = PyTuple_GET_ITEM(tuple, 1); /* us */
    us = PyLong_AsInt(num);
    /* num was a borrowed reference here; clear it so the Done path's
     * Py_XDECREF doesn't over-release it. */
    num = NULL;
    if (us == -1 && PyErr_Occurred()) {
        goto Done;
    }
    if (!(0 <= us && us < 1000000)) {
        goto BadDivmod;
    }
    num = Py_NewRef(PyTuple_GET_ITEM(tuple, 0)); /* leftover seconds */
    Py_DECREF(tuple);
    /* Split seconds into days and the in-day remainder. */
    tuple = checked_divmod(num, CONST_SEC_PER_DAY(st));
    if (tuple == NULL)
        goto Done;
    Py_DECREF(num);
    num = PyTuple_GET_ITEM(tuple, 1); /* seconds */
    s = PyLong_AsInt(num);
    /* Borrowed reference again: see above. */
    num = NULL;
    if (s == -1 && PyErr_Occurred()) {
        goto Done;
    }
    if (!(0 <= s && s < 24*3600)) {
        goto BadDivmod;
    }
    num = Py_NewRef(PyTuple_GET_ITEM(tuple, 0)); /* leftover days */
    /* A day count that doesn't fit an int raises here (checked below). */
    d = PyLong_AsInt(num);
    if (d == -1 && PyErr_Occurred()) {
        goto Done;
    }
    result = new_delta_ex(d, s, us, 0, type);
Done:
    Py_XDECREF(tuple);
    Py_XDECREF(num);
    RELEASE_CURRENT_STATE(st, current_mod);
    return result;
BadDivmod:
    PyErr_SetString(PyExc_TypeError,
                    "divmod() returned a value out of range");
    goto Done;
}
/* Shorthand: build a timedelta of the default (exact) delta type. */
#define microseconds_to_delta(pymicros) \
    microseconds_to_delta_ex(pymicros, DELTA_TYPE(NO_STATE))
/* int * timedelta -> timedelta, computed exactly in microseconds. */
static PyObject *
multiply_int_timedelta(PyObject *intobj, PyDateTime_Delta *delta)
{
    PyObject *total_us = delta_to_microseconds(delta);
    if (total_us == NULL) {
        return NULL;
    }
    PyObject *scaled_us = PyNumber_Multiply(intobj, total_us);
    Py_DECREF(total_us);
    if (scaled_us == NULL) {
        return NULL;
    }
    PyObject *result = microseconds_to_delta(scaled_us);
    Py_DECREF(scaled_us);
    return result;
}
/* Call floatobj.as_integer_ratio() and validate the result shape — the
 * method may be overridden (e.g. by a float subclass), so check that it
 * really returned a 2-tuple before callers index into it. */
static PyObject *
get_float_as_integer_ratio(PyObject *floatobj)
{
    PyObject *ratio;
    assert(floatobj && PyFloat_Check(floatobj));
    ratio = PyObject_CallMethodNoArgs(floatobj, &_Py_ID(as_integer_ratio));
    if (ratio == NULL) {
        return NULL;
    }
    if (!PyTuple_Check(ratio)) {
        PyErr_Format(PyExc_TypeError,
                     "unexpected return type from as_integer_ratio(): "
                     "expected tuple, not '%.200s'",
                     Py_TYPE(ratio)->tp_name);
        Py_DECREF(ratio);
        return NULL;
    }
    if (PyTuple_Size(ratio) != 2) {
        PyErr_SetString(PyExc_ValueError,
                        "as_integer_ratio() must return a 2-tuple");
        Py_DECREF(ratio);
        return NULL;
    }
    return ratio;
}
/* op is 0 for multiplication, 1 for division */
/* timedelta * float or timedelta / float, done exactly: the float is
 * decomposed into a numerator/denominator pair via as_integer_ratio(),
 * so us * num / den (rounded to nearest) loses no precision. For
 * division the roles of numerator and denominator are swapped (op). */
static PyObject *
multiply_truedivide_timedelta_float(PyDateTime_Delta *delta, PyObject *floatobj, int op)
{
    PyObject *result = NULL;
    PyObject *pyus_in = NULL, *temp, *pyus_out;
    PyObject *ratio = NULL;
    pyus_in = delta_to_microseconds(delta);
    if (pyus_in == NULL)
        return NULL;
    ratio = get_float_as_integer_ratio(floatobj);
    if (ratio == NULL) {
        goto error;
    }
    temp = PyNumber_Multiply(pyus_in, PyTuple_GET_ITEM(ratio, op));
    /* pyus_in is cleared here so the error path's Py_XDECREF is a no-op. */
    Py_SETREF(pyus_in, NULL);
    if (temp == NULL)
        goto error;
    pyus_out = divide_nearest(temp, PyTuple_GET_ITEM(ratio, !op));
    Py_DECREF(temp);
    if (pyus_out == NULL)
        goto error;
    result = microseconds_to_delta(pyus_out);
    Py_DECREF(pyus_out);
error:
    Py_XDECREF(pyus_in);
    Py_XDECREF(ratio);
    return result;
}
/* timedelta // int -> timedelta (floor division on the microsecond value). */
static PyObject *
divide_timedelta_int(PyDateTime_Delta *delta, PyObject *intobj)
{
    PyObject *total_us = delta_to_microseconds(delta);
    if (total_us == NULL) {
        return NULL;
    }
    PyObject *quotient_us = PyNumber_FloorDivide(total_us, intobj);
    Py_DECREF(total_us);
    if (quotient_us == NULL) {
        return NULL;
    }
    PyObject *result = microseconds_to_delta(quotient_us);
    Py_DECREF(quotient_us);
    return result;
}
/* timedelta // timedelta -> int (floor quotient of the microsecond values). */
static PyObject *
divide_timedelta_timedelta(PyDateTime_Delta *left, PyDateTime_Delta *right)
{
    PyObject *us_left = delta_to_microseconds(left);
    if (us_left == NULL) {
        return NULL;
    }
    PyObject *quotient = NULL;
    PyObject *us_right = delta_to_microseconds(right);
    if (us_right != NULL) {
        quotient = PyNumber_FloorDivide(us_left, us_right);
        Py_DECREF(us_right);
    }
    Py_DECREF(us_left);
    return quotient;
}
/* timedelta / timedelta -> float (true quotient of the microsecond values). */
static PyObject *
truedivide_timedelta_timedelta(PyDateTime_Delta *left, PyDateTime_Delta *right)
{
    PyObject *us_left = delta_to_microseconds(left);
    if (us_left == NULL) {
        return NULL;
    }
    PyObject *quotient = NULL;
    PyObject *us_right = delta_to_microseconds(right);
    if (us_right != NULL) {
        quotient = PyNumber_TrueDivide(us_left, us_right);
        Py_DECREF(us_right);
    }
    Py_DECREF(us_left);
    return quotient;
}
/* timedelta / int -> timedelta, rounded to the nearest microsecond. */
static PyObject *
truedivide_timedelta_int(PyDateTime_Delta *delta, PyObject *i)
{
    PyObject *total_us = delta_to_microseconds(delta);
    if (total_us == NULL) {
        return NULL;
    }
    PyObject *rounded_us = divide_nearest(total_us, i);
    Py_DECREF(total_us);
    if (rounded_us == NULL) {
        return NULL;
    }
    PyObject *result = microseconds_to_delta(rounded_us);
    Py_DECREF(rounded_us);
    return result;
}
static PyObject *
delta_add(PyObject *left, PyObject *right)
{
    if (!PyDelta_Check(left) || !PyDelta_Check(right)) {
        Py_RETURN_NOTIMPLEMENTED;
    }
    /* Component-wise sums can't overflow a C int because each field obeys
     * the invariant bounds; normalize=1 re-canonicalizes the result. */
    return new_delta(GET_TD_DAYS(left) + GET_TD_DAYS(right),
                     GET_TD_SECONDS(left) + GET_TD_SECONDS(right),
                     GET_TD_MICROSECONDS(left) + GET_TD_MICROSECONDS(right),
                     1);
}
static PyObject *
delta_negative(PyObject *self)
{
    /* Negate every component; normalize=1 restores the canonical
     * non-negative seconds/microseconds representation. */
    int days = GET_TD_DAYS(self);
    int seconds = GET_TD_SECONDS(self);
    int microseconds = GET_TD_MICROSECONDS(self);
    return new_delta(-days, -seconds, -microseconds, 1);
}
static PyObject *
delta_positive(PyObject *self)
{
    /* Could optimize this (by returning self) if this isn't a
     * subclass -- but who uses unary + ? Approximately nobody.
     * The components are already normalized, hence normalize=0. */
    int days = GET_TD_DAYS(self);
    int seconds = GET_TD_SECONDS(self);
    int microseconds = GET_TD_MICROSECONDS(self);
    return new_delta(days, seconds, microseconds, 0);
}
static PyObject *
delta_abs(PyObject *self)
{
    /* seconds and microseconds are non-negative by invariant, so the sign
     * of the whole delta is exactly the sign of the days field. */
    assert(GET_TD_MICROSECONDS(self) >= 0);
    assert(GET_TD_SECONDS(self) >= 0);
    return (GET_TD_DAYS(self) < 0) ? delta_negative(self)
                                   : delta_positive(self);
}
static PyObject *
delta_subtract(PyObject *left, PyObject *right)
{
    if (!PyDelta_Check(left) || !PyDelta_Check(right)) {
        Py_RETURN_NOTIMPLEMENTED;
    }
    /* Component-wise differences can't overflow a C int because each field
     * obeys the invariant bounds; normalize=1 re-canonicalizes. */
    return new_delta(GET_TD_DAYS(left) - GET_TD_DAYS(right),
                     GET_TD_SECONDS(left) - GET_TD_SECONDS(right),
                     GET_TD_MICROSECONDS(left) - GET_TD_MICROSECONDS(right),
                     1);
}
/* cmp()-style comparison: lexicographic over (days, seconds, microseconds),
 * which matches chronological order thanks to the invariant bounds. */
static int
delta_cmp(PyObject *self, PyObject *other)
{
    int diff = GET_TD_DAYS(self) - GET_TD_DAYS(other);
    if (diff != 0) {
        return diff;
    }
    diff = GET_TD_SECONDS(self) - GET_TD_SECONDS(other);
    if (diff != 0) {
        return diff;
    }
    return GET_TD_MICROSECONDS(self) - GET_TD_MICROSECONDS(other);
}
static PyObject *
delta_richcompare(PyObject *self, PyObject *other, int op)
{
    /* Only timedelta-to-timedelta comparisons are supported. */
    if (!PyDelta_Check(other)) {
        Py_RETURN_NOTIMPLEMENTED;
    }
    return diff_to_bool(delta_cmp(self, other), op);
}
static PyObject *delta_getstate(PyDateTime_Delta *self);

/* tp_hash: hash the (days, seconds, microseconds) state tuple, caching the
 * result in self->hashcode. -1 means "not yet computed"; the relaxed
 * atomics make the lazy fill safe under free threading (worst case the
 * same hash is computed twice — a benign race). */
static Py_hash_t
delta_hash(PyObject *op)
{
    PyDateTime_Delta *self = PyDelta_CAST(op);
    Py_hash_t hash = FT_ATOMIC_LOAD_SSIZE_RELAXED(self->hashcode);
    if (hash == -1) {
        PyObject *temp = delta_getstate(self);
        if (temp != NULL) {
            hash = PyObject_Hash(temp);
            FT_ATOMIC_STORE_SSIZE_RELAXED(self->hashcode, hash);
            Py_DECREF(temp);
        }
    }
    return hash;
}
static PyObject *
delta_multiply(PyObject *left, PyObject *right)
{
    /* nb_multiply: at least one operand is a timedelta; the other must be
     * an int or a float for the product to be defined. */
    PyDateTime_Delta *delta;
    PyObject *scalar;
    if (PyDelta_Check(left)) {
        delta = (PyDateTime_Delta *)left;
        scalar = right;
    }
    else {
        delta = (PyDateTime_Delta *)right;
        scalar = left;
    }
    if (PyLong_Check(scalar)) {
        return multiply_int_timedelta(scalar, delta);
    }
    if (PyFloat_Check(scalar)) {
        return multiply_truedivide_timedelta_float(delta, scalar, 0);
    }
    Py_RETURN_NOTIMPLEMENTED;
}
static PyObject *
delta_divide(PyObject *left, PyObject *right)
{
    /* nb_floor_divide: timedelta // int and timedelta // timedelta. */
    if (PyDelta_Check(left)) {
        if (PyLong_Check(right)) {
            return divide_timedelta_int((PyDateTime_Delta *)left, right);
        }
        if (PyDelta_Check(right)) {
            return divide_timedelta_timedelta((PyDateTime_Delta *)left,
                                              (PyDateTime_Delta *)right);
        }
    }
    Py_RETURN_NOTIMPLEMENTED;
}
static PyObject *
delta_truedivide(PyObject *left, PyObject *right)
{
    /* nb_true_divide: timedelta / {timedelta, float, int}. */
    if (PyDelta_Check(left)) {
        if (PyDelta_Check(right)) {
            return truedivide_timedelta_timedelta((PyDateTime_Delta *)left,
                                                  (PyDateTime_Delta *)right);
        }
        if (PyFloat_Check(right)) {
            return multiply_truedivide_timedelta_float(
                       (PyDateTime_Delta *)left, right, 1);
        }
        if (PyLong_Check(right)) {
            return truedivide_timedelta_int((PyDateTime_Delta *)left, right);
        }
    }
    Py_RETURN_NOTIMPLEMENTED;
}
/* timedelta % timedelta -> timedelta, computed on the microsecond values. */
static PyObject *
delta_remainder(PyObject *left, PyObject *right)
{
    PyObject *pyus_left;
    PyObject *pyus_right;
    PyObject *pyus_remainder;
    PyObject *remainder;

    if (!PyDelta_Check(left) || !PyDelta_Check(right))
        Py_RETURN_NOTIMPLEMENTED;

    pyus_left = delta_to_microseconds((PyDateTime_Delta *)left);
    if (pyus_left == NULL)
        return NULL;

    pyus_right = delta_to_microseconds((PyDateTime_Delta *)right);
    if (pyus_right == NULL) {
        Py_DECREF(pyus_left);
        return NULL;
    }

    pyus_remainder = PyNumber_Remainder(pyus_left, pyus_right);
    Py_DECREF(pyus_left);
    Py_DECREF(pyus_right);
    if (pyus_remainder == NULL)
        return NULL;

    /* NULL from the conversion propagates directly; the previous extra
     * "if (remainder == NULL) return NULL;" check was redundant. */
    remainder = microseconds_to_delta(pyus_remainder);
    Py_DECREF(pyus_remainder);
    return remainder;
}
/* divmod(timedelta, timedelta) -> (int quotient, timedelta remainder). */
static PyObject *
delta_divmod(PyObject *left, PyObject *right)
{
    if (!PyDelta_Check(left) || !PyDelta_Check(right)) {
        Py_RETURN_NOTIMPLEMENTED;
    }
    PyObject *us_left = delta_to_microseconds((PyDateTime_Delta *)left);
    if (us_left == NULL) {
        return NULL;
    }
    PyObject *us_right = delta_to_microseconds((PyDateTime_Delta *)right);
    if (us_right == NULL) {
        Py_DECREF(us_left);
        return NULL;
    }
    PyObject *qr = checked_divmod(us_left, us_right);
    Py_DECREF(us_left);
    Py_DECREF(us_right);
    if (qr == NULL) {
        return NULL;
    }
    /* The remainder converts back to a timedelta; the quotient stays an int. */
    PyObject *mod_delta = microseconds_to_delta(PyTuple_GET_ITEM(qr, 1));
    if (mod_delta == NULL) {
        Py_DECREF(qr);
        return NULL;
    }
    PyObject *result = PyTuple_Pack(2, PyTuple_GET_ITEM(qr, 0), mod_delta);
    Py_DECREF(mod_delta);
    Py_DECREF(qr);
    return result;
}
/* Fold in the value of the tag ("seconds", "weeks", etc) component of a
* timedelta constructor. sofar is the # of microseconds accounted for
* so far, and there are factor microseconds per current unit, the number
* of which is given by num. num * factor is added to sofar in a
* numerically careful way, and that's the result. Any fractional
* microseconds left over (this can happen if num is a float type) are
* added into *leftover.
* Note that there are many ways this can give an error (NULL) return.
*/
static PyObject *
accum(const char* tag, PyObject *sofar, PyObject *num, PyObject *factor,
      double *leftover)
{
    PyObject *prod;
    PyObject *sum;
    assert(num != NULL);
    if (PyLong_Check(num)) {
        /* Integer path: exact arbitrary-precision arithmetic. */
        prod = PyNumber_Multiply(num, factor);
        if (prod == NULL)
            return NULL;
        sum = PyNumber_Add(sofar, prod);
        Py_DECREF(prod);
        return sum;
    }
    if (PyFloat_Check(num)) {
        double dnum;
        double fracpart;
        double intpart;
        PyObject *x;
        PyObject *y;
        /* The Plan: decompose num into an integer part and a
         * fractional part, num = intpart + fracpart.
         * Then num * factor ==
         *      intpart * factor + fracpart * factor
         * and the LHS can be computed exactly in long arithmetic.
         * The RHS is again broken into an int part and frac part.
         * and the frac part is added into *leftover.
         */
        dnum = PyFloat_AsDouble(num);
        if (dnum == -1.0 && PyErr_Occurred())
            return NULL;
        fracpart = modf(dnum, &intpart);
        x = PyLong_FromDouble(intpart);
        if (x == NULL)
            return NULL;
        prod = PyNumber_Multiply(x, factor);
        Py_DECREF(x);
        if (prod == NULL)
            return NULL;
        sum = PyNumber_Add(sofar, prod);
        Py_DECREF(prod);
        if (sum == NULL)
            return NULL;
        if (fracpart == 0.0)
            return sum;
        /* So far we've lost no information. Dealing with the
         * fractional part requires float arithmetic, and may
         * lose a little info.
         */
        assert(PyLong_CheckExact(factor));
        dnum = PyLong_AsDouble(factor);
        dnum *= fracpart;
        /* Whole microseconds from the fractional component are folded into
         * the running sum; anything below 1us accumulates in *leftover. */
        fracpart = modf(dnum, &intpart);
        x = PyLong_FromDouble(intpart);
        if (x == NULL) {
            Py_DECREF(sum);
            return NULL;
        }
        y = PyNumber_Add(sum, x);
        Py_DECREF(sum);
        Py_DECREF(x);
        *leftover += fracpart;
        return y;
    }
    PyErr_Format(PyExc_TypeError,
                 "unsupported type for timedelta %s component: %s",
                 tag, Py_TYPE(num)->tp_name);
    return NULL;
}
/*[clinic input]
@classmethod
datetime.timedelta.__new__ as delta_new
days: object(c_default="NULL") = 0
seconds: object(c_default="NULL") = 0
microseconds: object(c_default="NULL") = 0
milliseconds: object(c_default="NULL") = 0
minutes: object(c_default="NULL") = 0
hours: object(c_default="NULL") = 0
weeks: object(c_default="NULL") = 0
Difference between two datetime values.
All arguments are optional and default to 0.
Arguments may be integers or floats, and may be positive or negative.
[clinic start generated code]*/
static PyObject *
delta_new_impl(PyTypeObject *type, PyObject *days, PyObject *seconds,
               PyObject *microseconds, PyObject *milliseconds,
               PyObject *minutes, PyObject *hours, PyObject *weeks)
/*[clinic end generated code: output=61d7e02a92a97700 input=e8cd54819295d34b]*/
{
    PyObject *self = NULL;
    PyObject *current_mod = NULL;
    datetime_state *st = GET_CURRENT_STATE(current_mod);
    /* Each supplied argument (int or float) is converted to microseconds
     * and accumulated exactly into x (a Python int); sub-microsecond
     * fractions collect in leftover_us and are rounded in at the end. */
    PyObject *x = NULL; /* running sum of microseconds */
    PyObject *y = NULL; /* temp sum of microseconds */
    double leftover_us = 0.0;
    x = PyLong_FromLong(0);
    if (x == NULL)
        goto Done;
/* Shift y (the newest partial sum) into x, bailing out on failure. */
#define CLEANUP         \
    Py_DECREF(x);       \
    x = y;              \
    if (x == NULL)      \
        goto Done
    if (microseconds) {
        y = accum("microseconds", x, microseconds, _PyLong_GetOne(), &leftover_us);
        CLEANUP;
    }
    if (milliseconds) {
        y = accum("milliseconds", x, milliseconds, CONST_US_PER_MS(st), &leftover_us);
        CLEANUP;
    }
    if (seconds) {
        y = accum("seconds", x, seconds, CONST_US_PER_SECOND(st), &leftover_us);
        CLEANUP;
    }
    if (minutes) {
        y = accum("minutes", x, minutes, CONST_US_PER_MINUTE(st), &leftover_us);
        CLEANUP;
    }
    if (hours) {
        y = accum("hours", x, hours, CONST_US_PER_HOUR(st), &leftover_us);
        CLEANUP;
    }
    if (days) {
        y = accum("days", x, days, CONST_US_PER_DAY(st), &leftover_us);
        CLEANUP;
    }
    if (weeks) {
        y = accum("weeks", x, weeks, CONST_US_PER_WEEK(st), &leftover_us);
        CLEANUP;
    }
    if (leftover_us) {
        /* Round to nearest whole # of us, and add into x. */
        double whole_us = round(leftover_us);
        int x_is_odd;
        PyObject *temp;
        if (fabs(whole_us - leftover_us) == 0.5) {
            /* We're exactly halfway between two integers. In order
             * to do round-half-to-even, we must determine whether x
             * is odd. Note that x is odd when it's last bit is 1. The
             * code below uses bitwise and operation to check the last
             * bit. */
            temp = PyNumber_And(x, _PyLong_GetOne()); /* temp <- x & 1 */
            if (temp == NULL) {
                Py_DECREF(x);
                goto Done;
            }
            x_is_odd = PyObject_IsTrue(temp);
            Py_DECREF(temp);
            if (x_is_odd == -1) {
                Py_DECREF(x);
                goto Done;
            }
            whole_us = 2.0 * round((leftover_us + x_is_odd) * 0.5) - x_is_odd;
        }
        temp = PyLong_FromLong((long)whole_us);
        if (temp == NULL) {
            Py_DECREF(x);
            goto Done;
        }
        y = PyNumber_Add(x, temp);
        Py_DECREF(temp);
        CLEANUP;
    }
    self = microseconds_to_delta_ex(x, type);
    Py_DECREF(x);
Done:
    RELEASE_CURRENT_STATE(st, current_mod);
    return self;
#undef CLEANUP
}
static int
delta_bool(PyObject *self)
{
    /* A timedelta is falsy only when all three components are zero. */
    if (GET_TD_DAYS(self) != 0 || GET_TD_SECONDS(self) != 0) {
        return 1;
    }
    return GET_TD_MICROSECONDS(self) != 0;
}
/* repr(): "<type>(days=D, seconds=S, microseconds=U)" with zero components
 * omitted; an all-zero delta renders as "<type>(0)". */
static PyObject *
delta_repr(PyObject *self)
{
    /* args accumulates the comma-separated keyword list; each Py_SETREF
     * replaces it with a longer string (or NULL on failure). */
    PyObject *args = Py_GetConstant(Py_CONSTANT_EMPTY_STR);
    if (args == NULL) {
        return NULL;
    }
    const char *sep = "";
    if (GET_TD_DAYS(self) != 0) {
        Py_SETREF(args, PyUnicode_FromFormat("days=%d", GET_TD_DAYS(self)));
        if (args == NULL) {
            return NULL;
        }
        sep = ", ";
    }
    if (GET_TD_SECONDS(self) != 0) {
        Py_SETREF(args, PyUnicode_FromFormat("%U%sseconds=%d", args, sep,
                                             GET_TD_SECONDS(self)));
        if (args == NULL) {
            return NULL;
        }
        sep = ", ";
    }
    if (GET_TD_MICROSECONDS(self) != 0) {
        Py_SETREF(args, PyUnicode_FromFormat("%U%smicroseconds=%d", args, sep,
                                             GET_TD_MICROSECONDS(self)));
        if (args == NULL) {
            return NULL;
        }
    }
    /* All components were zero: fall back to a bare "0" argument. */
    if (PyUnicode_GET_LENGTH(args) == 0) {
        Py_SETREF(args, PyUnicode_FromString("0"));
        if (args == NULL) {
            return NULL;
        }
    }
    PyObject *repr = PyUnicode_FromFormat("%s(%S)", Py_TYPE(self)->tp_name,
                                          args);
    Py_DECREF(args);
    return repr;
}
/* str(): "[D day[s], ]H:MM:SS[.UUUUUU]". */
static PyObject *
delta_str(PyObject *self)
{
    int secs = GET_TD_SECONDS(self);
    int mins = divmod(secs, 60, &secs);
    int hrs = divmod(mins, 60, &mins);
    int micro = GET_TD_MICROSECONDS(self);
    int ndays = GET_TD_DAYS(self);

    if (ndays == 0) {
        return micro
            ? PyUnicode_FromFormat("%d:%02d:%02d.%06d",
                                   hrs, mins, secs, micro)
            : PyUnicode_FromFormat("%d:%02d:%02d",
                                   hrs, mins, secs);
    }
    /* "1 day" and "-1 day" are singular; everything else gets an "s". */
    const char *plural = (ndays == 1 || ndays == -1) ? "" : "s";
    if (micro) {
        return PyUnicode_FromFormat("%d day%s, %d:%02d:%02d.%06d",
                                    ndays, plural, hrs, mins, secs, micro);
    }
    return PyUnicode_FromFormat("%d day%s, %d:%02d:%02d",
                                ndays, plural, hrs, mins, secs);
}
/* Pickle support, a simple use of __reduce__. */
/* __getstate__ isn't exposed */
/* Return the pickle payload: a (days, seconds, microseconds) int 3-tuple.
 * Also used by delta_hash() as the hashable canonical state. */
static PyObject *
delta_getstate(PyDateTime_Delta *self)
{
    return Py_BuildValue("iii", GET_TD_DAYS(self),
                                GET_TD_SECONDS(self),
                                GET_TD_MICROSECONDS(self));
}
static PyObject *
delta_total_seconds(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyObject *total_us = delta_to_microseconds(PyDelta_CAST(op));
    if (total_us == NULL) {
        return NULL;
    }
    PyObject *current_mod = NULL;
    datetime_state *st = GET_CURRENT_STATE(current_mod);
    /* True division yields a float, preserving sub-second precision. */
    PyObject *total_seconds = PyNumber_TrueDivide(total_us,
                                                  CONST_US_PER_SECOND(st));
    RELEASE_CURRENT_STATE(st, current_mod);
    Py_DECREF(total_us);
    return total_seconds;
}
static PyObject *
delta_reduce(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    /* Pickle as (type, state); "N" steals the state reference and makes
     * Py_BuildValue fail cleanly if delta_getstate() returned NULL. */
    PyDateTime_Delta *self = PyDelta_CAST(op);
    PyObject *state = delta_getstate(self);
    return Py_BuildValue("ON", Py_TYPE(self), state);
}
#define OFFSET(field)  offsetof(PyDateTime_Delta, field)

/* Read-only attribute table exposing the three normalized components. */
static PyMemberDef delta_members[] = {
    {"days",         Py_T_INT, OFFSET(days),         Py_READONLY,
     PyDoc_STR("Number of days.")},
    {"seconds",      Py_T_INT, OFFSET(seconds),      Py_READONLY,
     PyDoc_STR("Number of seconds (>= 0 and less than 1 day).")},
    {"microseconds", Py_T_INT, OFFSET(microseconds), Py_READONLY,
     PyDoc_STR("Number of microseconds (>= 0 and less than 1 second).")},
    {NULL}
};
/* Method table: total_seconds() plus pickle support via __reduce__. */
static PyMethodDef delta_methods[] = {
    {"total_seconds", delta_total_seconds, METH_NOARGS,
     PyDoc_STR("Total seconds in the duration.")},
    {"__reduce__", delta_reduce, METH_NOARGS,
     PyDoc_STR("__reduce__() -> (cls, state)")},
    {NULL, NULL},
};
/* Number protocol: arithmetic only; bitwise/int/float conversions are
 * deliberately unsupported (slots left 0). */
static PyNumberMethods delta_as_number = {
    delta_add,                                  /* nb_add */
    delta_subtract,                             /* nb_subtract */
    delta_multiply,                             /* nb_multiply */
    delta_remainder,                            /* nb_remainder */
    delta_divmod,                               /* nb_divmod */
    0,                                          /* nb_power */
    delta_negative,                             /* nb_negative */
    delta_positive,                             /* nb_positive */
    delta_abs,                                  /* nb_absolute */
    delta_bool,                                 /* nb_bool */
    0,                                          /*nb_invert*/
    0,                                          /*nb_lshift*/
    0,                                          /*nb_rshift*/
    0,                                          /*nb_and*/
    0,                                          /*nb_xor*/
    0,                                          /*nb_or*/
    0,                                          /*nb_int*/
    0,                                          /*nb_reserved*/
    0,                                          /*nb_float*/
    0,                                          /*nb_inplace_add*/
    0,                                          /*nb_inplace_subtract*/
    0,                                          /*nb_inplace_multiply*/
    0,                                          /*nb_inplace_remainder*/
    0,                                          /*nb_inplace_power*/
    0,                                          /*nb_inplace_lshift*/
    0,                                          /*nb_inplace_rshift*/
    0,                                          /*nb_inplace_and*/
    0,                                          /*nb_inplace_xor*/
    0,                                          /*nb_inplace_or*/
    delta_divide,                               /* nb_floor_divide */
    delta_truedivide,                           /* nb_true_divide */
    0,                                          /* nb_inplace_floor_divide */
    0,                                          /* nb_inplace_true_divide */
};
/* Static type object for datetime.timedelta. Instances are immutable and
 * the type allows subclassing (Py_TPFLAGS_BASETYPE). */
static PyTypeObject PyDateTime_DeltaType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "datetime.timedelta",                               /* tp_name */
    sizeof(PyDateTime_Delta),                           /* tp_basicsize */
    0,                                                  /* tp_itemsize */
    0,                                                  /* tp_dealloc */
    0,                                                  /* tp_vectorcall_offset */
    0,                                                  /* tp_getattr */
    0,                                                  /* tp_setattr */
    0,                                                  /* tp_as_async */
    delta_repr,                                         /* tp_repr */
    &delta_as_number,                                   /* tp_as_number */
    0,                                                  /* tp_as_sequence */
    0,                                                  /* tp_as_mapping */
    delta_hash,                                         /* tp_hash */
    0,                                                  /* tp_call */
    delta_str,                                          /* tp_str */
    PyObject_GenericGetAttr,                            /* tp_getattro */
    0,                                                  /* tp_setattro */
    0,                                                  /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,           /* tp_flags */
    delta_new__doc__,                                   /* tp_doc */
    0,                                                  /* tp_traverse */
    0,                                                  /* tp_clear */
    delta_richcompare,                                  /* tp_richcompare */
    0,                                                  /* tp_weaklistoffset */
    0,                                                  /* tp_iter */
    0,                                                  /* tp_iternext */
    delta_methods,                                      /* tp_methods */
    delta_members,                                      /* tp_members */
    0,                                                  /* tp_getset */
    0,                                                  /* tp_base */
    0,                                                  /* tp_dict */
    0,                                                  /* tp_descr_get */
    0,                                                  /* tp_descr_set */
    0,                                                  /* tp_dictoffset */
    0,                                                  /* tp_init */
    0,                                                  /* tp_alloc */
    delta_new,                                          /* tp_new */
    0,                                                  /* tp_free */
};
// XXX Can we make this const?
/* Statically allocated singleton for timedelta(0), handed out by
 * look_up_delta() so the common zero value is never reallocated. */
static PyDateTime_Delta zero_delta = {
    PyObject_HEAD_INIT(&PyDateTime_DeltaType)
    /* Letting this be set lazily is a benign race. */
    .hashcode = -1,
};
/* Return the shared zero_delta singleton for an exact-typed zero value;
 * NULL tells the caller to allocate a fresh object instead. */
static PyDateTime_Delta *
look_up_delta(int days, int seconds, int microseconds, PyTypeObject *type)
{
    if (days != 0 || seconds != 0 || microseconds != 0) {
        return NULL;
    }
    if (type != Py_TYPE(&zero_delta)) {
        return NULL;
    }
    return &zero_delta;
}
/*
* PyDateTime_Date implementation.
*/
/* Accessor properties. */
static PyObject *
date_year(PyObject *op, void *Py_UNUSED(closure))
{
    /* Read-only "year" accessor, unpacked from the packed date bytes. */
    return PyLong_FromLong(GET_YEAR(PyDate_CAST(op)));
}
static PyObject *
date_month(PyObject *op, void *Py_UNUSED(closure))
{
    /* Read-only "month" accessor, unpacked from the packed date bytes. */
    return PyLong_FromLong(GET_MONTH(PyDate_CAST(op)));
}
static PyObject *
date_day(PyObject *op, void *Py_UNUSED(closure))
{
    /* Read-only "day" accessor, unpacked from the packed date bytes. */
    return PyLong_FromLong(GET_DAY(PyDate_CAST(op)));
}
/* Getters only — date objects are immutable, so no setters are defined. */
static PyGetSetDef date_getset[] = {
    {"year", date_year},
    {"month", date_month},
    {"day", date_day},
    {NULL}
};
/* Constructors. */
/* Rebuild a date from its pickled packed-byte state. The caller has
 * already validated the state's length and month byte. */
static PyObject *
date_from_pickle(PyTypeObject *type, PyObject *state)
{
    PyDateTime_Date *me = (PyDateTime_Date *)(type->tp_alloc(type, 0));
    if (me == NULL) {
        return NULL;
    }
    memcpy(me->data, PyBytes_AS_STRING(state), _PyDateTime_DATE_DATASIZE);
    /* -1 marks the cached hash as "not computed yet". */
    me->hashcode = -1;
    return (PyObject *)me;
}
/* tp_new: dispatch between normal construction and pickle revival. A
 * single bytes (or latin-1 str, from 2.x pickles) argument of the right
 * size with a sane month byte is treated as __getstate__ output. */
static PyObject *
date_new(PyTypeObject *type, PyObject *args, PyObject *kw)
{
    /* Check for invocation from pickle with __getstate__ state */
    if (PyTuple_GET_SIZE(args) == 1) {
        PyObject *state = PyTuple_GET_ITEM(args, 0);
        if (PyBytes_Check(state)) {
            if (PyBytes_GET_SIZE(state) == _PyDateTime_DATE_DATASIZE &&
                MONTH_IS_SANE(PyBytes_AS_STRING(state)[2]))
            {
                return date_from_pickle(type, state);
            }
        }
        else if (PyUnicode_Check(state)) {
            if (PyUnicode_GET_LENGTH(state) == _PyDateTime_DATE_DATASIZE &&
                MONTH_IS_SANE(PyUnicode_READ_CHAR(state, 2)))
            {
                /* The str came from an old pickle; recover the raw bytes. */
                state = PyUnicode_AsLatin1String(state);
                if (state == NULL) {
                    if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError)) {
                        /* More informative error message. */
                        PyErr_SetString(PyExc_ValueError,
                            "Failed to encode latin1 string when unpickling "
                            "a date object. "
                            "pickle.load(data, encoding='latin1') is assumed.");
                    }
                    return NULL;
                }
                PyObject *self = date_from_pickle(type, state);
                Py_DECREF(state);
                return self;
            }
        }
    }
    /* Not pickle state: fall through to the regular (year, month, day)
     * constructor generated by Argument Clinic. */
    return datetime_date(type, args, kw);
}
/*[clinic input]
@classmethod
datetime.date.__new__
year: int
month: int
day: int
Concrete date type.
[clinic start generated code]*/
static PyObject *
datetime_date_impl(PyTypeObject *type, int year, int month, int day)
/*[clinic end generated code: output=6654caa3dea7d518 input=fd1bac0658690455]*/
{
    /* Range validation happens inside new_date_ex(). */
    return new_date_ex(year, month, day, type);
}
/* Shared worker for date.fromtimestamp(): convert a POSIX timestamp to a
 * local-time date of type cls. */
static PyObject *
date_fromtimestamp(PyTypeObject *cls, PyObject *obj)
{
    time_t t;
    struct tm tm;

    /* _PyTime_ROUND_FLOOR: truncate toward the past second. */
    if (_PyTime_ObjectToTime_t(obj, &t, _PyTime_ROUND_FLOOR) == -1) {
        return NULL;
    }
    if (_PyTime_localtime(t, &tm) != 0) {
        return NULL;
    }
    /* struct tm counts years from 1900 and months from 0. */
    return new_date_subclass_ex(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                                cls);
}
/* Return new date from current time.
* We say this is equivalent to fromtimestamp(time.time()), and the
* only way to be sure of that is to *call* time.time(). That's not
* generally the same as calling C's time.
*/
/*[clinic input]
@classmethod
datetime.date.today
Current date or datetime.
Equivalent to fromtimestamp(time.time()).
[clinic start generated code]*/
static PyObject *
datetime_date_today_impl(PyTypeObject *type)
/*[clinic end generated code: output=d5474697df6b251c input=21688afa289c0a06]*/
{
    if (type == &PyDateTime_DateType) {
        /* Fast path for the exact date type: read the clock with C time()
         * and build the result directly. */
        time_t now;
        struct tm tm;
        time(&now);
        if (_PyTime_localtime(now, &tm) != 0) {
            return NULL;
        }
        return new_date_ex(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                           type);
    }
    PyObject *now_float = time_time();
    if (now_float == NULL) {
        return NULL;
    }
    /* Note well: since today() is a class method, it may not call
     * date.fromtimestamp, e.g., it may call datetime.fromtimestamp --
     * so dispatch through the (possibly overridden) attribute. */
    PyObject *result = PyObject_CallMethodOneArg(
            (PyObject *)type, &_Py_ID(fromtimestamp), now_float);
    Py_DECREF(now_float);
    return result;
}
/*[clinic input]
@permit_long_docstring_body
@classmethod
datetime.date.fromtimestamp
timestamp: object
/
Create a date from a POSIX timestamp.
The timestamp is a number, e.g. created via time.time(), that is interpreted
as local time.
[clinic start generated code]*/
static PyObject *
datetime_date_fromtimestamp_impl(PyTypeObject *type, PyObject *timestamp)
/*[clinic end generated code: output=59def4e32c028fb6 input=55ff6940f0a8339f]*/
{
    /* Thin wrapper so the worker can also serve the C-API compat shim. */
    return date_fromtimestamp(type, timestamp);
}
/* bpo-36025: This is a wrapper for API compatibility with the public C API,
* which expects a function that takes an *args tuple, whereas the argument
* clinic generates code that takes METH_O.
*/
static PyObject *
datetime_date_fromtimestamp_capi(PyObject *cls, PyObject *args)
{
    /* The public C API passes an args tuple; unpack the one argument the
     * clinic METH_O implementation would have received directly. */
    PyObject *timestamp;
    if (!PyArg_UnpackTuple(args, "fromtimestamp", 1, 1, &timestamp)) {
        return NULL;
    }
    return date_fromtimestamp((PyTypeObject *)cls, timestamp);
}
/*[clinic input]
@classmethod
datetime.date.fromordinal
ordinal: int
/
Construct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
[clinic start generated code]*/
static PyObject *
datetime_date_fromordinal_impl(PyTypeObject *type, int ordinal)
/*[clinic end generated code: output=ea5cc69d86614a6b input=a3a4eedf582f145e]*/
{
    /* Ordinal 1 is January 1 of year 1; smaller values are invalid. */
    if (ordinal < 1) {
        PyErr_SetString(PyExc_ValueError, "ordinal must be >= 1");
        return NULL;
    }
    int year, month, day;
    ord_to_ymd(ordinal, &year, &month, &day);
    return new_date_subclass_ex(year, month, day, type);
}
/*[clinic input]
@classmethod
datetime.date.fromisoformat
string: unicode
/
Construct a date from a string in ISO 8601 format.
[clinic start generated code]*/
static PyObject *
datetime_date_fromisoformat_impl(PyTypeObject *type, PyObject *string)
/*[clinic end generated code: output=8b9f9324904fca02 input=73c64216c10bcc8e]*/
{
    Py_ssize_t len;
    const char *dt_ptr = PyUnicode_AsUTF8AndSize(string, &len);
    int year = 0, month = 0, day = 0;

    /* Only lengths 7, 8 and 10 can be valid ISO 8601 date strings; every
     * other length is rejected without attempting a parse. */
    if (dt_ptr != NULL && (len == 7 || len == 8 || len == 10)) {
        if (parse_isoformat_date(dt_ptr, len, &year, &month, &day) >= 0) {
            return new_date_subclass_ex(year, month, day, type);
        }
    }
    /* Any failure (including UTF-8 conversion) reports the same error. */
    PyErr_Format(PyExc_ValueError, "Invalid isoformat string: %R", string);
    return NULL;
}
/*[clinic input]
@classmethod
datetime.date.fromisocalendar
year: int
week: int
day: int
Construct a date from the ISO year, week number and weekday.
This is the inverse of the date.isocalendar() function.
[clinic start generated code]*/
static PyObject *
datetime_date_fromisocalendar_impl(PyTypeObject *type, int year, int week,
                                   int day)
/*[clinic end generated code: output=7b26e15115d24df6 input=fbb05b53d6fb51d8]*/
{
    int month;
    /* iso_to_ymd overwrites year/day in place with the Gregorian values;
     * its negative return codes identify which input was out of range. */
    int rv = iso_to_ymd(year, week, day, &year, &month, &day);

    if (rv == -4) {  /* ISO year out of supported range */
        PyErr_Format(PyExc_ValueError,
                     "year must be in %d..%d, not %d", MINYEAR, MAXYEAR, year);
        return NULL;
    }
    if (rv == -2) {  /* week number invalid for that year */
        PyErr_Format(PyExc_ValueError, "Invalid week: %d", week);
        return NULL;
    }
    if (rv == -3) {  /* weekday outside 1..7 */
        PyErr_Format(PyExc_ValueError, "Invalid weekday: %d (range is [1, 7])",
                     day);
        return NULL;
    }
    return new_date_subclass_ex(year, month, day, type);
}
/*[clinic input]
@classmethod
datetime.date.strptime
string: unicode
format: unicode
/
Parse string according to the given date format (like time.strptime()).
For a list of supported format codes, see the documentation:
https://docs.python.org/3/library/datetime.html#format-codes
[clinic start generated code]*/
static PyObject *
datetime_date_strptime_impl(PyTypeObject *type, PyObject *string,
                            PyObject *format)
/*[clinic end generated code: output=454d473bee2d5161 input=31d57bb789433e99]*/
{
    PyObject *result;
    /* Parsing is delegated to the pure-Python _strptime module; the helper
     * receives the concrete (sub)class so subclasses are constructed. */
    PyObject *module = PyImport_Import(&_Py_ID(_strptime));
    if (module == NULL) {
        return NULL;
    }
    result = PyObject_CallMethodObjArgs(module,
                                        &_Py_ID(_strptime_datetime_date),
                                        (PyObject *)type, string, format, NULL);
    Py_DECREF(module);
    return result;
}
/*
* Date arithmetic.
*/
/* Compute date + timedelta (or date - timedelta when negate is nonzero).
 * Returns a new instance of the date's own (sub)class, or NULL with an
 * exception set if the resulting date is out of range.
 */
static PyObject *
add_date_timedelta(PyDateTime_Date *date, PyDateTime_Delta *delta, int negate)
{
    int y = GET_YEAR(date);
    int m = GET_MONTH(date);
    int td_days = GET_TD_DAYS(delta);
    if (negate) {
        td_days = -td_days;
    }
    /* C-level overflow is impossible because |td_days| < 1e9. */
    int d = GET_DAY(date) + td_days;

    if (normalize_date(&y, &m, &d) < 0) {
        return NULL;
    }
    return new_date_subclass_ex(y, m, d, Py_TYPE(date));
}
static PyObject *
date_add(PyObject *left, PyObject *right)
{
    /* datetime is a date subclass; let datetime's own nb_add handle it. */
    if (PyDateTime_Check(left) || PyDateTime_Check(right)) {
        Py_RETURN_NOTIMPLEMENTED;
    }
    if (PyDate_Check(left)) {
        /* date + delta */
        if (PyDelta_Check(right)) {
            return add_date_timedelta((PyDateTime_Date *)left,
                                      (PyDateTime_Delta *)right, 0);
        }
    }
    else if (PyDelta_Check(left)) {
        /* delta + date: 'right' must be one of us, or we wouldn't have
         * been called. */
        return add_date_timedelta((PyDateTime_Date *)right,
                                  (PyDateTime_Delta *)left, 0);
    }
    Py_RETURN_NOTIMPLEMENTED;
}
static PyObject *
date_subtract(PyObject *left, PyObject *right)
{
    /* datetime subtraction is handled by the datetime subclass itself. */
    if (PyDateTime_Check(left) || PyDateTime_Check(right)) {
        Py_RETURN_NOTIMPLEMENTED;
    }
    if (!PyDate_Check(left)) {
        Py_RETURN_NOTIMPLEMENTED;
    }
    if (PyDate_Check(right)) {
        /* date - date -> timedelta, via the ordinal difference. */
        int lhs_ord = ymd_to_ord(GET_YEAR(left), GET_MONTH(left),
                                 GET_DAY(left));
        int rhs_ord = ymd_to_ord(GET_YEAR(right), GET_MONTH(right),
                                 GET_DAY(right));
        return new_delta(lhs_ord - rhs_ord, 0, 0, 0);
    }
    if (PyDelta_Check(right)) {
        /* date - delta -> date */
        return add_date_timedelta((PyDateTime_Date *)left,
                                  (PyDateTime_Delta *)right, 1);
    }
    Py_RETURN_NOTIMPLEMENTED;
}
/* Various ways to turn a date into a string. */
/* repr(date) -> "datetime.date(Y, M, D)" (uses the concrete type's name). */
static PyObject *
date_repr(PyObject *op)
{
    PyDateTime_Date *self = PyDate_CAST(op);
    const char *type_name = Py_TYPE(self)->tp_name;
    return PyUnicode_FromFormat("%s(%d, %d, %d)", type_name,
                                GET_YEAR(self), GET_MONTH(self),
                                GET_DAY(self));
}
/* isoformat() -> "YYYY-MM-DD", zero-padded. */
static PyObject *
date_isoformat(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_Date *self = PyDate_CAST(op);
    int y = GET_YEAR(self);
    int m = GET_MONTH(self);
    int d = GET_DAY(self);
    return PyUnicode_FromFormat("%04d-%02d-%02d", y, m, d);
}
/* str() calls the appropriate isoformat() method. */
static PyObject *
date_str(PyObject *self)
{
    /* Dispatch through the attribute so subclass overrides are honored. */
    return PyObject_CallMethodNoArgs(self, &_Py_ID(isoformat));
}
/* ctime() -> e.g. "Wed Mar  8 00:00:00 2023"; time-of-day fixed at 0:0:0. */
static PyObject *
date_ctime(PyObject *self, PyObject *Py_UNUSED(dummy))
{
    return format_ctime(self, 0, 0, 0);
}
/*[clinic input]
datetime.date.strftime
self: self(type="PyObject *")
format: unicode
Format using strftime().
Example: "%d/%m/%Y, %H:%M:%S".
For a list of supported format codes, see the documentation:
https://docs.python.org/3/library/datetime.html#format-codes
[clinic start generated code]*/
static PyObject *
datetime_date_strftime_impl(PyObject *self, PyObject *format)
/*[clinic end generated code: output=6529b70095e16778 input=b6fd4a2ded27b557]*/
{
    /* This method can be inherited, and needs to call the
     * timetuple() method appropriate to self's class.
     */
    PyObject *result;
    PyObject *tuple;
    tuple = PyObject_CallMethodNoArgs(self, &_Py_ID(timetuple));
    if (tuple == NULL)
        return NULL;
    /* wrap_strftime handles %f and other datetime-specific directives. */
    result = wrap_strftime(self, format, tuple, self);
    Py_DECREF(tuple);
    return result;
}
/*[clinic input]
datetime.date.__format__
self: self(type="PyObject *")
format: unicode
/
Formats self with strftime.
[clinic start generated code]*/
static PyObject *
datetime_date___format___impl(PyObject *self, PyObject *format)
/*[clinic end generated code: output=efa0223d000a93b7 input=e417a7c84e1abaf9]*/
{
    /* if the format is zero length, return str(self) */
    if (PyUnicode_GetLength(format) == 0)
        return PyObject_Str(self);
    /* Otherwise delegate to strftime() (possibly a subclass override). */
    return PyObject_CallMethodOneArg(self, &_Py_ID(strftime), format);
}
/* ISO methods. */
/* isoweekday() -> 1..7 with Monday == 1 (weekday() is 0-based). */
static PyObject *
date_isoweekday(PyObject *self, PyObject *Py_UNUSED(dummy))
{
    int dow0 = weekday(GET_YEAR(self), GET_MONTH(self), GET_DAY(self));
    return PyLong_FromLong(1 + dow0);
}
/* Docstring for the IsoCalendarDate result type.  Note: fixed the
 * previously unbalanced "((year, ..." parenthesis in the text. */
PyDoc_STRVAR(iso_calendar_date__doc__,
"The result of date.isocalendar() or datetime.isocalendar()\n\n\
This object may be accessed either as a tuple of\n\
(year, week, weekday)\n\
or via the object attributes as named in the above tuple.");

/* IsoCalendarDate is a tuple subclass with no extra C-level state; the
 * three items are exposed as named attributes via getters below. */
typedef struct {
    PyTupleObject tuple;
} PyDateTime_IsoCalendarDate;
/* repr for IsoCalendarDate: "IsoCalendarDate(year=Y, week=W, weekday=D)". */
static PyObject *
iso_calendar_date_repr(PyObject *self)
{
    PyObject *parts[3];
    /* Borrowed references; PyTuple_GetItem sets an exception on failure. */
    for (Py_ssize_t i = 0; i < 3; i++) {
        parts[i] = PyTuple_GetItem(self, i);
        if (parts[i] == NULL) {
            return NULL;
        }
    }
    return PyUnicode_FromFormat("%.200s(year=%S, week=%S, weekday=%S)",
                                Py_TYPE(self)->tp_name,
                                parts[0], parts[1], parts[2]);
}
/* Pickle support: reduce to a plain tuple (the named attributes are not
 * preserved across pickling; the class itself is not re-created). */
static PyObject *
iso_calendar_date_reduce(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    // Construct the tuple that this reduces to
    PyObject *reduce_tuple = Py_BuildValue(
        "O((OOO))", &PyTuple_Type,
        PyTuple_GET_ITEM(self, 0),
        PyTuple_GET_ITEM(self, 1),
        PyTuple_GET_ITEM(self, 2)
    );
    return reduce_tuple;
}
/* .year attribute: item 0 of the underlying tuple, as a new reference. */
static PyObject *
iso_calendar_date_year(PyObject *self, void *Py_UNUSED(closure))
{
    PyObject *item = PyTuple_GetItem(self, 0);
    return item == NULL ? NULL : Py_NewRef(item);
}
/* .week attribute: item 1 of the underlying tuple, as a new reference. */
static PyObject *
iso_calendar_date_week(PyObject *self, void *Py_UNUSED(closure))
{
    PyObject *item = PyTuple_GetItem(self, 1);
    return item == NULL ? NULL : Py_NewRef(item);
}
/* .weekday attribute: item 2 of the underlying tuple, as a new reference. */
static PyObject *
iso_calendar_date_weekday(PyObject *self, void *Py_UNUSED(closure))
{
    PyObject *item = PyTuple_GetItem(self, 2);
    return item == NULL ? NULL : Py_NewRef(item);
}
/* Read-only named accessors mirroring tuple items 0..2. */
static PyGetSetDef iso_calendar_date_getset[] = {
    {"year", iso_calendar_date_year},
    {"week", iso_calendar_date_week},
    {"weekday", iso_calendar_date_weekday},
    {NULL}
};
/* Only __reduce__ is needed beyond what tuple already provides. */
static PyMethodDef iso_calendar_date_methods[] = {
    {"__reduce__", iso_calendar_date_reduce, METH_NOARGS,
     PyDoc_STR("__reduce__() -> (cls, state)")},
    {NULL, NULL},
};
static int
iso_calendar_date_traverse(PyObject *self, visitproc visit, void *arg)
{
    /* Heap type: the type object itself participates in GC. */
    Py_VISIT(Py_TYPE(self));
    /* Then visit the tuple items via the base type's traversal. */
    return PyTuple_Type.tp_traverse(self, visit, arg);
}
static void
iso_calendar_date_dealloc(PyObject *self)
{
    /* Save the type before the instance dies: heap types hold a reference
     * from each instance that must be dropped after deallocation. */
    PyTypeObject *tp = Py_TYPE(self);
    PyTuple_Type.tp_dealloc(self);  // delegate GC-untrack as well
    Py_DECREF(tp);
}
/* Slot table for the heap-allocated IsoCalendarDate type (base: tuple,
 * set via the spec's class creation in the module init code). */
static PyType_Slot isocal_slots[] = {
    {Py_tp_repr, iso_calendar_date_repr},
    {Py_tp_doc, (void *)iso_calendar_date__doc__},
    {Py_tp_methods, iso_calendar_date_methods},
    {Py_tp_getset, iso_calendar_date_getset},
    {Py_tp_new, iso_calendar_date_new},
    {Py_tp_dealloc, iso_calendar_date_dealloc},
    {Py_tp_traverse, iso_calendar_date_traverse},
    {0, NULL},
};
/* Spec for PyType_FromSpec*: immutable, GC-aware tuple subclass. */
static PyType_Spec isocal_spec = {
    .name = "datetime.IsoCalendarDate",
    .basicsize = sizeof(PyDateTime_IsoCalendarDate),
    .flags = (Py_TPFLAGS_DEFAULT |
              Py_TPFLAGS_HAVE_GC |
              Py_TPFLAGS_IMMUTABLETYPE),
    .slots = isocal_slots,
};
/*[clinic input]
@classmethod
datetime.IsoCalendarDate.__new__ as iso_calendar_date_new
year: int
week: int
weekday: int
[clinic start generated code]*/
static PyObject *
iso_calendar_date_new_impl(PyTypeObject *type, int year, int week,
                           int weekday)
/*[clinic end generated code: output=383d33d8dc7183a2 input=4f2c663c9d19c4ee]*/
{
    PyDateTime_IsoCalendarDate *self;
    /* Allocate a 3-item tuple-compatible object and fill it in place. */
    self = (PyDateTime_IsoCalendarDate *) type->tp_alloc(type, 3);
    if (self == NULL) {
        return NULL;
    }
    /* NOTE(review): PyLong_FromLong results are stored unchecked; a NULL
     * on memory failure would leave a NULL tuple item -- presumably
     * considered acceptable here, matching the upstream pattern. */
    PyTuple_SET_ITEM(self, 0, PyLong_FromLong(year));
    PyTuple_SET_ITEM(self, 1, PyLong_FromLong(week));
    PyTuple_SET_ITEM(self, 2, PyLong_FromLong(weekday));
    return (PyObject *)self;
}
/* isocalendar() -> IsoCalendarDate(year, week, weekday).
 * ISO weeks run Monday..Sunday; week 1 is the week containing the first
 * Thursday of the year (iso_week1_monday yields its Monday's ordinal).
 */
static PyObject *
date_isocalendar(PyObject *self, PyObject *Py_UNUSED(dummy))
{
    int year = GET_YEAR(self);
    int week1_monday = iso_week1_monday(year);
    int today = ymd_to_ord(year, GET_MONTH(self), GET_DAY(self));
    int week;
    int day;

    week = divmod(today - week1_monday, 7, &day);
    if (week < 0) {
        /* Date falls before week 1 of its Gregorian year: it belongs to
         * the last ISO week of the previous year. */
        --year;
        week1_monday = iso_week1_monday(year);
        week = divmod(today - week1_monday, 7, &day);
    }
    else if (week >= 52 && today >= iso_week1_monday(year + 1)) {
        /* Date falls on/after next year's week 1 Monday: ISO year rolls
         * forward and the week count restarts. */
        ++year;
        week = 0;
    }
    PyObject *current_mod = NULL;
    datetime_state *st = GET_CURRENT_STATE(current_mod);
    /* week/day are 0-based internally; the ISO convention is 1-based. */
    PyObject *v = iso_calendar_date_new_impl(ISOCALENDAR_DATE_TYPE(st),
                                             year, week + 1, day + 1);
    RELEASE_CURRENT_STATE(st, current_mod);
    if (v == NULL) {
        return NULL;
    }
    return v;
}
/* Miscellaneous methods. */
static PyObject *
date_richcompare(PyObject *self, PyObject *other, int op)
{
    /* Since DateTime is a subclass of Date, if the other object is
     * a DateTime, it would compute an equality testing or an ordering
     * based on the date part alone, and we don't want that.
     * So return NotImplemented here in that case.
     * If a subclass wants to change this, it's up to the subclass to do so.
     * The behavior is the same as if Date and DateTime were independent
     * classes.
     */
    if (PyDate_Check(other) && !PyDateTime_Check(other)) {
        /* The packed y/m/d bytes are ordered most-significant-first, so a
         * raw memcmp yields the correct chronological ordering. */
        int diff = memcmp(((PyDateTime_Date *)self)->data,
                          ((PyDateTime_Date *)other)->data,
                          _PyDateTime_DATE_DATASIZE);
        return diff_to_bool(diff, op);
    }
    else
        Py_RETURN_NOTIMPLEMENTED;
}
/* timetuple() -> time.struct_time with the clock fields zeroed and
 * tm_isdst set to -1 ("unknown"). */
static PyObject *
date_timetuple(PyObject *self, PyObject *Py_UNUSED(dummy))
{
    int y = GET_YEAR(self);
    int m = GET_MONTH(self);
    int d = GET_DAY(self);
    return build_struct_time(y, m, d, 0, 0, 0, -1);
}
/*[clinic input]
datetime.date.replace
year: int(c_default="GET_YEAR(self)") = unchanged
month: int(c_default="GET_MONTH(self)") = unchanged
day: int(c_default="GET_DAY(self)") = unchanged
Return date with new specified fields.
[clinic start generated code]*/
static PyObject *
datetime_date_replace_impl(PyDateTime_Date *self, int year, int month,
                           int day)
/*[clinic end generated code: output=2a9430d1e6318aeb input=0d1f02685b3e90f6]*/
{
    /* Defaults already hold the current field values (see clinic input),
     * so simply constructing with the three ints implements replace(). */
    return new_date_subclass_ex(year, month, day, Py_TYPE(self));
}
/* Hash the packed binary state of a date/time object. */
static Py_hash_t
generic_hash(unsigned char *data, int len)
{
    return Py_HashBuffer(data, len);
}
static PyObject *date_getstate(PyDateTime_Date *self);
static Py_hash_t
date_hash(PyObject *op)
{
    PyDateTime_Date *self = PyDate_CAST(op);
    /* -1 marks "not yet computed"; relaxed atomics make the lazy caching
     * safe under free threading (recomputing is harmless since the hash
     * is deterministic). */
    Py_hash_t hash = FT_ATOMIC_LOAD_SSIZE_RELAXED(self->hashcode);
    if (hash == -1) {
        hash = generic_hash(
            (unsigned char *)self->data, _PyDateTime_DATE_DATASIZE);
        FT_ATOMIC_STORE_SSIZE_RELAXED(self->hashcode, hash);
    }
    return hash;
}
/* toordinal() -> proleptic Gregorian ordinal; 0001-01-01 is day 1. */
static PyObject *
date_toordinal(PyObject *self, PyObject *Py_UNUSED(dummy))
{
    int ordinal = ymd_to_ord(GET_YEAR(self), GET_MONTH(self), GET_DAY(self));
    return PyLong_FromLong(ordinal);
}
/* weekday() -> 0..6 with Monday == 0. */
static PyObject *
date_weekday(PyObject *self, PyObject *Py_UNUSED(dummy))
{
    int dow0 = weekday(GET_YEAR(self), GET_MONTH(self), GET_DAY(self));
    return PyLong_FromLong(dow0);
}
/* Pickle support, a simple use of __reduce__. */
/* __getstate__ isn't exposed */
/* Build the pickle state: a 1-tuple holding the packed y/m/d bytes. */
static PyObject *
date_getstate(PyDateTime_Date *self)
{
    PyObject* field;
    field = PyBytes_FromStringAndSize((char*)self->data,
                                      _PyDateTime_DATE_DATASIZE);
    /* "N" steals the reference to field (and tolerates NULL on failure). */
    return Py_BuildValue("(N)", field);
}
/* __reduce__() -> (cls, state) so pickling round-trips via date.__new__. */
static PyObject *
date_reduce(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_Date *self = PyDate_CAST(op);
    PyObject *state = date_getstate(self);
    return Py_BuildValue("(ON)", Py_TYPE(self), state);
}
/* Method table for datetime.date (clinic-generated entries via the
 * *_METHODDEF macros; plain entries for hand-written METH_NOARGS). */
static PyMethodDef date_methods[] = {
    /* Class methods: */
    DATETIME_DATE_FROMTIMESTAMP_METHODDEF
    DATETIME_DATE_FROMORDINAL_METHODDEF
    DATETIME_DATE_FROMISOFORMAT_METHODDEF
    DATETIME_DATE_FROMISOCALENDAR_METHODDEF
    DATETIME_DATE_STRPTIME_METHODDEF
    DATETIME_DATE_TODAY_METHODDEF
    /* Instance methods: */
    {"ctime", date_ctime, METH_NOARGS,
     PyDoc_STR("Return ctime() style string.")},
    DATETIME_DATE_STRFTIME_METHODDEF
    DATETIME_DATE___FORMAT___METHODDEF
    {"timetuple", date_timetuple, METH_NOARGS,
     PyDoc_STR("Return time tuple, compatible with time.localtime().")},
    {"isocalendar", date_isocalendar, METH_NOARGS,
     PyDoc_STR("Return a named tuple containing ISO year, week number, and "
               "weekday.")},
    {"isoformat", date_isoformat, METH_NOARGS,
     PyDoc_STR("Return string in ISO 8601 format, YYYY-MM-DD.")},
    {"isoweekday", date_isoweekday, METH_NOARGS,
     PyDoc_STR("Return the day of the week represented by the date.\n"
               "Monday == 1 ... Sunday == 7")},
    {"toordinal", date_toordinal, METH_NOARGS,
     PyDoc_STR("Return proleptic Gregorian ordinal. January 1 of year "
               "1 is day 1.")},
    {"weekday", date_weekday, METH_NOARGS,
     PyDoc_STR("Return the day of the week represented by the date.\n"
               "Monday == 0 ... Sunday == 6")},
    DATETIME_DATE_REPLACE_METHODDEF
    /* __replace__ (copy.replace protocol) shares the replace() impl. */
    {"__replace__", _PyCFunction_CAST(datetime_date_replace), METH_FASTCALL | METH_KEYWORDS,
     PyDoc_STR("__replace__($self, /, **changes)\n--\n\nThe same as replace().")},
    {"__reduce__", date_reduce, METH_NOARGS,
     PyDoc_STR("__reduce__() -> (cls, state)")},
    {NULL, NULL}
};
/* Only + and - are defined for dates; everything else stays unset. */
static PyNumberMethods date_as_number = {
    date_add,                                   /* nb_add */
    date_subtract,                              /* nb_subtract */
    0,                                          /* nb_multiply */
    0,                                          /* nb_remainder */
    0,                                          /* nb_divmod */
    0,                                          /* nb_power */
    0,                                          /* nb_negative */
    0,                                          /* nb_positive */
    0,                                          /* nb_absolute */
    0,                                          /* nb_bool */
};
/* Static type object for datetime.date (subclassable; datetime.datetime
 * derives from it). */
static PyTypeObject PyDateTime_DateType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "datetime.date",                            /* tp_name */
    sizeof(PyDateTime_Date),                    /* tp_basicsize */
    0,                                          /* tp_itemsize */
    0,                                          /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    date_repr,                                  /* tp_repr */
    &date_as_number,                            /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    date_hash,                                  /* tp_hash */
    0,                                          /* tp_call */
    date_str,                                   /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,   /* tp_flags */
    datetime_date__doc__,                       /* tp_doc */
    0,                                          /* tp_traverse */
    0,                                          /* tp_clear */
    date_richcompare,                           /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    date_methods,                               /* tp_methods */
    0,                                          /* tp_members */
    date_getset,                                /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    0,                                          /* tp_init */
    0,                                          /* tp_alloc */
    date_new,                                   /* tp_new */
    0,                                          /* tp_free */
};
/*
* PyDateTime_TZInfo implementation.
*/
/* This is a pure abstract base class, so doesn't do anything beyond
* raising NotImplemented exceptions. Real tzinfo classes need
* to derive from this. This is mostly for clarity, and for efficiency in
* datetime and time constructors (their tzinfo arguments need to
* be subclasses of this tzinfo class, which is easy and quick to check).
*
* Note: For reasons having to do with pickling of subclasses, we have
* to allow tzinfo objects to be instantiated. This wasn't an issue
* in the Python implementation (__init__() could raise NotImplementedError
* there without ill effect), but doing so in the C implementation hit a
* brick wall.
*/
/* Helper: raise NotImplementedError naming the unimplemented abstract
 * tzinfo method; always returns NULL. */
static PyObject *
tzinfo_nogo(const char* methodname)
{
    PyErr_Format(PyExc_NotImplementedError,
                 "a tzinfo subclass must implement %s()",
                 methodname);
    return NULL;
}
/* Methods. A subclass must implement these. */
static PyObject *
tzinfo_tzname(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(dt))
{
    /* Abstract: always raises NotImplementedError. */
    return tzinfo_nogo("tzname");
}
static PyObject *
tzinfo_utcoffset(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(dt))
{
    /* Abstract: always raises NotImplementedError. */
    return tzinfo_nogo("utcoffset");
}
static PyObject *
tzinfo_dst(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(dt))
{
    /* Abstract: always raises NotImplementedError. */
    return tzinfo_nogo("dst");
}
static PyObject *add_datetime_timedelta(PyDateTime_DateTime *date,
PyDateTime_Delta *delta,
int factor);
static PyObject *datetime_utcoffset(PyObject *self, PyObject *);
static PyObject *datetime_dst(PyObject *self, PyObject *);
/* Default tzinfo.fromutc(dt): convert a datetime carrying this tzinfo
 * from UTC to local time.  Implements the documented algorithm:
 *   result = dt + utcoffset(dt) - dst(dt), then add dst(result) if the
 * adjusted time turns out to be in DST.  Requires utcoffset() and dst()
 * to return non-None, and dt.tzinfo to be self.
 */
static PyObject *
tzinfo_fromutc(PyObject *self, PyObject *dt)
{
    PyObject *result = NULL;
    PyObject *off = NULL, *dst = NULL;
    PyDateTime_Delta *delta = NULL;

    if (!PyDateTime_Check(dt)) {
        PyErr_SetString(PyExc_TypeError,
                        "fromutc: argument must be a datetime");
        return NULL;
    }
    if (GET_DT_TZINFO(dt) != self) {
        PyErr_SetString(PyExc_ValueError, "fromutc: dt.tzinfo "
                        "is not self");
        return NULL;
    }

    off = datetime_utcoffset(dt, NULL);
    if (off == NULL)
        return NULL;
    if (off == Py_None) {
        PyErr_SetString(PyExc_ValueError, "fromutc: non-None "
                        "utcoffset() result required");
        goto Fail;
    }

    dst = datetime_dst(dt, NULL);
    if (dst == NULL)
        goto Fail;
    if (dst == Py_None) {
        PyErr_SetString(PyExc_ValueError, "fromutc: non-None "
                        "dst() result required");
        goto Fail;
    }

    /* delta = utcoffset - dst == the standard (non-DST) offset. */
    delta = (PyDateTime_Delta *)delta_subtract(off, dst);
    if (delta == NULL)
        goto Fail;
    result = add_datetime_timedelta((PyDateTime_DateTime *)dt, delta, 1);
    if (result == NULL)
        goto Fail;

    /* Re-evaluate dst() at the adjusted time and fold it in if nonzero. */
    Py_DECREF(dst);
    dst = call_dst(GET_DT_TZINFO(dt), result);
    if (dst == NULL)
        goto Fail;
    if (dst == Py_None)
        goto Inconsistent;
    if (delta_bool(dst) != 0) {
        Py_SETREF(result, add_datetime_timedelta((PyDateTime_DateTime *)result,
                                                 (PyDateTime_Delta *)dst, 1));
        if (result == NULL)
            goto Fail;
    }
    Py_DECREF(delta);
    Py_DECREF(dst);
    Py_DECREF(off);
    return result;

Inconsistent:
    PyErr_SetString(PyExc_ValueError, "fromutc: tz.dst() gave "
                    "inconsistent results; cannot convert");

    /* fall through to failure */
Fail:
    Py_XDECREF(off);
    Py_XDECREF(dst);
    Py_XDECREF(delta);
    Py_XDECREF(result);
    return NULL;
}
/*
* Pickle support. This is solely so that tzinfo subclasses can use
* pickling -- tzinfo itself is supposed to be uninstantiable.
*/
/* __reduce__ for tzinfo: (cls, args, state).  Constructor args come from
 * __getinitargs__ if the subclass defines it, else an empty tuple; the
 * instance state from the standard object-state protocol. */
static PyObject *
tzinfo_reduce(PyObject *self, PyObject *Py_UNUSED(dummy))
{
    PyObject *args, *state;
    PyObject *getinitargs;

    if (PyObject_GetOptionalAttr(self, &_Py_ID(__getinitargs__), &getinitargs) < 0) {
        return NULL;
    }
    if (getinitargs != NULL) {
        args = PyObject_CallNoArgs(getinitargs);
        Py_DECREF(getinitargs);
    }
    else {
        /* No __getinitargs__: reconstruct with no constructor arguments. */
        args = PyTuple_New(0);
    }
    if (args == NULL) {
        return NULL;
    }

    state = _PyObject_GetState(self);
    if (state == NULL) {
        Py_DECREF(args);
        return NULL;
    }

    return Py_BuildValue("(ONN)", Py_TYPE(self), args, state);
}
/* Abstract tzinfo methods (all raise) plus default fromutc and pickling. */
static PyMethodDef tzinfo_methods[] = {
    {"tzname", tzinfo_tzname, METH_O,
     PyDoc_STR("datetime -> string name of time zone.")},
    {"utcoffset", tzinfo_utcoffset, METH_O,
     PyDoc_STR("datetime -> timedelta showing offset from UTC, negative "
           "values indicating West of UTC")},
    {"dst", tzinfo_dst, METH_O,
     PyDoc_STR("datetime -> DST offset as timedelta positive east of UTC.")},
    {"fromutc", tzinfo_fromutc, METH_O,
     PyDoc_STR("datetime in UTC -> datetime in local time.")},
    {"__reduce__", tzinfo_reduce, METH_NOARGS,
     PyDoc_STR("-> (cls, state)")},
    {NULL, NULL}
};
/* Class docstring for the abstract tzinfo base class. */
static const char tzinfo_doc[] =
PyDoc_STR("Abstract base class for time zone info objects.\n\n"
          "Subclasses must override the tzname(), utcoffset() and dst() methods.");
/* Static type object for datetime.tzinfo.  Instantiable (for pickling of
 * subclasses -- see the comment above) but otherwise purely abstract. */
static PyTypeObject PyDateTime_TZInfoType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "datetime.tzinfo",                          /* tp_name */
    sizeof(PyDateTime_TZInfo),                  /* tp_basicsize */
    0,                                          /* tp_itemsize */
    0,                                          /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,   /* tp_flags */
    tzinfo_doc,                                 /* tp_doc */
    0,                                          /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    tzinfo_methods,                             /* tp_methods */
    0,                                          /* tp_members */
    0,                                          /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    0,                                          /* tp_init */
    0,                                          /* tp_alloc */
    PyType_GenericNew,                          /* tp_new */
    0,                                          /* tp_free */
};
/*[clinic input]
@classmethod
datetime.timezone.__new__ as timezone_new
offset: object(subclass_of="DELTA_TYPE(NO_STATE)")
name: unicode = NULL
Fixed offset from UTC implementation of tzinfo.
[clinic start generated code]*/
static PyObject *
timezone_new_impl(PyTypeObject *type, PyObject *offset, PyObject *name)
/*[clinic end generated code: output=41a2dda500424187 input=d51255afe60382cd]*/
{
    /* Range validation and UTC-singleton interning happen in new_timezone. */
    return new_timezone(offset, name);
}
/* Destructor: drop the owned offset/name references, then free. */
static void
timezone_dealloc(PyObject *op)
{
    PyDateTime_TimeZone *tz = PyTimeZone_CAST(op);
    Py_CLEAR(tz->offset);
    Py_CLEAR(tz->name);
    Py_TYPE(op)->tp_free(op);
}
/* timezone comparison: only ==/!= are supported, and only against other
 * timezone instances; equality is equality of the fixed offsets. */
static PyObject *
timezone_richcompare(PyObject *self, PyObject *other, int op)
{
    if ((op != Py_EQ && op != Py_NE) || !PyTimezone_Check(other)) {
        Py_RETURN_NOTIMPLEMENTED;
    }
    PyDateTime_TimeZone *lhs = PyTimeZone_CAST(self);
    PyDateTime_TimeZone *rhs = PyTimeZone_CAST(other);
    return delta_richcompare(lhs->offset, rhs->offset, op);
}
static Py_hash_t
timezone_hash(PyObject *op)
{
    PyDateTime_TimeZone *self = PyTimeZone_CAST(op);
    /* Hash only the offset, consistent with == comparing only offsets. */
    return delta_hash(self->offset);
}
/* Check argument type passed to tzname, utcoffset, or dst methods.
Returns 0 for good argument. Returns -1 and sets exception info
otherwise.
*/
static int
_timezone_check_argument(PyObject *dt, const char *meth)
{
    if (dt != Py_None && !PyDateTime_Check(dt)) {
        PyErr_Format(PyExc_TypeError, "%s(dt) argument must be a datetime instance"
                     " or None, not %.200s", meth, Py_TYPE(dt)->tp_name);
        return -1;
    }
    return 0;
}
static PyObject *
timezone_repr(PyObject *op)
{
    /* Note that although timezone is not subclassable, it is convenient
       to use Py_TYPE(self)->tp_name here. */
    PyDateTime_TimeZone *self = PyTimeZone_CAST(op);
    const char *type_name = Py_TYPE(self)->tp_name;

    /* The interned UTC singleton has its own canonical repr. */
    if (op == CONST_UTC(NO_STATE)) {
        return PyUnicode_FromFormat("%s.utc", type_name);
    }
    if (self->name != NULL) {
        return PyUnicode_FromFormat("%s(%R, %R)", type_name, self->offset,
                                    self->name);
    }
    return PyUnicode_FromFormat("%s(%R)", type_name, self->offset);
}
/* str(timezone): the explicit name if given, "UTC" for a zero offset,
 * else "UTC+HH:MM[:SS[.ffffff]]" with trailing zero components omitted.
 */
static PyObject *
timezone_str(PyObject *op)
{
    PyDateTime_TimeZone *self = PyTimeZone_CAST(op);
    int hours, minutes, seconds, microseconds;
    PyObject *offset;
    char sign;

    if (self->name != NULL) {
        return Py_NewRef(self->name);
    }
    if ((PyObject *)self == CONST_UTC(NO_STATE) ||
        (GET_TD_DAYS(self->offset) == 0 &&
         GET_TD_SECONDS(self->offset) == 0 &&
         GET_TD_MICROSECONDS(self->offset) == 0))
    {
        return PyUnicode_FromString("UTC");
    }
    /* Offset is normalized, so it is negative if days < 0 */
    if (GET_TD_DAYS(self->offset) < 0) {
        sign = '-';
        /* Work with the magnitude so the divmods below stay non-negative. */
        offset = delta_negative(self->offset);
        if (offset == NULL)
            return NULL;
    }
    else {
        sign = '+';
        offset = Py_NewRef(self->offset);
    }
    /* Offset is not negative here. */
    microseconds = GET_TD_MICROSECONDS(offset);
    seconds = GET_TD_SECONDS(offset);
    Py_DECREF(offset);
    /* Split total seconds into H:M:S. */
    minutes = divmod(seconds, 60, &seconds);
    hours = divmod(minutes, 60, &minutes);
    if (microseconds != 0) {
        return PyUnicode_FromFormat("UTC%c%02d:%02d:%02d.%06d",
                                    sign, hours, minutes,
                                    seconds, microseconds);
    }
    if (seconds != 0) {
        return PyUnicode_FromFormat("UTC%c%02d:%02d:%02d",
                                    sign, hours, minutes, seconds);
    }
    return PyUnicode_FromFormat("UTC%c%02d:%02d", sign, hours, minutes);
}
static PyObject *
timezone_tzname(PyObject *op, PyObject *dt)
{
    if (_timezone_check_argument(dt, "tzname") == -1)
        return NULL;
    /* tzname() is the same string str() produces; dt is ignored. */
    return timezone_str(op);
}
static PyObject *
timezone_utcoffset(PyObject *op, PyObject *dt)
{
    if (_timezone_check_argument(dt, "utcoffset") == -1) {
        return NULL;
    }
    /* Fixed-offset zone: the offset is independent of dt. */
    return Py_NewRef(PyTimeZone_CAST(op)->offset);
}
static PyObject *
timezone_dst(PyObject *op, PyObject *dt)
{
    if (_timezone_check_argument(dt, "dst") == -1)
        return NULL;
    /* Fixed-offset zones have no DST transitions. */
    Py_RETURN_NONE;
}
/* fromutc(dt): for a fixed offset this is simply dt + offset.  Requires
 * dt to be a datetime whose tzinfo is this timezone. */
static PyObject *
timezone_fromutc(PyObject *op, PyObject *arg)
{
    if (!PyDateTime_Check(arg)) {
        PyErr_SetString(PyExc_TypeError,
                        "fromutc: argument must be a datetime");
        return NULL;
    }
    PyDateTime_DateTime *dt = (PyDateTime_DateTime *)arg; // fast safe cast
    if (!HASTZINFO(dt) || dt->tzinfo != op) {
        PyErr_SetString(PyExc_ValueError, "fromutc: dt.tzinfo is not self");
        return NULL;
    }

    PyDateTime_TimeZone *self = PyTimeZone_CAST(op);
    return add_datetime_timedelta(dt, (PyDateTime_Delta *)self->offset, 1);
}
/* Pickle support: constructor arguments are (offset,) or (offset, name). */
static PyObject *
timezone_getinitargs(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_TimeZone *tz = PyTimeZone_CAST(op);
    if (tz->name != NULL) {
        return PyTuple_Pack(2, tz->offset, tz->name);
    }
    return PyTuple_Pack(1, tz->offset);
}
/* Concrete overrides of the abstract tzinfo protocol, plus pickling. */
static PyMethodDef timezone_methods[] = {
    {"tzname", timezone_tzname, METH_O,
     PyDoc_STR("If name is specified when timezone is created, returns the name."
               " Otherwise returns offset as 'UTC(+|-)HH:MM'.")},
    {"utcoffset", timezone_utcoffset, METH_O,
     PyDoc_STR("Return fixed offset.")},
    {"dst", timezone_dst, METH_O,
     PyDoc_STR("Return None.")},
    {"fromutc", timezone_fromutc, METH_O,
     PyDoc_STR("datetime in UTC -> datetime in local time.")},
    {"__getinitargs__", timezone_getinitargs, METH_NOARGS,
     PyDoc_STR("pickle support")},
    {NULL, NULL}
};
/* Static type object for datetime.timezone.  Note: NOT Py_TPFLAGS_BASETYPE,
 * so timezone cannot be subclassed; its base is the abstract tzinfo type. */
static PyTypeObject PyDateTime_TimeZoneType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "datetime.timezone",                        /* tp_name */
    sizeof(PyDateTime_TimeZone),                /* tp_basicsize */
    0,                                          /* tp_itemsize */
    timezone_dealloc,                           /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    timezone_repr,                              /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    timezone_hash,                              /* tp_hash */
    0,                                          /* tp_call */
    timezone_str,                               /* tp_str */
    0,                                          /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                         /* tp_flags */
    timezone_new__doc__,                        /* tp_doc */
    0,                                          /* tp_traverse */
    0,                                          /* tp_clear */
    timezone_richcompare,                       /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    timezone_methods,                           /* tp_methods */
    0,                                          /* tp_members */
    0,                                          /* tp_getset */
    &PyDateTime_TZInfoType,                     /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    0,                                          /* tp_init */
    0,                                          /* tp_alloc */
    timezone_new,                               /* tp_new */
};
// XXX Can we make this const?
/* The statically-allocated timezone.utc singleton: zero offset, no name. */
static PyDateTime_TimeZone utc_timezone = {
    PyObject_HEAD_INIT(&PyDateTime_TimeZoneType)
    .offset = (PyObject *)&zero_delta,
    .name = NULL,
};
/* Interning hook for timezone construction: the unnamed zero-offset zone
 * maps to the shared UTC singleton; anything else gets a fresh object. */
static PyDateTime_TimeZone *
look_up_timezone(PyObject *offset, PyObject *name)
{
    if (name != NULL || offset != utc_timezone.offset) {
        return NULL;
    }
    return (PyDateTime_TimeZone *)CONST_UTC(NO_STATE);
}
/*
* PyDateTime_Time implementation.
*/
/* Accessor properties.
*/
/* .hour getter. */
static PyObject *
time_hour(PyObject *op, void *Py_UNUSED(closure))
{
    PyDateTime_Time *self = PyTime_CAST(op);
    return PyLong_FromLong(TIME_GET_HOUR(self));
}
/* .minute getter. */
static PyObject *
time_minute(PyObject *op, void *Py_UNUSED(closure))
{
    PyDateTime_Time *self = PyTime_CAST(op);
    return PyLong_FromLong(TIME_GET_MINUTE(self));
}
/* The name time_second conflicted with some platform header file. */
/* .second getter. */
static PyObject *
py_time_second(PyObject *op, void *Py_UNUSED(closure))
{
    PyDateTime_Time *self = PyTime_CAST(op);
    return PyLong_FromLong(TIME_GET_SECOND(self));
}
/* .microsecond getter. */
static PyObject *
time_microsecond(PyObject *op, void *Py_UNUSED(closure))
{
    PyDateTime_Time *self = PyTime_CAST(op);
    return PyLong_FromLong(TIME_GET_MICROSECOND(self));
}
/* .tzinfo getter: None for naive times (no tzinfo slot allocated). */
static PyObject *
time_tzinfo(PyObject *op, void *Py_UNUSED(closure))
{
    PyDateTime_Time *self = PyTime_CAST(op);
    PyObject *result = HASTZINFO(self) ? self->tzinfo : Py_None;
    return Py_NewRef(result);
}
/* .fold getter (0 or 1; disambiguates repeated wall-clock times). */
static PyObject *
time_fold(PyObject *op, void *Py_UNUSED(closure))
{
    PyDateTime_Time *self = PyTime_CAST(op);
    return PyLong_FromLong(TIME_GET_FOLD(self));
}
/* Read-only attribute table for datetime.time. */
static PyGetSetDef time_getset[] = {
    {"hour", time_hour},
    {"minute", time_minute},
    {"second", py_time_second},
    {"microsecond", time_microsecond},
    {"tzinfo", time_tzinfo},
    {"fold", time_fold},
    {NULL}
};
/*
* Constructors.
*/
/* Reconstruct a time object from its pickled byte state plus tzinfo.
 * 'state' must be a bytes object of _PyDateTime_TIME_DATASIZE bytes
 * (validated by the caller, time_new). */
static PyObject *
time_from_pickle(PyTypeObject *type, PyObject *state, PyObject *tzinfo)
{
    PyDateTime_Time *me;
    char aware = (char)(tzinfo != Py_None);

    if (aware && check_tzinfo_subclass(tzinfo) < 0) {
        PyErr_SetString(PyExc_TypeError, "bad tzinfo state arg");
        return NULL;
    }
    /* tp_alloc's nitems flag doubles as "has a tzinfo slot". */
    me = (PyDateTime_Time *) (type->tp_alloc(type, aware));
    if (me != NULL) {
        const char *pdata = PyBytes_AS_STRING(state);

        memcpy(me->data, pdata, _PyDateTime_TIME_DATASIZE);
        me->hashcode = -1;  /* hash not computed yet */
        me->hastzinfo = aware;
        if (aware) {
            me->tzinfo = Py_NewRef(tzinfo);
        }
        /* The fold bit is smuggled in bit 7 of the first (hour) byte;
         * strip it back out of the stored data. */
        if (pdata[0] & (1 << 7)) {
            me->data[0] -= 128;
            me->fold = 1;
        }
        else {
            me->fold = 0;
        }
    }
    return (PyObject *)me;
}
/* tp_new for datetime.time: detects unpickling (a bytes/str state blob as
 * the first argument) and otherwise forwards to the clinic-generated
 * keyword constructor. */
static PyObject *
time_new(PyTypeObject *type, PyObject *args, PyObject *kw)
{
    /* Check for invocation from pickle with __getstate__ state */
    if (PyTuple_GET_SIZE(args) >= 1 && PyTuple_GET_SIZE(args) <= 2) {
        PyObject *state = PyTuple_GET_ITEM(args, 0);
        PyObject *tzinfo = Py_None;
        if (PyTuple_GET_SIZE(args) == 2) {
            tzinfo = PyTuple_GET_ITEM(args, 1);
        }
        if (PyBytes_Check(state)) {
            /* Heuristic: right size and a plausible hour (< 24, ignoring
             * the fold bit in bit 7) means this is pickle state, not a
             * normal constructor call. */
            if (PyBytes_GET_SIZE(state) == _PyDateTime_TIME_DATASIZE &&
                (0x7F & ((unsigned char) (PyBytes_AS_STRING(state)[0]))) < 24)
            {
                return time_from_pickle(type, state, tzinfo);
            }
        }
        else if (PyUnicode_Check(state)) {
            /* Pickles written by Python 2 arrive as str; re-encode to the
             * original latin-1 bytes before reconstructing. */
            if (PyUnicode_GET_LENGTH(state) == _PyDateTime_TIME_DATASIZE &&
                (0x7F & PyUnicode_READ_CHAR(state, 0)) < 24)
            {
                state = PyUnicode_AsLatin1String(state);
                if (state == NULL) {
                    if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError)) {
                        /* More informative error message. */
                        PyErr_SetString(PyExc_ValueError,
                            "Failed to encode latin1 string when unpickling "
                            "a time object. "
                            "pickle.load(data, encoding='latin1') is assumed.");
                    }
                    return NULL;
                }
                PyObject *self = time_from_pickle(type, state, tzinfo);
                Py_DECREF(state);
                return self;
            }
        }
    }

    return datetime_time(type, args, kw);
}
/*[clinic input]
@classmethod
datetime.time.__new__
hour: int = 0
minute: int = 0
second: int = 0
microsecond: int = 0
tzinfo: object = None
*
fold: int = 0
Time with time zone.
All arguments are optional. tzinfo may be None, or an instance of
a tzinfo subclass. The remaining arguments may be ints.
[clinic start generated code]*/
static PyObject *
datetime_time_impl(PyTypeObject *type, int hour, int minute, int second,
                   int microsecond, PyObject *tzinfo, int fold)
/*[clinic end generated code: output=f06bb4315225e7f6 input=0148df5e8138fe7b]*/
{
    /* Range checking and tzinfo validation happen in new_time_ex2. */
    return new_time_ex2(hour, minute, second, microsecond, tzinfo, fold, type);
}
/*[clinic input]
@classmethod
datetime.time.strptime
string: unicode
format: unicode
/
Parse string according to the given time format (like time.strptime()).
For a list of supported format codes, see the documentation:
https://docs.python.org/3/library/datetime.html#format-codes
[clinic start generated code]*/
static PyObject *
datetime_time_strptime_impl(PyTypeObject *type, PyObject *string,
                            PyObject *format)
/*[clinic end generated code: output=ae05a9bc0241d3bf input=82ba425ecacc54aa]*/
{
    PyObject *result;
    /* Delegate to the pure-Python _strptime module, passing the concrete
     * (sub)class so subclasses round-trip correctly. */
    PyObject *module = PyImport_Import(&_Py_ID(_strptime));
    if (module == NULL) {
        return NULL;
    }
    result = PyObject_CallMethodObjArgs(module,
                                        &_Py_ID(_strptime_datetime_time),
                                        (PyObject *)type, string, format, NULL);
    Py_DECREF(module);
    return result;
}
/*
* Destructor.
*/
/* Destructor: release tzinfo only when the instance has the slot. */
static void
time_dealloc(PyObject *op)
{
    PyDateTime_Time *self = PyTime_CAST(op);
    if (HASTZINFO(self)) {
        Py_XDECREF(self->tzinfo);
    }
    Py_TYPE(op)->tp_free(op);
}
/*
* Indirect access to tzinfo methods.
*/
/* These are all METH_NOARGS, so don't need to check the arglist. */
/* utcoffset(): forward to self.tzinfo.utcoffset(None), or None if naive. */
static PyObject *
time_utcoffset(PyObject *op, PyObject *Py_UNUSED(dummy)) {
    PyDateTime_Time *self = PyTime_CAST(op);
    return call_utcoffset(GET_TIME_TZINFO(self), Py_None);
}
static PyObject *
time_dst(PyObject *op, PyObject *Py_UNUSED(dummy)) {
    /* Forward to the shared helper with this object's tzinfo (if any). */
    return call_dst(GET_TIME_TZINFO(PyTime_CAST(op)), Py_None);
}
static PyObject *
time_tzname(PyObject *op, PyObject *Py_UNUSED(dummy)) {
    /* Forward to the shared helper with this object's tzinfo (if any). */
    return call_tzname(GET_TIME_TZINFO(PyTime_CAST(op)), Py_None);
}
/*
 * Various ways to turn a time into a string.
 */
static PyObject *
time_repr(PyObject *op)
{
    PyDateTime_Time *self = PyTime_CAST(op);
    const char *type_name = Py_TYPE(self)->tp_name;
    int hour = TIME_GET_HOUR(self);
    int minute = TIME_GET_MINUTE(self);
    int second = TIME_GET_SECOND(self);
    int usecond = TIME_GET_MICROSECOND(self);
    int fold = TIME_GET_FOLD(self);
    PyObject *repr;

    /* Trailing zero components (seconds/microseconds) are omitted. */
    if (usecond != 0) {
        repr = PyUnicode_FromFormat("%s(%d, %d, %d, %d)",
                                    type_name, hour, minute, second, usecond);
    }
    else if (second != 0) {
        repr = PyUnicode_FromFormat("%s(%d, %d, %d)",
                                    type_name, hour, minute, second);
    }
    else {
        repr = PyUnicode_FromFormat("%s(%d, %d)", type_name, hour, minute);
    }
    /* Append ", tzinfo=..." and/or ", fold=1" keywords when present. */
    if (repr != NULL && HASTZINFO(self)) {
        repr = append_keyword_tzinfo(repr, self->tzinfo);
    }
    if (repr != NULL && fold) {
        repr = append_keyword_fold(repr, fold);
    }
    return repr;
}
/* str(time) is defined as time.isoformat() with default arguments. */
static PyObject *
time_str(PyObject *op)
{
    return PyObject_CallMethodNoArgs(op, &_Py_ID(isoformat));
}
/*[clinic input]
datetime.time.isoformat
timespec: str(c_default="NULL") = 'auto'
Return the time formatted according to ISO.
The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional
part is omitted if self.microsecond == 0.
The optional argument timespec specifies the number of additional
terms of the time to include. Valid options are 'auto', 'hours',
'minutes', 'seconds', 'milliseconds' and 'microseconds'.
[clinic start generated code]*/
/* time.isoformat([timespec]): render as HH[:MM[:SS[.ffffff]]][+HH:MM],
 * truncated according to 'timespec'. */
static PyObject *
datetime_time_isoformat_impl(PyDateTime_Time *self, const char *timespec)
/*[clinic end generated code: output=2bcc7cab65c35545 input=afbbbd953d10ad07]*/
{
    char buf[100];
    PyObject *result;
    int us = TIME_GET_MICROSECOND(self);
    /* Table of (timespec name, printf format); index doubles as the
     * precision level. */
    static const char * const specs[][2] = {
        {"hours", "%02d"},
        {"minutes", "%02d:%02d"},
        {"seconds", "%02d:%02d:%02d"},
        {"milliseconds", "%02d:%02d:%02d.%03d"},
        {"microseconds", "%02d:%02d:%02d.%06d"},
    };
    size_t given_spec;

    if (timespec == NULL || strcmp(timespec, "auto") == 0) {
        if (us == 0) {
            /* seconds */
            given_spec = 2;
        }
        else {
            /* microseconds */
            given_spec = 4;
        }
    }
    else {
        for (given_spec = 0; given_spec < Py_ARRAY_LENGTH(specs); given_spec++) {
            if (strcmp(timespec, specs[given_spec][0]) == 0) {
                if (given_spec == 3) {
                    /* milliseconds: drop the sub-millisecond digits */
                    us = us / 1000;
                }
                break;
            }
        }
    }
    if (given_spec == Py_ARRAY_LENGTH(specs)) {
        /* Use PyErr_SetString: the message is a plain literal, so the
         * printf-style machinery of PyErr_Format was never needed. */
        PyErr_SetString(PyExc_ValueError, "Unknown timespec value");
        return NULL;
    }
    else {
        /* Extra args beyond what the chosen format consumes are ignored. */
        result = PyUnicode_FromFormat(specs[given_spec][1],
                                      TIME_GET_HOUR(self), TIME_GET_MINUTE(self),
                                      TIME_GET_SECOND(self), us);
    }

    if (result == NULL || !HASTZINFO(self) || self->tzinfo == Py_None)
        return result;

    /* We need to append the UTC offset. */
    if (format_utcoffset(buf, sizeof(buf), ":", self->tzinfo,
                         Py_None) < 0) {
        Py_DECREF(result);
        return NULL;
    }
    PyUnicode_AppendAndDel(&result, PyUnicode_FromString(buf));
    return result;
}
/*[clinic input]
@permit_long_docstring_body
datetime.time.strftime
format: unicode
Format using strftime().
The date part of the timestamp passed to underlying strftime should not be used.
For a list of supported format codes, see the documentation:
https://docs.python.org/3/library/datetime.html#format-codes
[clinic start generated code]*/
/* time.strftime(format): build a 9-field timetuple from the time fields
 * plus dummy date values and hand it to the shared strftime wrapper. */
static PyObject *
datetime_time_strftime_impl(PyDateTime_Time *self, PyObject *format)
/*[clinic end generated code: output=10f65af20e2a78c7 input=c4a5bbecd798654b]*/
{
    PyObject *result;
    PyObject *tuple;

    /* Python's strftime does insane things with the year part of the
     * timetuple. The year is forced to (the otherwise nonsensical)
     * 1900 to work around that.
     */
    tuple = Py_BuildValue("iiiiiiiii",
                          1900, 1, 1, /* year, month, day */
                          TIME_GET_HOUR(self),
                          TIME_GET_MINUTE(self),
                          TIME_GET_SECOND(self),
                          0, 1, -1); /* weekday, daynum, dst */
    if (tuple == NULL)
        return NULL;
    assert(PyTuple_Size(tuple) == 9);
    /* tzinfoarg is Py_None: %Z/%z should not consult self here. */
    result = wrap_strftime((PyObject *)self, format, tuple,
                           Py_None);
    Py_DECREF(tuple);
    return result;
}
/*[clinic input]
datetime.time.__format__
self: self(type="PyObject *")
format: unicode
/
Formats self with strftime.
[clinic start generated code]*/
/* __format__ protocol: empty spec means str(self); anything else is
 * treated as a strftime format string. */
static PyObject *
datetime_time___format___impl(PyObject *self, PyObject *format)
/*[clinic end generated code: output=4646451f7a5d2156 input=6a858ae787d20230]*/
{
    /* if the format is zero length, return str(self) */
    if (PyUnicode_GetLength(format) == 0)
        return PyObject_Str(self);
    return PyObject_CallMethodOneArg(self, &_Py_ID(strftime), format);
}
/*
 * Miscellaneous methods.
 */
/* Rich comparison for time objects.
 *
 * Fast path: identical tzinfo pointers mean the packed time bytes can be
 * compared directly with memcmp.  Otherwise both utcoffset() results are
 * computed; equal (or both-None) offsets again allow a byte comparison.
 * Two aware times with different offsets are compared by their
 * offset-adjusted second counts.  Mixing naive and aware times yields
 * False/True for ==/!= and a TypeError for ordering comparisons. */
static PyObject *
time_richcompare(PyObject *self, PyObject *other, int op)
{
    PyObject *result = NULL;
    PyObject *offset1, *offset2;
    int diff;

    if (! PyTime_Check(other))
        Py_RETURN_NOTIMPLEMENTED;

    /* Same tzinfo object (or both absent): raw data comparison suffices. */
    if (GET_TIME_TZINFO(self) == GET_TIME_TZINFO(other)) {
        diff = memcmp(((PyDateTime_Time *)self)->data,
                      ((PyDateTime_Time *)other)->data,
                      _PyDateTime_TIME_DATASIZE);
        return diff_to_bool(diff, op);
    }
    offset1 = time_utcoffset(self, NULL);
    if (offset1 == NULL)
        return NULL;
    offset2 = time_utcoffset(other, NULL);
    if (offset2 == NULL)
        goto done;
    /* If they're both naive, or both aware and have the same offsets,
     * we get off cheap. Note that if they're both naive, offset1 ==
     * offset2 == Py_None at this point.
     */
    if ((offset1 == offset2) ||
        (PyDelta_Check(offset1) && PyDelta_Check(offset2) &&
         delta_cmp(offset1, offset2) == 0)) {
        diff = memcmp(((PyDateTime_Time *)self)->data,
                      ((PyDateTime_Time *)other)->data,
                      _PyDateTime_TIME_DATASIZE);
        result = diff_to_bool(diff, op);
    }
    /* The hard case: both aware with different UTC offsets */
    else if (offset1 != Py_None && offset2 != Py_None) {
        int offsecs1, offsecs2;
        assert(offset1 != offset2); /* else last "if" handled it */
        /* Normalize each time to seconds-since-midnight-UTC. */
        offsecs1 = TIME_GET_HOUR(self) * 3600 +
                   TIME_GET_MINUTE(self) * 60 +
                   TIME_GET_SECOND(self) -
                   GET_TD_DAYS(offset1) * 86400 -
                   GET_TD_SECONDS(offset1);
        offsecs2 = TIME_GET_HOUR(other) * 3600 +
                   TIME_GET_MINUTE(other) * 60 +
                   TIME_GET_SECOND(other) -
                   GET_TD_DAYS(offset2) * 86400 -
                   GET_TD_SECONDS(offset2);
        diff = offsecs1 - offsecs2;
        /* Seconds tie: fall back to comparing microseconds. */
        if (diff == 0)
            diff = TIME_GET_MICROSECOND(self) -
                   TIME_GET_MICROSECOND(other);
        result = diff_to_bool(diff, op);
    }
    else if (op == Py_EQ) {
        result = Py_NewRef(Py_False);
    }
    else if (op == Py_NE) {
        result = Py_NewRef(Py_True);
    }
    else {
        PyErr_SetString(PyExc_TypeError,
                        "can't compare offset-naive and "
                        "offset-aware times");
    }
 done:
    Py_DECREF(offset1);
    Py_XDECREF(offset2);
    return result;
}
/* Hash for time objects, cached in self->hashcode (-1 means "not yet
 * computed").  fold is ignored for hashing: a fold=1 instance is first
 * replaced by its fold=0 twin.  Naive times hash their raw data bytes;
 * aware times hash the timedelta (wall time minus utcoffset) so that
 * times that compare equal hash equal. */
static Py_hash_t
time_hash(PyObject *op)
{
    PyDateTime_Time *self = PyTime_CAST(op);
    Py_hash_t hash = FT_ATOMIC_LOAD_SSIZE_RELAXED(self->hashcode);
    if (hash == -1) {
        PyObject *offset, *self0;
        if (TIME_GET_FOLD(self)) {
            /* Build a fold=0 copy so both folds hash identically. */
            self0 = new_time_ex2(TIME_GET_HOUR(self),
                                 TIME_GET_MINUTE(self),
                                 TIME_GET_SECOND(self),
                                 TIME_GET_MICROSECOND(self),
                                 HASTZINFO(self) ? self->tzinfo : Py_None,
                                 0, Py_TYPE(self));
            if (self0 == NULL)
                return -1;
        }
        else {
            self0 = Py_NewRef(self);
        }
        offset = time_utcoffset(self0, NULL);
        Py_DECREF(self0);
        if (offset == NULL)
            return -1;
        /* Reduce this to a hash of another object. */
        if (offset == Py_None) {
            /* Naive: hash the packed h/m/s/us bytes directly. */
            hash = generic_hash(
                (unsigned char *)self->data, _PyDateTime_TIME_DATASIZE);
            FT_ATOMIC_STORE_SSIZE_RELAXED(self->hashcode, hash);
        } else {
            PyObject *temp1, *temp2;
            int seconds, microseconds;
            assert(HASTZINFO(self));
            seconds = TIME_GET_HOUR(self) * 3600 +
                      TIME_GET_MINUTE(self) * 60 +
                      TIME_GET_SECOND(self);
            microseconds = TIME_GET_MICROSECOND(self);
            /* timedelta(seconds, microseconds) - utcoffset */
            temp1 = new_delta(0, seconds, microseconds, 1);
            if (temp1 == NULL) {
                Py_DECREF(offset);
                return -1;
            }
            temp2 = delta_subtract(temp1, offset);
            Py_DECREF(temp1);
            if (temp2 == NULL) {
                Py_DECREF(offset);
                return -1;
            }
            hash = PyObject_Hash(temp2);
            FT_ATOMIC_STORE_SSIZE_RELAXED(self->hashcode, hash);
            Py_DECREF(temp2);
        }
        Py_DECREF(offset);
    }
    return hash;
}
/*[clinic input]
datetime.time.replace
hour: int(c_default="TIME_GET_HOUR(self)") = unchanged
minute: int(c_default="TIME_GET_MINUTE(self)") = unchanged
second: int(c_default="TIME_GET_SECOND(self)") = unchanged
microsecond: int(c_default="TIME_GET_MICROSECOND(self)") = unchanged
tzinfo: object(c_default="HASTZINFO(self) ? ((PyDateTime_Time *)self)->tzinfo : Py_None") = unchanged
*
fold: int(c_default="TIME_GET_FOLD(self)") = unchanged
Return time with new specified fields.
[clinic start generated code]*/
/* time.replace(): clinic supplies the current field values as defaults,
 * so this only needs to construct a new instance of the same type. */
static PyObject *
datetime_time_replace_impl(PyDateTime_Time *self, int hour, int minute,
                           int second, int microsecond, PyObject *tzinfo,
                           int fold)
/*[clinic end generated code: output=0b89a44c299e4f80 input=abf23656e8df4e97]*/
{
    return new_time_subclass_fold_ex(hour, minute, second, microsecond, tzinfo,
                                     fold, Py_TYPE(self));
}
/*[clinic input]
@classmethod
datetime.time.fromisoformat
string: unicode
/
Construct a time from a string in ISO 8601 format.
[clinic start generated code]*/
/* time.fromisoformat(string): parse an ISO 8601 time (optionally prefixed
 * with 'T'), build a tzinfo from any parsed offset, and construct the
 * instance (via the C fast path for the exact time type, or by calling
 * the subclass constructor). */
static PyObject *
datetime_time_fromisoformat_impl(PyTypeObject *type, PyObject *string)
/*[clinic end generated code: output=97c57e896e7f2535 input=bdb4b8abea9cd688]*/
{
    Py_ssize_t len;
    const char *p = PyUnicode_AsUTF8AndSize(string, &len);

    if (p == NULL) {
        goto invalid_string_error;
    }

    // The spec actually requires that time-only ISO 8601 strings start with
    // T, but the extended format allows this to be omitted as long as there
    // is no ambiguity with date strings.
    if (*p == 'T') {
        ++p;
        len -= 1;
    }

    int hour = 0, minute = 0, second = 0, microsecond = 0;
    int tzoffset = 0, tzimicrosecond = 0;
    /* Fix: the fourth output argument had been mangled to "µsecond"
     * (mojibake of "&micro..."); it must be &microsecond. */
    int rv = parse_isoformat_time(p, len,
                                  &hour, &minute, &second, &microsecond,
                                  &tzoffset, &tzimicrosecond);

    if (rv < 0) {
        /* rv == -6 signals an error with an exception already set. */
        if (rv == -6) {
            goto error;
        }
        goto invalid_string_error;
    }

    /* "24:00:00" is accepted and normalized to midnight. */
    if (hour == 24) {
        if (minute == 0 && second == 0 && microsecond == 0) {
            hour = 0;
        } else {
            goto invalid_iso_midnight;
        }
    }

    PyObject *tzinfo = tzinfo_from_isoformat_results(rv, tzoffset,
                                                     tzimicrosecond);

    if (tzinfo == NULL) {
        return NULL;
    }

    PyObject *t;
    if (type == TIME_TYPE(NO_STATE)) {
        t = new_time(hour, minute, second, microsecond, tzinfo, 0);
    } else {
        t = PyObject_CallFunction((PyObject *)type, "iiiiO",
                                  hour, minute, second, microsecond, tzinfo);
    }

    Py_DECREF(tzinfo);
    return t;

invalid_iso_midnight:
    PyErr_SetString(PyExc_ValueError, "minute, second, and microsecond must be 0 when hour is 24");
    return NULL;

invalid_string_error:
    PyErr_Format(PyExc_ValueError, "Invalid isoformat string: %R", string);
    return NULL;

error:
    return NULL;
}
/* Pickle support, a simple use of __reduce__. */
/* Let basestate be the non-tzinfo data string.
 * If tzinfo is None, this returns (basestate,), else (basestate, tzinfo).
 * So it's a tuple in any (non-error) case.
 * __getstate__ isn't exposed.
 */
static PyObject *
time_getstate(PyDateTime_Time *self, int proto)
{
    PyObject *basestate;
    PyObject *result = NULL;

    basestate = PyBytes_FromStringAndSize((char *)self->data,
                                          _PyDateTime_TIME_DATASIZE);
    if (basestate != NULL) {
        /* Protocol >= 4 encodes fold in the high bit of the first byte;
         * older protocols drop fold for backward compatibility. */
        if (proto > 3 && TIME_GET_FOLD(self))
            /* Set the first bit of the first byte */
            PyBytes_AS_STRING(basestate)[0] |= (1 << 7);
        if (! HASTZINFO(self) || self->tzinfo == Py_None)
            result = PyTuple_Pack(1, basestate);
        else
            result = PyTuple_Pack(2, basestate, self->tzinfo);
        Py_DECREF(basestate);
    }
    return result;
}
/*[clinic input]
datetime.time.__reduce_ex__
proto: int
/
[clinic start generated code]*/
/* __reduce_ex__: (type, state-tuple) -- state encoding depends on the
 * pickle protocol (see time_getstate). */
static PyObject *
datetime_time___reduce_ex___impl(PyDateTime_Time *self, int proto)
/*[clinic end generated code: output=ccfab65f5c320c1b input=4cd06bb3ac3657bb]*/
{
    return Py_BuildValue("(ON)", Py_TYPE(self), time_getstate(self, proto));
}
/*[clinic input]
datetime.time.__reduce__
[clinic start generated code]*/
/* __reduce__: same as __reduce_ex__ with protocol 2 (no fold bit). */
static PyObject *
datetime_time___reduce___impl(PyDateTime_Time *self)
/*[clinic end generated code: output=9a2fcc87e64ce300 input=0fb8dd14d275857f]*/
{
    return Py_BuildValue("(ON)", Py_TYPE(self), time_getstate(self, 2));
}
/* Method table for datetime.time; clinic-generated METHODDEF macros
 * expand to the corresponding PyMethodDef entries. */
static PyMethodDef time_methods[] = {

    /* Class method: */
    DATETIME_TIME_FROMISOFORMAT_METHODDEF
    DATETIME_TIME_STRPTIME_METHODDEF

    /* Instance methods: */

    DATETIME_TIME_ISOFORMAT_METHODDEF
    DATETIME_TIME_STRFTIME_METHODDEF
    DATETIME_TIME___FORMAT___METHODDEF

    {"utcoffset",   time_utcoffset,      METH_NOARGS,
     PyDoc_STR("Return self.tzinfo.utcoffset(self).")},

    {"tzname",      time_tzname,         METH_NOARGS,
     PyDoc_STR("Return self.tzinfo.tzname(self).")},

    {"dst",         time_dst,            METH_NOARGS,
     PyDoc_STR("Return self.tzinfo.dst(self).")},

    DATETIME_TIME_REPLACE_METHODDEF

    /* __replace__ (PEP 695 copy.replace protocol) shares the replace()
     * implementation. */
    {"__replace__", _PyCFunction_CAST(datetime_time_replace), METH_FASTCALL | METH_KEYWORDS,
     PyDoc_STR("__replace__($self, /, **changes)\n--\n\nThe same as replace().")},

    DATETIME_TIME___REDUCE_EX___METHODDEF
    DATETIME_TIME___REDUCE___METHODDEF

    {NULL, NULL}
};
/* Static type object for datetime.time.  Uses custom alloc/new to
 * support the optional tzinfo slot (see time_alloc / time_new). */
static PyTypeObject PyDateTime_TimeType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "datetime.time",                            /* tp_name */
    sizeof(PyDateTime_Time),                    /* tp_basicsize */
    0,                                          /* tp_itemsize */
    time_dealloc,                               /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    time_repr,                                  /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    time_hash,                                  /* tp_hash */
    0,                                          /* tp_call */
    time_str,                                   /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,   /* tp_flags */
    datetime_time__doc__,                       /* tp_doc */
    0,                                          /* tp_traverse */
    0,                                          /* tp_clear */
    time_richcompare,                           /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    time_methods,                               /* tp_methods */
    0,                                          /* tp_members */
    time_getset,                                /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    0,                                          /* tp_init */
    time_alloc,                                 /* tp_alloc */
    time_new,                                   /* tp_new */
    0,                                          /* tp_free */
};
/*
 * PyDateTime_DateTime implementation.
 */

/* Accessor properties. Properties for day, month, and year are inherited
 * from date.
 */

static PyObject *
datetime_hour(PyObject *op, void *Py_UNUSED(closure))
{
    /* Read-only 'hour' attribute. */
    return PyLong_FromLong(DATE_GET_HOUR(PyDateTime_CAST(op)));
}
static PyObject *
datetime_minute(PyObject *op, void *Py_UNUSED(closure))
{
    /* Read-only 'minute' attribute. */
    return PyLong_FromLong(DATE_GET_MINUTE(PyDateTime_CAST(op)));
}
static PyObject *
datetime_second(PyObject *op, void *Py_UNUSED(closure))
{
    /* Read-only 'second' attribute. */
    return PyLong_FromLong(DATE_GET_SECOND(PyDateTime_CAST(op)));
}
static PyObject *
datetime_microsecond(PyObject *op, void *Py_UNUSED(closure))
{
    /* Read-only 'microsecond' attribute. */
    return PyLong_FromLong(DATE_GET_MICROSECOND(PyDateTime_CAST(op)));
}
static PyObject *
datetime_tzinfo(PyObject *op, void *Py_UNUSED(closure))
{
    PyDateTime_DateTime *dt = PyDateTime_CAST(op);
    /* Naive instances have no tzinfo slot; report None for them. */
    if (!HASTZINFO(dt)) {
        return Py_NewRef(Py_None);
    }
    return Py_NewRef(dt->tzinfo);
}
static PyObject *
datetime_fold(PyObject *op, void *Py_UNUSED(closure))
{
    /* Read-only 'fold' attribute (0 or 1). */
    return PyLong_FromLong(DATE_GET_FOLD(PyDateTime_CAST(op)));
}
/* Getter table for the datetime-specific attributes (date attributes are
 * inherited); entries have no setter, so all are read-only. */
static PyGetSetDef datetime_getset[] = {
    {"hour",        datetime_hour},
    {"minute",      datetime_minute},
    {"second",      datetime_second},
    {"microsecond", datetime_microsecond},
    {"tzinfo",      datetime_tzinfo},
    {"fold",        datetime_fold},
    {NULL}
};
/*
 * Constructors.
 */

/* Rebuild a datetime from pickled state: a bytes object holding the
 * packed data plus an optional tzinfo.  Bit 7 of byte 2 smuggles the
 * fold flag (protocol >= 4) and is stripped from the stored data. */
static PyObject *
datetime_from_pickle(PyTypeObject *type, PyObject *state, PyObject *tzinfo)
{
    PyDateTime_DateTime *me;
    /* aware == 1 requests the larger allocation that has a tzinfo slot. */
    char aware = (char)(tzinfo != Py_None);

    if (aware && check_tzinfo_subclass(tzinfo) < 0) {
        PyErr_SetString(PyExc_TypeError, "bad tzinfo state arg");
        return NULL;
    }

    me = (PyDateTime_DateTime *) (type->tp_alloc(type , aware));
    if (me != NULL) {
        const char *pdata = PyBytes_AS_STRING(state);

        memcpy(me->data, pdata, _PyDateTime_DATETIME_DATASIZE);
        me->hashcode = -1;
        me->hastzinfo = aware;
        if (aware) {
            me->tzinfo = Py_NewRef(tzinfo);
        }
        /* High bit of byte 2 encodes fold; clear it from the data. */
        if (pdata[2] & (1 << 7)) {
            me->data[2] -= 128;
            me->fold = 1;
        }
        else {
            me->fold = 0;
        }
    }
    return (PyObject *)me;
}
/* tp_new for datetime.datetime.  Detects the special pickle calling
 * convention -- (state_bytes[, tzinfo]) with a plausible packed month
 * byte -- and routes it to datetime_from_pickle; everything else goes
 * through the normal clinic-generated constructor. */
static PyObject *
datetime_new(PyTypeObject *type, PyObject *args, PyObject *kw)
{
    /* Check for invocation from pickle with __getstate__ state */
    if (PyTuple_GET_SIZE(args) >= 1 && PyTuple_GET_SIZE(args) <= 2) {
        PyObject *state = PyTuple_GET_ITEM(args, 0);
        PyObject *tzinfo = Py_None;
        if (PyTuple_GET_SIZE(args) == 2) {
            tzinfo = PyTuple_GET_ITEM(args, 1);
        }
        if (PyBytes_Check(state)) {
            /* Month byte is masked with 0x7F because its high bit may
             * carry the fold flag (see datetime_from_pickle). */
            if (PyBytes_GET_SIZE(state) == _PyDateTime_DATETIME_DATASIZE &&
                MONTH_IS_SANE(PyBytes_AS_STRING(state)[2] & 0x7F))
            {
                return datetime_from_pickle(type, state, tzinfo);
            }
        }
        else if (PyUnicode_Check(state)) {
            /* Python 2 pickles stored the state as a str; re-encode it
             * as latin-1 bytes before unpickling. */
            if (PyUnicode_GET_LENGTH(state) == _PyDateTime_DATETIME_DATASIZE &&
                MONTH_IS_SANE(PyUnicode_READ_CHAR(state, 2) & 0x7F))
            {
                state = PyUnicode_AsLatin1String(state);
                if (state == NULL) {
                    if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError)) {
                        /* More informative error message. */
                        PyErr_SetString(PyExc_ValueError,
                            "Failed to encode latin1 string when unpickling "
                            "a datetime object. "
                            "pickle.load(data, encoding='latin1') is assumed.");
                    }
                    return NULL;
                }
                PyObject *self = datetime_from_pickle(type, state, tzinfo);
                Py_DECREF(state);
                return self;
            }
        }
    }

    return datetime_datetime(type, args, kw);
}
/*[clinic input]
@classmethod
datetime.datetime.__new__
year: int
month: int
day: int
hour: int = 0
minute: int = 0
second: int = 0
microsecond: int = 0
tzinfo: object = None
*
fold: int = 0
A combination of a date and a time.
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
[clinic start generated code]*/
/* Clinic-generated __new__ implementation for datetime.datetime; forwards
 * to the shared constructor helper with the requested type. */
static PyObject *
datetime_datetime_impl(PyTypeObject *type, int year, int month, int day,
                       int hour, int minute, int second, int microsecond,
                       PyObject *tzinfo, int fold)
/*[clinic end generated code: output=47983ddb47d36037 input=2af468d7a9c1e568]*/
{
    return new_datetime_ex2(year, month, day,
                            hour, minute, second, microsecond,
                            tzinfo, fold, type);
}
/* TM_FUNC is the shared type of _PyTime_localtime() and
 * _PyTime_gmtime(). */
typedef int (*TM_FUNC)(time_t timer, struct tm*);

/* As of version 2015f max fold in IANA database is
 * 23 hours at 1969-09-30 13:00:00 in Kwajalein.
 * Rounded up to 24h; used to probe for folds around a timestamp. */
static long long max_fold_seconds = 24 * 3600;

/* NB: date(1970,1,1).toordinal() == 719163
 * i.e. 'epoch' is the Unix epoch expressed in seconds on the proleptic
 * Gregorian ordinal scale used by utc_to_seconds(). */
static long long epoch = 719163LL * 24 * 60 * 60;
/* Convert a broken-down UTC date/time to seconds on the proleptic
 * Gregorian ordinal scale (day 1 == 0001-01-01).  Returns -1 with a
 * ValueError set if the year is out of range; -1 is unambiguous because
 * every valid input maps to a positive count. */
static long long
utc_to_seconds(int year, int month, int day,
               int hour, int minute, int second)
{
    long long ordinal;

    /* ymd_to_ord() doesn't support year <= 0 */
    if (year < MINYEAR || year > MAXYEAR) {
        PyErr_Format(PyExc_ValueError,
                     "year must be in %d..%d, not %d", MINYEAR, MAXYEAR, year);
        return -1;
    }

    ordinal = ymd_to_ord(year, month, day);
    return ((ordinal * 24 + hour) * 60 + minute) * 60 + second;
}
/* Map a UTC second count (ordinal scale, see 'epoch') through the
 * platform's localtime and back onto the same scale.  Returns -1 with an
 * exception set on overflow or localtime failure. */
static long long
local(long long u)
{
    struct tm local_time;
    time_t t;
    u -= epoch;
    t = u;
    /* time_t may be narrower than long long; detect truncation. */
    if (t != u) {
        PyErr_SetString(PyExc_OverflowError,
                        "timestamp out of range for platform time_t");
        return -1;
    }
    if (_PyTime_localtime(t, &local_time) != 0)
        return -1;
    return utc_to_seconds(local_time.tm_year + 1900,
                          local_time.tm_mon + 1,
                          local_time.tm_mday,
                          local_time.tm_hour,
                          local_time.tm_min,
                          local_time.tm_sec);
}
/* Internal helper.
 * Build datetime from a time_t and a distinct count of microseconds.
 * Pass localtime or gmtime for f, to control the interpretation of timet.
 */
static PyObject *
datetime_from_timet_and_us(PyTypeObject *cls, TM_FUNC f, time_t timet, int us,
                           PyObject *tzinfo)
{
    struct tm tm;
    int year, month, day, hour, minute, second, fold = 0;

    if (f(timet, &tm) != 0)
        return NULL;

    year = tm.tm_year + 1900;
    month = tm.tm_mon + 1;
    day = tm.tm_mday;
    hour = tm.tm_hour;
    minute = tm.tm_min;
    /* The platform localtime/gmtime may insert leap seconds,
     * indicated by tm.tm_sec > 59. We don't care about them,
     * except to the extent that passing them on to the datetime
     * constructor would raise ValueError for a reason that
     * made no sense to the user.
     */
    second = Py_MIN(59, tm.tm_sec);

    /* local timezone requires to compute fold */
    if (tzinfo == Py_None && f == _PyTime_localtime) {
        long long probe_seconds, result_seconds, transition;

        result_seconds = utc_to_seconds(year, month, day,
                                        hour, minute, second);
        if (result_seconds == -1 && PyErr_Occurred()) {
            return NULL;
        }
        /* Probe max_fold_seconds to detect a fold.  If local time
         * max_fold_seconds earlier maps closer than expected, a UTC
         * offset transition occurred in between; re-probe at the
         * transition point and set fold when the wall time repeats. */
        probe_seconds = local(epoch + timet - max_fold_seconds);
        if (probe_seconds == -1)
            return NULL;
        transition = result_seconds - probe_seconds - max_fold_seconds;
        if (transition < 0) {
            probe_seconds = local(epoch + timet + transition);
            if (probe_seconds == -1)
                return NULL;
            if (probe_seconds == result_seconds)
                fold = 1;
        }
    }
    return new_datetime_subclass_fold_ex(year, month, day, hour, minute,
                                         second, us, tzinfo, fold, cls);
}
/* Internal helper.
 * Build datetime from a Python timestamp. Pass localtime or gmtime for f,
 * to control the interpretation of the timestamp. Since a double doesn't
 * have enough bits to cover a datetime's full range of precision, it's
 * better to call datetime_from_timet_and_us provided you have a way
 * to get that much precision (e.g., C time() isn't good enough).
 */
static PyObject *
datetime_from_timestamp(PyTypeObject *cls, TM_FUNC f, PyObject *timestamp,
                        PyObject *tzinfo)
{
    time_t whole_secs;
    long frac_us;

    /* Split the Python number into whole seconds plus microseconds,
     * rounding half-to-even, then defer to the time_t-based builder. */
    if (_PyTime_ObjectToTimeval(timestamp, &whole_secs, &frac_us,
                                _PyTime_ROUND_HALF_EVEN) == -1) {
        return NULL;
    }
    return datetime_from_timet_and_us(cls, f, whole_secs, (int)frac_us, tzinfo);
}
/* Internal helper.
 * Build most accurate possible datetime for current time. Pass localtime or
 * gmtime for f as appropriate.
 */
static PyObject *
datetime_best_possible(PyTypeObject *cls, TM_FUNC f, PyObject *tzinfo)
{
    PyTime_t ts;
    if (PyTime_Time(&ts) < 0) {
        return NULL;
    }

    time_t secs;
    int us;
    /* Convert the raw clock reading to (seconds, microseconds). */
    if (_PyTime_AsTimevalTime_t(ts, &secs, &us, _PyTime_ROUND_HALF_EVEN) < 0) {
        return NULL;
    }
    assert(0 <= us && us <= 999999);

    return datetime_from_timet_and_us(cls, f, secs, us, tzinfo);
}
/*[clinic input]
@classmethod
datetime.datetime.now
tz: object = None
Timezone object.
Returns new datetime object representing current time local to tz.
If no tz is specified, uses local timezone.
[clinic start generated code]*/
/* datetime.now([tz]): local wall time when tz is None, otherwise current
 * UTC converted into tz via tz.fromutc(). */
static PyObject *
datetime_datetime_now_impl(PyTypeObject *type, PyObject *tz)
/*[clinic end generated code: output=b3386e5345e2b47a input=80d09869c5267d00]*/
{
    PyObject *self;

    /* Return best possible local time -- this isn't constrained by the
     * precision of a timestamp.
     */
    if (check_tzinfo_subclass(tz) < 0)
        return NULL;

    self = datetime_best_possible(type,
                                  tz == Py_None ? _PyTime_localtime :
                                  _PyTime_gmtime,
                                  tz);
    if (self != NULL && tz != Py_None) {
        /* Convert UTC to tzinfo's zone. */
        PyObject *res = PyObject_CallMethodOneArg(tz, &_Py_ID(fromutc), self);
        Py_DECREF(self);
        return res;
    }
    return self;
}
/* Return best possible UTC time -- this isn't constrained by the
 * precision of a timestamp.
 */
/*[clinic input]
@classmethod
datetime.datetime.utcnow
Return a new datetime representing UTC day and time.
[clinic start generated code]*/
/* Deprecated: emits a DeprecationWarning and returns a *naive* datetime
 * holding the current UTC time. */
static PyObject *
datetime_datetime_utcnow_impl(PyTypeObject *type)
/*[clinic end generated code: output=cfcfe71c6c916ba9 input=576eff2b222b80a1]*/
{
    /* Warning may be turned into an error by warning filters. */
    if (PyErr_WarnEx(PyExc_DeprecationWarning,
        "datetime.datetime.utcnow() is deprecated and scheduled for removal in a "
        "future version. Use timezone-aware objects to represent datetimes "
        "in UTC: datetime.datetime.now(datetime.UTC).", 1))
    {
        return NULL;
    }
    return datetime_best_possible(type, _PyTime_gmtime, Py_None);
}
/*[clinic input]
@permit_long_docstring_body
@classmethod
datetime.datetime.fromtimestamp
timestamp: object
tz as tzinfo: object = None
Create a datetime from a POSIX timestamp.
The timestamp is a number, e.g. created via time.time(), that is interpreted
as local time.
[clinic start generated code]*/
/* datetime.fromtimestamp(ts[, tz]): interpret ts as local time when tz is
 * None; otherwise interpret it as UTC and convert via tz.fromutc(). */
static PyObject *
datetime_datetime_fromtimestamp_impl(PyTypeObject *type, PyObject *timestamp,
                                     PyObject *tzinfo)
/*[clinic end generated code: output=9c47ea2b2ebdaded input=d6b5b2095c5a34b2]*/
{
    PyObject *self;

    if (check_tzinfo_subclass(tzinfo) < 0)
        return NULL;

    self = datetime_from_timestamp(type,
                                   tzinfo == Py_None ? _PyTime_localtime :
                                   _PyTime_gmtime,
                                   timestamp,
                                   tzinfo);
    if (self != NULL && tzinfo != Py_None) {
        /* Convert UTC to tzinfo's zone. */
        PyObject *res = PyObject_CallMethodOneArg(tzinfo, &_Py_ID(fromutc), self);
        Py_DECREF(self);
        return res;
    }
    return self;
}
/* This is a wrapper for API compatibility with the public C API.
 * Parses (timestamp[, tz]) and forwards to the clinic implementation. */
static PyObject *
datetime_datetime_fromtimestamp_capi(PyObject *cls, PyObject *args, PyObject *kw)
{
    PyObject *timestamp;
    PyObject *tzinfo = Py_None;
    static char *keywords[] = {"timestamp", "tz", NULL};

    /* Fix: the "&timestamp" output argument had been mangled into
     * "×tamp" (mojibake of "&times..."); restore the address-of
     * expression so the parsed object is actually stored. */
    if (!PyArg_ParseTupleAndKeywords(args, kw, "O|O:fromtimestamp",
                                     keywords, &timestamp, &tzinfo))
        return NULL;

    return datetime_datetime_fromtimestamp_impl((PyTypeObject *)cls,
                                                timestamp, tzinfo);
}
/*[clinic input]
@classmethod
datetime.datetime.utcfromtimestamp
timestamp: object
/
Create a naive UTC datetime from a POSIX timestamp.
[clinic start generated code]*/
/* Deprecated: emits a DeprecationWarning, then builds a naive datetime
 * by interpreting the timestamp as UTC. */
static PyObject *
datetime_datetime_utcfromtimestamp_impl(PyTypeObject *type,
                                        PyObject *timestamp)
/*[clinic end generated code: output=66d0b1741d788fd2 input=13fabd4296b1c206]*/
{
    /* Warning may be turned into an error by warning filters. */
    if (PyErr_WarnEx(PyExc_DeprecationWarning,
        "datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal "
        "in a future version. Use timezone-aware objects to represent "
        "datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC).", 1))
    {
        return NULL;
    }
    return datetime_from_timestamp(type, _PyTime_gmtime, timestamp, Py_None);
}
/*[clinic input]
@permit_long_summary
@classmethod
datetime.datetime.strptime
string: unicode
format: unicode
/
Parse string according to the given date and time format (like time.strptime()).
For a list of supported format codes, see the documentation:
https://docs.python.org/3/library/datetime.html#format-codes
[clinic start generated code]*/
/* datetime.strptime() classmethod: parsing is delegated to the
 * pure-Python _strptime module. */
static PyObject *
datetime_datetime_strptime_impl(PyTypeObject *type, PyObject *string,
                                PyObject *format)
/*[clinic end generated code: output=af2c2d024f3203f5 input=ef7807589f1d50e7]*/
{
    PyObject *result;

    PyObject *module = PyImport_Import(&_Py_ID(_strptime));
    if (module == NULL) {
        return NULL;
    }
    /* _strptime._strptime_datetime_datetime(type, string, format) builds
     * the instance, so subclasses round-trip through strptime. */
    result = PyObject_CallMethodObjArgs(module,
                                        &_Py_ID(_strptime_datetime_datetime),
                                        (PyObject *)type, string, format, NULL);
    Py_DECREF(module);
    return result;
}
/*[clinic input]
@classmethod
datetime.datetime.combine
date: object(subclass_of="DATE_TYPE(NO_STATE)")
time: object(subclass_of="TIME_TYPE(NO_STATE)")
tzinfo: object = NULL
Construct a datetime from a given date and a given time.
[clinic start generated code]*/
/* datetime.combine(date, time[, tzinfo]): merge the date's Y/M/D with
 * the time's H/M/S/us and fold.  When tzinfo is not given (NULL), the
 * time's own tzinfo (or None) is used. */
static PyObject *
datetime_datetime_combine_impl(PyTypeObject *type, PyObject *date,
                               PyObject *time, PyObject *tzinfo)
/*[clinic end generated code: output=a10f3cbb90f4d0aa input=4fcf0743288d0bab]*/
{
    if (tzinfo == NULL) {
        if (HASTZINFO(time))
            tzinfo = ((PyDateTime_Time *)time)->tzinfo;
        else
            tzinfo = Py_None;
    }
    return new_datetime_subclass_fold_ex(GET_YEAR(date),
                                         GET_MONTH(date),
                                         GET_DAY(date),
                                         TIME_GET_HOUR(time),
                                         TIME_GET_MINUTE(time),
                                         TIME_GET_SECOND(time),
                                         TIME_GET_MICROSECOND(time),
                                         tzinfo,
                                         TIME_GET_FOLD(time),
                                         type);
}
/* Replace a surrogate date/time separator with 'T' so the string can be
 * UTF-8 encoded later.  Returns a new reference on success, or NULL on
 * failure.  NOTE: the len < 7 path returns NULL *without* setting an
 * exception; the caller treats NULL as "invalid string". */
static PyObject *
_sanitize_isoformat_str(PyObject *dtstr)
{
    Py_ssize_t len = PyUnicode_GetLength(dtstr);
    if (len < 7) {  // All valid ISO 8601 strings are at least 7 characters long
        return NULL;
    }

    // `fromisoformat` allows surrogate characters in exactly one position,
    // the separator; to allow datetime_fromisoformat to make the simplifying
    // assumption that all valid strings can be encoded in UTF-8, this function
    // replaces any surrogate character separators with `T`.
    //
    // The result of this, if not NULL, returns a new reference
    const void* const unicode_data = PyUnicode_DATA(dtstr);
    const int kind = PyUnicode_KIND(dtstr);

    // Depending on the format of the string, the separator can only ever be
    // in positions 7, 8 or 10. We'll check each of these for a surrogate and
    // if we find one, replace it with `T`. If there is more than one surrogate,
    // we don't have to bother sanitizing it, because the function will later
    // fail when we try to encode the string as ASCII.
    static const size_t potential_separators[3] = {7, 8, 10};
    size_t surrogate_separator = 0;
    for(size_t idx = 0;
         idx < sizeof(potential_separators) / sizeof(*potential_separators);
         ++idx) {
        size_t pos = potential_separators[idx];
        if (pos > (size_t)len) {
            break;
        }

        if(Py_UNICODE_IS_SURROGATE(PyUnicode_READ(kind, unicode_data, pos))) {
            surrogate_separator = pos;
            break;
        }
    }

    // No surrogate found: return the original string, unmodified.
    if (surrogate_separator == 0) {
        return Py_NewRef(dtstr);
    }

    PyObject *str_out = _PyUnicode_Copy(dtstr);
    if (str_out == NULL) {
        return NULL;
    }

    if (PyUnicode_WriteChar(str_out, surrogate_separator, (Py_UCS4)'T')) {
        Py_DECREF(str_out);
        return NULL;
    }

    return str_out;
}
/* Return the index of the character separating the date part from the
 * time part of an ISO 8601 datetime string (or -1 for a malformed
 * prefix).  The table and the ambiguity notes below explain the rules. */
static Py_ssize_t
_find_isoformat_datetime_separator(const char *dtstr, Py_ssize_t len) {
    // The valid date formats can all be distinguished by characters 4 and 5
    // and further narrowed down by character
    // which tells us where to look for the separator character.
    // Format    |  As-rendered |   Position
    // ---------------------------------------
    // %Y-%m-%d  |  YYYY-MM-DD  |  10
    // %Y%m%d    |  YYYYMMDD    |  8
    // %Y-W%V    |  YYYY-Www    |  8
    // %YW%V     |  YYYYWww     |  7
    // %Y-W%V-%u |  YYYY-Www-d  |  10
    // %YW%V%u   |  YYYYWwwd    |  8
    // %Y-%j     |  YYYY-DDD    |  8
    // %Y%j      |  YYYYDDD     |  7
    //
    // Note that because we allow *any* character for the separator, in the
    // case where character 4 is W, it's not straightforward to determine where
    // the separator is — in the case of YYYY-Www-d, you have actual ambiguity,
    // e.g. 2020-W01-0000 could be YYYY-Www-D0HH or YYYY-Www-HHMM, when the
    // separator character is a number in the former case or a hyphen in the
    // latter case.
    //
    // The case of YYYYWww can be distinguished from YYYYWwwd by tracking ahead
    // to either the end of the string or the first non-numeric character —
    // since the time components all come in pairs YYYYWww#HH can be
    // distinguished from YYYYWwwd#HH by the fact that there will always be an
    // odd number of digits before the first non-digit character in the former
    // case.
    static const char date_separator = '-';
    static const char week_indicator = 'W';

    // Exactly 7 characters: no time part at all (YYYYWww / YYYYDDD).
    if (len == 7) {
        return 7;
    }

    if (dtstr[4] == date_separator) {
        // YYYY-???

        if (dtstr[5] == week_indicator) {
            // YYYY-W??

            if (len < 8) {
                return -1;
            }

            if (len > 8 && dtstr[8] == date_separator) {
                // YYYY-Www-D (10) or YYYY-Www-HH (8)
                if (len == 9) { return -1; }
                if (len > 10 && is_digit(dtstr[10])) {
                    // This is as far as we'll try to go to resolve the
                    // ambiguity for the moment — if we have YYYY-Www-##, the
                    // separator is either a hyphen at 8 or a number at 10.
                    //
                    // We'll assume it's a hyphen at 8 because it's way more
                    // likely that someone will use a hyphen as a separator
                    // than a number, but at this point it's really best effort
                    // because this is an extension of the spec anyway.
                    return 8;
                }

                return 10;
            }
            else {
                // YYYY-Www (8)
                return 8;
            }
        }
        else {
            // YYYY-MM-DD (10)
            return 10;
        }
    }
    else {
        // YYYY???
        if (dtstr[4] == week_indicator) {
            // YYYYWww (7) or YYYYWwwd (8)
            size_t idx = 7;
            for (; idx < (size_t)len; ++idx) {
                // Keep going until we run out of digits.
                if (!is_digit(dtstr[idx])) {
                    break;
                }
            }

            if (idx < 9) {
                return idx;
            }

            if (idx % 2 == 0) {
                // If the index of the last number is even, it's YYYYWww
                return 7;
            }
            else {
                return 8;
            }
        }
        else {
            // YYYYMMDD (8)
            return 8;
        }
    }
}
/*[clinic input]
@classmethod
datetime.datetime.fromisoformat
string: unicode
/
Construct a date from a string in ISO 8601 format.
[clinic start generated code]*/
/* datetime.fromisoformat(string): sanitize the separator, locate the
 * date/time split, parse both halves, handle the 24:00 special case by
 * rolling over to the next day, and construct the result (honoring
 * subclasses via new_datetime_subclass_ex). */
static PyObject *
datetime_datetime_fromisoformat_impl(PyTypeObject *type, PyObject *string)
/*[clinic end generated code: output=1800a952fcab79d9 input=d517b158209ded42]*/
{
    // We only need to sanitize this string if the separator is a surrogate
    // character. In the situation where the separator location is ambiguous,
    // we don't have to sanitize it anything because that can only happen when
    // the separator is either '-' or a number. This should mostly be a noop
    // but it makes the reference counting easier if we still sanitize.
    PyObject *dtstr_clean = _sanitize_isoformat_str(string);
    if (dtstr_clean == NULL) {
        goto invalid_string_error;
    }

    Py_ssize_t len;
    const char *dt_ptr = PyUnicode_AsUTF8AndSize(dtstr_clean, &len);

    if (dt_ptr == NULL) {
        if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError)) {
            // Encoding errors are invalid string errors at this point
            goto invalid_string_error;
        }
        else {
            goto error;
        }
    }

    const Py_ssize_t separator_location = _find_isoformat_datetime_separator(
            dt_ptr, len);

    const char *p = dt_ptr;

    int year = 0, month = 0, day = 0;
    int hour = 0, minute = 0, second = 0, microsecond = 0;
    int tzoffset = 0, tzusec = 0;

    // date runs up to separator_location
    int rv = parse_isoformat_date(p, separator_location, &year, &month, &day);

    if (!rv && len > separator_location) {
        // In UTF-8, the length of multi-byte characters is encoded in the MSB
        p += separator_location;
        if ((p[0] & 0x80) == 0) {
            p += 1;
        }
        else {
            switch (p[0] & 0xf0) {
                case 0xe0:
                    p += 3;
                    break;
                case 0xf0:
                    p += 4;
                    break;
                default:
                    p += 2;
                    break;
            }
        }

        len -= (p - dt_ptr);
        /* Fix: the fourth output argument had been mangled to "µsecond"
         * (mojibake of "&micro..."); it must be &microsecond. */
        rv = parse_isoformat_time(p, len, &hour, &minute, &second,
                                  &microsecond, &tzoffset, &tzusec);
        if (rv == -6) {
            // -6 means an error with an exception already set.
            goto error;
        }
    }
    if (rv < 0) {
        goto invalid_string_error;
    }

    PyObject *tzinfo = tzinfo_from_isoformat_results(rv, tzoffset, tzusec);
    if (tzinfo == NULL) {
        goto error;
    }

    if ((hour == 24) && (month <= 12)) {
        int d_in_month = days_in_month(year, month);
        if (day <= d_in_month) {
            if (minute == 0 && second == 0 && microsecond == 0) {
                // Calculate midnight of the next day
                hour = 0;
                day += 1;
                if (day > d_in_month) {
                    day = 1;
                    month += 1;
                    if (month > 12) {
                        month = 1;
                        year += 1;
                    }
                }
            } else {
                goto invalid_iso_midnight;
            }
        }
    }
    PyObject *dt = new_datetime_subclass_ex(year, month, day, hour, minute,
                                            second, microsecond, tzinfo, type);

    Py_DECREF(tzinfo);
    Py_DECREF(dtstr_clean);
    return dt;

invalid_iso_midnight:
    PyErr_SetString(PyExc_ValueError, "minute, second, and microsecond must be 0 when hour is 24");
    Py_DECREF(tzinfo);
    Py_DECREF(dtstr_clean);
    return NULL;

invalid_string_error:
    PyErr_Format(PyExc_ValueError, "Invalid isoformat string: %R", string);

error:
    Py_XDECREF(dtstr_clean);

    return NULL;
}
/*
 * Destructor: drop the tzinfo reference (aware instances only) and hand
 * the memory back to the type's allocator.
 */
static void
datetime_dealloc(PyObject *op)
{
    PyDateTime_DateTime *dt = PyDateTime_CAST(op);
    /* Only aware datetimes allocate the tzinfo slot. */
    if (HASTZINFO(dt)) {
        Py_XDECREF(dt->tzinfo);
    }
    Py_TYPE(dt)->tp_free(dt);
}
/*
* Indirect access to tzinfo methods.
*/
/* These are all METH_NOARGS, so don't need to check the arglist. */
static PyObject *
datetime_utcoffset(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    /* Delegate to tzinfo.utcoffset(self); naive instances pass None. */
    PyObject *tz = GET_DT_TZINFO(PyDateTime_CAST(op));
    return call_utcoffset(tz, op);
}
static PyObject *
datetime_dst(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    /* Delegate to tzinfo.dst(self); naive instances pass None. */
    PyObject *tz = GET_DT_TZINFO(PyDateTime_CAST(op));
    return call_dst(tz, op);
}
static PyObject *
datetime_tzname(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    /* Delegate to tzinfo.tzname(self); naive instances pass None. */
    PyObject *tz = GET_DT_TZINFO(PyDateTime_CAST(op));
    return call_tzname(tz, op);
}
/*
* datetime arithmetic.
*/
/* factor must be 1 (to add) or -1 (to subtract). The result inherits
* the tzinfo state of date.
*/
/* Add (factor == 1) or subtract (factor == -1) a timedelta to/from a
 * datetime.  The result keeps the datetime's tzinfo and concrete type. */
static PyObject *
add_datetime_timedelta(PyDateTime_DateTime *date, PyDateTime_Delta *delta,
                       int factor)
{
    assert(factor == 1 || factor == -1);

    /* Member invariants bound every operand, so the raw sums below cannot
     * overflow an int before normalization. */
    int y  = GET_YEAR(date);
    int m  = GET_MONTH(date);
    int d  = GET_DAY(date) + factor * GET_TD_DAYS(delta);
    int hh = DATE_GET_HOUR(date);
    int mm = DATE_GET_MINUTE(date);
    int ss = DATE_GET_SECOND(date) + factor * GET_TD_SECONDS(delta);
    int us = DATE_GET_MICROSECOND(date) + factor * GET_TD_MICROSECONDS(delta);

    if (normalize_datetime(&y, &m, &d, &hh, &mm, &ss, &us) < 0) {
        return NULL;
    }

    PyObject *tz = HASTZINFO(date) ? date->tzinfo : Py_None;
    return new_datetime_subclass_ex(y, m, d, hh, mm, ss, us,
                                    tz, Py_TYPE(date));
}
/* nb_add slot: supports datetime + timedelta and timedelta + datetime. */
static PyObject *
datetime_add(PyObject *left, PyObject *right)
{
    if (PyDateTime_Check(left)) {
        /* datetime + timedelta */
        if (PyDelta_Check(right)) {
            return add_datetime_timedelta((PyDateTime_DateTime *)left,
                                          (PyDateTime_Delta *)right,
                                          1);
        }
    }
    else if (PyDelta_Check(left)) {
        /* timedelta + datetime: the slot is only invoked when one operand
         * is a datetime, so `right` must be the datetime here. */
        return add_datetime_timedelta((PyDateTime_DateTime *)right,
                                      (PyDateTime_Delta *)left,
                                      1);
    }
    Py_RETURN_NOTIMPLEMENTED;
}
/* nb_subtract slot: datetime - datetime -> timedelta, and
 * datetime - timedelta -> datetime. */
static PyObject *
datetime_subtract(PyObject *left, PyObject *right)
{
    PyObject *result = Py_NotImplemented;

    if (PyDateTime_Check(left)) {
        /* datetime - ??? */
        if (PyDateTime_Check(right)) {
            /* datetime - datetime */
            PyObject *offset1, *offset2, *offdiff = NULL;
            int delta_d, delta_s, delta_us;

            if (GET_DT_TZINFO(left) == GET_DT_TZINFO(right)) {
                /* Identical tzinfo objects (or both naive): the offsets
                 * cancel, so skip the utcoffset() round trips. */
                offset1 = Py_NewRef(Py_None);
                offset2 = Py_NewRef(Py_None);
            }
            else {
                offset1 = datetime_utcoffset(left, NULL);
                if (offset1 == NULL)
                    return NULL;
                offset2 = datetime_utcoffset(right, NULL);
                if (offset2 == NULL) {
                    Py_DECREF(offset1);
                    return NULL;
                }
                /* Exactly one side naive (offset None) is an error. */
                if ((offset1 != Py_None) != (offset2 != Py_None)) {
                    PyErr_SetString(PyExc_TypeError,
                                    "can't subtract offset-naive and "
                                    "offset-aware datetimes");
                    Py_DECREF(offset1);
                    Py_DECREF(offset2);
                    return NULL;
                }
            }
            if ((offset1 != offset2) &&
                delta_cmp(offset1, offset2) != 0) {
                /* Different UTC offsets: the naive field-wise difference
                 * computed below must be corrected by (offset1 - offset2). */
                offdiff = delta_subtract(offset1, offset2);
                if (offdiff == NULL) {
                    Py_DECREF(offset1);
                    Py_DECREF(offset2);
                    return NULL;
                }
            }
            Py_DECREF(offset1);
            Py_DECREF(offset2);

            /* Naive field-wise difference in days/seconds/microseconds. */
            delta_d = ymd_to_ord(GET_YEAR(left),
                                 GET_MONTH(left),
                                 GET_DAY(left)) -
                      ymd_to_ord(GET_YEAR(right),
                                 GET_MONTH(right),
                                 GET_DAY(right));
            /* These can't overflow, since the values are
             * normalized.  At most this gives the number of
             * seconds in one day.
             */
            delta_s = (DATE_GET_HOUR(left) -
                       DATE_GET_HOUR(right)) * 3600 +
                      (DATE_GET_MINUTE(left) -
                       DATE_GET_MINUTE(right)) * 60 +
                      (DATE_GET_SECOND(left) -
                       DATE_GET_SECOND(right));
            delta_us = DATE_GET_MICROSECOND(left) -
                       DATE_GET_MICROSECOND(right);
            result = new_delta(delta_d, delta_s, delta_us, 1);
            if (result == NULL)
                return NULL;

            if (offdiff != NULL) {
                /* Apply the inter-zone offset correction. */
                Py_SETREF(result, delta_subtract(result, offdiff));
                Py_DECREF(offdiff);
            }
        }
        else if (PyDelta_Check(right)) {
            /* datetime - delta */
            result = add_datetime_timedelta(
                (PyDateTime_DateTime *)left,
                (PyDateTime_Delta *)right,
                -1);
        }
    }

    if (result == Py_NotImplemented)
        Py_INCREF(result);
    return result;
}
/* Various ways to turn a datetime into a string. */
/* repr(): "<type>(y, m, d, H, M[, S[, us]])" with trailing zero fields
 * omitted, then ", fold=1" and/or ", tzinfo=..." appended when set. */
static PyObject *
datetime_repr(PyObject *op)
{
    PyDateTime_DateTime *self = PyDateTime_CAST(op);
    const char *type_name = Py_TYPE(self)->tp_name;
    int us = DATE_GET_MICROSECOND(self);
    int ss = DATE_GET_SECOND(self);
    PyObject *base;

    if (us) {
        base = PyUnicode_FromFormat(
            "%s(%d, %d, %d, %d, %d, %d, %d)",
            type_name,
            GET_YEAR(self), GET_MONTH(self), GET_DAY(self),
            DATE_GET_HOUR(self), DATE_GET_MINUTE(self),
            ss, us);
    }
    else if (ss) {
        base = PyUnicode_FromFormat(
            "%s(%d, %d, %d, %d, %d, %d)",
            type_name,
            GET_YEAR(self), GET_MONTH(self), GET_DAY(self),
            DATE_GET_HOUR(self), DATE_GET_MINUTE(self),
            ss);
    }
    else {
        base = PyUnicode_FromFormat(
            "%s(%d, %d, %d, %d, %d)",
            type_name,
            GET_YEAR(self), GET_MONTH(self), GET_DAY(self),
            DATE_GET_HOUR(self), DATE_GET_MINUTE(self));
    }

    if (base != NULL && DATE_GET_FOLD(self) != 0) {
        base = append_keyword_fold(base, DATE_GET_FOLD(self));
    }
    if (base == NULL || !HASTZINFO(self)) {
        return base;
    }
    return append_keyword_tzinfo(base, self->tzinfo);
}
/* str() is isoformat() with a space separator. */
static PyObject *
datetime_str(PyObject *op)
{
    PyObject *sep = PyUnicode_FromString(" ");
    if (sep == NULL) {
        return NULL;
    }
    PyObject *result = PyObject_CallMethodOneArg(op, &_Py_ID(isoformat), sep);
    Py_DECREF(sep);
    return result;
}
/*[clinic input]
datetime.datetime.isoformat
    sep: int(accept={str}, c_default="'T'", py_default="'T'") = ord('T')
    timespec: str(c_default="NULL") = 'auto'
Return the time formatted according to ISO.
The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
By default, the fractional part is omitted if self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
The optional argument timespec specifies the number of additional
terms of the time to include. Valid options are 'auto', 'hours',
'minutes', 'seconds', 'milliseconds' and 'microseconds'.
[clinic start generated code]*/

static PyObject *
datetime_datetime_isoformat_impl(PyDateTime_DateTime *self, int sep,
                                 const char *timespec)
/*[clinic end generated code: output=9b6ce1383189b0bf input=2fa2512172ccf5d5]*/
{
    char buffer[100];
    PyObject *result = NULL;
    int us = DATE_GET_MICROSECOND(self);
    /* Name -> format-string table; index == precision level.  %c is the
     * separator character. */
    static const char * const specs[][2] = {
        {"hours", "%04d-%02d-%02d%c%02d"},
        {"minutes", "%04d-%02d-%02d%c%02d:%02d"},
        {"seconds", "%04d-%02d-%02d%c%02d:%02d:%02d"},
        {"milliseconds", "%04d-%02d-%02d%c%02d:%02d:%02d.%03d"},
        {"microseconds", "%04d-%02d-%02d%c%02d:%02d:%02d.%06d"},
    };
    size_t given_spec;

    if (timespec == NULL || strcmp(timespec, "auto") == 0) {
        if (us == 0) {
            /* seconds */
            given_spec = 2;
        }
        else {
            /* microseconds */
            given_spec = 4;
        }
    }
    else {
        /* Linear scan over the table; an unmatched name leaves
         * given_spec == Py_ARRAY_LENGTH(specs), caught below. */
        for (given_spec = 0; given_spec < Py_ARRAY_LENGTH(specs); given_spec++) {
            if (strcmp(timespec, specs[given_spec][0]) == 0) {
                if (given_spec == 3) {
                    /* milliseconds: truncate microseconds to 3 digits */
                    us = us / 1000;
                }
                break;
            }
        }
    }

    if (given_spec == Py_ARRAY_LENGTH(specs)) {
        PyErr_Format(PyExc_ValueError, "Unknown timespec value");
        return NULL;
    }
    else {
        /* All fields are always passed; a varargs callee only consumes the
         * arguments its chosen format string names. */
        result = PyUnicode_FromFormat(specs[given_spec][1],
                                      GET_YEAR(self), GET_MONTH(self),
                                      GET_DAY(self), (int)sep,
                                      DATE_GET_HOUR(self), DATE_GET_MINUTE(self),
                                      DATE_GET_SECOND(self), us);
    }

    if (!result || !HASTZINFO(self))
        return result;

    /* We need to append the UTC offset. */
    if (format_utcoffset(buffer, sizeof(buffer), ":", self->tzinfo, (PyObject *)self) < 0) {
        Py_DECREF(result);
        return NULL;
    }
    PyUnicode_AppendAndDel(&result, PyUnicode_FromString(buffer));
    return result;
}
/* ctime(): classic 24-character asctime-style rendering. */
static PyObject *
datetime_ctime(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_DateTime *dt = PyDateTime_CAST(op);
    int hh = DATE_GET_HOUR(dt);
    int mm = DATE_GET_MINUTE(dt);
    int ss = DATE_GET_SECOND(dt);
    return format_ctime(op, hh, mm, ss);
}
/* Miscellaneous methods. */
/* Return a copy of dt with its fold attribute inverted; used by the
 * PEP 495 probing helpers below. */
static PyObject *
flip_fold(PyObject *dt)
{
    PyObject *tz = HASTZINFO(dt)
        ? ((PyDateTime_DateTime *)dt)->tzinfo
        : Py_None;
    return new_datetime_ex2(GET_YEAR(dt), GET_MONTH(dt), GET_DAY(dt),
                            DATE_GET_HOUR(dt), DATE_GET_MINUTE(dt),
                            DATE_GET_SECOND(dt), DATE_GET_MICROSECOND(dt),
                            tz, !DATE_GET_FOLD(dt), Py_TYPE(dt));
}
/* utcoffset() of dt with fold flipped — reveals fold-sensitive zones. */
static PyObject *
get_flip_fold_offset(PyObject *dt)
{
    PyObject *flipped = flip_fold(dt);
    if (flipped == NULL) {
        return NULL;
    }
    PyObject *offset = datetime_utcoffset(flipped, NULL);
    Py_DECREF(flipped);
    return offset;
}
/* PEP 495 exception: Whenever one or both of the operands in
* inter-zone comparison is such that its utcoffset() depends
* on the value of its fold attribute, the result is False.
*
* Return 1 if exception applies, 0 if not, and -1 on error.
*/
static int
pep495_eq_exception(PyObject *self, PyObject *other,
                    PyObject *offset_self, PyObject *offset_other)
{
    int result = 0;
    PyObject *flip_offset;

    /* If flipping self's fold changes its utcoffset(), the offset is
     * fold-dependent and the exception applies. */
    flip_offset = get_flip_fold_offset(self);
    if (flip_offset == NULL)
        return -1;
    if (flip_offset != offset_self &&
        delta_cmp(flip_offset, offset_self))
    {
        result = 1;
        goto done;
    }
    Py_DECREF(flip_offset);

    /* Same probe for the other operand. */
    flip_offset = get_flip_fold_offset(other);
    if (flip_offset == NULL)
        return -1;
    if (flip_offset != offset_other &&
        delta_cmp(flip_offset, offset_other))
        result = 1;
done:
    Py_DECREF(flip_offset);
    return result;
}
/* tp_richcompare: compare two datetimes, accounting for UTC offsets and
 * the PEP 495 inter-zone fold exception. */
static PyObject *
datetime_richcompare(PyObject *self, PyObject *other, int op)
{
    PyObject *result = NULL;
    PyObject *offset1, *offset2;
    int diff;

    if (!PyDateTime_Check(other)) {
        Py_RETURN_NOTIMPLEMENTED;
    }

    if (GET_DT_TZINFO(self) == GET_DT_TZINFO(other)) {
        /* Same tzinfo object (or both naive): the packed data bytes order
         * the same way the values do, so memcmp suffices. */
        diff = memcmp(((PyDateTime_DateTime *)self)->data,
                      ((PyDateTime_DateTime *)other)->data,
                      _PyDateTime_DATETIME_DATASIZE);
        return diff_to_bool(diff, op);
    }
    offset1 = datetime_utcoffset(self, NULL);
    if (offset1 == NULL)
        return NULL;
    offset2 = datetime_utcoffset(other, NULL);
    if (offset2 == NULL)
        goto done;
    /* If they're both naive, or both aware and have the same offsets,
     * we get off cheap. Note that if they're both naive, offset1 ==
     * offset2 == Py_None at this point.
     */
    if ((offset1 == offset2) ||
        (PyDelta_Check(offset1) && PyDelta_Check(offset2) &&
         delta_cmp(offset1, offset2) == 0)) {
        diff = memcmp(((PyDateTime_DateTime *)self)->data,
                      ((PyDateTime_DateTime *)other)->data,
                      _PyDateTime_DATETIME_DATASIZE);
        if ((op == Py_EQ || op == Py_NE) && diff == 0) {
            /* PEP 495: equal values in fold-sensitive zones compare
             * unequal across zones. */
            int ex = pep495_eq_exception(self, other, offset1, offset2);
            if (ex == -1)
                goto done;
            if (ex)
                diff = 1;
        }
        result = diff_to_bool(diff, op);
    }
    else if (offset1 != Py_None && offset2 != Py_None) {
        PyDateTime_Delta *delta;

        assert(offset1 != offset2); /* else last "if" handled it */
        /* Different offsets: fall back to the full timedelta subtraction
         * and reduce it to a sign. */
        delta = (PyDateTime_Delta *)datetime_subtract(self, other);
        if (delta == NULL)
            goto done;
        diff = GET_TD_DAYS(delta);
        if (diff == 0)
            diff = GET_TD_SECONDS(delta) |
                   GET_TD_MICROSECONDS(delta);
        Py_DECREF(delta);
        if ((op == Py_EQ || op == Py_NE) && diff == 0) {
            int ex = pep495_eq_exception(self, other, offset1, offset2);
            if (ex == -1)
                goto done;
            if (ex)
                diff = 1;
        }
        result = diff_to_bool(diff, op);
    }
    else if (op == Py_EQ) {
        result = Py_NewRef(Py_False);
    }
    else if (op == Py_NE) {
        result = Py_NewRef(Py_True);
    }
    else {
        /* Ordering a naive against an aware datetime is an error. */
        PyErr_SetString(PyExc_TypeError,
                        "can't compare offset-naive and "
                        "offset-aware datetimes");
    }
done:
    Py_DECREF(offset1);
    Py_XDECREF(offset2);
    return result;
}
/* tp_hash: aware datetimes hash by their UTC-normalized value (so equal
 * instants hash equal); naive ones hash their packed bytes.  The result
 * is cached in self->hashcode (-1 means "not yet computed"). */
static Py_hash_t
datetime_hash(PyObject *op)
{
    PyDateTime_DateTime *self = PyDateTime_CAST(op);
    Py_hash_t hash = FT_ATOMIC_LOAD_SSIZE_RELAXED(self->hashcode);
    if (hash == -1) {
        PyObject *offset, *self0;
        if (DATE_GET_FOLD(self)) {
            /* PEP 495: hashing ignores fold, so take the offset of the
             * fold=0 twin. */
            self0 = new_datetime_ex2(GET_YEAR(self),
                                     GET_MONTH(self),
                                     GET_DAY(self),
                                     DATE_GET_HOUR(self),
                                     DATE_GET_MINUTE(self),
                                     DATE_GET_SECOND(self),
                                     DATE_GET_MICROSECOND(self),
                                     HASTZINFO(self) ? self->tzinfo : Py_None,
                                     0, Py_TYPE(self));
            if (self0 == NULL)
                return -1;
        }
        else {
            self0 = Py_NewRef(self);
        }
        offset = datetime_utcoffset(self0, NULL);
        Py_DECREF(self0);

        if (offset == NULL)
            return -1;

        /* Reduce this to a hash of another object. */
        if (offset == Py_None) {
            /* Naive: hash the raw packed representation. */
            hash = generic_hash(
                (unsigned char *)self->data, _PyDateTime_DATETIME_DATASIZE);
            FT_ATOMIC_STORE_SSIZE_RELAXED(self->hashcode, hash);
        } else {
            /* Aware: hash the timedelta (self-as-delta - offset), i.e. the
             * UTC-equivalent instant. */
            PyObject *temp1, *temp2;
            int days, seconds;

            assert(HASTZINFO(self));
            days = ymd_to_ord(GET_YEAR(self),
                              GET_MONTH(self),
                              GET_DAY(self));
            seconds = DATE_GET_HOUR(self) * 3600 +
                      DATE_GET_MINUTE(self) * 60 +
                      DATE_GET_SECOND(self);
            temp1 = new_delta(days, seconds,
                              DATE_GET_MICROSECOND(self),
                              1);
            if (temp1 == NULL) {
                Py_DECREF(offset);
                return -1;
            }
            temp2 = delta_subtract(temp1, offset);
            Py_DECREF(temp1);
            if (temp2 == NULL) {
                Py_DECREF(offset);
                return -1;
            }
            hash = PyObject_Hash(temp2);
            FT_ATOMIC_STORE_SSIZE_RELAXED(self->hashcode, hash);
            Py_DECREF(temp2);
        }
        Py_DECREF(offset);
    }
    return hash;
}
/*[clinic input]
datetime.datetime.replace
    year: int(c_default="GET_YEAR(self)") = unchanged
    month: int(c_default="GET_MONTH(self)") = unchanged
    day: int(c_default="GET_DAY(self)") = unchanged
    hour: int(c_default="DATE_GET_HOUR(self)") = unchanged
    minute: int(c_default="DATE_GET_MINUTE(self)") = unchanged
    second: int(c_default="DATE_GET_SECOND(self)") = unchanged
    microsecond: int(c_default="DATE_GET_MICROSECOND(self)") = unchanged
    tzinfo: object(c_default="HASTZINFO(self) ? ((PyDateTime_DateTime *)self)->tzinfo : Py_None") = unchanged
    *
    fold: int(c_default="DATE_GET_FOLD(self)") = unchanged
Return datetime with new specified fields.
[clinic start generated code]*/

static PyObject *
datetime_datetime_replace_impl(PyDateTime_DateTime *self, int year,
                               int month, int day, int hour, int minute,
                               int second, int microsecond, PyObject *tzinfo,
                               int fold)
/*[clinic end generated code: output=00bc96536833fddb input=fd972762d604d3e7]*/
{
    /* Clinic has already filled unspecified arguments with self's current
     * field values; validation happens inside the constructor helper. */
    return new_datetime_subclass_fold_ex(year, month, day, hour, minute,
                                         second, microsecond, tzinfo, fold,
                                         Py_TYPE(self));
}
/* Build a timezone object describing the local UTC offset (and, when the
 * platform can supply one, the zone name) in effect at `timestamp`.
 *
 * Returns a new reference, or NULL with an exception set. */
static PyObject *
local_timezone_from_timestamp(time_t timestamp)
{
    PyObject *result = NULL;
    PyObject *delta;
    struct tm local_time_tm;
    PyObject *nameo = NULL;
    const char *zone = NULL;

    if (_PyTime_localtime(timestamp, &local_time_tm) != 0)
        return NULL;
#ifdef HAVE_STRUCT_TM_TM_ZONE
    /* The platform's struct tm carries the name and offset directly. */
    zone = local_time_tm.tm_zone;
    delta = new_delta(0, local_time_tm.tm_gmtoff, 0, 1);
#else /* HAVE_STRUCT_TM_TM_ZONE */
    {
        /* No tm_zone/tm_gmtoff: derive the offset as (local - UTC) using
         * two naive datetimes, and the name from strftime("%Z"). */
        PyObject *local_time, *utc_time;
        struct tm utc_time_tm;
        char buf[100];
        strftime(buf, sizeof(buf), "%Z", &local_time_tm);
        zone = buf;
        local_time = new_datetime(local_time_tm.tm_year + 1900,
                                  local_time_tm.tm_mon + 1,
                                  local_time_tm.tm_mday,
                                  local_time_tm.tm_hour,
                                  local_time_tm.tm_min,
                                  local_time_tm.tm_sec, 0, Py_None, 0);
        if (local_time == NULL) {
            return NULL;
        }
        if (_PyTime_gmtime(timestamp, &utc_time_tm) != 0) {
            /* Bug fix: this error path used to leak local_time. */
            Py_DECREF(local_time);
            return NULL;
        }
        utc_time = new_datetime(utc_time_tm.tm_year + 1900,
                                utc_time_tm.tm_mon + 1,
                                utc_time_tm.tm_mday,
                                utc_time_tm.tm_hour,
                                utc_time_tm.tm_min,
                                utc_time_tm.tm_sec, 0, Py_None, 0);
        if (utc_time == NULL) {
            Py_DECREF(local_time);
            return NULL;
        }
        delta = datetime_subtract(local_time, utc_time);
        Py_DECREF(local_time);
        Py_DECREF(utc_time);
    }
#endif /* HAVE_STRUCT_TM_TM_ZONE */
    if (delta == NULL) {
        return NULL;
    }
    if (zone != NULL) {
        /* Decode the platform-provided name; surrogateescape preserves
         * bytes that aren't valid in the locale encoding. */
        nameo = PyUnicode_DecodeLocale(zone, "surrogateescape");
        if (nameo == NULL)
            goto error;
    }
    result = new_timezone(delta, nameo);
    Py_XDECREF(nameo);
error:
    Py_DECREF(delta);
    return result;
}
/* Return the local timezone in effect at the instant represented by
 * `utc_time` (a datetime expressed in UTC). */
static PyObject *
local_timezone(PyDateTime_DateTime *utc_time)
{
    PyObject *current_mod = NULL;
    datetime_state *st = GET_CURRENT_STATE(current_mod);
    /* Whole seconds since the epoch: (utc_time - epoch) // 1 second. */
    PyObject *diff = datetime_subtract((PyObject *)utc_time, CONST_EPOCH(st));
    RELEASE_CURRENT_STATE(st, current_mod);
    if (diff == NULL) {
        return NULL;
    }
    PyObject *one_second = new_delta(0, 1, 0, 0);
    if (one_second == NULL) {
        Py_DECREF(diff);
        return NULL;
    }
    PyObject *seconds = divide_timedelta_timedelta((PyDateTime_Delta *)diff,
                                                   (PyDateTime_Delta *)one_second);
    Py_DECREF(one_second);
    Py_DECREF(diff);
    if (seconds == NULL) {
        return NULL;
    }
    time_t timestamp = _PyLong_AsTime_t(seconds);
    Py_DECREF(seconds);
    if (timestamp == -1 && PyErr_Occurred()) {
        return NULL;
    }
    return local_timezone_from_timestamp(timestamp);
}
static long long
local_to_seconds(int year, int month, int day,
int hour, int minute, int second, int fold);
/* Return the local timezone in effect at the wall time carried by
 * `local_dt` (a naive datetime interpreted as local time). */
static PyObject *
local_timezone_from_local(PyDateTime_DateTime *local_dt)
{
    long long seconds, seconds2;
    time_t timestamp;
    int fold = DATE_GET_FOLD(local_dt);

    /* Resolve the wall time to epoch seconds for both fold values so a
     * DST gap can be detected below. */
    seconds = local_to_seconds(GET_YEAR(local_dt),
                               GET_MONTH(local_dt),
                               GET_DAY(local_dt),
                               DATE_GET_HOUR(local_dt),
                               DATE_GET_MINUTE(local_dt),
                               DATE_GET_SECOND(local_dt),
                               fold);
    if (seconds == -1)
        return NULL;
    seconds2 = local_to_seconds(GET_YEAR(local_dt),
                                GET_MONTH(local_dt),
                                GET_DAY(local_dt),
                                DATE_GET_HOUR(local_dt),
                                DATE_GET_MINUTE(local_dt),
                                DATE_GET_SECOND(local_dt),
                                !fold);
    if (seconds2 == -1)
        return NULL;
    /* Detect gap */
    if (seconds2 != seconds && (seconds2 > seconds) == fold)
        seconds = seconds2;

    /* XXX: add bounds check */
    timestamp = seconds - epoch;
    return local_timezone_from_timestamp(timestamp);
}
/*[clinic input]
datetime.datetime.astimezone
    tz as tzinfo: object = None
Convert to local time in new timezone tz.
[clinic start generated code]*/

static PyObject *
datetime_datetime_astimezone_impl(PyDateTime_DateTime *self,
                                  PyObject *tzinfo)
/*[clinic end generated code: output=ae2263d04e944537 input=9c675c8595009935]*/
{
    PyDateTime_DateTime *result;
    PyObject *offset;
    PyObject *temp;
    PyObject *self_tzinfo;

    if (check_tzinfo_subclass(tzinfo) == -1)
        return NULL;

    /* Naive datetimes are interpreted as local time.  The `naive` label is
     * also re-entered below when self's utcoffset() returns None. */
    if (!HASTZINFO(self) || self->tzinfo == Py_None) {
naive:
        self_tzinfo = local_timezone_from_local(self);
        if (self_tzinfo == NULL)
            return NULL;
    } else {
        self_tzinfo = Py_NewRef(self->tzinfo);
    }

    /* Conversion to self's own time zone is a NOP. */
    if (self_tzinfo == tzinfo) {
        Py_DECREF(self_tzinfo);
        return Py_NewRef(self);
    }

    /* Convert self to UTC. */
    offset = call_utcoffset(self_tzinfo, (PyObject *)self);
    Py_DECREF(self_tzinfo);
    if (offset == NULL)
        return NULL;
    else if (offset == Py_None) {
        Py_DECREF(offset);
        goto naive;
    }
    else if (!PyDelta_Check(offset)) {
        /* Bug fix: format the error (which reads Py_TYPE(offset)) BEFORE
         * dropping the reference — decrefing first can leave Py_TYPE
         * reading freed memory when we held the last reference. */
        PyErr_Format(PyExc_TypeError, "utcoffset() returned %.200s,"
                     " expected timedelta or None", Py_TYPE(offset)->tp_name);
        Py_DECREF(offset);
        return NULL;
    }
    /* result = self - offset */
    result = (PyDateTime_DateTime *)add_datetime_timedelta(self,
                                       (PyDateTime_Delta *)offset, -1);
    Py_DECREF(offset);
    if (result == NULL)
        return NULL;

    /* Make sure result is aware and UTC. */
    if (!HASTZINFO(result)) {
        /* add_datetime_timedelta produced a naive subclass instance;
         * rebuild it with the UTC singleton attached. */
        temp = (PyObject *)result;
        result = (PyDateTime_DateTime *)
                 new_datetime_ex2(GET_YEAR(result),
                                  GET_MONTH(result),
                                  GET_DAY(result),
                                  DATE_GET_HOUR(result),
                                  DATE_GET_MINUTE(result),
                                  DATE_GET_SECOND(result),
                                  DATE_GET_MICROSECOND(result),
                                  CONST_UTC(NO_STATE),
                                  DATE_GET_FOLD(result),
                                  Py_TYPE(result));
        Py_DECREF(temp);
        if (result == NULL)
            return NULL;
    }
    else {
        /* Result is already aware - just replace tzinfo. */
        Py_SETREF(result->tzinfo, Py_NewRef(CONST_UTC(NO_STATE)));
    }

    /* Attach new tzinfo and let fromutc() do the rest. */
    if (tzinfo == Py_None) {
        tzinfo = local_timezone(result);
        if (tzinfo == NULL) {
            Py_DECREF(result);
            return NULL;
        }
    }
    else
        Py_INCREF(tzinfo);
    Py_SETREF(result->tzinfo, tzinfo);

    temp = (PyObject *)result;
    result = (PyDateTime_DateTime *)
             PyObject_CallMethodOneArg(tzinfo, &_Py_ID(fromutc), temp);
    Py_DECREF(temp);

    return (PyObject *)result;
}
/* timetuple(): struct_time view of the wall-clock fields; tm_isdst comes
 * from tzinfo.dst() when available, else -1 ("unknown"). */
static PyObject *
datetime_timetuple(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_DateTime *self = PyDateTime_CAST(op);
    int dstflag = -1;

    if (HASTZINFO(self) && self->tzinfo != Py_None) {
        PyObject *dst = call_dst(self->tzinfo, op);
        if (dst == NULL) {
            return NULL;
        }
        if (dst != Py_None) {
            dstflag = delta_bool(dst);
        }
        Py_DECREF(dst);
    }
    return build_struct_time(GET_YEAR(self), GET_MONTH(self), GET_DAY(self),
                             DATE_GET_HOUR(self), DATE_GET_MINUTE(self),
                             DATE_GET_SECOND(self), dstflag);
}
/* Invert the local() conversion: find an epoch-seconds value u with
 * local(u) equal to the given wall-clock fields, using `fold` to choose
 * between ambiguous solutions.  Returns -1 on error. */
static long long
local_to_seconds(int year, int month, int day,
                 int hour, int minute, int second, int fold)
{
    long long t, a, b, u1, u2, t1, t2, lt;
    t = utc_to_seconds(year, month, day, hour, minute, second);
    /* Our goal is to solve t = local(u) for u. */
    lt = local(t);
    if (lt == -1)
        return -1;
    a = lt - t;
    u1 = t - a;
    t1 = local(u1);
    if (t1 == -1)
        return -1;
    if (t1 == t) {
        /* We found one solution, but it may not be the one we need.
         * Look for an earlier solution (if `fold` is 0), or a
         * later one (if `fold` is 1). */
        if (fold)
            u2 = u1 + max_fold_seconds;
        else
            u2 = u1 - max_fold_seconds;
        lt = local(u2);
        if (lt == -1)
            return -1;
        b = lt - u2;
        if (a == b)
            return u1;
    }
    else {
        b = t1 - u1;
        assert(a != b);
    }
    /* Retry with the second candidate offset b. */
    u2 = t - b;
    t2 = local(u2);
    if (t2 == -1)
        return -1;
    if (t2 == t)
        return u2;
    if (t1 == t)
        return u1;
    /* We have found both offsets a and b, but neither t - a nor t - b is
     * a solution.  This means t is in the gap. */
    return fold?Py_MIN(u1, u2):Py_MAX(u1, u2);
}
/* date(1970,1,1).toordinal() == 719163 */
#define EPOCH_SECONDS (719163LL * 24 * 60 * 60)

/* timestamp(): POSIX timestamp as a float.  Aware datetimes subtract the
 * UTC epoch constant; naive ones are interpreted as local time. */
static PyObject *
datetime_timestamp(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_DateTime *self = PyDateTime_CAST(op);
    PyObject *result;

    if (HASTZINFO(self) && self->tzinfo != Py_None) {
        PyObject *current_mod = NULL;
        datetime_state *st = GET_CURRENT_STATE(current_mod);

        /* Aware: (self - epoch).total_seconds(). */
        PyObject *delta;
        delta = datetime_subtract(op, CONST_EPOCH(st));
        RELEASE_CURRENT_STATE(st, current_mod);
        if (delta == NULL)
            return NULL;
        result = delta_total_seconds(delta, NULL);
        Py_DECREF(delta);
    }
    else {
        /* Naive: resolve the wall time via the local timezone, honoring
         * the fold attribute for ambiguous times. */
        long long seconds;
        seconds = local_to_seconds(GET_YEAR(self),
                                   GET_MONTH(self),
                                   GET_DAY(self),
                                   DATE_GET_HOUR(self),
                                   DATE_GET_MINUTE(self),
                                   DATE_GET_SECOND(self),
                                   DATE_GET_FOLD(self));
        if (seconds == -1)
            return NULL;
        result = PyFloat_FromDouble(seconds - EPOCH_SECONDS +
                                    DATE_GET_MICROSECOND(self) / 1e6);
    }
    return result;
}
/* date(): the calendar portion of self as a plain date object. */
static PyObject *
datetime_getdate(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_DateTime *dt = PyDateTime_CAST(op);
    return new_date(GET_YEAR(dt), GET_MONTH(dt), GET_DAY(dt));
}
/* time(): the clock portion of self as a naive time (tzinfo=None). */
static PyObject *
datetime_gettime(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_DateTime *dt = PyDateTime_CAST(op);
    return new_time(DATE_GET_HOUR(dt), DATE_GET_MINUTE(dt),
                    DATE_GET_SECOND(dt), DATE_GET_MICROSECOND(dt),
                    Py_None, DATE_GET_FOLD(dt));
}
/* timetz(): like time(), but self's tzinfo is carried over. */
static PyObject *
datetime_gettimetz(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_DateTime *dt = PyDateTime_CAST(op);
    return new_time(DATE_GET_HOUR(dt), DATE_GET_MINUTE(dt),
                    DATE_GET_SECOND(dt), DATE_GET_MICROSECOND(dt),
                    GET_DT_TZINFO(dt), DATE_GET_FOLD(dt));
}
/* utctimetuple(): struct_time of self shifted to UTC; tm_isdst is 0. */
static PyObject *
datetime_utctimetuple(PyObject *op, PyObject *Py_UNUSED(dummy))
{
    PyDateTime_DateTime *self = PyDateTime_CAST(op);
    PyDateTime_DateTime *utcself;

    PyObject *tzinfo = GET_DT_TZINFO(self);
    if (tzinfo == Py_None) {
        /* Naive: treated as already being in UTC. */
        utcself = (PyDateTime_DateTime *)Py_NewRef(self);
    }
    else {
        PyObject *offset = call_utcoffset(tzinfo, (PyObject *)self);
        if (offset == NULL) {
            return NULL;
        }
        if (offset == Py_None) {
            Py_DECREF(offset);
            utcself = (PyDateTime_DateTime *)Py_NewRef(self);
        }
        else {
            /* Aware: shift by -utcoffset() to get the UTC fields. */
            utcself = (PyDateTime_DateTime *)add_datetime_timedelta(
                self, (PyDateTime_Delta *)offset, -1);
            Py_DECREF(offset);
            if (utcself == NULL) {
                return NULL;
            }
        }
    }

    int y  = GET_YEAR(utcself);
    int m  = GET_MONTH(utcself);
    int d  = GET_DAY(utcself);
    int hh = DATE_GET_HOUR(utcself);
    int mm = DATE_GET_MINUTE(utcself);
    int ss = DATE_GET_SECOND(utcself);
    Py_DECREF(utcself);
    return build_struct_time(y, m, d, hh, mm, ss, 0);
}
/* Pickle support, a simple use of __reduce__. */
/* Let basestate be the non-tzinfo data string.
* If tzinfo is None, this returns (basestate,), else (basestate, tzinfo).
* So it's a tuple in any (non-error) case.
* __getstate__ isn't exposed.
*/
static PyObject *
datetime_getstate(PyDateTime_DateTime *self, int proto)
{
    PyObject *basestate;
    PyObject *result = NULL;

    basestate = PyBytes_FromStringAndSize((char *)self->data,
                                          _PyDateTime_DATETIME_DATASIZE);
    if (basestate != NULL) {
        /* Fold is only encoded for pickle protocols >= 4 — presumably so
         * older protocols stay byte-compatible with older Pythons. */
        if (proto > 3 && DATE_GET_FOLD(self))
            /* Set the first bit of the third byte */
            PyBytes_AS_STRING(basestate)[2] |= (1 << 7);
        if (! HASTZINFO(self) || self->tzinfo == Py_None)
            result = PyTuple_Pack(1, basestate);
        else
            result = PyTuple_Pack(2, basestate, self->tzinfo);
        Py_DECREF(basestate);
    }
    return result;
}
/*[clinic input]
datetime.datetime.__reduce_ex__
    proto: int
    /
[clinic start generated code]*/

static PyObject *
datetime_datetime___reduce_ex___impl(PyDateTime_DateTime *self, int proto)
/*[clinic end generated code: output=53d712ce3e927735 input=bab748e49ffb30c3]*/
{
    /* "N" transfers ownership of the state tuple to Py_BuildValue, which
     * also propagates a NULL state as failure. */
    PyObject *state = datetime_getstate(self, proto);
    return Py_BuildValue("(ON)", Py_TYPE(self), state);
}
/*[clinic input]
datetime.datetime.__reduce__
[clinic start generated code]*/

static PyObject *
datetime_datetime___reduce___impl(PyDateTime_DateTime *self)
/*[clinic end generated code: output=6794df9ea75666cf input=cadbbeb3bf3bf94c]*/
{
    /* __reduce__ pins protocol 2; "N" steals the state reference. */
    PyObject *state = datetime_getstate(self, 2);
    return Py_BuildValue("(ON)", Py_TYPE(self), state);
}
/* Method table for datetime.datetime; clinic METHODDEF macros expand to
 * full entries for the clinic-generated wrappers. */
static PyMethodDef datetime_methods[] = {

    /* Class methods: */

    DATETIME_DATETIME_NOW_METHODDEF
    DATETIME_DATETIME_UTCNOW_METHODDEF
    DATETIME_DATETIME_FROMTIMESTAMP_METHODDEF
    DATETIME_DATETIME_UTCFROMTIMESTAMP_METHODDEF
    DATETIME_DATETIME_STRPTIME_METHODDEF
    DATETIME_DATETIME_COMBINE_METHODDEF
    DATETIME_DATETIME_FROMISOFORMAT_METHODDEF

    /* Instance methods: */

    {"date", datetime_getdate, METH_NOARGS,
     PyDoc_STR("Return date object with same year, month and day.")},

    {"time", datetime_gettime, METH_NOARGS,
     PyDoc_STR("Return time object with same time but with tzinfo=None.")},

    {"timetz", datetime_gettimetz, METH_NOARGS,
     PyDoc_STR("Return time object with same time and tzinfo.")},

    {"ctime", datetime_ctime, METH_NOARGS,
     PyDoc_STR("Return ctime() style string.")},

    {"timetuple", datetime_timetuple, METH_NOARGS,
     PyDoc_STR("Return time tuple, compatible with time.localtime().")},

    {"timestamp", datetime_timestamp, METH_NOARGS,
     PyDoc_STR("Return POSIX timestamp as float.")},

    {"utctimetuple", datetime_utctimetuple, METH_NOARGS,
     PyDoc_STR("Return UTC time tuple, compatible with time.localtime().")},

    DATETIME_DATETIME_ISOFORMAT_METHODDEF

    {"utcoffset", datetime_utcoffset, METH_NOARGS,
     PyDoc_STR("Return self.tzinfo.utcoffset(self).")},

    {"tzname", datetime_tzname, METH_NOARGS,
     PyDoc_STR("Return self.tzinfo.tzname(self).")},

    {"dst", datetime_dst, METH_NOARGS,
     PyDoc_STR("Return self.tzinfo.dst(self).")},

    DATETIME_DATETIME_REPLACE_METHODDEF

    /* __replace__ (PEP 695 copy.replace protocol) shares the replace() impl. */
    {"__replace__", _PyCFunction_CAST(datetime_datetime_replace), METH_FASTCALL | METH_KEYWORDS,
     PyDoc_STR("__replace__($self, /, **changes)\n--\n\nThe same as replace().")},

    DATETIME_DATETIME_ASTIMEZONE_METHODDEF

    DATETIME_DATETIME___REDUCE_EX___METHODDEF
    DATETIME_DATETIME___REDUCE___METHODDEF

    {NULL, NULL}
};
/* Number protocol: only + and - are meaningful for datetimes. */
static PyNumberMethods datetime_as_number = {
    datetime_add,                               /* nb_add */
    datetime_subtract,                          /* nb_subtract */
    0,                                          /* nb_multiply */
    0,                                          /* nb_remainder */
    0,                                          /* nb_divmod */
    0,                                          /* nb_power */
    0,                                          /* nb_negative */
    0,                                          /* nb_positive */
    0,                                          /* nb_absolute */
    0,                                          /* nb_bool */
};
/* Static (non-heap) type object for datetime.datetime; inherits from
 * datetime.date and allows subclassing. */
static PyTypeObject PyDateTime_DateTimeType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "datetime.datetime",                        /* tp_name */
    sizeof(PyDateTime_DateTime),                /* tp_basicsize */
    0,                                          /* tp_itemsize */
    datetime_dealloc,                           /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    datetime_repr,                              /* tp_repr */
    &datetime_as_number,                        /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    datetime_hash,                              /* tp_hash */
    0,                                          /* tp_call */
    datetime_str,                               /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,   /* tp_flags */
    datetime_datetime__doc__,                   /* tp_doc */
    0,                                          /* tp_traverse */
    0,                                          /* tp_clear */
    datetime_richcompare,                       /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    datetime_methods,                           /* tp_methods */
    0,                                          /* tp_members */
    datetime_getset,                            /* tp_getset */
    &PyDateTime_DateType,                       /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    0,                                          /* tp_init */
    datetime_alloc,                             /* tp_alloc */
    datetime_new,                               /* tp_new */
    0,                                          /* tp_free */
};
/* ---------------------------------------------------------------------------
* datetime C-API.
*/
/* The static types exported through the C-API capsule. */
static PyTypeObject * const capi_types[] = {
    &PyDateTime_DateType,
    &PyDateTime_DateTimeType,
    &PyDateTime_TimeType,
    &PyDateTime_DeltaType,
    &PyDateTime_TZInfoType,
    /* Indirectly, via the utc object. */
    &PyDateTime_TimeZoneType,
};
/* The C-API is process-global. This violates interpreter isolation
* due to the objects stored here. Thus each of those objects must
* be managed carefully. */
// XXX Can we make this const?
/* The single process-wide PyDateTime_CAPI instance handed to clients of
 * the datetime C-API (see get_datetime_capi() below). */
static PyDateTime_CAPI capi = {
    /* The classes must be readied before used here.
     * That will happen the first time the module is loaded.
     * They aren't safe to be shared between interpreters,
     * but that's okay as long as the module is single-phase init. */
    .DateType = &PyDateTime_DateType,
    .DateTimeType = &PyDateTime_DateTimeType,
    .TimeType = &PyDateTime_TimeType,
    .DeltaType = &PyDateTime_DeltaType,
    .TZInfoType = &PyDateTime_TZInfoType,

    .TimeZone_UTC = (PyObject *)&utc_timezone,

    .Date_FromDate = new_date_ex,
    .DateTime_FromDateAndTime = new_datetime_ex,
    .Time_FromTime = new_time_ex,
    .Delta_FromDelta = new_delta_ex,
    .TimeZone_FromTimeZone = new_timezone,
    .DateTime_FromTimestamp = datetime_datetime_fromtimestamp_capi,
    .Date_FromTimestamp = datetime_date_fromtimestamp_capi,
    .DateTime_FromDateAndTimeAndFold = new_datetime_ex2,
    .Time_FromTimeAndFold = new_time_ex2,
};
/* Get a new C API by calling this function.
* Clients get at C API via PyDateTime_IMPORT, defined in datetime.h.
*/
static inline PyDateTime_CAPI *
get_datetime_capi(void)
{
    /* Returns a pointer to the process-global capi struct above. */
    return &capi;
}
/* Build an unnamed timezone from raw timedelta components, i.e.
 * timezone(timedelta(days, sec, ms)). */
static PyObject *
create_timezone_from_delta(int days, int sec, int ms, int normalize)
{
    PyObject *tz = NULL;
    PyObject *offset = new_delta(days, sec, ms, normalize);
    if (offset != NULL) {
        tz = create_timezone(offset, NULL);
        Py_DECREF(offset);
    }
    return tz;
}
/* ---------------------------------------------------------------------------
* Module state lifecycle.
*/
/* Initialize per-module state: the isocalendar heap type, the cached
 * microsecond-conversion ints, and the UTC epoch datetime.  When
 * old_module is given (module re-import), the cached objects are shared
 * from the previous module instance instead of being rebuilt.
 * Returns 0 on success, -1 on error (partial state is left for
 * clear_state() to release). */
static int
init_state(datetime_state *st, PyObject *module, PyObject *old_module)
{
    /* Each module gets its own heap types. */
#define ADD_TYPE(FIELD, SPEC, BASE)                 \
    do {                                            \
        PyObject *cls = PyType_FromModuleAndSpec(   \
                module, SPEC, (PyObject *)BASE);    \
        if (cls == NULL) {                          \
            return -1;                              \
        }                                           \
        st->FIELD = (PyTypeObject *)cls;            \
    } while (0)

    ADD_TYPE(isocalendar_date_type, &isocal_spec, &PyTuple_Type);
#undef ADD_TYPE

    if (old_module != NULL) {
        assert(old_module != module);
        datetime_state *st_old = get_module_state(old_module);
        /* Share (new refs to) the previous module's cached singletons;
         * only the heap type created above is per-module. */
        *st = (datetime_state){
            .isocalendar_date_type = st->isocalendar_date_type,
            .us_per_ms = Py_NewRef(st_old->us_per_ms),
            .us_per_second = Py_NewRef(st_old->us_per_second),
            .us_per_minute = Py_NewRef(st_old->us_per_minute),
            .us_per_hour = Py_NewRef(st_old->us_per_hour),
            .us_per_day = Py_NewRef(st_old->us_per_day),
            .us_per_week = Py_NewRef(st_old->us_per_week),
            .seconds_per_day = Py_NewRef(st_old->seconds_per_day),
            .epoch = Py_NewRef(st_old->epoch),
        };
        return 0;
    }

    st->us_per_ms = PyLong_FromLong(1000);
    if (st->us_per_ms == NULL) {
        return -1;
    }
    st->us_per_second = PyLong_FromLong(1000000);
    if (st->us_per_second == NULL) {
        return -1;
    }
    st->us_per_minute = PyLong_FromLong(60000000);
    if (st->us_per_minute == NULL) {
        return -1;
    }
    st->seconds_per_day = PyLong_FromLong(24 * 3600);
    if (st->seconds_per_day == NULL) {
        return -1;
    }

    /* The rest are too big for 32-bit ints, but even
     * us_per_week fits in 40 bits, so doubles should be exact.
     */
    st->us_per_hour = PyLong_FromDouble(3600000000.0);
    if (st->us_per_hour == NULL) {
        return -1;
    }
    st->us_per_day = PyLong_FromDouble(86400000000.0);
    if (st->us_per_day == NULL) {
        return -1;
    }
    st->us_per_week = PyLong_FromDouble(604800000000.0);
    if (st->us_per_week == NULL) {
        return -1;
    }

    /* Init Unix epoch */
    st->epoch = new_datetime(
        1970, 1, 1, 0, 0, 0, 0, (PyObject *)&utc_timezone, 0);
    if (st->epoch == NULL) {
        return -1;
    }

    return 0;
}
/* GC traversal for the per-module state: visit the heap types it owns. */
static int
traverse_state(datetime_state *st, visitproc visit, void *arg)
{
    /* heap types */
    Py_VISIT(st->isocalendar_date_type);
    return 0;
}
/* Drop every reference held by the per-module state.  Py_CLEAR leaves
 * the fields NULL, so this is safe to call more than once. */
static int
clear_state(datetime_state *st)
{
    Py_CLEAR(st->isocalendar_date_type);
    Py_CLEAR(st->us_per_ms);
    Py_CLEAR(st->us_per_second);
    Py_CLEAR(st->us_per_minute);
    Py_CLEAR(st->us_per_hour);
    Py_CLEAR(st->us_per_day);
    Py_CLEAR(st->us_per_week);
    Py_CLEAR(st->seconds_per_day);
    Py_CLEAR(st->epoch);
    return 0;
}
/* Ready the static datetime types for `interp` and populate their class
 * attributes (min/max/resolution, timezone.utc).  Called during
 * interpreter startup. */
PyStatus
_PyDateTime_InitTypes(PyInterpreterState *interp)
{
    /* Bases classes must be initialized before subclasses,
     * so capi_types must have the types in the appropriate order. */
    for (size_t i = 0; i < Py_ARRAY_LENGTH(capi_types); i++) {
        PyTypeObject *type = capi_types[i];
        if (_PyStaticType_InitForExtension(interp, type) < 0) {
            return _PyStatus_ERR("could not initialize static types");
        }
    }

    /* Store `value_expr` (a new reference) into `dict` under key `c`,
     * releasing the reference whether or not the insert succeeds. */
#define DATETIME_ADD_MACRO(dict, c, value_expr)         \
    do {                                                \
        assert(!PyErr_Occurred());                      \
        PyObject *value = (value_expr);                 \
        if (value == NULL) {                            \
            goto error;                                 \
        }                                               \
        if (PyDict_SetItemString(dict, c, value) < 0) { \
            Py_DECREF(value);                           \
            goto error;                                 \
        }                                               \
        Py_DECREF(value);                               \
    } while(0)

    /* timedelta values */
    PyObject *d = _PyType_GetDict(&PyDateTime_DeltaType);
    DATETIME_ADD_MACRO(d, "resolution", new_delta(0, 0, 1, 0));
    DATETIME_ADD_MACRO(d, "min", new_delta(-MAX_DELTA_DAYS, 0, 0, 0));
    DATETIME_ADD_MACRO(d, "max",
                       new_delta(MAX_DELTA_DAYS, 24*3600-1, 1000000-1, 0));

    /* date values */
    d = _PyType_GetDict(&PyDateTime_DateType);
    DATETIME_ADD_MACRO(d, "min", new_date(1, 1, 1));
    DATETIME_ADD_MACRO(d, "max", new_date(MAXYEAR, 12, 31));
    DATETIME_ADD_MACRO(d, "resolution", new_delta(1, 0, 0, 0));

    /* time values */
    d = _PyType_GetDict(&PyDateTime_TimeType);
    DATETIME_ADD_MACRO(d, "min", new_time(0, 0, 0, 0, Py_None, 0));
    DATETIME_ADD_MACRO(d, "max", new_time(23, 59, 59, 999999, Py_None, 0));
    DATETIME_ADD_MACRO(d, "resolution", new_delta(0, 0, 1, 0));

    /* datetime values */
    d = _PyType_GetDict(&PyDateTime_DateTimeType);
    DATETIME_ADD_MACRO(d, "min",
                       new_datetime(1, 1, 1, 0, 0, 0, 0, Py_None, 0));
    DATETIME_ADD_MACRO(d, "max", new_datetime(MAXYEAR, 12, 31, 23, 59, 59,
                                              999999, Py_None, 0));
    DATETIME_ADD_MACRO(d, "resolution", new_delta(0, 0, 1, 0));

    /* timezone values */
    d = _PyType_GetDict(&PyDateTime_TimeZoneType);
    if (PyDict_SetItemString(d, "utc", (PyObject *)&utc_timezone) < 0) {
        goto error;
    }

    /* bpo-37642: These attributes are rounded to the nearest minute for backwards
     * compatibility, even though the constructor will accept a wider range of
     * values. This may change in the future.*/

    /* -23:59 */
    DATETIME_ADD_MACRO(d, "min", create_timezone_from_delta(-1, 60, 0, 1));

    /* +23:59 */
    DATETIME_ADD_MACRO(
            d, "max", create_timezone_from_delta(0, (23 * 60 + 59) * 60, 0, 0));

#undef DATETIME_ADD_MACRO

    return _PyStatus_OK();

error:
    /* NOTE(review): every failure path is reported as out-of-memory even
     * when the underlying error was something else. */
    return _PyStatus_NO_MEMORY();
}
/* ---------------------------------------------------------------------------
 * Module methods and initialization.
 */

/* Sentinel-only table: the module defines no top-level functions. */
static PyMethodDef module_methods[] = {
    {NULL, NULL}
};
/* Py_mod_exec slot: populate the module with the types, constants and
 * the encapsulated C API capsule.  Returns 0 on success, -1 on error
 * (with the partially-initialized state cleared). */
static int
_datetime_exec(PyObject *module)
{
    int rc = -1;
    datetime_state *st = get_module_state(module);

    PyInterpreterState *interp = PyInterpreterState_Get();
    PyObject *old_module = get_current_module(interp);
    if (PyErr_Occurred()) {
        assert(old_module == NULL);
        goto error;
    }
    /* We actually set the "current" module right before a successful return. */

    /* Expose every datetime type as a module attribute. */
    for (size_t i = 0; i < Py_ARRAY_LENGTH(capi_types); i++) {
        PyTypeObject *type = capi_types[i];
        const char *name = _PyType_Name(type);
        assert(name != NULL);
        if (PyModule_AddObjectRef(module, name, (PyObject *)type) < 0) {
            goto error;
        }
    }

    if (init_state(st, module, old_module) < 0) {
        goto error;
    }

    /* Add module level attributes */
    if (PyModule_AddIntMacro(module, MINYEAR) < 0) {
        goto error;
    }
    if (PyModule_AddIntMacro(module, MAXYEAR) < 0) {
        goto error;
    }
    if (PyModule_AddObjectRef(module, "UTC", (PyObject *)&utc_timezone) < 0) {
        goto error;
    }

    /* At last, set up and add the encapsulated C API */
    PyDateTime_CAPI *capi = get_datetime_capi();
    if (capi == NULL) {
        goto error;
    }
    PyObject *capsule = PyCapsule_New(capi, PyDateTime_CAPSULE_NAME, NULL);
    // (capsule == NULL) is handled by PyModule_Add
    if (PyModule_Add(module, "datetime_CAPI", capsule) < 0) {
        goto error;
    }

    /* Sanity-check the calendar constants against days_before_year(). */

    /* A 4-year cycle has an extra leap day over what we'd get from
     * pasting together 4 single years.
     */
    static_assert(DI4Y == 4 * 365 + 1, "DI4Y");
    assert(DI4Y == days_before_year(4+1));

    /* Similarly, a 400-year cycle has an extra leap day over what we'd
     * get from pasting together 4 100-year cycles.
     */
    static_assert(DI400Y == 4 * DI100Y + 1, "DI400Y");
    assert(DI400Y == days_before_year(400+1));

    /* OTOH, a 100-year cycle has one fewer leap day than we'd get from
     * pasting together 25 4-year cycles.
     */
    static_assert(DI100Y == 25 * DI4Y - 1, "DI100Y");
    assert(DI100Y == days_before_year(100+1));

    if (set_current_module(interp, module) < 0) {
        goto error;
    }

    rc = 0;
    goto finally;

error:
    clear_state(st);

finally:
    Py_XDECREF(old_module);
    return rc;
}
/* Multi-phase init slots: the module supports per-interpreter GIL and
 * declares it does not need the GIL under free-threading. */
static PyModuleDef_Slot module_slots[] = {
    _Py_INTERNAL_ABI_SLOT,
    {Py_mod_exec, _datetime_exec},
    {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
    {Py_mod_gil, Py_MOD_GIL_NOT_USED},
    {0, NULL},
};
/* m_traverse slot: delegate to traverse_state().
 *
 * Fix: the previous version discarded traverse_state()'s return value
 * and always returned 0.  Py_VISIT inside traverse_state() can return a
 * non-zero result from the visit callback, and the tp_traverse/m_traverse
 * contract requires that value to be propagated to the GC. */
static int
module_traverse(PyObject *mod, visitproc visit, void *arg)
{
    datetime_state *st = get_module_state(mod);
    return traverse_state(st, visit, arg);
}
/* m_clear slot: release all state references and detach this module
 * from the interpreter's "current module" slot. */
static int
module_clear(PyObject *mod)
{
    datetime_state *st = get_module_state(mod);
    clear_state(st);

    PyInterpreterState *interp = PyInterpreterState_Get();
    clear_current_module(interp, mod);

    // The runtime takes care of the static types for us.
    // See _PyTypes_FiniExtTypes()..
    return 0;
}
/* m_free slot: same cleanup as m_clear; the runtime passes the module
 * object as a void pointer here. */
static void
module_free(void *mod)
{
    (void)module_clear((PyObject *)mod);
}
/* Module definition; m_size makes the datetime_state per-module. */
static PyModuleDef datetimemodule = {
    .m_base = PyModuleDef_HEAD_INIT,
    .m_name = "_datetime",
    .m_doc = "Fast implementation of the datetime module.",
    .m_size = sizeof(datetime_state),
    .m_methods = module_methods,
    .m_slots = module_slots,
    .m_traverse = module_traverse,
    .m_clear = module_clear,
    .m_free = module_free,
};
/* Multi-phase init entry point; real setup happens in _datetime_exec(). */
PyMODINIT_FUNC
PyInit__datetime(void)
{
    return PyModuleDef_Init(&datetimemodule);
}
/* ---------------------------------------------------------------------------
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
   This follows from #2, and that datetimetz+timedelta preserves tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
    x.n + y.s =                since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #2
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ?  z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we would have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belong to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst()). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
--------------------------------------------------------------------------- */ | c | github | https://github.com/python/cpython | Modules/_datetimemodule.c |
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import os
from flumotion.component.base import admin_gtk
from flumotion.component.effects.volume.admin_gtk import VolumeAdminGtkNode
from flumotion.component.effects.deinterlace.admin_gtk \
import DeinterlaceAdminGtkNode
from flumotion.component.effects.videoscale.admin_gtk \
import VideoscaleAdminGtkNode
__version__ = "$Rev$"
class AVProducerAdminGtk(admin_gtk.BaseAdminGtk):
    """Admin GTK UI for an A/V producer component.

    Exposes a Volume node, a Deinterlace node and (only when the
    FLU_VIDEOSCALE_DEBUG environment variable is set) a Videoscale node,
    and relays remote component/effect notifications to those nodes.
    """

    def setup(self):
        # Build the UI nodes shown in the admin interface.
        volume = VolumeAdminGtkNode(self.state, self.admin,
            'inputVolume', 'Input Volume')
        self.nodes['Volume'] = volume
        deinterlace = DeinterlaceAdminGtkNode(self.state, self.admin,
            'deinterlace', 'Deinterlacing')
        self.nodes['Deinterlace'] = deinterlace
        # Video scaling UI is only shown when explicitly enabled.
        if 'FLU_VIDEOSCALE_DEBUG' in os.environ:
            videoscale = VideoscaleAdminGtkNode(self.state, self.admin,
                'videoscale', 'Video scaling')
            self.nodes['Videoscale'] = videoscale
        return admin_gtk.BaseAdminGtk.setup(self)

    def component_volumeChanged(self, channel, rms, peak, decay):
        # Forward level updates from the component to the Volume node.
        volume = self.nodes['Volume']
        volume.volumeChanged(channel, rms, peak, decay)

    def component_effectVolumeSet(self, effect, volume):
        """
        @param volume: volume multiplier between 0.0 and 4.0
        @type  volume: float
        """
        if effect != 'inputVolume':
            self.warning('Unknown effect %s in %r' % (effect, self))
            return
        v = self.nodes['Volume']
        v.volumeSet(volume)

    def component_effectModeSet(self, effect, mode):
        """
        @param mode: deinterlace mode
        @type  mode: string
        """
        if effect != 'deinterlace':
            self.warning('Unknown effect %s in %r' % (effect, self))
            return
        v = self.nodes['Deinterlace']
        v.modeSet(mode)

    def component_effectMethodSet(self, effect, mode):
        """
        @param mode: deinterlace method
        @type  mode: string
        """
        if effect != 'deinterlace':
            self.warning('Unknown effect %s in %r' % (effect, self))
            return
        v = self.nodes['Deinterlace']
        v.methodSet(mode)

    def component_effectWidthSet(self, effect, width):
        # Forward a new output width to the Videoscale node.
        if effect != 'videoscale':
            self.warning('Unknown effect %s in %r' % (effect, self))
            return
        v = self.nodes['Videoscale']
        v.widthSet(width)
"Test multicall, coverage 33%."
from idlelib import multicall
import unittest
from test.support import requires
from tkinter import Tk, Text
class MultiCallTest(unittest.TestCase):
    """Smoke-test multicall.MultiCallCreator wrapped around tkinter.Text."""

    @classmethod
    def setUpClass(cls):
        requires('gui')  # Skip the whole case when no display is available.
        cls.root = Tk()
        cls.root.withdraw()
        cls.mc = multicall.MultiCallCreator(Text)

    @classmethod
    def tearDownClass(cls):
        del cls.mc
        cls.root.update_idletasks()
##        for id in cls.root.tk.call('after', 'info'):
##            cls.root.after_cancel(id)  # Need for EditorWindow.
        cls.root.destroy()
        del cls.root

    def test_creator(self):
        # The creator caches one subclass per widget class and hands the
        # cached class back on subsequent calls.
        mc = self.mc
        self.assertIs(multicall._multicall_dict[Text], mc)
        self.assertIsSubclass(mc, Text)
        mc2 = multicall.MultiCallCreator(Text)
        self.assertIs(mc, mc2)

    def test_init(self):
        mctext = self.mc(self.root)
        self.assertIsInstance(mctext._MultiCall__binders, list)

    def test_yview(self):
        # Added for tree.wheel_event
        # (it depends on yview to not be overridden)
        mc = self.mc
        self.assertIs(mc.yview, Text.yview)
        mctext = self.mc(self.root)
        self.assertIs(mctext.yview.__func__, Text.yview)
if __name__ == '__main__':
    # Run this module's tests directly with verbose output.
    unittest.main(verbosity=2)
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""pytest is a tool that eases test running and debugging.
To be able to use pytest, you should either write tests using
the logilab.common.testlib's framework or the unittest module of the
Python's standard library.
You can customize pytest's behaviour by defining a ``pytestconf.py`` file
somewhere in your test directory. In this file, you can add options or
change the way tests are run.
To add command line options, you must define a ``update_parser`` function in
your ``pytestconf.py`` file. The function must accept a single parameter
that will be the OptionParser's instance to customize.
If you wish to customize the tester, you'll have to define a class named
``CustomPyTester``. This class should extend the default `PyTester` class
defined in the pytest module. Take a look at the `PyTester` and `DjangoTester`
classes for more information about what can be done.
For instance, if you wish to add a custom -l option to specify a loglevel, you
could define the following ``pytestconf.py`` file ::
import logging
from logilab.common.pytest import PyTester
def update_parser(parser):
parser.add_option('-l', '--loglevel', dest='loglevel', action='store',
choices=('debug', 'info', 'warning', 'error', 'critical'),
default='critical', help="the default log level possible choices are "
"('debug', 'info', 'warning', 'error', 'critical')")
return parser
class CustomPyTester(PyTester):
def __init__(self, cvg, options):
super(CustomPyTester, self).__init__(cvg, options)
loglevel = options.loglevel.upper()
logger = logging.getLogger('erudi')
logger.setLevel(logging.getLevelName(loglevel))
In your TestCase class you can then get the value of a specific option with
the ``optval`` method::
class MyTestCase(TestCase):
def test_foo(self):
loglevel = self.optval('loglevel')
# ...
You can also tag your tests for fine filtering
with those tags::
from logilab.common.testlib import tag, TestCase
class Exemple(TestCase):
@tag('rouge', 'carre')
def toto(self):
pass
@tag('carre', 'vert')
def tata(self):
pass
@tag('rouge')
def titi(test):
pass
you can filter the function with a simple python expression
* ``toto`` and ``titi`` match ``rouge``
* ``toto``, ``tata`` and ``titi``, match ``rouge or carre``
* ``tata`` and ``titi`` match ``rouge ^ carre``
* ``titi`` match ``rouge and not carre``
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
PYTEST_DOC = """%prog [OPTIONS] [testfile [testpattern]]
examples:
pytest path/to/mytests.py
pytest path/to/mytests.py TheseTests
pytest path/to/mytests.py TheseTests.test_thisone
pytest path/to/mytests.py -m '(not long and database) or regr'
pytest one (will run both test_thisone and test_thatone)
pytest path/to/mytests.py -s not (will skip test_notthisone)
"""
ENABLE_DBC = False
FILE_RESTART = ".pytest.restart"
import os, sys, re
import os.path as osp
from time import time, clock
import warnings
import types
from inspect import isgeneratorfunction, isclass
from contextlib import contextmanager
from logilab.common.fileutils import abspath_listdir
from logilab.common import textutils
from logilab.common import testlib, STD_BLACKLIST
# use the same unittest module as testlib
from logilab.common.testlib import unittest, start_interactive_mode
from logilab.common.deprecation import deprecated
import doctest
import unittest as unittest_legacy
if not getattr(unittest_legacy, "__package__", None):
try:
import unittest2.suite as unittest_suite
except ImportError:
sys.exit("You have to install python-unittest2 to use this module")
else:
import unittest.suite as unittest_suite
try:
import django
from logilab.common.modutils import modpath_from_file, load_module_from_modpath
DJANGO_FOUND = True
except ImportError:
DJANGO_FOUND = False
CONF_FILE = 'pytestconf.py'
## coverage pausing tools
@contextmanager
def replace_trace(trace=None):
    """Temporarily install `trace` as the global trace function.

    The previously installed trace function is restored on exit, even
    when the managed block raises.
    """
    previous = sys.gettrace()
    sys.settrace(trace)
    try:
        yield
    finally:
        # specific hack to work around a bug in pycoverage, see
        # https://bitbucket.org/ned/coveragepy/issue/123
        # (the saved object may be a non-callable wrapper exposing the
        # real trace function as `.pytrace`)
        restore = previous
        if restore is not None and not callable(restore) \
                and hasattr(restore, 'pytrace'):
            restore = restore.pytrace
        sys.settrace(restore)
def pause_trace():
    """A context manager that temporarily pauses any tracing"""
    # Equivalent to replace_trace(None): install no trace function.
    return replace_trace()
class TraceController(object):
    """Deprecated shim kept for backward compatibility; use the
    pause_trace() context manager instead."""
    # Stack of pause_trace() context managers currently entered.
    ctx_stack = []

    @classmethod
    @deprecated('[lgc 0.63.1] Use the pause_trace() context manager')
    def pause_tracing(cls):
        # Enter a new pause_trace() context and remember it for resume.
        cls.ctx_stack.append(pause_trace())
        cls.ctx_stack[-1].__enter__()

    @classmethod
    @deprecated('[lgc 0.63.1] Use the pause_trace() context manager')
    def resume_tracing(cls):
        # Exit the most recently entered context.
        cls.ctx_stack.pop().__exit__(None, None, None)

# Module-level aliases preserved for callers of the old API.
pause_tracing = TraceController.pause_tracing
resume_tracing = TraceController.resume_tracing
def nocoverage(func):
    """Function decorator that pauses tracing (and thus coverage
    collection) for the duration of `func`.

    Fix: the wrapper now carries the wrapped function's metadata
    (``__name__``, ``__doc__``, ...) via functools.wraps; previously
    every decorated function showed up as ``not_covered``.
    """
    from functools import wraps
    # Idempotent: decorating an already-wrapped function is a no-op.
    if hasattr(func, 'uncovered'):
        return func
    func.uncovered = True

    @wraps(func)
    def not_covered(*args, **kwargs):
        with pause_trace():
            return func(*args, **kwargs)
    not_covered.uncovered = True
    return not_covered
## end of coverage pausing tools
TESTFILE_RE = re.compile("^((unit)?test.*|smoketest)\.py$")

def this_is_a_testfile(filename):
    """Return a truthy match object when `filename` names a test module
    (test*.py, unittest*.py or smoketest.py), else None."""
    leaf = osp.basename(filename)
    return TESTFILE_RE.match(leaf)
TESTDIR_RE = re.compile("^(unit)?tests?$")

def this_is_a_testdir(dirpath):
    """Return a truthy match object when the last component of `dirpath`
    is a conventional test directory name (test, tests, unittest or
    unittests), else None."""
    leaf = osp.basename(dirpath)
    return TESTDIR_RE.match(leaf)
def load_pytest_conf(path, parser):
    """loads a ``pytestconf.py`` file and update default parser
    and / or tester.

    Returns the tester class to use: the file's ``CustomPyTester`` if it
    defines one, otherwise the default ``PyTester``.
    """
    namespace = {}
    # Fix: use a context manager so the file handle is closed even when
    # the executed configuration code raises (the previous
    # ``exec(open(path, 'rb').read(), ...)`` leaked the handle).
    with open(path, 'rb') as conf_file:
        exec(conf_file.read(), namespace)
    if 'update_parser' in namespace:
        namespace['update_parser'](parser)
    return namespace.get('CustomPyTester', PyTester)
def project_root(parser, projdir=None):
    """try to find project's root and add it to sys.path

    Walks up from `projdir` (default: the current working directory at
    call time) while the directories look like test dirs or python
    packages, loading any ``pytestconf.py`` found on the way.

    Returns a (rootdir, testercls) tuple.
    """
    # Fix: the historical signature used ``projdir=os.getcwd()``, which
    # froze the directory at *import* time — wrong if the process
    # chdir()s before calling us.  Resolve it lazily instead; explicit
    # callers are unaffected.
    if projdir is None:
        projdir = os.getcwd()
    previousdir = curdir = osp.abspath(projdir)
    testercls = PyTester
    conf_file_path = osp.join(curdir, CONF_FILE)
    if osp.isfile(conf_file_path):
        testercls = load_pytest_conf(conf_file_path, parser)
    while this_is_a_testdir(curdir) or \
            osp.isfile(osp.join(curdir, '__init__.py')):
        newdir = osp.normpath(osp.join(curdir, os.pardir))
        if newdir == curdir:
            break
        previousdir = curdir
        curdir = newdir
        conf_file_path = osp.join(curdir, CONF_FILE)
        if osp.isfile(conf_file_path):
            testercls = load_pytest_conf(conf_file_path, parser)
    return previousdir, testercls
class GlobalTestReport(object):
    """Accumulates statistics across every test module run in a session."""

    def __init__(self):
        # global counters
        self.ran = 0
        self.skipped = 0
        self.failures = 0
        self.errors = 0
        # wall-clock / CPU time totals
        self.ttime = 0
        self.ctime = 0
        self.modulescount = 0
        # (module name, problem count, tests run) triples for bad modules
        self.errmodules = []

    def feed(self, filename, testresult, ttime, ctime):
        """Fold one module's unittest result into the global counters."""
        nbruns = testresult.testsRun
        self.ran += nbruns
        self.skipped += len(getattr(testresult, 'skipped', ()))
        self.failures += len(testresult.failures)
        self.errors += len(testresult.errors)
        self.ttime += ttime
        self.ctime += ctime
        self.modulescount += 1
        if not testresult.wasSuccessful():
            nbproblems = len(testresult.failures) + len(testresult.errors)
            self.errmodules.append((filename[:-3], nbproblems, nbruns))

    def failed_to_test_module(self, filename):
        """Record a module that unittest could not even import."""
        self.errors += 1
        self.modulescount += 1
        self.ran += 1
        self.errmodules.append((filename[:-3], 1, 1))

    def skip_module(self, filename):
        """Record a module that was skipped as a whole."""
        self.modulescount += 1
        self.ran += 1
        self.errmodules.append((filename[:-3], 0, 0))

    def __str__(self):
        """Render the summary shown at the end of the run."""
        summary = ['Ran %s test cases in %.2fs (%.2fs CPU)'
                   % (self.ran, self.ttime, self.ctime)]
        for count, fmt in ((self.errors, '%s errors'),
                           (self.failures, '%s failures'),
                           (self.skipped, '%s skipped')):
            if count:
                summary.append(fmt % count)
        modulesok = self.modulescount - len(self.errmodules)
        if self.errors or self.failures:
            body = '%s modules OK (%s failed)' % (modulesok,
                                                  len(self.errmodules))
            descr = ', '.join(['%s [%s/%s]' % info
                               for info in self.errmodules])
            tail = '\nfailures: %s' % descr
        elif modulesok:
            body = 'All %s modules OK' % modulesok
            tail = ''
        else:
            return ''
        return '%s\n%s%s' % (', '.join(summary), body, tail)
def remove_local_modules_from_sys(testdir):
    """remove all modules from cache that come from `testdir`

    This is used to avoid strange side-effects when using the
    testall() mode of pytest.
    For instance, if we run pytest on this tree::

      A/test/test_utils.py
      B/test/test_utils.py

    we **have** to clean sys.modules to make sure the correct test_utils
    module is ran in B
    """
    for modname, mod in list(sys.modules.items()):
        if mod is None:
            continue
        # Fix: builtin modules (sys, imp, marshal, ...) lack __file__,
        # and namespace packages may have __file__ set to None; the old
        # code crashed with TypeError in osp.isabs(None) on the latter.
        modfile = getattr(mod, '__file__', None)
        if not modfile:
            continue
        # if modfile is not an absolute path, it was probably loaded locally
        # during the tests
        if not osp.isabs(modfile) or modfile.startswith(testdir):
            del sys.modules[modname]
class PyTester(object):
"""encapsulates testrun logic"""
def __init__(self, cvg, options):
self.report = GlobalTestReport()
self.cvg = cvg
self.options = options
self.firstwrite = True
self._errcode = None
def show_report(self):
"""prints the report and returns appropriate exitcode"""
# everything has been ran, print report
print("*" * 79)
print(self.report)
def get_errcode(self):
# errcode set explicitly
if self._errcode is not None:
return self._errcode
return self.report.failures + self.report.errors
def set_errcode(self, errcode):
self._errcode = errcode
errcode = property(get_errcode, set_errcode)
def testall(self, exitfirst=False):
"""walks through current working directory, finds something
which can be considered as a testdir and runs every test there
"""
here = os.getcwd()
for dirname, dirs, _ in os.walk(here):
for skipped in STD_BLACKLIST:
if skipped in dirs:
dirs.remove(skipped)
basename = osp.basename(dirname)
if this_is_a_testdir(basename):
print("going into", dirname)
# we found a testdir, let's explore it !
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
if self.report.ran == 0:
print("no test dir found testing here:", here)
# if no test was found during the visit, consider
# the local directory as a test directory even if
# it doesn't have a traditional test directory name
self.testonedir(here)
def testonedir(self, testdir, exitfirst=False):
"""finds each testfile in the `testdir` and runs it
return true when all tests has been executed, false if exitfirst and
some test has failed.
"""
for filename in abspath_listdir(testdir):
if this_is_a_testfile(filename):
if self.options.exitfirst and not self.options.restart:
# overwrite restart file
try:
restartfile = open(FILE_RESTART, "w")
restartfile.close()
except Exception:
print("Error while overwriting succeeded test file :",
osp.join(os.getcwd(), FILE_RESTART),
file=sys.__stderr__)
raise
# run test and collect information
prog = self.testfile(filename, batchmode=True)
if exitfirst and (prog is None or not prog.result.wasSuccessful()):
return False
self.firstwrite = True
# clean local modules
remove_local_modules_from_sys(testdir)
return True
def testfile(self, filename, batchmode=False):
    """runs every test in `filename`

    :param filename: an absolute path pointing to a unittest file
    :param batchmode: forwarded to SkipAwareTestProgram; when true the
        program records its result instead of calling sys.exit()
    :return: the SkipAwareTestProgram instance, or None when the module
        was skipped or failed to load
    """
    here = os.getcwd()
    dirname = osp.dirname(filename)
    if dirname:
        # run the module from its own directory so relative resources work
        os.chdir(dirname)
    # overwrite restart file if it has not been done already
    if self.options.exitfirst and not self.options.restart and self.firstwrite:
        try:
            restartfile = open(FILE_RESTART, "w")
            restartfile.close()
        except Exception:
            print("Error while overwriting succeeded test file :",
                  osp.join(os.getcwd(), FILE_RESTART), file=sys.__stderr__)
            raise
    # strip the trailing '.py' to get the module name
    modname = osp.basename(filename)[:-3]
    print((' %s ' % osp.basename(filename)).center(70, '='),
          file=sys.__stderr__)
    try:
        tstart, cstart = time(), clock()
        try:
            testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg,
                                            options=self.options, outstream=sys.stderr)
        # ordering of the except clauses matters: KeyboardInterrupt and
        # SystemExit must not be swallowed by the catch-all below
        except KeyboardInterrupt:
            raise
        except SystemExit as exc:
            # remember the exit code so run() can propagate it
            self.errcode = exc.code
            raise
        except testlib.SkipTest:
            print("Module skipped:", filename)
            self.report.skip_module(filename)
            return None
        except Exception:
            self.report.failed_to_test_module(filename)
            print('unhandled exception occurred while testing', modname,
                  file=sys.stderr)
            import traceback
            traceback.print_exc(file=sys.stderr)
            return None
        tend, cend = time(), clock()
        ttime, ctime = (tend - tstart), (cend - cstart)
        self.report.feed(filename, testprog.result, ttime, ctime)
        return testprog
    finally:
        # always restore the original working directory
        if dirname:
            os.chdir(here)
class DjangoTester(PyTester):
    """PyTester variant for Django projects: discovers and loads the
    project's ``settings.py`` and sets up/tears down the test database
    around each test file.
    """

    def load_django_settings(self, dirname):
        """try to find project's setting and load it"""
        curdir = osp.abspath(dirname)
        previousdir = curdir
        # walk up the package hierarchy until a settings.py is found or we
        # leave the package (no __init__.py)
        while not osp.isfile(osp.join(curdir, 'settings.py')) and \
                  osp.isfile(osp.join(curdir, '__init__.py')):
            newdir = osp.normpath(osp.join(curdir, os.pardir))
            if newdir == curdir:
                # reached the filesystem root without finding settings.py
                raise AssertionError('could not find settings.py')
            # NOTE(review): previousdir is assigned but never read afterwards
            previousdir = curdir
            curdir = newdir
        # late django initialization
        settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py')))
        from django.core.management import setup_environ
        setup_environ(settings)
        settings.DEBUG = False
        self.settings = settings
        # add settings dir to pythonpath since it's the project's root
        if curdir not in sys.path:
            sys.path.insert(1, curdir)

    def before_testfile(self):
        # Those imports must be done **after** setup_environ was called
        from django.test.utils import setup_test_environment
        from django.test.utils import create_test_db
        setup_test_environment()
        create_test_db(verbosity=0)
        # remember the test db name so after_testfile() can destroy it
        self.dbname = self.settings.TEST_DATABASE_NAME

    def after_testfile(self):
        # Those imports must be done **after** setup_environ was called
        from django.test.utils import teardown_test_environment
        from django.test.utils import destroy_test_db
        teardown_test_environment()
        print('destroying', self.dbname)
        destroy_test_db(self.dbname, verbosity=0)

    def testall(self, exitfirst=False):
        """walks through current working directory, finds something
        which can be considered as a testdir and runs every test there
        """
        for dirname, dirs, files in os.walk(os.getcwd()):
            # never descend into VCS bookkeeping directories
            for skipped in ('CVS', '.svn', '.hg'):
                if skipped in dirs:
                    dirs.remove(skipped)
            if 'tests.py' in files:
                if not self.testonedir(dirname, exitfirst):
                    break
                dirs[:] = []
            else:
                basename = osp.basename(dirname)
                if basename in ('test', 'tests'):
                    print("going into", dirname)
                    # we found a testdir, let's explore it !
                    if not self.testonedir(dirname, exitfirst):
                        break
                    dirs[:] = []

    def testonedir(self, testdir, exitfirst=False):
        """finds each testfile in the `testdir` and runs it

        return true when all tests has been executed, false if exitfirst and
        some test has failed.
        """
        # special django behaviour : if tests are splitted in several files,
        # remove the main tests.py file and tests each test file separately
        testfiles = [fpath for fpath in abspath_listdir(testdir)
                     if this_is_a_testfile(fpath)]
        if len(testfiles) > 1:
            try:
                testfiles.remove(osp.join(testdir, 'tests.py'))
            except ValueError:
                pass
        for filename in testfiles:
            # run test and collect information
            prog = self.testfile(filename, batchmode=True)
            if exitfirst and (prog is None or not prog.result.wasSuccessful()):
                return False
        # clean local modules
        remove_local_modules_from_sys(testdir)
        return True

    def testfile(self, filename, batchmode=False):
        """runs every test in `filename`

        :param filename: an absolute path pointing to a unittest file
        """
        here = os.getcwd()
        dirname = osp.dirname(filename)
        if dirname:
            os.chdir(dirname)
        self.load_django_settings(dirname)
        modname = osp.basename(filename)[:-3]
        print((' %s ' % osp.basename(filename)).center(70, '='),
              file=sys.stderr)
        try:
            try:
                tstart, cstart = time(), clock()
                self.before_testfile()
                testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg)
                tend, cend = time(), clock()
                ttime, ctime = (tend - tstart), (cend - cstart)
                self.report.feed(filename, testprog.result, ttime, ctime)
                return testprog
            except SystemExit:
                raise
            except Exception as exc:
                import traceback
                traceback.print_exc()
                self.report.failed_to_test_module(filename)
                print('unhandled exception occurred while testing', modname)
                print('error: %s' % exc)
                return None
        finally:
            # NOTE(review): after_testfile() also runs when before_testfile()
            # failed part-way; confirm teardown is safe in that situation
            self.after_testfile()
            if dirname:
                os.chdir(here)
def make_parser():
    """creates the OptionParser instance

    Options understood directly by pytest are stored on the returned
    parser; options meant for unittest_main are re-emitted on
    ``parser.newargs`` so run() can rebuild sys.argv from them.
    """
    from optparse import OptionParser
    parser = OptionParser(usage=PYTEST_DOC)

    # arguments to forward to unittest_main via a rebuilt sys.argv
    parser.newargs = []

    def rebuild_cmdline(option, opt, value, parser):
        """carry the option to unittest_main"""
        parser.newargs.append(opt)

    def rebuild_and_store(option, opt, value, parser):
        """carry the option to unittest_main and store
        the value on current parser
        """
        parser.newargs.append(opt)
        setattr(parser.values, option.dest, True)

    def capture_and_rebuild(option, opt, value, parser):
        # NOTE(review): defined but not attached to any option below in this
        # version of the file; presumably meant to silence
        # DeprecationWarning for one of the pass-through options.
        warnings.simplefilter('ignore', DeprecationWarning)
        rebuild_cmdline(option, opt, value, parser)

    # pytest options
    parser.add_option('-t', dest='testdir', default=None,
                      help="directory where the tests will be found")
    parser.add_option('-d', dest='dbc', default=False,
                      action="store_true", help="enable design-by-contract")
    # unittest_main options provided and passed through pytest
    parser.add_option('-v', '--verbose', callback=rebuild_cmdline,
                      action="callback", help="Verbose output")
    parser.add_option('-i', '--pdb', callback=rebuild_and_store,
                      dest="pdb", action="callback",
                      help="Enable test failure inspection")
    parser.add_option('-x', '--exitfirst', callback=rebuild_and_store,
                      dest="exitfirst", default=False,
                      action="callback", help="Exit on first failure "
                      "(only make sense when pytest run one test file)")
    parser.add_option('-R', '--restart', callback=rebuild_and_store,
                      dest="restart", default=False,
                      action="callback",
                      help="Restart tests from where it failed (implies exitfirst) "
                      "(only make sense if tests previously ran with exitfirst only)")
    parser.add_option('--color', callback=rebuild_cmdline,
                      action="callback",
                      help="colorize tracebacks")
    parser.add_option('-s', '--skip',
                      # XXX: I wish I could use the callback action but it
                      # doesn't seem to be able to get the value
                      # associated to the option
                      action="store", dest="skipped", default=None,
                      help="test names matching this name will be skipped "
                      "to skip several patterns, use commas")
    parser.add_option('-q', '--quiet', callback=rebuild_cmdline,
                      action="callback", help="Minimal output")
    parser.add_option('-P', '--profile', default=None, dest='profile',
                      help="Profile execution and store data in the given file")
    parser.add_option('-m', '--match', default=None, dest='tags_pattern',
                      help="only execute test whose tag match the current pattern")
    if DJANGO_FOUND:
        parser.add_option('-J', '--django', dest='django', default=False,
                          action="store_true",
                          help='use pytest for django test cases')
    return parser
def parseargs(parser):
    """Parse the command line and return ``(options, explicitfile)`` where
    ``explicitfile`` is the single .py file given on the command line, or
    None.

    NOTE(review): the original docstring advertised a third return value
    (the options to pass to unittest_main()); those are actually
    accumulated on ``parser.newargs``, not returned.
    """
    # parse the command line
    options, args = parser.parse_args()
    filenames = [arg for arg in args if arg.endswith('.py')]
    if filenames:
        if len(filenames) > 1:
            parser.error("only one filename is acceptable")
        explicitfile = filenames[0]
        args.remove(explicitfile)
    else:
        explicitfile = None
    # someone wants DBC
    testlib.ENABLE_DBC = options.dbc
    newargs = parser.newargs
    if options.skipped:
        newargs.extend(['--skip', options.skipped])
    # restart implies exitfirst
    if options.restart:
        options.exitfirst = True
    # append additional args to the new sys.argv and let unittest_main
    # do the rest
    newargs += args
    return options, explicitfile
def run():
    """Main entry point: build the option parser, select the tester class,
    dispatch to testfile/testonedir/testall, then print the report and exit
    with the tester's error code.
    """
    parser = make_parser()
    rootdir, testercls = project_root(parser)
    options, explicitfile = parseargs(parser)
    # mock a new command line
    sys.argv[1:] = parser.newargs
    cvg = None
    if not '' in sys.path:
        # make the current directory importable, as `python file.py` would
        sys.path.insert(0, '')
    if DJANGO_FOUND and options.django:
        tester = DjangoTester(cvg, options)
    else:
        tester = testercls(cvg, options)
    # pick the command to run: explicit file > explicit test dir > walk all
    if explicitfile:
        cmd, args = tester.testfile, (explicitfile,)
    elif options.testdir:
        cmd, args = tester.testonedir, (options.testdir, options.exitfirst)
    else:
        cmd, args = tester.testall, (options.exitfirst,)
    try:
        try:
            if options.profile:
                # hotshot is a Python 2-only profiler module
                import hotshot
                prof = hotshot.Profile(options.profile)
                prof.runcall(cmd, *args)
                prof.close()
                print('profile data saved in', options.profile)
            else:
                cmd(*args)
        except SystemExit:
            raise
        except:
            import traceback
            traceback.print_exc()
    finally:
        # always show the report and exit with the computed error code
        tester.show_report()
        sys.exit(tester.errcode)
class SkipAwareTestProgram(unittest.TestProgram):
    # XXX: don't try to stay close to unittest.py, use optparse
    USAGE = """\
Usage: %(progName)s [options] [test] [...]

Options:
  -h, --help       Show this message
  -v, --verbose    Verbose output
  -i, --pdb        Enable test failure inspection
  -x, --exitfirst  Exit on first failure
  -s, --skip       skip test matching this pattern (no regexp for now)
  -q, --quiet      Minimal output
  --color          colorize tracebacks
  -m, --match      Run only test whose tag match this pattern
  -P, --profile    FILE: Run the tests using cProfile and saving results
                   in FILE

Examples:
  %(progName)s                               - run default set of tests
  %(progName)s MyTestSuite                   - run suite 'MyTestSuite'
  %(progName)s MyTestCase.testSomething      - run MyTestCase.testSomething
  %(progName)s MyTestCase                    - run all 'test*' test methods
                                               in MyTestCase
"""
    def __init__(self, module='__main__', defaultTest=None, batchmode=False,
                 cvg=None, options=None, outstream=sys.stderr):
        # extra state must be set *before* the base __init__, which calls
        # parseArgs() and runTests() as part of construction
        self.batchmode = batchmode
        self.cvg = cvg
        self.options = options
        self.outstream = outstream
        super(SkipAwareTestProgram, self).__init__(
            module=module, defaultTest=defaultTest,
            testLoader=NonStrictTestLoader())

    def parseArgs(self, argv):
        """Parse argv with getopt and populate the run configuration
        (pdbmode, exitfirst, skip patterns, ...), then build self.test.
        """
        self.pdbmode = False
        self.exitfirst = False
        self.skipped_patterns = []
        self.test_pattern = None
        self.tags_pattern = None
        self.colorize = False
        self.profile_name = None
        import getopt
        try:
            options, args = getopt.getopt(argv[1:], 'hHvixrqcp:s:m:P:',
                                          ['help', 'verbose', 'quiet', 'pdb',
                                           'exitfirst', 'restart',
                                           'skip=', 'color', 'match=', 'profile='])
            for opt, value in options:
                if opt in ('-h', '-H', '--help'):
                    self.usageExit()
                if opt in ('-i', '--pdb'):
                    self.pdbmode = True
                if opt in ('-x', '--exitfirst'):
                    self.exitfirst = True
                if opt in ('-r', '--restart'):
                    # NOTE(review): self.restart only exists when -r is
                    # given; readers use getattr(..., 'restart', False)
                    self.restart = True
                    self.exitfirst = True
                if opt in ('-q', '--quiet'):
                    self.verbosity = 0
                if opt in ('-v', '--verbose'):
                    self.verbosity = 2
                if opt in ('-s', '--skip'):
                    # split on ', ' (comma + space), then strip each pattern
                    self.skipped_patterns = [pat.strip() for pat in
                                             value.split(', ')]
                if opt == '--color':
                    self.colorize = True
                if opt in ('-m', '--match'):
                    #self.tags_pattern = value
                    # NOTE(review): item assignment on self.options looks
                    # wrong — optparse.Values does not support indexing, and
                    # does_match_tags() reads attribute 'tags_pattern', not
                    # item 'tag_pattern'; confirm intended behavior
                    self.options["tag_pattern"] = value
                if opt in ('-P', '--profile'):
                    self.profile_name = value
            self.testLoader.skipped_patterns = self.skipped_patterns
            if len(args) == 0 and self.defaultTest is None:
                # no explicit test names: use a module-level suite() if
                # present, else load every test from the module
                suitefunc = getattr(self.module, 'suite', None)
                if isinstance(suitefunc, (types.FunctionType,
                                          types.MethodType)):
                    self.test = self.module.suite()
                else:
                    self.test = self.testLoader.loadTestsFromModule(self.module)
                return
            if len(args) > 0:
                self.test_pattern = args[0]
                self.testNames = args
            else:
                self.testNames = (self.defaultTest, )
            self.createTests()
        except getopt.error as msg:
            self.usageExit(msg)

    def runTests(self):
        # wrap the real run in cProfile when -P/--profile was given
        if self.profile_name:
            import cProfile
            cProfile.runctx('self._runTests()', globals(), locals(), self.profile_name )
        else:
            return self._runTests()

    def _runTests(self):
        self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity,
                                                  stream=self.outstream,
                                                  exitfirst=self.exitfirst,
                                                  pdbmode=self.pdbmode,
                                                  cvg=self.cvg,
                                                  test_pattern=self.test_pattern,
                                                  skipped_patterns=self.skipped_patterns,
                                                  colorize=self.colorize,
                                                  batchmode=self.batchmode,
                                                  options=self.options)

        def removeSucceededTests(obj, succTests):
            """ Recursive function that removes succTests from
            a TestSuite or TestCase
            """
            if isinstance(obj, unittest.TestSuite):
                removeSucceededTests(obj._tests, succTests)
            if isinstance(obj, list):
                # iterate over a copy since we mutate obj in place
                for el in obj[:]:
                    if isinstance(el, unittest.TestSuite):
                        removeSucceededTests(el, succTests)
                    elif isinstance(el, unittest.TestCase):
                        descr = '.'.join((el.__class__.__module__,
                                          el.__class__.__name__,
                                          el._testMethodName))
                        if descr in succTests:
                            obj.remove(el)
        # take care, self.options may be None
        if getattr(self.options, 'restart', False):
            # retrieve succeeded tests from FILE_RESTART
            try:
                restartfile = open(FILE_RESTART, 'r')
                try:
                    succeededtests = list(elem.rstrip('\n\r') for elem in
                                          restartfile.readlines())
                    removeSucceededTests(self.test, succeededtests)
                finally:
                    restartfile.close()
            except Exception as ex:
                raise Exception("Error while reading succeeded tests into %s: %s"
                                % (osp.join(os.getcwd(), FILE_RESTART), ex))

        result = self.testRunner.run(self.test)
        # help garbage collection: we want TestSuite, which hold refs to every
        # executed TestCase, to be gc'ed
        del self.test
        if getattr(result, "debuggers", None) and \
               getattr(self, "pdbmode", None):
            start_interactive_mode(result)
        if not getattr(self, "batchmode", None):
            # interactive mode: exit status reflects test success
            sys.exit(not result.wasSuccessful())
        self.result = result
class SkipAwareTextTestRunner(unittest.TextTestRunner):
    """TextTestRunner extension that filters tests through skip patterns,
    a name pattern and tag matching, and knows about batch mode.
    """

    def __init__(self, stream=sys.stderr, verbosity=1,
                 exitfirst=False, pdbmode=False, cvg=None, test_pattern=None,
                 skipped_patterns=(), colorize=False, batchmode=False,
                 options=None):
        super(SkipAwareTextTestRunner, self).__init__(stream=stream,
                                                      verbosity=verbosity)
        self.exitfirst = exitfirst
        self.pdbmode = pdbmode
        self.cvg = cvg
        self.test_pattern = test_pattern
        self.skipped_patterns = skipped_patterns
        self.colorize = colorize
        self.batchmode = batchmode
        self.options = options

    def _this_is_skipped(self, testedname):
        # substring match against every -s/--skip pattern
        return any([(pat in testedname) for pat in self.skipped_patterns])

    def _runcondition(self, test, skipgenerator=True):
        """Decide whether `test` should run, based on skip patterns, the
        positional test pattern and tag matching.
        """
        if isinstance(test, testlib.InnerTest):
            testname = test.name
        else:
            if isinstance(test, testlib.TestCase):
                meth = test._get_test_method()
                testname = '%s.%s' % (test.__name__, meth.__name__)
            elif isinstance(test, types.FunctionType):
                func = test
                testname = func.__name__
            elif isinstance(test, types.MethodType):
                cls = test.__self__.__class__
                testname = '%s.%s' % (cls.__name__, test.__name__)
            else:
                return True # Not sure when this happens
            if isgeneratorfunction(test) and skipgenerator:
                return self.does_match_tags(test) # Let inner tests decide at run time
        if self._this_is_skipped(testname):
            return False # this was explicitly skipped
        if self.test_pattern is not None:
            try:
                # 'Class.method' pattern: both halves must substring-match
                classpattern, testpattern = self.test_pattern.split('.')
                klass, name = testname.split('.')
                if classpattern not in klass or testpattern not in name:
                    return False
            except ValueError:
                # plain pattern: substring match on the whole name
                if self.test_pattern not in testname:
                    return False
        return self.does_match_tags(test)

    def does_match_tags(self, test):
        # only filter when a tags pattern was configured on options
        if self.options is not None:
            tags_pattern = getattr(self.options, 'tags_pattern', None)
            if tags_pattern is not None:
                tags = getattr(test, 'tags', testlib.Tags())
                if tags.inherit and isinstance(test, types.MethodType):
                    # NOTE(review): im_class is Python 2-only; this branch
                    # would raise AttributeError on Python 3
                    tags = tags | getattr(test.im_class, 'tags', testlib.Tags())
                return tags.match(tags_pattern)
        return True # no pattern

    def _makeResult(self):
        return testlib.SkipAwareTestResult(self.stream, self.descriptions,
                                           self.verbosity, self.exitfirst,
                                           self.pdbmode, self.cvg, self.colorize)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        startTime = time()
        # pass the run condition and options down into the (monkeypatched)
        # TestSuite.run — see _ts_run/_ts_wrapped_run below
        test(result, runcondition=self._runcondition, options=self.options)
        stopTime = time()
        timeTaken = stopTime - startTime
        result.printErrors()
        if not self.batchmode:
            self.stream.writeln(result.separator2)
            run = result.testsRun
            self.stream.writeln("Ran %d test%s in %.3fs" %
                                (run, run != 1 and "s" or "", timeTaken))
            self.stream.writeln()
            if not result.wasSuccessful():
                if self.colorize:
                    self.stream.write(textutils.colorize_ansi("FAILED", color='red'))
                else:
                    self.stream.write("FAILED")
            else:
                if self.colorize:
                    self.stream.write(textutils.colorize_ansi("OK", color='green'))
                else:
                    self.stream.write("OK")
            # NOTE(review): failed/errored/skipped are computed but unused;
            # det_results below recomputes the lengths itself
            failed, errored, skipped = map(len, (result.failures,
                                                 result.errors,
                                                 result.skipped))
            det_results = []
            for name, value in (("failures", result.failures),
                                ("errors",result.errors),
                                ("skipped", result.skipped)):
                if value:
                    det_results.append("%s=%i" % (name, len(value)))
            if det_results:
                self.stream.write(" (")
                self.stream.write(', '.join(det_results))
                self.stream.write(")")
            self.stream.writeln("")
        return result
class NonStrictTestLoader(unittest.TestLoader):
    """
    Overrides default testloader to be able to omit classname when
    specifying tests to run on command line.

    For example, if the file test_foo.py contains ::

        class FooTC(TestCase):
            def test_foo1(self): # ...
            def test_foo2(self): # ...
            def test_bar1(self): # ...

        class BarTC(TestCase):
            def test_bar2(self): # ...

    'python test_foo.py' will run the 3 tests in FooTC
    'python test_foo.py FooTC' will run the 3 tests in FooTC
    'python test_foo.py test_foo' will run test_foo1 and test_foo2
    'python test_foo.py test_foo1' will run test_foo1
    'python test_foo.py test_bar' will run FooTC.test_bar1 and BarTC.test_bar2
    """

    def __init__(self):
        # patterns installed later by SkipAwareTestProgram.parseArgs()
        self.skipped_patterns = ()

    # some magic here to accept empty list by extending
    # and to provide callable capability
    def loadTestsFromNames(self, names, module=None):
        suites = []
        for name in names:
            suites.extend(self.loadTestsFromName(name, module))
        return self.suiteClass(suites)

    def _collect_tests(self, module):
        # map classname -> (class, [test method names]) for every
        # non-skipped TestCase subclass found in the module
        tests = {}
        for obj in vars(module).values():
            if isclass(obj) and issubclass(obj, unittest.TestCase):
                classname = obj.__name__
                if classname[0] == '_' or self._this_is_skipped(classname):
                    continue
                methodnames = []
                # obj is a TestCase class
                for attrname in dir(obj):
                    if attrname.startswith(self.testMethodPrefix):
                        attr = getattr(obj, attrname)
                        if callable(attr):
                            methodnames.append(attrname)
                # keep track of class (obj) for convenience
                tests[classname] = (obj, methodnames)
        return tests

    def loadTestsFromSuite(self, module, suitename):
        try:
            suite = getattr(module, suitename)()
        except AttributeError:
            return []
        assert hasattr(suite, '_tests'), \
               "%s.%s is not a valid TestSuite" % (module.__name__, suitename)
        # python2.3 does not implement __iter__ on suites, we need to return
        # _tests explicitly
        return suite._tests

    def loadTestsFromName(self, name, module=None):
        parts = name.split('.')
        if module is None or len(parts) > 2:
            # let the base class do its job here
            return [super(NonStrictTestLoader, self).loadTestsFromName(name)]
        tests = self._collect_tests(module)
        collected = []
        if len(parts) == 1:
            pattern = parts[0]
            if callable(getattr(module, pattern, None)
                        ) and pattern not in tests:
                # consider it as a suite
                return self.loadTestsFromSuite(module, pattern)
            if pattern in tests:
                # case python unittest_foo.py MyTestTC
                klass, methodnames = tests[pattern]
                # NOTE(review): the outer loop is redundant — the list
                # comprehension already builds the full list each iteration
                for methodname in methodnames:
                    collected = [klass(methodname)
                                 for methodname in methodnames]
            else:
                # case python unittest_foo.py something
                for klass, methodnames in tests.values():
                    # skip methodname if matched by skipped_patterns
                    for skip_pattern in self.skipped_patterns:
                        methodnames = [methodname
                                       for methodname in methodnames
                                       if skip_pattern not in methodname]
                    collected += [klass(methodname)
                                  for methodname in methodnames
                                  if pattern in methodname]
        elif len(parts) == 2:
            # case "MyClass.test_1"
            classname, pattern = parts
            klass, methodnames = tests.get(classname, (None, []))
            # NOTE(review): same redundant-loop shape as above
            for methodname in methodnames:
                collected = [klass(methodname) for methodname in methodnames
                             if pattern in methodname]
        return collected

    def _this_is_skipped(self, testedname):
        # substring match against every -s/--skip pattern
        return any([(pat in testedname) for pat in self.skipped_patterns])

    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        is_skipped = self._this_is_skipped
        classname = testCaseClass.__name__
        # a skipped class contributes no test names at all
        if classname[0] == '_' or is_skipped(classname):
            return []
        testnames = super(NonStrictTestLoader, self).getTestCaseNames(
            testCaseClass)
        return [testname for testname in testnames if not is_skipped(testname)]
# The 2 functions below are modified versions of the TestSuite.run method
# that is provided with unittest2 for python 2.6, in unittest2/suite.py
# It is used to monkeypatch the original implementation to support
# extra runcondition and options arguments (see in testlib.py)
def _ts_run(self, result, runcondition=None, options=None):
    """Replacement TestSuite.run (pre-2.7 / unittest2 flavour): forwards
    the extra runcondition/options arguments to _wrapped_run, then does
    the usual class/module teardown.
    """
    self._wrapped_run(result, runcondition=runcondition, options=options)
    self._tearDownPreviousClass(None, result)
    self._handleModuleTearDown(result)
    return result
def _ts_wrapped_run(self, result, debug=False, runcondition=None, options=None):
    """Replacement TestSuite._wrapped_run: same fixture handling as the
    stdlib, but tries to pass runcondition/options to each sub-suite or
    test, falling back to the plain call signature on TypeError.
    """
    for test in self:
        if result.shouldStop:
            break
        if unittest_suite._isnotsuite(test):
            self._tearDownPreviousClass(test, result)
            self._handleModuleFixture(test, result)
            self._handleClassSetUp(test, result)
            result._previousTestClass = test.__class__
            if (getattr(test.__class__, '_classSetupFailed', False) or
                getattr(result, '_moduleSetUpFailed', False)):
                continue
        # --- modifications to deal with _wrapped_run ---
        # original code is:
        #
        # if not debug:
        #     test(result)
        # else:
        #     test.debug()
        if hasattr(test, '_wrapped_run'):
            # sub-suite: recurse, preferring the extended signature
            try:
                test._wrapped_run(result, debug, runcondition=runcondition, options=options)
            except TypeError:
                test._wrapped_run(result, debug)
        elif not debug:
            # leaf test: prefer the extended call, fall back to plain
            try:
                test(result, runcondition, options)
            except TypeError:
                test(result)
        else:
            test.debug()
        # --- end of modifications to deal with _wrapped_run ---
    return result
if sys.version_info >= (2, 7):
    # The function below implements a modified version of the
    # TestSuite.run method that is provided with python 2.7, in
    # unittest/suite.py
    # (this redefinition shadows the pre-2.7 _ts_run above)
    def _ts_run(self, result, debug=False, runcondition=None, options=None):
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            # outermost suite: mark entry so nested suites skip teardown
            result._testRunEntered = topLevel = True

        self._wrapped_run(result, debug, runcondition, options)

        if topLevel:
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result
def enable_dbc(*args):
    """
    Without arguments, return True if contracts can be enabled and should be
    enabled (see option -d), return False otherwise.

    With arguments, return False if contracts can't or shouldn't be enabled,
    otherwise weave ContractAspect with items passed as arguments.
    """
    # module-level flag set from the -d/--dbc command-line option
    if not ENABLE_DBC:
        return False
    try:
        from logilab.aspects.weaver import weaver
        from logilab.aspects.lib.contracts import ContractAspect
    except ImportError:
        # optional dependency: degrade gracefully with a warning
        sys.stderr.write(
            'Warning: logilab.aspects is not available. Contracts disabled.')
        return False
    for arg in args:
        weaver.weave_module(arg, ContractAspect)
    return True
# monkeypatch unittest and doctest (ouch !)
# replace the stock classes so everything created through unittest uses the
# skip-aware variants defined above
unittest._TextTestResult = testlib.SkipAwareTestResult
unittest.TextTestRunner = SkipAwareTextTestRunner
unittest.TestLoader = NonStrictTestLoader
unittest.TestProgram = SkipAwareTestProgram
if sys.version_info >= (2, 4):
    # rebase doctest cases onto our TestCase so they gain its features
    doctest.DocTestCase.__bases__ = (testlib.TestCase,)
    # XXX check python2.6 compatibility
    #doctest.DocTestCase._cleanups = []
    #doctest.DocTestCase._out = []
else:
    unittest.FunctionTestCase.__bases__ = (testlib.TestCase,)
# install the extended TestSuite.run/_wrapped_run defined above
unittest.TestSuite.run = _ts_run
unittest.TestSuite._wrapped_run = _ts_wrapped_run
"""
Test Custom Exceptions
"""
import ddt
from django.test import TestCase
from rest_framework import exceptions as drf_exceptions
import six
@ddt.ddt
class TestDictExceptionsAllowDictDetails(TestCase):
    """
    Test that standard DRF exceptions can return dictionaries in error details.
    """

    def test_drf_errors_are_not_coerced_to_strings(self):
        # Demonstrate that dictionaries in exceptions are not coerced to strings.
        exc = drf_exceptions.AuthenticationFailed({u'error_code': -1})
        self.assertNotIsInstance(exc.detail, six.string_types)

    @ddt.data(
        drf_exceptions.AuthenticationFailed,
        drf_exceptions.NotAuthenticated,
        drf_exceptions.NotFound,
        drf_exceptions.ParseError,
        drf_exceptions.PermissionDenied,
    )
    def test_exceptions_allows_dict_detail(self, exception_class):
        # All these classes take `detail` as first argument; per the
        # expected value, leaf values are coerced to strings while the
        # dict structure itself is preserved.
        exc = exception_class({u'error_code': -1})
        self.assertEqual(exc.detail, {u'error_code': u'-1'})

    def test_method_not_allowed_allows_dict_detail(self):
        # MethodNotAllowed takes the HTTP method first, then `detail`.
        exc = drf_exceptions.MethodNotAllowed(u'POST', {u'error_code': -1})
        self.assertEqual(exc.detail, {u'error_code': u'-1'})

    def test_not_acceptable_allows_dict_detail(self):
        # NotAcceptable also records the renderers that were available.
        exc = drf_exceptions.NotAcceptable({u'error_code': -1}, available_renderers=['application/json'])
        self.assertEqual(exc.detail, {u'error_code': u'-1'})
        self.assertEqual(exc.available_renderers, ['application/json'])
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_REVIVED_TYPES_TENSORHANDLE_CONVERTIBLE_H_
#define TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_REVIVED_TYPES_TENSORHANDLE_CONVERTIBLE_H_
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
namespace tensorflow {
// A common interface for objects that can be converted to a TensorHandle.
// Examples of objects that implement this include Variables, Constants, Assets,
// etc. This is used to convert captured objects into a ConcreteFunction's
// captured TensorHandles:
// https://github.com/tensorflow/tensorflow/blob/676a68963ea4b64fe479b9cede06aa8f5b290ab8/tensorflow/python/saved_model/load.py#L229-L240
class TensorHandleConvertible {
 public:
  // Takes ownership of `handle`.
  explicit TensorHandleConvertible(ImmediateTensorHandlePtr handle)
      : handle_(std::move(handle)) {}

  // Non-owning accessor; the returned pointer is valid only while this
  // object (or the object it was moved into) owns the handle.
  ImmediateExecutionTensorHandle* handle() { return handle_.get(); }

  // TensorHandleConvertible is movable, but not copyable.
  TensorHandleConvertible(TensorHandleConvertible&& other) = default;
  TensorHandleConvertible& operator=(TensorHandleConvertible&& other) = default;

  // Virtual so derived revived types can be destroyed through base pointers.
  virtual ~TensorHandleConvertible() = default;

 protected:
  // Copying is deleted: the underlying handle is uniquely owned.
  TensorHandleConvertible(const TensorHandleConvertible&) = delete;
  TensorHandleConvertible& operator=(const TensorHandleConvertible&) = delete;
  ImmediateTensorHandlePtr handle_;
};
} // namespace tensorflow
#endif // TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_REVIVED_TYPES_TENSORHANDLE_CONVERTIBLE_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/c/experimental/saved_model/core/revived_types/tensorhandle_convertible.h |
"""Convert a NT pathname to a file URL and vice versa."""
def url2pathname(url):
    r"""Convert a URL to a DOS path.

            ///C|/foo/bar/spam.foo

                    becomes

            C:\foo\bar\spam.foo
    """
    # NOTE(review): Python 2-only code (`raise IOError, error` statement
    # syntax, `urllib.unquote`, `string.letters`).
    import string, urllib
    if not '|' in url:
        # No drive specifier, just convert slashes
        if url[:4] == '////':
            # path is something like ////host/path/on/remote/host
            # convert this to \\host\path\on\remote\host
            # (notice halving of slashes at the start of the path)
            url = url[2:]
        components = url.split('/')
        # make sure not to convert quoted slashes :-)
        return urllib.unquote('\\'.join(components))
    comp = url.split('|')
    # exactly one '|' and a single drive letter before it are required
    if len(comp) != 2 or comp[0][-1] not in string.letters:
        error = 'Bad URL: ' + url
        raise IOError, error
    drive = comp[0][-1].upper()
    components = comp[1].split('/')
    path = drive + ':'
    # rejoin the components with backslashes, unquoting each; empty
    # components (from consecutive slashes) are dropped
    for comp in components:
        if comp:
            path = path + '\\' + urllib.unquote(comp)
    return path
def pathname2url(p):
    r"""Convert a DOS path name to a file url.

            C:\foo\bar\spam.foo

                    becomes

            ///C|/foo/bar/spam.foo
    """
    # NOTE(review): Python 2-only code (`raise IOError, error` statement
    # syntax, `urllib.quote`).
    import string, urllib
    if not ':' in p:
        # No drive specifier, just convert slashes and quote the name
        if p[:2] == '\\\\':
            # path is something like \\host\path\on\remote\host
            # convert this to ////host/path/on/remote/host
            # (notice doubling of slashes at the start of the path)
            p = '\\\\' + p
        components = p.split('\\')
        return urllib.quote('/'.join(components))
    comp = p.split(':')
    # exactly one ':' and a single-character drive are required
    if len(comp) != 2 or len(comp[0]) > 1:
        error = 'Bad path: ' + p
        raise IOError, error
    drive = urllib.quote(comp[0].upper())
    components = comp[1].split('\\')
    path = '///' + drive + '|'
    # rejoin the components with forward slashes, quoting each; empty
    # components (from consecutive backslashes) are dropped
    for comp in components:
        if comp:
            path = path + '/' + urllib.quote(comp)
    return path
# Copyright (C) 2018-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
# as published by MongoDB, Inc.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Server Side Public License for more details.
#
# You should have received a copy of the Server Side Public License
# along with this program. If not, see
# <http://www.mongodb.com/licensing/server-side-public-license>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the Server Side Public License in all respects for
# all of the code used other than as permitted herein. If you modify file(s)
# with this exception, you may extend this exception to your version of the
# file(s), but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version. If you delete this
# exception statement from all source files in the program, then also delete
# it in the license file.
#
"""Text Writing Utilites."""
# Number of spaces to indent code
_INDENT_SPACE_COUNT = 4
def _fill_spaces(count):
# type: (int) -> str
"""Fill a string full of spaces."""
fill = ""
for _ in range(count * _INDENT_SPACE_COUNT):
fill += " "
return fill
def _indent_text(count, unindented_text):
    # type: (int, str) -> str
    """Prefix every line of ``unindented_text`` with ``count`` indent levels."""
    prefix = _fill_spaces(count)
    indented_lines = [prefix + line for line in unindented_text.splitlines()]
    return "\n".join(indented_lines)
def is_function(name):
    # type: (str) -> bool
    """
    Return True if a serializer/deserializer is function.

    A function is prefixed with '::' so that the IDL generated code calls it as a function instead
    of as a class method.
    """
    function_prefix = "::"
    return name[: len(function_prefix)] == function_prefix
def get_method_name(name):
    # type: (str) -> str
    """Get a method name from a fully qualified method name."""
    # rpartition splits on the *last* '::'; when the separator is absent it
    # returns ('', '', name), so the original string comes back unchanged.
    return name.rpartition("::")[2]
def get_method_name_from_qualified_method_name(name):
    # type: (str) -> str
    """Get a method name from a fully qualified method name.

    Strips a leading '::' (global-scope prefix on enum functions), then a
    leading 'mongo::' namespace qualifier, if present.
    """
    # TODO: in the future, we may want to support full-qualified calls to static methods
    # Strip the global prefix from enum functions
    if name.startswith("::"):
        name = name[2:]

    prefix = "mongo::"
    # BUG FIX: the original located the prefix with find() but then always
    # sliced len(prefix) characters off the *front*, mangling any name in
    # which 'mongo::' appeared at a non-zero offset. Only strip a true
    # leading prefix; otherwise return the name unchanged.
    if name.startswith(prefix):
        return name[len(prefix) :]
    return name
class IndentedTextWriter(object):
    """
    A simple class to manage writing indented lines of text.

    Supports both writing indented lines, and unindented lines.
    Use write_empty_line() instead of write_line('') to avoid lines
    full of blank spaces.
    """

    def __init__(self, stream):
        # type: (io.StringIO) -> None
        """Create an indented text writer targeting ``stream``."""
        self._stream = stream
        self._indent = 0

    def write_unindented_line(self, msg):
        # type: (str) -> None
        """Write ``msg`` plus a newline, ignoring the current indent level."""
        self._stream.write(msg + "\n")

    def indent(self):
        # type: () -> None
        """Increase the indent level by one."""
        self._indent += 1

    def unindent(self):
        # type: () -> None
        """Decrease the indent level by one; must be balanced with indent()."""
        assert self._indent > 0
        self._indent -= 1

    def write_line(self, msg):
        # type: (str) -> None
        """Write ``msg`` plus a newline, indented to the current level."""
        self._stream.write(_indent_text(self._indent, msg) + "\n")

    def write_empty_line(self):
        # type: () -> None
        """Write a bare newline (no indentation spaces)."""
        self._stream.write("\n")
class WriterBlock(object):
    """Context-manager interface shared by the block types below."""

    def __enter__(self):
        # type: () -> None
        """Open a block; the base implementation emits nothing."""

    def __exit__(self, *args):
        # type: (*str) -> None
        """Close the block; the base implementation emits nothing."""
class EmptyBlock(WriterBlock):
    """Block type that emits no delimiters and changes no indentation."""

    def __init__(self):
        # type: () -> None
        """Create an empty block."""

    def __enter__(self):
        # type: () -> None
        """Emit nothing on entry."""

    def __exit__(self, *args):
        # type: (*str) -> None
        """Emit nothing on exit."""
class IndentedScopedBlock(WriterBlock):
    """Emit an opening line, indent the contents, then emit a closing line."""

    def __init__(self, writer, opening, closing):
        # type: (IndentedTextWriter, str, str) -> None
        """Remember the writer and the opening/closing delimiter lines."""
        self._out = writer
        self._open_line = opening
        self._close_line = closing

    def __enter__(self):
        # type: () -> None
        """Emit the opening delimiter, then push one indent level."""
        self._out.write_line(self._open_line)
        self._out.indent()

    def __exit__(self, *args):
        # type: (*str) -> None
        """Pop one indent level, then emit the closing delimiter."""
        self._out.unindent()
        self._out.write_line(self._close_line)
class NamespaceScopeBlock(WriterBlock):
    """Emit unindented namespace begin/end lines; contents are not indented."""

    def __init__(
        self, indented_writer: IndentedTextWriter, namespaces: list[str], mod_vis_str: str = ""
    ):
        # type: (IndentedTextWriter, List[str]) -> None
        """Create a block for the given (outermost-first) namespace names."""
        self._writer = indented_writer
        self._namespaces = namespaces
        self._mod_vis_str = mod_vis_str

    def __enter__(self):
        # type: () -> None
        """Open each namespace, outermost first, without indenting."""
        for namespace in self._namespaces:
            self._writer.write_unindented_line(f"namespace {self._mod_vis_str}{namespace} {{")

    def __exit__(self, *args):
        # type: (*str) -> None
        """Close the namespaces innermost-first without changing indentation.

        Bug fix: the previous code called self._namespaces.reverse(), which
        mutated the caller's list in place and left the block unusable a
        second time. Iterate a reversed view instead.
        """
        for namespace in reversed(self._namespaces):
            self._writer.write_unindented_line(f"}} // namespace {self._mod_vis_str}{namespace}")
class UnindentedBlock(WriterBlock):
    """Emit opening/closing delimiter lines without any indentation."""

    def __init__(self, writer, opening, closing):
        # type: (IndentedTextWriter, str, str) -> None
        """Remember the writer and the delimiter lines."""
        self._out = writer
        self._open_line = opening
        self._close_line = closing

    def __enter__(self):
        # type: () -> None
        """Emit the opening delimiter, unindented."""
        self._out.write_unindented_line(self._open_line)

    def __exit__(self, *args):
        # type: (*str) -> None
        """Emit the closing delimiter, unindented."""
        self._out.write_unindented_line(self._close_line)
class MultiBlock(WriterBlock):
    """Compose a list of WriterBlocks into a single context manager."""

    def __init__(self, blocks):
        # type: (MultiBlock, List[WriterBlock]) -> None
        """Wrap the given list of blocks."""
        self._blocks = blocks

    def __enter__(self):
        # type: () -> None
        """Enter the wrapped blocks in order."""
        for block in self._blocks:
            block.__enter__()

    def __exit__(self, *args):
        # type: (*str) -> None
        """Exit the wrapped blocks in reverse order."""
        for block in reversed(self._blocks):
            block.__exit__(*args)
def _get_common_prefix(words):
# type: (List[str]) -> str
"""Returns a common prefix for a set of strings.
Returns empty string if there is no prefix or a empty string
"""
empty_words = [lw for lw in words if len(lw) == 0]
if empty_words:
return ""
first_letters = {w[0] for w in words}
if len(first_letters) == 1:
short_words = [lw for lw in words if len(lw) == 1]
if short_words:
return words[0][0]
suffix_words = [flw[1:] for flw in words]
return words[0][0] + _get_common_prefix(suffix_words)
else:
return ""
def gen_trie(words, writer, callback):
    # type: (List[str], IndentedTextWriter, Callable[[str], None]) -> None
    """
    Generate matching code (a trie) for a list of strings.

    `callback` is invoked once per word to emit the code that handles a
    successful match of that word — i.e. for ["abc", "def"] it is called
    twice, once per string.
    """
    _gen_trie("", sorted(words), writer, callback)
def _gen_trie(prefix, words, writer, callback):
    # type: (str, List[str], IndentedTextWriter, Callable[[str], None]) -> None
    """
    Recursively generate a trie.

    Prefix is a common prefix for all the strings in words, can be empty string.
    Each entry of `words` is the remaining suffix after `prefix`; the emitted
    C++ compares a `fieldName` StringData (assumed in scope) against
    prefix + suffix.
    """
    assert len(words) >= 1

    # No duplicate strings allowed
    assert len(words) == len(set(words))

    prefix_len = len(prefix)

    # Base case: one word
    if len(words) == 1:
        # Check remaining string is a string match
        word_to_check = prefix + words[0]

        suffix = words[0]
        suffix_len = len(suffix)

        # Default predicate: length check plus a compare of just the suffix
        # (the prefix characters were already matched by enclosing blocks).
        predicate = (
            f"fieldName.size() == {len(word_to_check)} && "
            + f'std::char_traits<char>::compare(fieldName.data() + {prefix_len}, "{suffix}", {suffix_len}) == 0'
        )

        # If there is no trailing text, we just need to check length to validate we matched
        if suffix_len == 0:
            predicate = f"fieldName.size() == {len(word_to_check)}"

        # Optimization:
        # Checking strings of length 1 or even length is efficient. Strings of 3 byte length are
        # inefficient to check as they require two comparisons (1 uint16 and 1 uint8) but 4 byte
        # length strings require just 1. Since we know the field name is zero terminated, we can
        # just use memcmp and compare with the trailing null byte.
        elif suffix_len % 4 == 3:
            predicate = (
                f"fieldName.size() == {len(word_to_check)} && "
                + f' memcmp(fieldName.data() + {prefix_len}, "{suffix}\\0", {suffix_len + 1}) == 0'
            )

        with IndentedScopedBlock(writer, f"if ({predicate}) {{", "}"):
            callback(word_to_check)
        return

    # Handle the case where one word is a prefix of another
    # For instance, ["short", "shorter"] will eventually call this function with
    # (prefix = "short", ["", "er"]) as the tuple of prefix and list of words
    empty_words = [lw for lw in words if len(lw) == 0]
    if empty_words:
        word_to_check = prefix
        with IndentedScopedBlock(writer, f"if (fieldName.size() == {len(word_to_check)}) {{", "}"):
            callback(word_to_check)

    # Filter out empty words
    words = [lw for lw in words if len(lw) > 0]

    # Optimization for a common prefix
    # Example: ["word1", "word2"]
    # Instead of generating a trie to check for letters individually (i.e. ["w", "o", "r", "d"]),
    # we check for the prefix all at once ("word")
    gcp = _get_common_prefix(words)
    if len(gcp) > 1:
        gcp_len = len(gcp)
        suffix_words = [flw[gcp_len:] for flw in words]
        with IndentedScopedBlock(
            writer,
            f"if (fieldName.size() >= {gcp_len} && "
            + f'std::char_traits<char>::compare(fieldName.data() + {prefix_len}, "{gcp}", {gcp_len}) == 0) {{',
            "}",
        ):
            _gen_trie(prefix + gcp, suffix_words, writer, callback)
        return

    # Handle the main case for the trie
    # We have a list of non-empty words with no common prefix between them,
    # the first letters among the words may contain duplicates
    sorted_words = sorted(words)
    first_letters = sorted(list({w[0] for w in sorted_words}))
    # A match is impossible unless fieldName is at least as long as the
    # shortest remaining candidate, so hoist a single length check here.
    min_len = len(prefix) + min([len(w) for w in sorted_words])
    with IndentedScopedBlock(writer, f"if (fieldName.size() >= {min_len}) {{", "}"):
        first_if = True
        # One if/else-if arm per distinct first letter, recursing on the tails.
        for first_letter in first_letters:
            fl_words = [flw[1:] for flw in words if flw[0] == first_letter]
            ei = "else " if not first_if else ""
            with IndentedScopedBlock(
                writer, f"{ei}if (fieldName[{len(prefix)}] == '{first_letter}') {{", "}"
            ):
                _gen_trie(prefix + first_letter, fl_words, writer, callback)
                first_if = False
def gen_string_table_find_function_block(out, in_str, on_match, on_fail, words):
    # type: (IndentedTextWriter, str, str, str, list[str]) -> None
    """Wrap a gen_trie generated block as a function body.

    `on_match` is a format string receiving the matched word's index within
    `words`; `on_fail` is the expression returned when nothing matches.
    """
    positions = {}
    for i, word in enumerate(words):
        positions[word] = i

    out.write_line(f"StringData fieldName{{{in_str}}};")

    def _emit_match(word):
        # type: (str) -> None
        out.write_line(f"return {on_match.format(positions[word])};")

    gen_trie(words, out, _emit_match)
    out.write_line(f"return {on_fail};")
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Title: curl_url_strerror
Section: 3
Source: libcurl
See-also:
- curl_easy_strerror (3)
- curl_multi_strerror (3)
- curl_share_strerror (3)
- curl_url_get (3)
- curl_url_set (3)
- libcurl-errors (3)
Protocol:
- All
Added-in: 7.80.0
---
# NAME
curl_url_strerror - return string describing error code
# SYNOPSIS
~~~c
#include <curl/curl.h>
const char *curl_url_strerror(CURLUcode errornum);
~~~
# DESCRIPTION
This function returns a string describing the CURLUcode error code passed in
the argument *errornum*.
# %PROTOCOLS%
# EXAMPLE
~~~c
int main(void)
{
CURLUcode rc;
CURLU *url = curl_url();
rc = curl_url_set(url, CURLUPART_URL, "https://example.com", 0);
if(rc)
printf("URL error: %s\n", curl_url_strerror(rc));
curl_url_cleanup(url);
}
~~~
# %AVAILABILITY%
# RETURN VALUE
A pointer to a null-terminated string. | unknown | github | https://github.com/curl/curl | docs/libcurl/curl_url_strerror.md |
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.components.diagnosticProvider;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.diagnosticProvider.AbstractElementDiagnosticsTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/diagnosticsProvider/elementDiagnostics")
@TestDataPath("$PROJECT_ROOT")
public class FirIdeNormalAnalysisSourceModuleElementDiagnosticsTestGenerated extends AbstractElementDiagnosticsTest {
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        // K2 (FIR) frontend, plain source module, normal analysis session, IDE mode.
        return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Ide
            )
        );
    }

    // Fails when a .kt test-data file exists on disk without a corresponding
    // generated test method — i.e. when this class needs regeneration.
    @Test
    public void testAllFilesPresentInElementDiagnostics() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/diagnosticsProvider/elementDiagnostics"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }

    @Test
    @TestMetadata("classWithFunctionWithImplicitType.kt")
    public void testClassWithFunctionWithImplicitType() {
        runTest("analysis/analysis-api/testData/components/diagnosticsProvider/elementDiagnostics/classWithFunctionWithImplicitType.kt");
    }

    @Test
    @TestMetadata("classWithNestedClass.kt")
    public void testClassWithNestedClass() {
        runTest("analysis/analysis-api/testData/components/diagnosticsProvider/elementDiagnostics/classWithNestedClass.kt");
    }

    @Test
    @TestMetadata("delegatedClass.kt")
    public void testDelegatedClass() {
        runTest("analysis/analysis-api/testData/components/diagnosticsProvider/elementDiagnostics/delegatedClass.kt");
    }

    @Test
    @TestMetadata("delegatedClassComplex.kt")
    public void testDelegatedClassComplex() {
        runTest("analysis/analysis-api/testData/components/diagnosticsProvider/elementDiagnostics/delegatedClassComplex.kt");
    }

    @Test
    @TestMetadata("fileWithFunctionWithImplicitType.kt")
    public void testFileWithFunctionWithImplicitType() {
        runTest("analysis/analysis-api/testData/components/diagnosticsProvider/elementDiagnostics/fileWithFunctionWithImplicitType.kt");
    }

    @Test
    @TestMetadata("hiddenFromObjectiveC.kt")
    public void testHiddenFromObjectiveC() {
        runTest("analysis/analysis-api/testData/components/diagnosticsProvider/elementDiagnostics/hiddenFromObjectiveC.kt");
    }

    @Test
    @TestMetadata("javaScriptStableName.kt")
    public void testJavaScriptStableName() {
        runTest("analysis/analysis-api/testData/components/diagnosticsProvider/elementDiagnostics/javaScriptStableName.kt");
    }
}
# Django settings for Player project.
import os  # NOTE(review): appears unused in this settings file — confirm before removing.

# NOTE(review): DEBUG=True must not ship to production — it exposes
# tracebacks and settings to visitors.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# NOTE(review): machine-specific absolute path; database, media, static and
# template paths below all derive from it, so the project only runs on this
# one machine as written.
ROOT_URL = '/Users/narobert/playery/'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': ROOT_URL + 'database.db', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Novosibirsk'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ROOT_URL + 'media/'

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/Users/narobert/static/'

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/admin_static/'

# Additional locations of static files
STATICFILES_DIRS = (
    ROOT_URL + 'templates/static/',
    ROOT_URL + 'media/',
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    #'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable instead.
SECRET_KEY = '95!c2=sq4ofja%!nycnz9zkqnbbzfv3km%2tw)bs=rg9g$t2sr'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # NOTE(review): CSRF protection is disabled here — confirm this is intended.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'playery.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'playery.wsgi.application'

TEMPLATE_DIRS = (
    ROOT_URL + "templates/"
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    'django.contrib.admindocs',
    'player',
    'playlists',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# Cache
CACHES = {
    'default': {
        #'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        #'LOCATION': '/var/tmp/player_cache',
        # 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        # 'LOCATION': '127.0.0.1:11211',
        #'TIMEOUT': 12 * 60 * 60 # 12 hours
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache' #for development purposes only
    }
}

# Users
LOGIN_URL = "/login"
FORCE_SCRIPT_NAME = ""
AUTH_PROFILE_MODULE = 'player.UserProfile'
APPEND_SLASH = True

# Application settings
# NOTE(review): third-party credentials are committed here — move them to
# environment variables or a secrets store and revoke the exposed values.
LASTFM_KEY = "9530d1e6f1d1b81032f024e99d8771b9" # lastfm access key
LASTFM_SECRET = "f90ea4bc7fdd5e2c66a29f0741953f73" # lastfm secret key
ACCESS_TOKEN = "4ea3eddc86c352ca31fe5b10e75f7fa0f54bdc26672e258f0c919e91a6bb8ae132675efff1e9b3497b76c" # vkontakte desktop app token
from django.contrib.gis.db import models
from django.db import migrations
from django.db.models import deletion
class Migration(migrations.Migration):
    """Create the raster test models.

    Both models set ``required_db_features = ["supports_raster"]`` so they are
    only created on database backends with raster support.
    """

    dependencies = [
        ("rasterapp", "0001_setup_extensions"),
    ]

    operations = [
        migrations.CreateModel(
            name="RasterModel",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "rast",
                    models.fields.RasterField(
                        blank=True,
                        null=True,
                        srid=4326,
                        verbose_name="A Verbose Raster Name",
                    ),
                ),
                (
                    # Raster stored in a projected (non-4326) spatial reference.
                    "rastprojected",
                    models.fields.RasterField(
                        null=True,
                        srid=3086,
                        verbose_name="A Projected Raster Table",
                    ),
                ),
                ("geom", models.fields.PointField(null=True, srid=4326)),
            ],
            options={
                "required_db_features": ["supports_raster"],
            },
        ),
        migrations.CreateModel(
            name="RasterRelatedModel",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "rastermodel",
                    models.ForeignKey(
                        on_delete=deletion.CASCADE,
                        to="rasterapp.rastermodel",
                    ),
                ),
            ],
            options={
                "required_db_features": ["supports_raster"],
            },
        ),
    ]
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cache.interceptor;
import java.io.Serializable;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collection;
import org.jspecify.annotations.Nullable;
import org.springframework.util.Assert;
/**
* Composite {@link CacheOperationSource} implementation that iterates
* over a given array of {@code CacheOperationSource} instances.
*
* @author Costin Leau
* @author Juergen Hoeller
* @since 3.1
*/
@SuppressWarnings("serial")
public class CompositeCacheOperationSource implements CacheOperationSource, Serializable {
private final CacheOperationSource[] cacheOperationSources;
/**
* Create a new CompositeCacheOperationSource for the given sources.
* @param cacheOperationSources the CacheOperationSource instances to combine
*/
public CompositeCacheOperationSource(CacheOperationSource... cacheOperationSources) {
Assert.notEmpty(cacheOperationSources, "CacheOperationSource array must not be empty");
this.cacheOperationSources = cacheOperationSources;
}
/**
* Return the {@code CacheOperationSource} instances that this
* {@code CompositeCacheOperationSource} combines.
*/
public final CacheOperationSource[] getCacheOperationSources() {
return this.cacheOperationSources;
}
@Override
public boolean isCandidateClass(Class<?> targetClass) {
for (CacheOperationSource source : this.cacheOperationSources) {
if (source.isCandidateClass(targetClass)) {
return true;
}
}
return false;
}
@Override
public boolean hasCacheOperations(Method method, @Nullable Class<?> targetClass) {
for (CacheOperationSource source : this.cacheOperationSources) {
if (source.hasCacheOperations(method, targetClass)) {
return true;
}
}
return false;
}
@Override
public @Nullable Collection<CacheOperation> getCacheOperations(Method method, @Nullable Class<?> targetClass) {
Collection<CacheOperation> ops = null;
for (CacheOperationSource source : this.cacheOperationSources) {
Collection<CacheOperation> cacheOperations = source.getCacheOperations(method, targetClass);
if (cacheOperations != null) {
if (ops == null) {
ops = new ArrayList<>();
}
ops.addAll(cacheOperations);
}
}
return ops;
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/main/java/org/springframework/cache/interceptor/CompositeCacheOperationSource.java |
// ESM runtime entry for the compat build: re-export the full runtime-dom API
// alongside the compat default export.
import Vue from './runtime'

export default Vue
export * from '@vue/runtime-dom'

// Also surface `configureCompat` as a named export, typed off the default
// export's own property.
const configureCompat: typeof Vue.configureCompat = Vue.configureCompat
export { configureCompat }
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package event
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/armon/go-metrics"
"github.com/hashicorp/eventlogger"
)
// Compile-time check that MetricsCounter satisfies eventlogger.Node.
var _ eventlogger.Node = (*MetricsCounter)(nil)

// MetricsCounter offers a way for nodes to emit metrics which increment a label by 1.
type MetricsCounter struct {
	// Name identifies this counter; NewMetricsCounter requires it non-empty.
	Name string
	// Node is the wrapped eventlogger.Node whose Process results are counted.
	Node eventlogger.Node
	// labeler derives the metric labels from each processed event/error pair.
	labeler Labeler
}

// Labeler provides a way to inject the logic required to determine labels based
// on the state of the eventlogger.Event being returned and the error resulting
// from processing by the underlying eventlogger.Node.
type Labeler interface {
	Labels(*eventlogger.Event, error) []string
}
// NewMetricsCounter should be used to create the MetricsCounter.
func NewMetricsCounter(name string, node eventlogger.Node, labeler Labeler) (*MetricsCounter, error) {
name = strings.TrimSpace(name)
if name == "" {
return nil, fmt.Errorf("name is required: %w", ErrInvalidParameter)
}
if node == nil || reflect.ValueOf(node).IsNil() {
return nil, fmt.Errorf("node is required: %w", ErrInvalidParameter)
}
if labeler == nil || reflect.ValueOf(labeler).IsNil() {
return nil, fmt.Errorf("labeler is required: %w", ErrInvalidParameter)
}
return &MetricsCounter{
Name: name,
Node: node,
labeler: labeler,
}, nil
}
// Process will process the event using the underlying eventlogger.Node, and then
// use the configured Labeler to provide a label which is used to increment a metric by 1.
func (m MetricsCounter) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
	// NOTE: We don't provide an 'op' here, as we're just wrapping the underlying node.
	var err error

	// Process the node first
	e, err = m.Node.Process(ctx, e)

	// Provide the results to the Labeler. The counter is incremented whether
	// or not the wrapped node errored; the Labeler receives both the
	// (possibly nil) event and the error so it can pick the label.
	metrics.IncrCounter(m.labeler.Labels(e, err), 1)

	return e, err
}
// Reopen attempts to reopen the underlying eventlogger.Node.
func (m MetricsCounter) Reopen() error {
	return m.Node.Reopen()
}

// Type returns the type for the underlying eventlogger.Node, so this wrapper
// reports the same node type as the node it decorates.
func (m MetricsCounter) Type() eventlogger.NodeType {
	return m.Node.Type()
}
#[actix_web::test]
async fn my_test() {
    assert!(async { 1 }.await, 1); // NOTE(review): `async { 1 }.await` is an i32, not a bool — presumably a trybuild compile-fail fixture; comments kept on existing lines so expected error line numbers stay stable.
}

fn main() {} // explicit main so the fixture builds as a standalone binary target
# Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2012 Luke Dashjr <luke-jr+eloipool@utopios.org>
# Copyright (C) 2012 Peter Leurs <kinlo@triplemining.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import deque
from datetime import date
from time import sleep, time
import threading
from util import shareLogFormatter
import logging
import traceback
_logger = logging.getLogger('sharelogging.logfile')
class logfile(threading.Thread):
    """Append formatted share-log lines to a file from a background thread.

    Shares are queued (via deque.append, safe from other threads) by
    logShare() and flushed to disk by this thread roughly every 0.2 seconds.
    """

    def __init__(self, filename, **ka):
        super().__init__(**ka.get('thropts', {}))
        self.fn = filename
        if 'format' not in ka:
            # logging.warn() is a deprecated alias; use warning().
            _logger.warning('"format" not specified for logfile logger, but default may vary!')
            ka['format'] = "{time} {Q(remoteHost)} {username} {YN(not(rejectReason))} {dash(YN(upstreamResult))} {dash(rejectReason)} {solution} {target2pdiff(target)}\n"
        self.fmt = shareLogFormatter(ka['format'], '%s')
        self.queue = deque()
        self.start()

    def queueshare(self, line):
        """Queue one formatted line for the writer thread."""
        self.queue.append(line)

    def flushlog(self):
        """Append any queued lines to the log file."""
        if self.queue:
            with open(self.fn, "a") as logfile:
                while self.queue:
                    logfile.write(self.queue.popleft())

    def run(self):
        """Writer loop: flush pending lines every 0.2s; log errors and continue."""
        while True:
            try:
                sleep(0.2)
                self.flushlog()
            # Catch Exception rather than a bare except so SystemExit and
            # KeyboardInterrupt are not swallowed; still log and keep running.
            except Exception:
                _logger.critical(traceback.format_exc())

    def logShare(self, share):
        """Format a share and queue it for writing."""
        self.queueshare(self.fmt.formatShare(share))
# mako/filters.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, urllib, htmlentitydefs, codecs
from StringIO import StringIO
from mako import util
# Minimal XML escape table. &#34; (") is valid in both HTML and XML, while
# &#39; (') is valid XML but has no named entity in plain HTML.
xml_escapes = {
    '&' : '&amp;',
    '>' : '&gt;',
    '<' : '&lt;',
    '"' : '&#34;',
    "'" : '&#39;'
}


def legacy_html_escape(string):
    """legacy HTML escape for non-unicode mode."""
    # Map each character through the escape table; non-special characters
    # pass through unchanged.
    escaped = [xml_escapes.get(char, char) for char in string]
    return "".join(escaped)
# Prefer MarkupSafe's escape() when the package is installed; otherwise fall
# back to the pure-Python legacy implementation above.
try:
    import markupsafe
    html_escape = markupsafe.escape
except ImportError:
    html_escape = legacy_html_escape
def xml_escape(string):
    # NOTE(review): performs the same substitution as legacy_html_escape;
    # exposed under its own name (the 'x' entry in DEFAULT_ESCAPES below).
    return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
def url_escape(string):
    """Percent-encode `string` for use in a URL query (Python 2 urllib)."""
    # convert into a list of octets
    string = string.encode("utf8")
    return urllib.quote_plus(string)

def url_unescape(string):
    """Reverse url_escape(); decodes to unicode when the result is non-ASCII."""
    text = urllib.unquote_plus(string)
    if not is_ascii_str(text):
        text = text.decode("utf8")
    return text

def trim(string):
    """Strip leading and trailing whitespace."""
    return string.strip()
class Decode(object):
    """Attribute-style factory for decode filters: ``decode.utf8`` yields a
    function that decodes bytestrings using that codec name."""

    def __getattr__(self, key):
        # `key` is the codec name, e.g. decode.utf8 -> encoding='utf8'.
        def decode(x):
            if isinstance(x, unicode):
                return x
            elif not isinstance(x, str):
                # Non-string values are stringified first, then decoded.
                return unicode(str(x), encoding=key)
            else:
                return unicode(x, encoding=key)
        return decode
# Module-level singleton used by templates as the `decode` filter namespace.
decode = Decode()
# Matches strings made up entirely of 7-bit ASCII characters.
_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z')

def is_ascii_str(text):
    # Returns a truthy value (a match object) when `text` is a str containing
    # only ASCII; False/None otherwise.
    return isinstance(text, str) and _ASCII_re.match(text)
################################################################
class XMLEntityEscaper(object):
    """Escape/unescape text against a configurable entity table (Python 2)."""

    def __init__(self, codepoint2name, name2codepoint):
        # Build a codepoint -> u'&name;' translate table from the forward map.
        self.codepoint2entity = dict([(c, u'&%s;' % n)
                                      for c,n in codepoint2name.iteritems()])
        self.name2codepoint = name2codepoint

    def escape_entities(self, text):
        """Replace characters with their character entity references.

        Only characters corresponding to a named entity are replaced.
        """
        return unicode(text).translate(self.codepoint2entity)

    def __escape(self, m):
        # Fall back to a numeric character reference when the codepoint has
        # no named entity in the table.
        codepoint = ord(m.group())
        try:
            return self.codepoint2entity[codepoint]
        except (KeyError, IndexError):
            return '&#x%X;' % codepoint

    __escapable = re.compile(r'["&<>]|[^\x00-\x7f]')

    def escape(self, text):
        """Replace characters with their character references.

        Replace characters by their named entity references.
        Non-ASCII characters, if they do not have a named entity reference,
        are replaced by numerical character references.

        The return value is guaranteed to be ASCII.
        """
        return self.__escapable.sub(self.__escape, unicode(text)
            ).encode('ascii')

    # XXX: This regexp will not match all valid XML entity names__.
    # (It punts on details involving CombiningChars and Extenders.)
    #
    # .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef
    __characterrefs = re.compile(r'''& (?:
            \#(\d+)
            | \#x([\da-f]+)
            | ( (?!\d) [:\w] [-.:\w]+ )
            ) ;''',
        re.X | re.UNICODE)

    def __unescape(self, m):
        # Groups: decimal reference, hex reference, or named entity.
        dval, hval, name = m.groups()
        if dval:
            codepoint = int(dval)
        elif hval:
            codepoint = int(hval, 16)
        else:
            codepoint = self.name2codepoint.get(name, 0xfffd)
            # U+FFFD = "REPLACEMENT CHARACTER"
        if codepoint < 128:
            return chr(codepoint)
        return unichr(codepoint)

    def unescape(self, text):
        """Unescape character references.

        All character references (both entity references and numerical
        character references) are unescaped.
        """
        return self.__characterrefs.sub(self.__unescape, text)
# Module-level escaper wired to the full HTML entity tables; the two bound
# methods below are the public filter callables.
_html_entities_escaper = XMLEntityEscaper(htmlentitydefs.codepoint2name,
                                          htmlentitydefs.name2codepoint)

html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape
def htmlentityreplace_errors(ex):
"""An encoding error handler.
This python `codecs`_ error handler replaces unencodable
characters with HTML entities, or, if no HTML entity exists for
the character, XML character references.
>>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
'The cost was €12.'
"""
if isinstance(ex, UnicodeEncodeError):
# Handle encoding errors
bad_text = ex.object[ex.start:ex.end]
text = _html_entities_escaper.escape(bad_text)
return (unicode(text), ex.end)
raise ex
codecs.register_error('htmlentityreplace', htmlentityreplace_errors)
# TODO: options to make this dynamic per-compilation will be added in a later
# release
DEFAULT_ESCAPES = {
'x':'filters.xml_escape',
'h':'filters.html_escape',
'u':'filters.url_escape',
'trim':'filters.trim',
'entity':'filters.html_entities_escape',
'unicode':'unicode',
'decode':'decode',
'str':'str',
'n':'n'
}
if util.py3k:
DEFAULT_ESCAPES.update({
'unicode':'str'
})
NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy()
NON_UNICODE_ESCAPES['h'] = 'filters.legacy_html_escape' | unknown | codeparrot/codeparrot-clean | ||
from collections import defaultdict
import logging
from django.conf import settings
from django.core.mail import mail_admins
from zerver.lib.actions import internal_send_message
def format_subject(subject):
"""
Escape CR and LF characters.
"""
return subject.replace('\n', '\\n').replace('\r', '\\r')
def user_info_str(report):
if report['user_full_name'] and report['user_email']:
user_info = "%(user_full_name)s (%(user_email)s)" % (report)
else:
user_info = "Anonymous user (not logged in)"
user_info += " on %s deployment" % (report['deployment'],)
return user_info
def notify_browser_error(report):
report = defaultdict(lambda: None, report)
if settings.ERROR_BOT:
zulip_browser_error(report)
email_browser_error(report)
def email_browser_error(report):
subject = "Browser error for %s" % (user_info_str(report))
body = ("User: %(user_full_name)s <%(user_email)s> on %(deployment)s\n\n"
"Message:\n%(message)s\n\nStacktrace:\n%(stacktrace)s\n\n"
"User agent: %(user_agent)s\n"
"href: %(href)s\n"
"Server path: %(server_path)s\n"
"Deployed version: %(version)s\n"
% report)
more_info = report['more_info']
if more_info is not None:
body += "\nAdditional information:"
for (key, value) in more_info.iteritems():
body += "\n %s: %s" % (key, value)
body += "\n\nLog:\n%s" % (report['log'],)
mail_admins(subject, body)
def zulip_browser_error(report):
subject = "JS error: %s" % (report['user_email'],)
user_info = user_info_str(report)
body = "User: %s\n" % (user_info,)
body += ("Message: %(message)s\n"
% report )
internal_send_message(settings.ERROR_BOT,
"stream", "errors", format_subject(subject), body)
def notify_server_error(report):
report = defaultdict(lambda: None, report)
email_server_error(report)
if settings.ERROR_BOT:
zulip_server_error(report)
def zulip_server_error(report):
subject = '%(node)s: %(message)s' % report
stack_trace = report['stack_trace'] or "No stack trace available"
user_info = user_info_str(report)
request_repr = (
"Request info:\n~~~~\n"
"- path: %(path)s\n"
"- %(method)s: %(data)s\n") % (report)
for field in ["REMOTE_ADDR", "QUERY_STRING", "SERVER_NAME"]:
request_repr += "- %s: \"%s\"\n" % (field, report.get(field.lower()))
request_repr += "~~~~"
internal_send_message(settings.ERROR_BOT,
"stream", "errors", format_subject(subject),
"Error generated by %s\n\n~~~~ pytb\n%s\n\n~~~~\n%s" % (
user_info, stack_trace, request_repr))
def email_server_error(report):
subject = '%(node)s: %(message)s' % (report)
user_info = user_info_str(report)
request_repr = (
"Request info:\n"
"- path: %(path)s\n"
"- %(method)s: %(data)s\n") % (report)
for field in ["REMOTE_ADDR", "QUERY_STRING", "SERVER_NAME"]:
request_repr += "- %s: \"%s\"\n" % (field, report.get(field.lower()))
message = "Error generated by %s\n\n%s\n\n%s" % (user_info, report['stack_trace'],
request_repr)
mail_admins(format_subject(subject), message, fail_silently=True) | unknown | codeparrot/codeparrot-clean | ||
# coding=utf-8
# "da" means Data Access, this file contains various quick (or dirty) methods for accessing data.
import hashlib
import logging
import zlib
import pickle
from google.appengine.ext import db
from google.appengine.api import memcache
from v2ex.babel import Member
from v2ex.babel import Counter
from v2ex.babel import Section
from v2ex.babel import Node
from v2ex.babel import Topic
from v2ex.babel import Reply
from v2ex.babel import Place
from v2ex.babel import Site
def GetKindByNum(kind, num):
K = str(kind.capitalize())
one = memcache.get(K + '_' + str(num))
if one:
return one
else:
q = db.GqlQuery("SELECT * FROM " + K + " WHERE num = :1", int(num))
if q.count() == 1:
one = q[0]
memcache.set(K + '_' + str(num), one, 86400)
return one
else:
return False
def GetKindByName(kind, name):
K = str(kind.capitalize())
one = memcache.get(K + '::' + str(name))
if one:
return one
else:
q = db.GqlQuery("SELECT * FROM " + K + " WHERE name = :1", str(name))
if q.count() == 1:
one = q[0]
memcache.set(K + '::' + str(name), one, 86400)
return one
else:
return False
def GetMemberByUsername(name):
one = memcache.get('Member::' + str(name).lower())
if one:
return one
else:
q = db.GqlQuery("SELECT * FROM Member WHERE username_lower = :1", str(name).lower())
if q.count() == 1:
one = q[0]
memcache.set('Member::' + str(name).lower(), one, 86400)
return one
else:
return False
def GetMemberByEmail(email):
cache = 'Member::email::' + hashlib.md5(email.lower()).hexdigest()
one = memcache.get(cache)
if one:
return one
else:
q = db.GqlQuery("SELECT * FROM Member WHERE email = :1", str(email).lower())
if q.count() == 1:
one = q[0]
memcache.set(cache, one, 86400)
return one
else:
return False
def ip2long(ip):
ip_array = ip.split('.')
ip_long = int(ip_array[0]) * 16777216 + int(ip_array[1]) * 65536 + int(ip_array[2]) * 256 + int(ip_array[3])
return ip_long
def GetPlaceByIP(ip):
cache = 'Place_' + ip
place = memcache.get(cache)
if place:
return place
else:
q = db.GqlQuery("SELECT * FROM Place WHERE ip = :1", ip)
if q.count() == 1:
place = q[0]
memcache.set(cache, place, 86400)
return place
else:
return False
def CreatePlaceByIP(ip):
place = Place()
q = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'place.max')
if (q.count() == 1):
counter = q[0]
counter.value = counter.value + 1
else:
counter = Counter()
counter.name = 'place.max'
counter.value = 1
q2 = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'place.total')
if (q2.count() == 1):
counter2 = q2[0]
counter2.value = counter2.value + 1
else:
counter2 = Counter()
counter2.name = 'place.total'
counter2.value = 1
place.num = ip2long(ip)
place.ip = ip
place.put()
counter.put()
counter2.put()
return place
def GetSite():
site = memcache.get('site')
if site is not None:
return site
else:
q = db.GqlQuery("SELECT * FROM Site WHERE num = 1")
if q.count() == 1:
site = q[0]
if site.l10n is None:
site.l10n = 'en'
if site.meta is None:
site.meta = ''
memcache.set('site', site, 86400)
return site
else:
site = Site()
site.num = 1
site.title = 'V2EX'
site.domain = 'v2ex.appspot.com'
site.slogan = 'way to explore'
site.l10n = 'en'
site.description = ''
site.meta = ''
site.put()
memcache.set('site', site, 86400)
return site
# input is a compressed string
# output is an object
def GetUnpacked(data):
decompressed = zlib.decompress(data)
return pickle.loads(decompressed)
# input is an object
# output is an compressed string
def GetPacked(data):
s = pickle.dumps(data)
return zlib.compress(s) | unknown | codeparrot/codeparrot-clean | ||
// SPDX-License-Identifier: GPL-2.0-only
/*
* common LSM auditing functions
*
* Based on code written for SELinux by :
* Stephen Smalley
* James Morris <jmorris@redhat.com>
* Author : Etienne Basset, <etienne.basset@ensta.org>
*/
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <net/sock.h>
#include <linux/un.h>
#include <net/af_unix.h>
#include <linux/audit.h>
#include <linux/ipv6.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/lsm_audit.h>
#include <linux/security.h>
/**
* ipv4_skb_to_auditdata : fill auditdata from skb
* @skb : the skb
* @ad : the audit data to fill
* @proto : the layer 4 protocol
*
* return 0 on success
*/
int ipv4_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto)
{
int ret = 0;
struct iphdr *ih;
ih = ip_hdr(skb);
ad->u.net->v4info.saddr = ih->saddr;
ad->u.net->v4info.daddr = ih->daddr;
if (proto)
*proto = ih->protocol;
/* non initial fragment */
if (ntohs(ih->frag_off) & IP_OFFSET)
return 0;
switch (ih->protocol) {
case IPPROTO_TCP: {
struct tcphdr *th = tcp_hdr(skb);
ad->u.net->sport = th->source;
ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
struct udphdr *uh = udp_hdr(skb);
ad->u.net->sport = uh->source;
ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_SCTP: {
struct sctphdr *sh = sctp_hdr(skb);
ad->u.net->sport = sh->source;
ad->u.net->dport = sh->dest;
break;
}
default:
ret = -EINVAL;
}
return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
/**
* ipv6_skb_to_auditdata : fill auditdata from skb
* @skb : the skb
* @ad : the audit data to fill
* @proto : the layer 4 protocol
*
* return 0 on success
*/
int ipv6_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto)
{
int offset, ret = 0;
struct ipv6hdr *ip6;
u8 nexthdr;
__be16 frag_off;
ip6 = ipv6_hdr(skb);
ad->u.net->v6info.saddr = ip6->saddr;
ad->u.net->v6info.daddr = ip6->daddr;
/* IPv6 can have several extension header before the Transport header
* skip them */
offset = skb_network_offset(skb);
offset += sizeof(*ip6);
nexthdr = ip6->nexthdr;
offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
if (offset < 0)
return 0;
if (proto)
*proto = nexthdr;
switch (nexthdr) {
case IPPROTO_TCP: {
struct tcphdr _tcph, *th;
th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
if (th == NULL)
break;
ad->u.net->sport = th->source;
ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
struct udphdr _udph, *uh;
uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (uh == NULL)
break;
ad->u.net->sport = uh->source;
ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_SCTP: {
struct sctphdr _sctph, *sh;
sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
if (sh == NULL)
break;
ad->u.net->sport = sh->source;
ad->u.net->dport = sh->dest;
break;
}
default:
ret = -EINVAL;
}
return ret;
}
#endif
static inline void print_ipv6_addr(struct audit_buffer *ab,
const struct in6_addr *addr, __be16 port,
const char *name1, const char *name2)
{
if (!ipv6_addr_any(addr))
audit_log_format(ab, " %s=%pI6c", name1, addr);
if (port)
audit_log_format(ab, " %s=%d", name2, ntohs(port));
}
static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
__be16 port, const char *name1, const char *name2)
{
if (addr)
audit_log_format(ab, " %s=%pI4", name1, &addr);
if (port)
audit_log_format(ab, " %s=%d", name2, ntohs(port));
}
/**
* audit_log_lsm_data - helper to log common LSM audit data
* @ab : the audit buffer
* @a : common audit data
*/
void audit_log_lsm_data(struct audit_buffer *ab,
const struct common_audit_data *a)
{
/*
* To keep stack sizes in check force programmers to notice if they
* start making this union too large! See struct lsm_network_audit
* as an example of how to deal with large data.
*/
BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
switch (a->type) {
case LSM_AUDIT_DATA_NONE:
return;
case LSM_AUDIT_DATA_IPC:
audit_log_format(ab, " ipc_key=%d ", a->u.ipc_id);
break;
case LSM_AUDIT_DATA_CAP:
audit_log_format(ab, " capability=%d ", a->u.cap);
break;
case LSM_AUDIT_DATA_PATH: {
struct inode *inode;
audit_log_d_path(ab, " path=", &a->u.path);
inode = d_backing_inode(a->u.path.dentry);
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
break;
}
case LSM_AUDIT_DATA_FILE: {
struct inode *inode;
audit_log_d_path(ab, " path=", &a->u.file->f_path);
inode = file_inode(a->u.file);
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
break;
}
case LSM_AUDIT_DATA_IOCTL_OP: {
struct inode *inode;
audit_log_d_path(ab, " path=", &a->u.op->path);
inode = a->u.op->path.dentry->d_inode;
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
audit_log_format(ab, " ioctlcmd=0x%hx", a->u.op->cmd);
break;
}
case LSM_AUDIT_DATA_DENTRY: {
struct inode *inode;
audit_log_format(ab, " name=");
spin_lock(&a->u.dentry->d_lock);
audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
spin_unlock(&a->u.dentry->d_lock);
inode = d_backing_inode(a->u.dentry);
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
break;
}
case LSM_AUDIT_DATA_INODE: {
struct dentry *dentry;
struct inode *inode;
rcu_read_lock();
inode = a->u.inode;
dentry = d_find_alias_rcu(inode);
if (dentry) {
audit_log_format(ab, " name=");
spin_lock(&dentry->d_lock);
audit_log_untrustedstring(ab, dentry->d_name.name);
spin_unlock(&dentry->d_lock);
}
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
rcu_read_unlock();
break;
}
case LSM_AUDIT_DATA_TASK: {
struct task_struct *tsk = a->u.tsk;
if (tsk) {
pid_t pid = task_tgid_nr(tsk);
if (pid) {
char tskcomm[sizeof(tsk->comm)];
audit_log_format(ab, " opid=%d ocomm=", pid);
audit_log_untrustedstring(ab,
get_task_comm(tskcomm, tsk));
}
}
break;
}
case LSM_AUDIT_DATA_NET:
if (a->u.net->sk) {
const struct sock *sk = a->u.net->sk;
const struct unix_sock *u;
struct unix_address *addr;
int len = 0;
char *p = NULL;
switch (sk->sk_family) {
case AF_INET: {
const struct inet_sock *inet = inet_sk(sk);
print_ipv4_addr(ab, inet->inet_rcv_saddr,
inet->inet_sport,
"laddr", "lport");
print_ipv4_addr(ab, inet->inet_daddr,
inet->inet_dport,
"faddr", "fport");
break;
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
const struct inet_sock *inet = inet_sk(sk);
print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
inet->inet_sport,
"laddr", "lport");
print_ipv6_addr(ab, &sk->sk_v6_daddr,
inet->inet_dport,
"faddr", "fport");
break;
}
#endif
case AF_UNIX:
u = unix_sk(sk);
addr = smp_load_acquire(&u->addr);
if (!addr)
break;
if (u->path.dentry) {
audit_log_d_path(ab, " path=", &u->path);
break;
}
len = addr->len-sizeof(short);
p = &addr->name->sun_path[0];
audit_log_format(ab, " path=");
if (*p)
audit_log_untrustedstring(ab, p);
else
audit_log_n_hex(ab, p, len);
break;
}
}
switch (a->u.net->family) {
case AF_INET:
print_ipv4_addr(ab, a->u.net->v4info.saddr,
a->u.net->sport,
"saddr", "src");
print_ipv4_addr(ab, a->u.net->v4info.daddr,
a->u.net->dport,
"daddr", "dest");
break;
case AF_INET6:
print_ipv6_addr(ab, &a->u.net->v6info.saddr,
a->u.net->sport,
"saddr", "src");
print_ipv6_addr(ab, &a->u.net->v6info.daddr,
a->u.net->dport,
"daddr", "dest");
break;
}
if (a->u.net->netif > 0) {
struct net_device *dev;
/* NOTE: we always use init's namespace */
dev = dev_get_by_index(&init_net, a->u.net->netif);
if (dev) {
audit_log_format(ab, " netif=%s", dev->name);
dev_put(dev);
}
}
break;
#ifdef CONFIG_KEYS
case LSM_AUDIT_DATA_KEY:
audit_log_format(ab, " key_serial=%u", a->u.key_struct.key);
if (a->u.key_struct.key_desc) {
audit_log_format(ab, " key_desc=");
audit_log_untrustedstring(ab, a->u.key_struct.key_desc);
}
break;
#endif
case LSM_AUDIT_DATA_KMOD:
audit_log_format(ab, " kmod=");
audit_log_untrustedstring(ab, a->u.kmod_name);
break;
case LSM_AUDIT_DATA_IBPKEY: {
struct in6_addr sbn_pfx;
memset(&sbn_pfx.s6_addr, 0,
sizeof(sbn_pfx.s6_addr));
memcpy(&sbn_pfx.s6_addr, &a->u.ibpkey->subnet_prefix,
sizeof(a->u.ibpkey->subnet_prefix));
audit_log_format(ab, " pkey=0x%x subnet_prefix=%pI6c",
a->u.ibpkey->pkey, &sbn_pfx);
break;
}
case LSM_AUDIT_DATA_IBENDPORT:
audit_log_format(ab, " device=%s port_num=%u",
a->u.ibendport->dev_name,
a->u.ibendport->port);
break;
case LSM_AUDIT_DATA_LOCKDOWN:
audit_log_format(ab, " lockdown_reason=\"%s\"",
lockdown_reasons[a->u.reason]);
break;
case LSM_AUDIT_DATA_ANONINODE:
audit_log_format(ab, " anonclass=%s", a->u.anonclass);
break;
case LSM_AUDIT_DATA_NLMSGTYPE:
audit_log_format(ab, " nl-msgtype=%hu", a->u.nlmsg_type);
break;
} /* switch (a->type) */
}
/**
* dump_common_audit_data - helper to dump common audit data
* @ab : the audit buffer
* @a : common audit data
*/
static void dump_common_audit_data(struct audit_buffer *ab,
const struct common_audit_data *a)
{
char comm[sizeof(current->comm)];
audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
audit_log_untrustedstring(ab, get_task_comm(comm, current));
audit_log_lsm_data(ab, a);
}
/**
* common_lsm_audit - generic LSM auditing function
* @a: auxiliary audit data
* @pre_audit: lsm-specific pre-audit callback
* @post_audit: lsm-specific post-audit callback
*
* setup the audit buffer for common security information
* uses callback to print LSM specific information
*/
void common_lsm_audit(struct common_audit_data *a,
void (*pre_audit)(struct audit_buffer *, void *),
void (*post_audit)(struct audit_buffer *, void *))
{
struct audit_buffer *ab;
if (a == NULL)
return;
/* we use GFP_ATOMIC so we won't sleep */
ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN,
AUDIT_AVC);
if (ab == NULL)
return;
if (pre_audit)
pre_audit(ab, a);
dump_common_audit_data(ab, a);
if (post_audit)
post_audit(ab, a);
audit_log_end(ab);
} | c | github | https://github.com/torvalds/linux | security/lsm_audit.c |
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
#ifndef INCLUDED_IMF_RLE_H_
#define INCLUDED_IMF_RLE_H_
#include "ImfNamespace.h"
#include "ImfExport.h"
OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_ENTER
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
IMF_EXPORT
int rleCompress (int inLength, const char in[], signed char out[]);
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
IMF_EXPORT
int rleUncompress (int inLength, int maxLength,
const signed char in[], char out[]);
OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_EXIT
#endif | c | github | https://github.com/opencv/opencv | 3rdparty/openexr/IlmImf/ImfRle.h |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.gs;
import java.net.URI;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertSame;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
public class TestStorageResourceId {
@Test
public void testConstructorInvalid() {
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId(null);
});
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId("");
});
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId(null, null);
});
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId("foo", null);
});
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId("", null);
});
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId(null, null, 0L);
});
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId("foo", null, 0L);
});
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId("", null, 0L);
});
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId(null, 0L);
});
assertThrows(IllegalArgumentException.class, () -> {
new StorageResourceId("", 0L);
});
}
@Test
public void testFromStringPathInvalid() {
assertThrows(IllegalArgumentException.class, () -> {
StorageResourceId.fromStringPath(null);
});
assertThrows(IllegalArgumentException.class, () -> {
StorageResourceId.fromStringPath("");
});
assertThrows(IllegalArgumentException.class, () -> {
StorageResourceId.fromStringPath("foo");
});
assertThrows(IllegalArgumentException.class, () -> {
StorageResourceId.fromStringPath("/foo/bar");
});
assertThrows(IllegalArgumentException.class, () -> {
StorageResourceId.fromStringPath("gs:///foo/bar");
});
}
@Test
public void testConstructor() {
String bucketName = "testbucketname";
String objectName = "a/b/c.txt";
verify(new StorageResourceId(bucketName), bucketName,
StorageResourceId.UNKNOWN_GENERATION_ID, null, false,
true, true, false, false);
verify(new StorageResourceId(bucketName, objectName), bucketName,
StorageResourceId.UNKNOWN_GENERATION_ID, objectName, false,
false, false, true, false);
long genId = System.currentTimeMillis();
verify(new StorageResourceId(bucketName, objectName, genId), bucketName,
genId, objectName, true,
false, false, true, false);
verify(new StorageResourceId(bucketName, genId), bucketName,
genId, null, true,
true, true, false, false);
}
@Test
public void testEqualsBucket() {
StorageResourceId bucket1 = new StorageResourceId("test-bucket");
StorageResourceId bucket2 = new StorageResourceId("test-bucket");
assertTrue(bucket1.equals(bucket2));
assertEquals(bucket1.hashCode(), bucket2.hashCode());
}
@Test
public void testEqualsObject() {
StorageResourceId obj1 = new StorageResourceId("test-bucket", "test-object");
StorageResourceId obj2 = new StorageResourceId("test-bucket", "test-object");
assertTrue(obj1.equals(obj2));
assertEquals(obj1.hashCode(), obj2.hashCode());
}
@Test
public void testEqualsDifferentBucket() {
StorageResourceId bucket1 = new StorageResourceId("test-bucket");
StorageResourceId bucket2 = new StorageResourceId("other-bucket");
assertFalse(bucket1.equals(bucket2));
}
@Test
public void testEqualsDifferentObject() {
StorageResourceId obj1 = new StorageResourceId("test-bucket", "test-object");
StorageResourceId obj2 = new StorageResourceId("test-bucket", "other-object");
assertFalse(obj1.equals(obj2));
}
@Test
public void testToDirectoryIdFromFile() {
StorageResourceId fileId = new StorageResourceId("my-bucket", "path/to/file.txt");
StorageResourceId dirId = fileId.toDirectoryId();
assertNotSame(fileId, dirId); // Should return a new instance
assertTrue(dirId.isDirectory());
assertEquals("my-bucket", dirId.getBucketName());
assertEquals("path/to/file.txt/", dirId.getObjectName());
assertEquals(fileId.getGenerationId(), dirId.getGenerationId());
}
@Test
public void testToDirectoryIdFromDirectoryObject() {
StorageResourceId dirIdOriginal = new StorageResourceId("my-bucket", "path/to/dir/");
StorageResourceId dirIdConverted = dirIdOriginal.toDirectoryId();
assertSame(dirIdOriginal, dirIdConverted); // Should return the same instance
assertTrue(dirIdConverted.isDirectory());
assertEquals("path/to/dir/", dirIdConverted.getObjectName());
}
@Test
public void testToDirectoryIdFromBucket() {
StorageResourceId bucketId = new StorageResourceId("my-bucket");
StorageResourceId convertedId = bucketId.toDirectoryId();
assertSame(bucketId, convertedId);
assertTrue(convertedId.isBucket());
}
@Test
public void testFromStringPathRoot() {
StorageResourceId id = StorageResourceId.fromStringPath("gs://");
assertTrue(id.isRoot());
}
@Test
public void testFromStringPathBucket() {
StorageResourceId id = StorageResourceId.fromStringPath("gs://my-bucket");
assertTrue(id.isBucket());
assertEquals("my-bucket", id.getBucketName());
assertNull(id.getObjectName());
assertEquals(StorageResourceId.UNKNOWN_GENERATION_ID, id.getGenerationId());
}
@ParameterizedTest
@ValueSource(strings = {
"gs://my-bucket/object",
"gs://my-bucket/folder/file.txt",
"gs://my-bucket/folder/"
})
public void testFromStringPathObject(String path) {
String expectedBucket = path.split("/")[2];
String expectedObject =
path.substring(path.indexOf(expectedBucket) + expectedBucket.length() + 1);
StorageResourceId id = StorageResourceId.fromStringPath(path);
assertTrue(id.isStorageObject());
assertEquals(expectedBucket, id.getBucketName());
assertEquals(expectedObject, id.getObjectName());
assertEquals(StorageResourceId.UNKNOWN_GENERATION_ID, id.getGenerationId());
}
@Test
public void testFromStringPathObjectWithGenerationId() {
long genId = 12345L;
StorageResourceId id = StorageResourceId.fromStringPath("gs://my-bucket/object.txt", genId);
assertTrue(id.isStorageObject());
assertEquals("my-bucket", id.getBucketName());
assertEquals("object.txt", id.getObjectName());
assertEquals(genId, id.getGenerationId());
assertTrue(id.hasGenerationId());
}
@Test
public void testFromUriPathBucket() throws Exception {
URI uri = new URI("gs://my-bucket");
StorageResourceId id = StorageResourceId.fromUriPath(uri, true);
assertTrue(id.isBucket());
assertEquals("my-bucket", id.getBucketName());
assertNull(id.getObjectName());
}
@Test
public void testFromUriPathObject() throws Exception {
URI uri = new URI("gs://my-bucket/path/to/file.txt");
StorageResourceId id = StorageResourceId.fromUriPath(uri, false);
assertTrue(id.isStorageObject());
assertEquals("my-bucket", id.getBucketName());
assertEquals("path/to/file.txt", id.getObjectName());
}
@Test
public void testFromUriPathObjectWithGenerationId() throws Exception {
URI uri = new URI("gs://my-bucket/object.txt");
long genId = 54321L;
StorageResourceId id = StorageResourceId.fromUriPath(uri, false, genId);
assertTrue(id.isStorageObject());
assertEquals("my-bucket", id.getBucketName());
assertEquals("object.txt", id.getObjectName());
assertEquals(genId, id.getGenerationId());
assertTrue(id.hasGenerationId());
}
@Test
public void testFromUriPathBucketWithGenerationId() throws Exception {
assertThrows(IllegalArgumentException.class, () -> {
URI uri = new URI("gs://my-bucket");
long genId = 54321L;
StorageResourceId.fromUriPath(uri, false, genId);
});
}
private static void verify(
StorageResourceId id,
String bucketName,
long generationId,
String objectName,
boolean hasGenerationId,
boolean isBucket,
boolean isDirectory,
boolean isStorageObject,
boolean isRoot) {
assertEquals(bucketName, id.getBucketName());
assertEquals(generationId, id.getGenerationId());
assertEquals(objectName, id.getObjectName());
assertEquals(hasGenerationId, id.hasGenerationId());
assertEquals(isBucket, id.isBucket());
assertEquals(isDirectory, id.isDirectory());
assertEquals(isStorageObject, id.isStorageObject());
assertEquals(isRoot, id.isRoot());
}
} | java | github | https://github.com/apache/hadoop | hadoop-cloud-storage-project/hadoop-gcp/src/test/java/org/apache/hadoop/fs/gs/TestStorageResourceId.java |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.scheduling.aspectj;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Role;
import org.springframework.scheduling.annotation.AbstractAsyncConfiguration;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.config.TaskManagementConfigUtils;
/**
* {@code @Configuration} class that registers the Spring infrastructure beans necessary
* to enable AspectJ-based asynchronous method execution.
*
* @author Chris Beams
* @author Stephane Nicoll
* @author Juergen Hoeller
* @since 3.1
* @see EnableAsync
* @see org.springframework.scheduling.annotation.AsyncConfigurationSelector
* @see org.springframework.scheduling.annotation.ProxyAsyncConfiguration
*/
@Configuration(proxyBeanMethods = false)
@Role(BeanDefinition.ROLE_INFRASTRUCTURE)
public class AspectJAsyncConfiguration extends AbstractAsyncConfiguration {
@Bean(name = TaskManagementConfigUtils.ASYNC_EXECUTION_ASPECT_BEAN_NAME)
@Role(BeanDefinition.ROLE_INFRASTRUCTURE)
public AnnotationAsyncExecutionAspect asyncAdvisor() {
AnnotationAsyncExecutionAspect asyncAspect = AnnotationAsyncExecutionAspect.aspectOf();
asyncAspect.configure(this.executor, this.exceptionHandler);
return asyncAspect;
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-aspects/src/main/java/org/springframework/scheduling/aspectj/AspectJAsyncConfiguration.java |
# Copyright (C) 2012 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import bottle
import shared_state
import os
import uuid
import shared_state as shared
import logging
import types
from bottle import run, install, mount, request
from bottle.ext import mongo
from beaker.middleware import SessionMiddleware
from datetime import datetime
from kumo.loggly import Loggly
from cork import Cork
logger = logging.getLogger(__name__)
class MnemoWebAPI():
    """Exposes raw and normalized data from hpfeeds through a RESTful api"""

    def __init__(self, datebase_name, static_file_path=None, data_dir='./data', loggly_token=None):
        """Wire up the bottle root app: mongo plugin, cork auth, API mounts,
        request logging hooks and beaker session middleware.

        :param datebase_name: name of the local mongo database to expose
            (parameter keeps its historical misspelling for compatibility).
        :param static_file_path: directory served as static content; when
            None the default static routes are not installed.
        :param data_dir: base directory containing the 'cork' (auth files)
            and 'beaker' (session files) sub-directories.
        :param loggly_token: optional Loggly token; when set, the app is
            additionally wrapped in Loggly logging middleware.
        """
        cork_dir = os.path.join(data_dir, 'cork')
        beaker_dir = os.path.join(data_dir, 'beaker')
        bottle.TEMPLATE_PATH.insert(0,'webapi/views/')
        #vars which must be visible across all webapi modules
        shared.static_dir = static_file_path
        shared.plug = bottle.ext.mongo.MongoPlugin(uri="localhost", db=datebase_name, json_mongo=True)
        #install mongo plugin for root app
        install(shared_state.plug)
        #check if cork files exists
        cork_files = ['users.json', 'roles.json', 'register.json']
        if not set(cork_files).issubset(set(os.listdir(cork_dir))):
            #if not, create them
            logger.info('Cork authentication files not found, creating new files.')
            shared.auth = self.populate_conf_directory(cork_dir)
        else:
            shared.auth = Cork(cork_dir)
        #admin depends on shared.auth, must be imported after it is set
        import admin
        #import and mount api version 1 (stable)
        from webapi.api.v1 import app as api_v1
        mount('/api/v1/', api_v1.app)
        #import and mount development version (unstable)
        from webapi.api.d import app as api_d
        mount('/api/d/', api_d.app)
        #must be imported AFTER mounts.
        if shared.static_dir is not None:
            import default_routes
        #wrap root app in beaker middleware
        session_opts = {
            'session.type': 'file',
            'session.cookie_expires': False,
            'session.data_dir': beaker_dir,
            'session.auto': True,
            #set secure attribute on cookie
            'session.secure': True
        }
        self.app = bottle.app()
        if loggly_token:
            #wrap BEFORE the session middleware so Loggly sees the inner app
            self.app = Loggly(bottle.app(), loggly_token)
        self.app = SessionMiddleware(self.app, session_opts)
        root_app = bottle.app()

        #setup logging hooks; the same hook is installed on the root app and
        #on both mounted API sub-apps so every request is logged once
        @root_app.hook('before_request')
        @api_d.app.hook('before_request')
        @api_v1.app.hook('before_request')
        def log_request():
            #log "[remote/user] METHOD path (user-agent)" for each request
            user_agent = ""
            if 'HTTP_USER_AGENT' in bottle.request.environ:
                user_agent = bottle.request.environ['HTTP_USER_AGENT']
            if 'REMOTE_ADDR' in bottle.request.environ:
                remote_addr = bottle.request.environ['REMOTE_ADDR']
            else:
                remote_addr = ""
            if 'beaker.session' in bottle.request.environ:
                session = bottle.request.environ.get('beaker.session')
                username = session.get('username', None)
            else:
                username = "None"
            logger.info("[{0}/{1}] {2} {3} ({4})".format(remote_addr, username, request.method, request.fullpath, user_agent))

        def return_text(self, e):
            #error handler body: reply with the bare status line only
            return e.status
        #make sure error pages for API are pure text
        api_d.app.default_error_handler = types.MethodType(return_text, self)
        api_v1.app.default_error_handler = types.MethodType(return_text, self)

    def start_listening(self, host, port):
        """Start the gevent-based HTTPS server (blocking call).

        NOTE(review): key/cert paths are hard-coded to 'server.key' and
        'server.crt' relative to the working directory — confirm deployment
        layout before moving this service.
        """
        logger.info('Starting web api, listening on {0}:{1}'.format(host, port))
        run(app=self.app, host=host, port=port, debug=False, server='gevent',
            log="wsgi", quiet=True, keyfile='server.key', certfile='server.crt')

    #defaults
    def populate_conf_directory(self, auth_dir):
        """
        Creation of basic auth files.

        Initializes a fresh Cork store in auth_dir with the default roles
        and a single 'admin' user whose random password is printed to
        STDOUT (deliberately not logged). Returns the Cork instance.
        """
        logger.info("Creating new authentication files, check STDOUT for the generated admin password.")
        cork = Cork(auth_dir, initialize=True)
        cork._store.roles['admin'] = 100
        cork._store.roles['access_all'] = 70
        cork._store.roles['access_normalized'] = 60
        cork._store.roles['public'] = 10
        cork._store.save_roles()
        tstamp = str(datetime.utcnow())
        #default 'admin' account gets a randomly generated uuid4 password
        username = 'admin'
        password = str(uuid.uuid4())
        cork._store.users[username] = {
            'role': 'admin',
            'hash': cork._hash(username, password),
            'email_addr': username + '@localhost.local',
            'desc': 'Default administrative account',
            'creation_date': tstamp
        }
        cork._store.save_users()
        #for security reasons we do not want this in the log files.
        print "A 'admin' account has been created with the password '{0}'".format(password)
        return cork
#for debugging: serve the default 'mnemosyne' database over HTTPS on
#localhost:8181 (blocks until the server is stopped)
if __name__ == '__main__':
    m = MnemoWebAPI('mnemosyne')
    m.start_listening(host='localhost', port='8181')
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/kernel/reboot.c
*
* Copyright (C) 2013 Linus Torvalds
*/
#define pr_fmt(fmt) "reboot: " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/kmod.h>
#include <linux/kmsg_dump.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */
static int C_A_D = 1;
/* PID that receives SIGINT on ctrl-alt-del when C_A_D == 0 */
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);
/*
 * On ARM the macro expands to an initializer so that reboot_mode defaults
 * to REBOOT_HARD; everywhere else it expands to nothing and reboot_mode
 * is zero-initialized (REBOOT_UNDEFINED/COLD depending on the enum).
 */
#if defined(CONFIG_ARM)
#define DEFAULT_REBOOT_MODE = REBOOT_HARD
#else
#define DEFAULT_REBOOT_MODE
#endif
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
EXPORT_SYMBOL_GPL(reboot_mode);
enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
/* action used by __hw_protection_trigger() when HWPROT_ACT_DEFAULT is passed */
static enum hw_protection_action hw_protection_action = HWPROT_ACT_SHUTDOWN;
/*
 * This variable is used privately to keep track of whether or not
 * reboot_type is still set to its default value (i.e., reboot= hasn't
 * been set on the command line). This is needed so that we can
 * suppress DMI scanning for reboot quirks. Without it, it's
 * impossible to override a faulty reboot quirk without recompiling.
 */
int reboot_default = 1;
int reboot_cpu;
enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;
/* bookkeeping for one handler created by register_sys_off_handler() */
struct sys_off_handler {
	struct notifier_block nb;
	int (*sys_off_cb)(struct sys_off_data *data);
	void *cb_data;
	enum sys_off_mode mode;
	bool blocking;		/* nb sits on a blocking (vs atomic) chain */
	void *list;		/* notifier chain head nb is registered on */
	struct device *dev;	/* set by devm_register_sys_off_handler() */
};
/*
 * This variable is used to indicate if a halt was initiated instead of a
 * reboot when the reboot call was invoked with LINUX_REBOOT_CMD_POWER_OFF, but
 * the system cannot be powered off. This allows kernel_halt() to notify users
 * of that.
 */
static bool poweroff_fallback_to_halt;
/*
 * Temporary stub that prevents linkage failure while we're in process
 * of removing all uses of legacy pm_power_off() around the kernel.
 */
void __weak (*pm_power_off)(void);
/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */
static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	kmsg_dump(KMSG_DUMP_EMERG);
	system_state = SYSTEM_RESTART;
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
/**
 * kernel_restart_prepare - prepare the system for an orderly restart
 * @cmd: restart command forwarded to the reboot notifiers, or %NULL
 *
 * Runs the reboot notifier chain, enters the SYSTEM_RESTART state,
 * disables usermode helpers and shuts down all devices. Must be
 * called before the actual machine restart.
 */
void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
}
/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);
/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
/*
 * devres release callback: unregisters the notifier stored in @res when
 * the owning device is unbound. A failure here indicates a programming
 * error, hence the WARN_ON.
 */
static void devm_unregister_reboot_notifier(struct device *dev, void *res)
{
	WARN_ON(unregister_reboot_notifier(*(struct notifier_block **)res));
}
/**
 * devm_register_reboot_notifier - resource-managed register_reboot_notifier
 * @dev: device whose lifetime bounds the registration
 * @nb: notifier block to register
 *
 * Like register_reboot_notifier(), but the notifier is automatically
 * unregistered when @dev is unbound from its driver.
 *
 * Returns zero on success, or %-ENOMEM if the devres record cannot
 * be allocated.
 */
int devm_register_reboot_notifier(struct device *dev, struct notifier_block *nb)
{
	struct notifier_block **rcnb;
	int ret;

	rcnb = devres_alloc(devm_unregister_reboot_notifier,
			    sizeof(*rcnb), GFP_KERNEL);
	if (!rcnb)
		return -ENOMEM;

	ret = register_reboot_notifier(nb);
	if (!ret) {
		*rcnb = nb;
		devres_add(dev, rcnb);
	} else {
		devres_free(rcnb);
	}

	return ret;
}
EXPORT_SYMBOL(devm_register_reboot_notifier);
/*
 * Notifier list for kernel code which wants to be called
 * to restart the system.
 */
static ATOMIC_NOTIFIER_HEAD(restart_handler_list);
/**
 * register_restart_handler - Register function to be called to reset
 *			      the system
 * @nb: Info about handler function to be called
 * @nb->priority:	Handler priority. Handlers should follow the
 *			following guidelines for setting priorities.
 *			0:	Restart handler of last resort,
 *				with limited restart capabilities
 *			128:	Default restart handler; use if no other
 *				restart handler is expected to be available,
 *				and/or if restart functionality is
 *				sufficient to restart the entire system
 *			255:	Highest priority restart handler, will
 *				preempt all other restart handlers
 *
 * Registers a function with code to be called to restart the
 * system.
 *
 * Registered functions will be called from machine_restart as last
 * step of the restart sequence (if the architecture specific
 * machine_restart function calls do_kernel_restart - see below
 * for details).
 * Registered functions are expected to restart the system immediately.
 * If more than one function is registered, the restart handler priority
 * selects which function will be called first.
 *
 * Restart handlers are expected to be registered from non-architecture
 * code, typically from drivers. A typical use case would be a system
 * where restart functionality is provided through a watchdog. Multiple
 * restart handlers may exist; for example, one restart handler might
 * restart the entire system, while another only restarts the CPU.
 * In such cases, the restart handler which only restarts part of the
 * hardware is expected to register with low priority to ensure that
 * it only runs if no other means to restart the system is available.
 *
 * Currently always returns zero, as atomic_notifier_chain_register()
 * always returns zero.
 */
int register_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&restart_handler_list, nb);
}
EXPORT_SYMBOL(register_restart_handler);
/**
 * unregister_restart_handler - Unregister previously registered
 *				restart handler
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered restart handler function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&restart_handler_list, nb);
}
EXPORT_SYMBOL(unregister_restart_handler);
/**
 * do_kernel_restart - Execute kernel restart handler call chain
 *
 * @cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 * Calls functions registered with register_restart_handler.
 *
 * Expected to be called from machine_restart as last step of the restart
 * sequence.
 *
 * Restarts the system immediately if a restart handler function has been
 * registered. Otherwise does nothing.
 */
void do_kernel_restart(char *cmd)
{
	/* handlers receive the global reboot_mode as the notifier "action" */
	atomic_notifier_call_chain(&restart_handler_list, reboot_mode, cmd);
}
/*
 * Pin the calling task to the CPU designated for reboot (reboot_cpu, or
 * the first online CPU if that one is offline) so the final machine_*
 * callbacks run on a well-defined processor, and keep hotplug disabled
 * so that CPU cannot go away underneath us.
 */
void migrate_to_reboot_cpu(void)
{
	/* The boot cpu is always logical cpu 0 */
	int cpu = reboot_cpu;

	cpu_hotplug_disable();

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_online(cpu))
		cpu = cpumask_first(cpu_online_mask);

	/* Prevent races with other tasks migrating this task */
	current->flags |= PF_NO_SETAFFINITY;

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
}
/*
 * Notifier list for kernel code which wants to be called
 * to prepare system for restart.
 */
static BLOCKING_NOTIFIER_HEAD(restart_prep_handler_list);
/* run SYS_OFF_MODE_RESTART_PREPARE handlers (process context, may sleep) */
static void do_kernel_restart_prepare(void)
{
	blocking_notifier_call_chain(&restart_prep_handler_list, 0, NULL);
}
/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	do_kernel_restart_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	if (!cmd)
		pr_emerg("Restarting system\n");
	else
		pr_emerg("Restarting system with command '%s'\n", cmd);
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/*
 * Common halt/power-off preparation: run the reboot notifiers with the
 * appropriate event, enter @state, disable usermode helpers and shut
 * down all devices.
 */
static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	usermodehelper_disable();
	device_shutdown();
}
/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	/* tell the user if this halt is a fallback for an impossible power off */
	if (poweroff_fallback_to_halt)
		pr_emerg("Power off not available: System halted instead\n");
	else
		pr_emerg("System halted\n");
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);
/*
 * Notifier list for kernel code which wants to be called
 * to prepare system for power off.
 */
static BLOCKING_NOTIFIER_HEAD(power_off_prep_handler_list);
/*
 * Notifier list for kernel code which wants to be called
 * to power off system.
 */
static ATOMIC_NOTIFIER_HEAD(power_off_handler_list);
/*
 * Adapter between the notifier-chain calling convention and the sys-off
 * callback API: repackages the notifier arguments into a sys_off_data
 * and invokes the handler's callback.
 */
static int sys_off_notify(struct notifier_block *nb,
			  unsigned long mode, void *cmd)
{
	struct sys_off_handler *handler;
	struct sys_off_data data = {};

	handler = container_of(nb, struct sys_off_handler, nb);
	data.cb_data = handler->cb_data;
	data.mode = mode;
	data.cmd = cmd;
	data.dev = handler->dev;

	return handler->sys_off_cb(&data);
}
/* statically allocated slot for the single SYS_OFF_PRIO_PLATFORM handler */
static struct sys_off_handler platform_sys_off_handler;
static struct sys_off_handler *alloc_sys_off_handler(int priority)
{
	struct sys_off_handler *handler;
	gfp_t flags;

	/*
	 * Platforms like m68k can't allocate sys_off handler dynamically
	 * at the early boot time because memory allocator isn't available yet.
	 */
	if (priority == SYS_OFF_PRIO_PLATFORM) {
		handler = &platform_sys_off_handler;
		/* cb_data doubles as the "slot in use" marker */
		if (handler->cb_data)
			return ERR_PTR(-EBUSY);
	} else {
		/* GFP_ATOMIC once the system is shutting down: must not sleep */
		if (system_state > SYSTEM_RUNNING)
			flags = GFP_ATOMIC;
		else
			flags = GFP_KERNEL;

		handler = kzalloc(sizeof(*handler), flags);
		if (!handler)
			return ERR_PTR(-ENOMEM);
	}

	return handler;
}
static void free_sys_off_handler(struct sys_off_handler *handler)
{
	/* the static platform slot is cleared, not freed */
	if (handler == &platform_sys_off_handler)
		memset(handler, 0, sizeof(*handler));
	else
		kfree(handler);
}
/**
 * register_sys_off_handler - Register sys-off handler
 * @mode: Sys-off mode
 * @priority: Handler priority
 * @callback: Callback function
 * @cb_data: Callback argument
 *
 * Registers system power-off or restart handler that will be invoked
 * at the step corresponding to the given sys-off mode. Handler's callback
 * should return NOTIFY_DONE to permit execution of the next handler in
 * the call chain or NOTIFY_STOP to break the chain (in error case for
 * example).
 *
 * Multiple handlers can be registered at the default priority level.
 *
 * Only one handler can be registered at the non-default priority level,
 * otherwise ERR_PTR(-EBUSY) is returned.
 *
 * Returns a new instance of struct sys_off_handler on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct sys_off_handler *
register_sys_off_handler(enum sys_off_mode mode,
			 int priority,
			 int (*callback)(struct sys_off_data *data),
			 void *cb_data)
{
	struct sys_off_handler *handler;
	int err;

	handler = alloc_sys_off_handler(priority);
	if (IS_ERR(handler))
		return handler;

	/*
	 * Map the mode onto its notifier chain. *_PREPARE chains are
	 * blocking (process context, may sleep); the final power-off and
	 * restart chains are atomic.
	 */
	switch (mode) {
	case SYS_OFF_MODE_POWER_OFF_PREPARE:
		handler->list = &power_off_prep_handler_list;
		handler->blocking = true;
		break;

	case SYS_OFF_MODE_POWER_OFF:
		handler->list = &power_off_handler_list;
		break;

	case SYS_OFF_MODE_RESTART_PREPARE:
		handler->list = &restart_prep_handler_list;
		handler->blocking = true;
		break;

	case SYS_OFF_MODE_RESTART:
		handler->list = &restart_handler_list;
		break;

	default:
		free_sys_off_handler(handler);
		return ERR_PTR(-EINVAL);
	}

	handler->nb.notifier_call = sys_off_notify;
	handler->nb.priority = priority;
	handler->sys_off_cb = callback;
	handler->cb_data = cb_data;
	handler->mode = mode;

	/*
	 * Non-default priorities must be unique on their chain; the
	 * *_unique_prio variants enforce that and return -EBUSY otherwise.
	 */
	if (handler->blocking) {
		if (priority == SYS_OFF_PRIO_DEFAULT)
			err = blocking_notifier_chain_register(handler->list,
							       &handler->nb);
		else
			err = blocking_notifier_chain_register_unique_prio(handler->list,
									   &handler->nb);
	} else {
		if (priority == SYS_OFF_PRIO_DEFAULT)
			err = atomic_notifier_chain_register(handler->list,
							     &handler->nb);
		else
			err = atomic_notifier_chain_register_unique_prio(handler->list,
									 &handler->nb);
	}

	if (err) {
		free_sys_off_handler(handler);
		return ERR_PTR(err);
	}

	return handler;
}
EXPORT_SYMBOL_GPL(register_sys_off_handler);
/**
 * unregister_sys_off_handler - Unregister sys-off handler
 * @handler: Sys-off handler
 *
 * Unregisters given sys-off handler.
 */
void unregister_sys_off_handler(struct sys_off_handler *handler)
{
	int err;

	/* tolerate NULL and ERR_PTR so callers can unregister unconditionally */
	if (IS_ERR_OR_NULL(handler))
		return;

	if (handler->blocking)
		err = blocking_notifier_chain_unregister(handler->list,
							 &handler->nb);
	else
		err = atomic_notifier_chain_unregister(handler->list,
						       &handler->nb);

	/* sanity check, shall never happen */
	WARN_ON(err);

	free_sys_off_handler(handler);
}
EXPORT_SYMBOL_GPL(unregister_sys_off_handler);
/* devm action callback: tears down a handler on device unbind */
static void devm_unregister_sys_off_handler(void *data)
{
	struct sys_off_handler *handler = data;

	unregister_sys_off_handler(handler);
}
/**
 * devm_register_sys_off_handler - Register sys-off handler
 * @dev: Device that registers handler
 * @mode: Sys-off mode
 * @priority: Handler priority
 * @callback: Callback function
 * @cb_data: Callback argument
 *
 * Registers resource-managed sys-off handler.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_sys_off_handler(struct device *dev,
				  enum sys_off_mode mode,
				  int priority,
				  int (*callback)(struct sys_off_data *data),
				  void *cb_data)
{
	struct sys_off_handler *handler;

	handler = register_sys_off_handler(mode, priority, callback, cb_data);
	if (IS_ERR(handler))
		return PTR_ERR(handler);
	/* remember the owning device so callbacks can reach it via data->dev */
	handler->dev = dev;

	return devm_add_action_or_reset(dev, devm_unregister_sys_off_handler,
					handler);
}
EXPORT_SYMBOL_GPL(devm_register_sys_off_handler);
/**
 * devm_register_power_off_handler - Register power-off handler
 * @dev: Device that registers callback
 * @callback: Callback function
 * @cb_data: Callback's argument
 *
 * Registers resource-managed sys-off handler with a default priority
 * and using power-off mode.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_power_off_handler(struct device *dev,
				    int (*callback)(struct sys_off_data *data),
				    void *cb_data)
{
	return devm_register_sys_off_handler(dev,
					     SYS_OFF_MODE_POWER_OFF,
					     SYS_OFF_PRIO_DEFAULT,
					     callback, cb_data);
}
EXPORT_SYMBOL_GPL(devm_register_power_off_handler);
/**
 * devm_register_restart_handler - Register restart handler
 * @dev: Device that registers callback
 * @callback: Callback function
 * @cb_data: Callback's argument
 *
 * Registers resource-managed sys-off handler with a default priority
 * and using restart mode.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_restart_handler(struct device *dev,
				  int (*callback)(struct sys_off_data *data),
				  void *cb_data)
{
	return devm_register_sys_off_handler(dev,
					     SYS_OFF_MODE_RESTART,
					     SYS_OFF_PRIO_DEFAULT,
					     callback, cb_data);
}
EXPORT_SYMBOL_GPL(devm_register_restart_handler);
/* the one sys-off handler installed by register_platform_power_off() */
static struct sys_off_handler *platform_power_off_handler;
/* sys-off shim: invokes the plain void(void) platform power-off callback */
static int platform_power_off_notify(struct sys_off_data *data)
{
	void (*platform_power_power_off_cb)(void) = data->cb_data;

	platform_power_power_off_cb();

	return NOTIFY_DONE;
}
/**
 * register_platform_power_off - Register platform-level power-off callback
 * @power_off: Power-off callback
 *
 * Registers power-off callback that will be called as last step
 * of the power-off sequence. This callback is expected to be invoked
 * for the last resort. Only one platform power-off callback is allowed
 * to be registered at a time.
 *
 * Returns zero on success, or error code on failure.
 */
int register_platform_power_off(void (*power_off)(void))
{
	struct sys_off_handler *handler;

	/* SYS_OFF_PRIO_PLATFORM uses the static slot -> -EBUSY if taken */
	handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					   SYS_OFF_PRIO_PLATFORM,
					   platform_power_off_notify,
					   power_off);
	if (IS_ERR(handler))
		return PTR_ERR(handler);

	platform_power_off_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(register_platform_power_off);
/**
 * unregister_platform_power_off - Unregister platform-level power-off callback
 * @power_off: Power-off callback
 *
 * Unregisters previously registered platform power-off callback.
 * A mismatching @power_off is silently ignored.
 */
void unregister_platform_power_off(void (*power_off)(void))
{
	if (platform_power_off_handler &&
	    platform_power_off_handler->cb_data == power_off) {
		unregister_sys_off_handler(platform_power_off_handler);
		platform_power_off_handler = NULL;
	}
}
EXPORT_SYMBOL_GPL(unregister_platform_power_off);
/* bridges the legacy pm_power_off() hook into the sys-off chain */
static int legacy_pm_power_off(struct sys_off_data *data)
{
	if (pm_power_off)
		pm_power_off();

	return NOTIFY_DONE;
}
/* run SYS_OFF_MODE_POWER_OFF_PREPARE handlers (process context, may sleep) */
static void do_kernel_power_off_prepare(void)
{
	blocking_notifier_call_chain(&power_off_prep_handler_list, 0, NULL);
}
/**
 * do_kernel_power_off - Execute kernel power-off handler call chain
 *
 * Expected to be called as last step of the power-off sequence.
 *
 * Powers off the system immediately if a power-off handler function has
 * been registered. Otherwise does nothing.
 */
void do_kernel_power_off(void)
{
	struct sys_off_handler *sys_off = NULL;

	/*
	 * Register sys-off handlers for legacy PM callback. This allows
	 * legacy PM callbacks temporary co-exist with the new sys-off API.
	 *
	 * TODO: Remove legacy handlers once all legacy PM users will be
	 *       switched to the sys-off based APIs.
	 */
	if (pm_power_off)
		sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
						   SYS_OFF_PRIO_DEFAULT,
						   legacy_pm_power_off, NULL);

	atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);

	/* safe with NULL/ERR_PTR, so no need to check the registration */
	unregister_sys_off_handler(sys_off);
}
/**
 * kernel_can_power_off - check whether system can be powered off
 *
 * Returns true if power-off handler is registered and system can be
 * powered off, false otherwise.
 */
bool kernel_can_power_off(void)
{
	return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) ||
		pm_power_off;
}
EXPORT_SYMBOL_GPL(kernel_can_power_off);
/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	do_kernel_power_off_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	pr_emerg("Power down\n");
	/* make sure the final message reaches the consoles before we die */
	pr_flush(1000, true);
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
/* serializes reboot/halt/suspend/hibernate transitions */
DEFINE_MUTEX(system_transition_mutex);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	char buffer[256];
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
			(magic2 != LINUX_REBOOT_MAGIC2 &&
			magic2 != LINUX_REBOOT_MAGIC2A &&
			magic2 != LINUX_REBOOT_MAGIC2B &&
			magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/*
	 * If pid namespaces are enabled and the current task is in a child
	 * pid_namespace, the command is handled by reboot_pid_ns() which will
	 * call do_exit().
	 */
	ret = reboot_pid_ns(pid_ns, cmd);
	if (ret)
		return ret;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off()) {
		/* remembered so kernel_halt() can explain the substitution */
		poweroff_fallback_to_halt = true;
		cmd = LINUX_REBOOT_CMD_HALT;
	}

	mutex_lock(&system_transition_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	/* halt/power-off never return: the calling task exits */
	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		ret = strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1);
		if (ret < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC_CORE
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&system_transition_mutex);
	return ret;
}
/* workqueue body for ctrl-alt-del: restart from process context */
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}
/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		/* deliver SIGINT to cad_pid (typically init) instead */
		kill_cad_pid(SIGINT, 1);
}
#define POWEROFF_CMD_PATH_LEN	256
/* poweroff_cmd is writable via /proc/sys/kernel/poweroff_cmd */
static char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
static const char reboot_cmd[] = "/sbin/reboot";
/*
 * Spawn @cmd as a usermode helper with a minimal environment.
 * Returns 0 if the helper could be exec'd, -ENOMEM if the argv split
 * failed, or the usermodehelper error otherwise.
 */
static int run_cmd(const char *cmd)
{
	char **argv;
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret;

	argv = argv_split(GFP_KERNEL, cmd, NULL);
	if (argv) {
		/* UMH_WAIT_EXEC: wait for exec to succeed, not for exit */
		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		argv_free(argv);
	} else {
		ret = -ENOMEM;
	}

	return ret;
}
/* try /sbin/reboot; on failure sync and force an in-kernel restart */
static int __orderly_reboot(void)
{
	int ret;

	ret = run_cmd(reboot_cmd);

	if (ret) {
		pr_warn("Failed to start orderly reboot: forcing the issue\n");
		emergency_sync();
		kernel_restart(NULL);
	}

	return ret;
}
/* try the poweroff_cmd helper; optionally force power off if it fails */
static int __orderly_poweroff(bool force)
{
	int ret;

	ret = run_cmd(poweroff_cmd);

	if (ret && force) {
		pr_warn("Failed to start orderly shutdown: forcing the issue\n");

		/*
		 * I guess this should try to kick off some daemon to sync and
		 * poweroff asap. Or not even bother syncing if we're doing an
		 * emergency shutdown?
		 */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}
/* sticky: once any caller requested force, later runs also force */
static bool poweroff_force;
static void poweroff_work_func(struct work_struct *work)
{
	__orderly_poweroff(poweroff_force);
}
static DECLARE_WORK(poweroff_work, poweroff_work_func);
/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
void orderly_poweroff(bool force)
{
	if (force) /* do not override the pending "true" */
		poweroff_force = true;
	schedule_work(&poweroff_work);
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
static void reboot_work_func(struct work_struct *work)
{
	__orderly_reboot();
}
static DECLARE_WORK(reboot_work, reboot_work_func);
/**
 * orderly_reboot - Trigger an orderly system reboot
 *
 * This may be called from any context to trigger a system reboot.
 * If the orderly reboot fails, it will force an immediate reboot.
 */
void orderly_reboot(void)
{
	schedule_work(&reboot_work);
}
EXPORT_SYMBOL_GPL(orderly_reboot);
/*
 * Map a hw_protection_action onto the user-visible keyword used by the
 * "hw_protection=" command line parameter and the sysfs attribute.
 * Any unrecognized value maps to "undefined".
 */
static const char *hw_protection_action_str(enum hw_protection_action action)
{
	if (action == HWPROT_ACT_SHUTDOWN)
		return "shutdown";
	if (action == HWPROT_ACT_REBOOT)
		return "reboot";

	return "undefined";
}
/* action latched by hw_failure_emergency_schedule() for the delayed work */
static enum hw_protection_action hw_failure_emergency_action;
/**
 * hw_failure_emergency_action_func - emergency action work after a known delay
 * @work: work_struct associated with the emergency action function
 *
 * This function is called in very critical situations to force
 * a kernel poweroff or reboot after a configurable timeout value.
 */
static void hw_failure_emergency_action_func(struct work_struct *work)
{
	const char *action_str = hw_protection_action_str(hw_failure_emergency_action);

	pr_emerg("Hardware protection timed-out. Trying forced %s\n",
		 action_str);

	/*
	 * We have reached here after the emergency action waiting period has
	 * expired. This means orderly_poweroff/reboot has not been able to
	 * shut off the system for some reason.
	 *
	 * Try to shut off the system immediately if possible
	 */
	if (hw_failure_emergency_action == HWPROT_ACT_REBOOT)
		kernel_restart(NULL);
	else
		kernel_power_off();

	/*
	 * Worst of the worst case trigger emergency restart
	 */
	pr_emerg("Hardware protection %s failed. Trying emergency restart\n",
		 action_str);
	emergency_restart();
}
static DECLARE_DELAYED_WORK(hw_failure_emergency_action_work,
			    hw_failure_emergency_action_func);
/**
 * hw_failure_emergency_schedule - Schedule an emergency system shutdown or reboot
 *
 * @action:		The hardware protection action to be taken
 * @action_delay_ms:	Time in milliseconds to elapse before triggering action
 *
 * This may be called from any critical situation to trigger a system shutdown
 * or reboot after a given period of time.
 * If time is negative this is not scheduled.
 */
static void hw_failure_emergency_schedule(enum hw_protection_action action,
					  int action_delay_ms)
{
	if (action_delay_ms <= 0)
		return;
	hw_failure_emergency_action = action;
	schedule_delayed_work(&hw_failure_emergency_action_work,
			      msecs_to_jiffies(action_delay_ms));
}
/**
 * __hw_protection_trigger - Trigger an emergency system shutdown or reboot
 *
 * @reason:		Reason of emergency shutdown or reboot to be printed.
 * @ms_until_forced:	Time to wait for orderly shutdown or reboot before
 *			triggering it. Negative value disables the forced
 *			shutdown or reboot.
 * @action:		The hardware protection action to be taken.
 *
 * Initiate an emergency system shutdown or reboot in order to protect
 * hardware from further damage. Usage examples include a thermal protection.
 * NOTE: The request is ignored if protection shutdown or reboot is already
 * pending even if the previous request has given a large timeout for forced
 * shutdown/reboot.
 */
void __hw_protection_trigger(const char *reason, int ms_until_forced,
			     enum hw_protection_action action)
{
	static atomic_t allow_proceed = ATOMIC_INIT(1);

	/* HWPROT_ACT_DEFAULT defers to the boot-time/sysfs-configured action */
	if (action == HWPROT_ACT_DEFAULT)
		action = hw_protection_action;

	pr_emerg("HARDWARE PROTECTION %s (%s)\n",
		 hw_protection_action_str(action), reason);

	/* Shutdown should be initiated only once. */
	if (!atomic_dec_and_test(&allow_proceed))
		return;

	/*
	 * Queue a backup emergency shutdown in the event of
	 * orderly_poweroff failure
	 */
	hw_failure_emergency_schedule(action, ms_until_forced);
	if (action == HWPROT_ACT_REBOOT)
		orderly_reboot();
	else
		orderly_poweroff(true);
}
EXPORT_SYMBOL_GPL(__hw_protection_trigger);
/*
 * Parse a user-supplied hw-protection keyword ("shutdown" or "reboot",
 * with optional trailing newline as accepted by sysfs_streq()) into
 * *action. Returns true on success; on an unrecognized keyword *action
 * is left untouched and false is returned.
 */
static bool hw_protection_action_parse(const char *str,
				       enum hw_protection_action *action)
{
	bool recognized = true;

	if (sysfs_streq(str, "shutdown"))
		*action = HWPROT_ACT_SHUTDOWN;
	else if (sysfs_streq(str, "reboot"))
		*action = HWPROT_ACT_REBOOT;
	else
		recognized = false;

	return recognized;
}
/* "hw_protection=" boot parameter: silently ignores unknown keywords */
static int __init hw_protection_setup(char *str)
{
	hw_protection_action_parse(str, &hw_protection_action);
	return 1;
}
__setup("hw_protection=", hw_protection_setup);
#ifdef CONFIG_SYSFS
static ssize_t hw_protection_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
hw_protection_action_str(hw_protection_action));
}
static ssize_t hw_protection_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf,
size_t count)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!hw_protection_action_parse(buf, &hw_protection_action))
return -EINVAL;
return count;
}
static struct kobj_attribute hw_protection_attr = __ATTR_RW(hw_protection);
#endif
/*
 * Parse the "reboot=" kernel parameter: a comma-separated list of
 * single-letter mode/type flags, an optional "panic_" prefix to set
 * panic_reboot_mode instead of reboot_mode, and "s"/"smp"<N> to select
 * the CPU used for rebooting.
 */
static int __init reboot_setup(char *str)
{
	for (;;) {
		enum reboot_mode *mode;

		/*
		 * Having anything passed on the command line via
		 * reboot= will cause us to disable DMI checking
		 * below.
		 */
		reboot_default = 0;

		if (!strncmp(str, "panic_", 6)) {
			mode = &panic_reboot_mode;
			str += 6;
		} else {
			mode = &reboot_mode;
		}

		switch (*str) {
		case 'w':
			*mode = REBOOT_WARM;
			break;

		case 'c':
			*mode = REBOOT_COLD;
			break;

		case 'h':
			*mode = REBOOT_HARD;
			break;

		case 's':
			/*
			 * reboot_cpu is s[mp]#### with #### being the processor
			 * to be used for rebooting. Skip 's' or 'smp' prefix.
			 */
			str += str[1] == 'm' && str[2] == 'p' ? 3 : 1;

			if (isdigit(str[0])) {
				int cpu = simple_strtoul(str, NULL, 0);

				if (cpu >= num_possible_cpus()) {
					pr_err("Ignoring the CPU number in reboot= option. "
					       "CPU %d exceeds possible cpu number %d\n",
					       cpu, num_possible_cpus());
					break;
				}
				reboot_cpu = cpu;
			} else
				/* bare 's' (no digits) selects soft mode */
				*mode = REBOOT_SOFT;
			break;

		case 'g':
			*mode = REBOOT_GPIO;
			break;

		/* single letters naming a reboot_type (bios/acpi/kbd/...) */
		case 'b':
		case 'a':
		case 'k':
		case 't':
		case 'e':
		case 'p':
			reboot_type = *str;
			break;

		case 'f':
			reboot_force = 1;
			break;
		}

		/* advance to the next comma-separated token, if any */
		str = strchr(str, ',');
		if (str)
			str++;
		else
			break;
	}
	return 1;
}
__setup("reboot=", reboot_setup);
#ifdef CONFIG_SYSFS

/* Keyword strings accepted and reported by the /sys/kernel/reboot attributes. */
#define REBOOT_COLD_STR		"cold"
#define REBOOT_WARM_STR		"warm"
#define REBOOT_HARD_STR		"hard"
#define REBOOT_SOFT_STR		"soft"
#define REBOOT_GPIO_STR		"gpio"
#define REBOOT_UNDEFINED_STR	"undefined"

#define BOOT_TRIPLE_STR		"triple"
#define BOOT_KBD_STR		"kbd"
#define BOOT_BIOS_STR		"bios"
#define BOOT_ACPI_STR		"acpi"
#define BOOT_EFI_STR		"efi"
#define BOOT_PCI_STR		"pci"
static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
const char *val;
switch (reboot_mode) {
case REBOOT_COLD:
val = REBOOT_COLD_STR;
break;
case REBOOT_WARM:
val = REBOOT_WARM_STR;
break;
case REBOOT_HARD:
val = REBOOT_HARD_STR;
break;
case REBOOT_SOFT:
val = REBOOT_SOFT_STR;
break;
case REBOOT_GPIO:
val = REBOOT_GPIO_STR;
break;
default:
val = REBOOT_UNDEFINED_STR;
}
return sysfs_emit(buf, "%s\n", val);
}
/*
 * /sys/kernel/reboot/mode (write): select the reboot mode by keyword
 * (matched as a prefix, same order as before).  Requires CAP_SYS_BOOT.
 */
static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	static const struct {
		const char *name;
		enum reboot_mode mode;
	} table[] = {
		{ REBOOT_COLD_STR, REBOOT_COLD },
		{ REBOOT_WARM_STR, REBOOT_WARM },
		{ REBOOT_HARD_STR, REBOOT_HARD },
		{ REBOOT_SOFT_STR, REBOOT_SOFT },
		{ REBOOT_GPIO_STR, REBOOT_GPIO },
	};
	size_t i;

	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (!strncmp(buf, table[i].name, strlen(table[i].name))) {
			reboot_mode = table[i].mode;
			/* Any explicit choice disables the DMI-based default. */
			reboot_default = 0;
			return count;
		}
	}

	return -EINVAL;
}
static struct kobj_attribute reboot_mode_attr = __ATTR_RW(mode);
#ifdef CONFIG_X86
/* /sys/kernel/reboot/force (read): report the reboot_force flag as 0/1. */
static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", reboot_force);
}

/*
 * /sys/kernel/reboot/force (write): set or clear the force flag from a
 * boolean string accepted by kstrtobool().  Requires CAP_SYS_BOOT.
 */
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	bool res;

	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	if (kstrtobool(buf, &res))
		return -EINVAL;

	/* Any explicit setting disables the DMI-based defaults. */
	reboot_default = 0;
	reboot_force = res;

	return count;
}
static struct kobj_attribute reboot_force_attr = __ATTR_RW(force);
/* /sys/kernel/reboot/type (read): report the selected reboot mechanism. */
static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	const char *val;

	switch (reboot_type) {
	case BOOT_TRIPLE:
		val = BOOT_TRIPLE_STR;
		break;

	case BOOT_KBD:
		val = BOOT_KBD_STR;
		break;

	case BOOT_BIOS:
		val = BOOT_BIOS_STR;
		break;

	case BOOT_ACPI:
		val = BOOT_ACPI_STR;
		break;

	case BOOT_EFI:
		val = BOOT_EFI_STR;
		break;

	case BOOT_CF9_FORCE:
		/* The CF9 (PCI reset register) mechanism is reported as "pci". */
		val = BOOT_PCI_STR;
		break;

	default:
		val = REBOOT_UNDEFINED_STR;
	}

	return sysfs_emit(buf, "%s\n", val);
}
/*
 * /sys/kernel/reboot/type (write): select the reboot mechanism by keyword
 * (matched as a prefix).  Requires CAP_SYS_BOOT.
 */
static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	if (!strncmp(buf, BOOT_TRIPLE_STR, strlen(BOOT_TRIPLE_STR)))
		reboot_type = BOOT_TRIPLE;
	else if (!strncmp(buf, BOOT_KBD_STR, strlen(BOOT_KBD_STR)))
		reboot_type = BOOT_KBD;
	else if (!strncmp(buf, BOOT_BIOS_STR, strlen(BOOT_BIOS_STR)))
		reboot_type = BOOT_BIOS;
	else if (!strncmp(buf, BOOT_ACPI_STR, strlen(BOOT_ACPI_STR)))
		reboot_type = BOOT_ACPI;
	else if (!strncmp(buf, BOOT_EFI_STR, strlen(BOOT_EFI_STR)))
		reboot_type = BOOT_EFI;
	else if (!strncmp(buf, BOOT_PCI_STR, strlen(BOOT_PCI_STR)))
		reboot_type = BOOT_CF9_FORCE;
	else
		return -EINVAL;

	/* Any explicit setting disables the DMI-based defaults. */
	reboot_default = 0;

	return count;
}
static struct kobj_attribute reboot_type_attr = __ATTR_RW(type);
#endif
#ifdef CONFIG_SMP
/* /sys/kernel/reboot/cpu (read): report the CPU selected for rebooting. */
static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", reboot_cpu);
}

/*
 * /sys/kernel/reboot/cpu (write): select the CPU used to perform the
 * reboot.  The value must be below num_possible_cpus(); requires
 * CAP_SYS_BOOT.
 */
static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int cpunum;
	int rc;

	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	rc = kstrtouint(buf, 0, &cpunum);
	if (rc)
		return rc;

	if (cpunum >= num_possible_cpus())
		return -ERANGE;

	/* Any explicit setting disables the DMI-based defaults. */
	reboot_default = 0;
	reboot_cpu = cpunum;

	return count;
}
static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu);
#endif
/*
 * Attributes exposed under /sys/kernel/reboot/.  "force" and "type" exist
 * only on x86, "cpu" only on SMP builds.
 */
static struct attribute *reboot_attrs[] = {
	&hw_protection_attr.attr,
	&reboot_mode_attr.attr,
#ifdef CONFIG_X86
	&reboot_force_attr.attr,
	&reboot_type_attr.attr,
#endif
#ifdef CONFIG_SMP
	&reboot_cpu_attr.attr,
#endif
	NULL,
};
#ifdef CONFIG_SYSCTL
/*
 * sysctl knobs under "kernel.": the userspace poweroff helper command and
 * the Ctrl-Alt-Del behaviour flag.
 */
static const struct ctl_table kern_reboot_table[] = {
	{
		.procname = "poweroff_cmd",
		.data = &poweroff_cmd,
		.maxlen = POWEROFF_CMD_PATH_LEN,
		.mode = 0644,
		.proc_handler = proc_dostring,
	},
	{
		.procname = "ctrl-alt-del",
		.data = &C_A_D,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
};

static void __init kernel_reboot_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_reboot_table);
}
#else
/* Without sysctl support, registering the table becomes a no-op. */
#define kernel_reboot_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
/* Group binding reboot_attrs to the "reboot" kobject created below. */
static const struct attribute_group reboot_attr_group = {
	.attrs = reboot_attrs,
};
/*
 * Create /sys/kernel/reboot, attach the attribute group and register the
 * reboot-related sysctls.  On group-creation failure the kobject reference
 * is dropped again and the error is propagated.
 */
static int __init reboot_ksysfs_init(void)
{
	struct kobject *kobj;
	int err;

	kobj = kobject_create_and_add("reboot", kernel_kobj);
	if (!kobj)
		return -ENOMEM;

	err = sysfs_create_group(kobj, &reboot_attr_group);
	if (err) {
		kobject_put(kobj);
		return err;
	}

	kernel_reboot_sysctls_init();

	return 0;
}
late_initcall(reboot_ksysfs_init);
#endif
---
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-syntax-select.html
---
# SELECT [sql-syntax-select]
```sql
SELECT [TOP [ count ] ] select_expr [, ...]
[ FROM table_name ]
[ WHERE condition ]
[ GROUP BY grouping_element [, ...] ]
[ HAVING condition]
[ ORDER BY expression [ ASC | DESC ] [, ...] ]
[ LIMIT [ count ] ]
[ PIVOT ( aggregation_expr FOR column IN ( value [ [ AS ] alias ] [, ...] ) ) ]
```
**Description**: Retrieves rows from zero or more tables.
The general execution of `SELECT` is as follows:
1. All elements in the `FROM` list are computed (each element can be base or alias table). Currently `FROM` supports exactly one table. Do note however that the table name can be a pattern (see [FROM Clause](#sql-syntax-from) below).
2. If the `WHERE` clause is specified, all rows that do not satisfy the condition are eliminated from the output. (See [WHERE Clause](#sql-syntax-where) below.)
3. If the `GROUP BY` clause is specified, or if there are aggregate function calls, the output is combined into groups of rows that match on one or more values, and the results of aggregate functions are computed. If the `HAVING` clause is present, it eliminates groups that do not satisfy the given condition. (See [GROUP BY Clause](#sql-syntax-group-by) and [HAVING Clause](#sql-syntax-having) below.)
4. The actual output rows are computed using the `SELECT` output expressions for each selected row or row group.
5. If the `ORDER BY` clause is specified, the returned rows are sorted in the specified order. If `ORDER BY` is not given, the rows are returned in whatever order the system finds fastest to produce. (See [ORDER BY Clause](#sql-syntax-order-by) below.)
6. If the `LIMIT` or `TOP` is specified (cannot use both in the same query), the `SELECT` statement only returns a subset of the result rows. (See [LIMIT Clause](#sql-syntax-limit) and [TOP clause](#sql-syntax-top) below.)
## `SELECT` List [sql-syntax-select-list]
`SELECT` list, namely the expressions between `SELECT` and `FROM`, represent the output rows of the `SELECT` statement.
As with a table, every output column of a `SELECT` has a name which can be either specified per column through the `AS` keyword :
```sql
SELECT 1 + 1 AS result;
result
---------------
2
```
Note: `AS` is an optional keyword; however, it improves readability and, in some cases, removes ambiguity from the query, which is why it is recommended to specify it.
Otherwise, a name is assigned automatically by Elasticsearch SQL if no name is given:
```sql
SELECT 1 + 1;
1 + 1
--------------
2
```
or if it’s a simple column reference, use its name as the column name:
```sql
SELECT emp_no FROM emp LIMIT 1;
emp_no
---------------
10001
```
## Wildcard [sql-syntax-select-wildcard]
To select all the columns in the source, one can use `*`:
```sql
SELECT * FROM emp LIMIT 1;
birth_date | emp_no | first_name | gender | hire_date | languages | last_name | name | salary
--------------------+---------------+---------------+---------------+------------------------+---------------+---------------+---------------+---------------
1953-09-02T00:00:00Z|10001 |Georgi |M |1986-06-26T00:00:00.000Z|2 |Facello |Georgi Facello |57305
```
which essentially returns all columns found (top-level fields only; sub-fields, such as multi-fields, are ignored).
## TOP [sql-syntax-top]
The `TOP` clause can be used before the [`SELECT` list](#sql-syntax-select-list) or the [wildcard](#sql-syntax-select-wildcard) to restrict (limit) the number of rows returned using the format:
```sql
SELECT TOP <count> <select list> ...
```
where
count
: is a positive integer or zero indicating the maximum **possible** number of results being returned (as there might be fewer matches than the limit). If `0` is specified, no results are returned.
```sql
SELECT TOP 2 first_name, last_name, emp_no FROM emp;
first_name | last_name | emp_no
---------------+---------------+---------------
Georgi |Facello |10001
Bezalel |Simmel |10002
```
::::{note}
[`TOP`](#sql-syntax-top) and [`LIMIT`](#sql-syntax-limit) cannot be used together in the same query and an error is returned otherwise.
::::
## FROM Clause [sql-syntax-from]
The `FROM` clause specifies one table for the `SELECT` and has the following syntax:
```sql
FROM table_name [ [ AS ] alias ]
```
where:
`table_name`
: Represents the name (optionally qualified) of an existing table, either a concrete or base one (actual index) or alias.
If the table name contains special SQL characters (such as `.`, `-`, `*`, etc.) use double quotes to escape them:
```sql
SELECT * FROM "emp" LIMIT 1;
birth_date | emp_no | first_name | gender | hire_date | languages | last_name | name | salary
--------------------+---------------+---------------+---------------+------------------------+---------------+---------------+---------------+---------------
1953-09-02T00:00:00Z|10001 |Georgi |M |1986-06-26T00:00:00.000Z|2 |Facello |Georgi Facello |57305
```
The name can be a [pattern](/reference/elasticsearch/rest-apis/api-conventions.md#api-multi-index) pointing to multiple indices (likely requiring quoting as mentioned above) with the restriction that **all** resolved concrete tables have **exact mapping**.
```sql
SELECT emp_no FROM "e*p" LIMIT 1;
emp_no
---------------
10001
```
{applies_to}`stack: preview` {applies_to}`serverless: preview` To run a [{{ccs}}](docs-content://explore-analyze/cross-cluster-search.md), specify a cluster name using the `<remote_cluster>:<target>` syntax, where `<remote_cluster>` maps to a SQL catalog (cluster) and `<target>` to a table (index or data stream). The `<remote_cluster>` supports wildcards (`*`) and `<target>` can be an [index pattern](/reference/query-languages/sql/sql-index-patterns.md).
```sql
SELECT emp_no FROM "my*cluster:*emp" LIMIT 1;
emp_no
---------------
10001
```
`alias`
: A substitute name for the `FROM` item containing the alias. An alias is used for brevity or to eliminate ambiguity. When an alias is provided, it completely hides the actual name of the table and must be used in its place.
```sql
SELECT e.emp_no FROM emp AS e LIMIT 1;
emp_no
-------------
10001
```
## WHERE Clause [sql-syntax-where]
The optional `WHERE` clause is used to filter rows from the query and has the following syntax:
```sql
WHERE condition
```
where:
`condition`
: Represents an expression that evaluates to a `boolean`. Only the rows that match the condition (to `true`) are returned.
```sql
SELECT last_name FROM emp WHERE emp_no = 10001;
last_name
---------------
Facello
```
## GROUP BY [sql-syntax-group-by]
The `GROUP BY` clause is used to divide the results into groups of rows on matching values from the designated columns. It has the following syntax:
```sql
GROUP BY grouping_element [, ...]
```
where:
`grouping_element`
: Represents an expression on which rows are being grouped *on*. It can be a column name, alias or ordinal number of a column or an arbitrary expression of column values.
A common, group by column name:
```sql
SELECT gender AS g FROM emp GROUP BY gender;
g
---------------
null
F
M
```
Grouping by output ordinal:
```sql
SELECT gender FROM emp GROUP BY 1;
gender
---------------
null
F
M
```
Grouping by alias:
```sql
SELECT gender AS g FROM emp GROUP BY g;
g
---------------
null
F
M
```
And grouping by column expression (typically used along-side an alias):
```sql
SELECT languages + 1 AS l FROM emp GROUP BY l;
l
---------------
null
2
3
4
5
6
```
Or a mixture of the above:
```sql
SELECT gender g, languages l, COUNT(*) c FROM "emp" GROUP BY g, l ORDER BY languages ASC, gender DESC;
g | l | c
---------------+---------------+---------------
M |null |7
F |null |3
M |1 |9
F |1 |4
null |1 |2
M |2 |11
F |2 |5
null |2 |3
M |3 |11
F |3 |6
M |4 |11
F |4 |6
null |4 |1
M |5 |8
F |5 |9
null |5 |4
```
When a `GROUP BY` clause is used in a `SELECT`, *all* output expressions must be either aggregate functions or expressions used for grouping or derivatives of (otherwise there would be more than one possible value to return for each ungrouped column).
To wit:
```sql
SELECT gender AS g, COUNT(*) AS c FROM emp GROUP BY gender;
g | c
---------------+---------------
null |10
F |33
M |57
```
Expressions over aggregates used in output:
```sql
SELECT gender AS g, ROUND((MIN(salary) / 100)) AS salary FROM emp GROUP BY gender;
g | salary
---------------+---------------
null |253
F |259
M |259
```
Multiple aggregates used:
```sql
SELECT gender AS g, KURTOSIS(salary) AS k, SKEWNESS(salary) AS s FROM emp GROUP BY gender;
g | k | s
---------------+------------------+-------------------
null |2.2215791166941923|-0.03373126000214023
F |1.7873117044424276|0.05504995122217512
M |2.280646181070106 |0.44302407229580243
```
::::{tip}
If custom bucketing is required, it can be achieved with the use of [`CASE`](/reference/query-languages/sql/sql-functions-conditional.md#sql-functions-conditional-case), as shown [here](/reference/query-languages/sql/sql-functions-conditional.md#sql-functions-conditional-case-groupby-custom-buckets).
::::
### Implicit Grouping [sql-syntax-group-by-implicit]
When an aggregation is used without an associated `GROUP BY`, an *implicit grouping* is applied, meaning all selected rows are considered to form a single default, or implicit group. As such, the query emits only a single row (as there is only a single group).
A common example is counting the number of records:
```sql
SELECT COUNT(*) AS count FROM emp;
count
---------------
100
```
Of course, multiple aggregations can be applied:
```sql
SELECT MIN(salary) AS min, MAX(salary) AS max, AVG(salary) AS avg, COUNT(*) AS count FROM emp;
min:i | max:i | avg:d | count:l
---------------+---------------+---------------+---------------
25324 |74999 |48248.55 |100
```
## HAVING [sql-syntax-having]
The `HAVING` clause can be used *only* along aggregate functions (and thus `GROUP BY`) to filter what groups are kept or not and has the following syntax:
```sql
HAVING condition
```
where:
`condition`
: Represents an expression that evaluates to a `boolean`. Only groups that match the condition (to `true`) are returned.
Both `WHERE` and `HAVING` are used for filtering however there are several significant differences between them:
1. `WHERE` works on individual **rows**, `HAVING` works on the **groups** created by `GROUP BY`
2. `WHERE` is evaluated **before** grouping, `HAVING` is evaluated **after** grouping
```sql
SELECT languages AS l, COUNT(*) AS c FROM emp GROUP BY l HAVING c BETWEEN 15 AND 20;
l | c
---------------+---------------
1 |15
2 |19
3 |17
4 |18
```
Furthermore, one can use multiple aggregate expressions inside `HAVING`, even ones that are not used in the output (`SELECT`):
```sql
SELECT MIN(salary) AS min, MAX(salary) AS max, MAX(salary) - MIN(salary) AS diff FROM emp GROUP BY languages HAVING diff - max % min > 0 AND AVG(salary) > 30000;
min | max | diff
---------------+---------------+---------------
28336 |74999 |46663
25976 |73717 |47741
29175 |73578 |44403
26436 |74970 |48534
27215 |74572 |47357
25324 |66817 |41493
```
### Implicit Grouping [sql-syntax-having-group-by-implicit]
As indicated above, it is possible to have a `HAVING` clause without a `GROUP BY`. In this case, the so-called [*implicit grouping*](#sql-syntax-group-by-implicit) is applied, meaning all selected rows are considered to form a single group and `HAVING` can be applied on any of the aggregate functions specified on this group. As such, the query emits only a single row (as there is only a single group) and `HAVING` condition returns either one row (the group) or zero if the condition fails.
In this example, `HAVING` matches:
```sql
SELECT MIN(salary) AS min, MAX(salary) AS max FROM emp HAVING min > 25000;
min | max
---------------+---------------
25324 |74999
```
## ORDER BY [sql-syntax-order-by]
The `ORDER BY` clause is used to sort the results of `SELECT` by one or more expressions:
```sql
ORDER BY expression [ ASC | DESC ] [, ...]
```
where:
`expression`
: Represents an input column, an output column or an ordinal number of the position (starting from one) of an output column. Additionally, ordering can be done based on the results *score*. The direction, if not specified, is by default `ASC` (ascending). Regardless of the ordering specified, null values are ordered last (at the end).
::::{important}
When used along-side, `GROUP BY` expression can point *only* to the columns used for grouping or aggregate functions.
::::
For example, the following query sorts by an arbitrary input field (`page_count`):
```sql
SELECT * FROM library ORDER BY page_count DESC LIMIT 5;
author | name | page_count | release_date
-----------------+--------------------+---------------+--------------------
Peter F. Hamilton|Pandora's Star |768 |2004-03-02T00:00:00Z
Vernor Vinge |A Fire Upon the Deep|613 |1992-06-01T00:00:00Z
Frank Herbert |Dune |604 |1965-06-01T00:00:00Z
Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00Z
James S.A. Corey |Leviathan Wakes |561 |2011-06-02T00:00:00Z
```
## Order By and Grouping [sql-syntax-order-by-grouping]
For queries that perform grouping, ordering can be applied either on the grouping columns (by default ascending) or on aggregate functions.
::::{note}
With `GROUP BY`, make sure the ordering targets the resulting group - applying it to individual elements inside the group will have no impact on the results since regardless of the order, values inside the group are aggregated.
::::
For example, to order groups simply indicate the grouping key:
```sql
SELECT gender AS g, COUNT(*) AS c FROM emp GROUP BY gender ORDER BY g DESC;
g | c
---------------+---------------
M |57
F |33
null |10
```
Multiple keys can be specified of course:
```sql
SELECT gender g, languages l, COUNT(*) c FROM "emp" GROUP BY g, l ORDER BY languages ASC, gender DESC;
g | l | c
---------------+---------------+---------------
M |null |7
F |null |3
M |1 |9
F |1 |4
null |1 |2
M |2 |11
F |2 |5
null |2 |3
M |3 |11
F |3 |6
M |4 |11
F |4 |6
null |4 |1
M |5 |8
F |5 |9
null |5 |4
```
Furthermore, it is possible to order groups based on aggregations of their values:
```sql
SELECT gender AS g, MIN(salary) AS salary FROM emp GROUP BY gender ORDER BY salary DESC;
g | salary
---------------+---------------
F |25976
M |25945
null |25324
```
::::{important}
Ordering by aggregation is possible for up to **10000** entries for memory consumption reasons. In cases where the results pass this threshold, use [`LIMIT`](#sql-syntax-limit) or [`TOP`](#sql-syntax-top) to reduce the number of results.
::::
## Order By Score [sql-syntax-order-by-score]
When doing full-text queries in the `WHERE` clause, results can be returned based on their [score](https://www.elastic.co/guide/en/elasticsearch/guide/2.x/relevance-intro.html) or *relevance* to the given query.
::::{note}
When doing multiple text queries in the `WHERE` clause then, their scores will be combined using the same rules as {{es}}'s [bool query](/reference/query-languages/query-dsl/query-dsl-bool-query.md).
::::
To sort based on the `score`, use the special function `SCORE()`:
```sql
SELECT SCORE(), * FROM library WHERE MATCH(name, 'dune') ORDER BY SCORE() DESC;
SCORE() | author | name | page_count | release_date
---------------+---------------+-------------------+---------------+--------------------
2.2886353 |Frank Herbert |Dune |604 |1965-06-01T00:00:00Z
1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00Z
1.6086556 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00Z
1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00Z
```
Note that you can return `SCORE()` by using a full-text search predicate in the `WHERE` clause. This is possible even if `SCORE()` is not used for sorting:
```sql
SELECT SCORE(), * FROM library WHERE MATCH(name, 'dune') ORDER BY page_count DESC;
SCORE() | author | name | page_count | release_date
---------------+---------------+-------------------+---------------+--------------------
2.2886353 |Frank Herbert |Dune |604 |1965-06-01T00:00:00Z
1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00Z
1.6086556 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00Z
1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00Z
```
NOTE: Trying to return `score` from a non full-text query will return the same value for all results, as all are equally relevant.
## LIMIT [sql-syntax-limit]
The `LIMIT` clause restricts (limits) the number of rows returned using the format:
```sql
LIMIT ( <count> | ALL )
```
where
count
: is a positive integer or zero indicating the maximum **possible** number of results being returned (as there might be fewer matches than the limit). If `0` is specified, no results are returned.
ALL
: indicates there is no limit and thus all results are being returned.
```sql
SELECT first_name, last_name, emp_no FROM emp LIMIT 1;
first_name | last_name | emp_no
---------------+---------------+---------------
Georgi |Facello |10001
```
::::{note}
[`TOP`](#sql-syntax-top) and [`LIMIT`](#sql-syntax-limit) cannot be used together in the same query and an error is returned otherwise.
::::
## PIVOT [sql-syntax-pivot]
The `PIVOT` clause performs a cross tabulation on the results of the query: it aggregates the results and rotates rows into columns. The rotation is done by turning unique values from one column in the expression - the pivoting column - into multiple columns in the output. The column values are aggregations on the remaining columns specified in the expression.
The clause can be broken down in three parts: the aggregation, the `FOR`- and the `IN`-subclause.
The `aggregation_expr` subclause specifies an expression containing an [aggregation function](/reference/query-languages/sql/sql-functions-aggs.md) to be applied on one of the source columns. Only one aggregation can be provided, currently.
The `FOR`-subclause specifies the pivoting column: the distinct values of this column will become the candidate set of values to be rotated.
The `IN`-subclause defines a filter: the intersection between the set provided here and the candidate set from the `FOR`-subclause will be rotated to become the headers of the columns appended to the end result. The filter can not be a subquery, one must provide here literal values, obtained in advance.
The pivoting operation will perform an implicit [GROUP BY](#sql-syntax-group-by) on all source columns not specified in the `PIVOT` clause, along with the values filtered through the `IN`-clause. Consider the following statement:
```sql
SELECT * FROM test_emp PIVOT (SUM(salary) FOR languages IN (1, 2)) LIMIT 5;
birth_date | emp_no | first_name | gender | hire_date | last_name | name | 1 | 2
---------------------+---------------+---------------+---------------+---------------------+---------------+------------------+---------------+---------------
null |10041 |Uri |F |1989-11-12 00:00:00.0|Lenart |Uri Lenart |56415 |null
null |10043 |Yishay |M |1990-10-20 00:00:00.0|Tzvieli |Yishay Tzvieli |34341 |null
null |10044 |Mingsen |F |1994-05-21 00:00:00.0|Casley |Mingsen Casley |39728 |null
1952-04-19 00:00:00.0|10009 |Sumant |F |1985-02-18 00:00:00.0|Peac |Sumant Peac |66174 |null
1953-01-07 00:00:00.0|10067 |Claudi |M |1987-03-04 00:00:00.0|Stavenow |Claudi Stavenow |null |52044
```
The query execution could logically be broken down in the following steps:
1. a [GROUP BY](#sql-syntax-group-by) on the column in the `FOR`-clause: `languages`;
2. the resulting values are filtered through the set provided in the `IN`-clause;
3. the now filtered column is pivoted to form the headers of the two additional columns appended to the result: `1` and `2`;
4. a [GROUP BY](#sql-syntax-group-by) on all columns of the source table `test_emp`, except `salary` (part of the aggregation subclause) and `languages` (part of the `FOR`-clause);
5. the values in these appended columns are the `SUM` aggregations of `salary`, grouped by the respective language.
The table-value expression to cross-tabulate can also be the result of a subquery:
```sql
SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('F'));
languages | 'F'
---------------+------------------
null |62140.666666666664
1 |47073.25
2 |50684.4
3 |53660.0
4 |49291.5
5 |46705.555555555555
```
The pivoted columns can be aliased (and quoting is required to accommodate white spaces), with or without a supporting `AS` token:
```sql
SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('M' AS "XY", 'F' "XX"));
languages | XY | XX
---------------+-----------------+------------------
null |48396.28571428572|62140.666666666664
1 |49767.22222222222|47073.25
2 |44103.90909090909|50684.4
3 |51741.90909090909|53660.0
4 |47058.90909090909|49291.5
5 |39052.875 |46705.555555555555
```
The resulting cross tabulation can further have the [ORDER BY](#sql-syntax-order-by) and [LIMIT](#sql-syntax-limit) clauses applied:
```sql
SELECT * FROM (SELECT languages, gender, salary FROM test_emp) PIVOT (AVG(salary) FOR gender IN ('F')) ORDER BY languages DESC LIMIT 4;
languages | 'F'
---------------+------------------
5 |46705.555555555555
4 |49291.5
3 |53660.0
2 |50684.4
``` | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/query-languages/sql/sql-syntax-select.md |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.transform;
import org.apache.commons.io.IOUtils;
import org.apache.tools.zip.ZipEntry;
import org.apache.tools.zip.ZipFile;
import org.gradle.api.artifacts.transform.TransformOutputs;
import org.gradle.api.logging.Logging;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Enumeration;
import java.util.function.Function;

import static org.elasticsearch.gradle.util.PermissionUtils.chmod;
public abstract class UnzipTransform implements UnpackTransform {
public void unpack(File zipFile, File targetDir, TransformOutputs outputs, boolean asFiletreeOutput) throws IOException {
Logging.getLogger(UnzipTransform.class)
.info("Unpacking " + zipFile.getName() + " using " + UnzipTransform.class.getSimpleName() + ".");
Function<String, Path> pathModifier = pathResolver();
ZipFile zip = new ZipFile(zipFile);
try {
Enumeration<ZipEntry> entries = zip.getEntries();
while (entries.hasMoreElements()) {
ZipEntry zipEntry = entries.nextElement();
Path child = pathModifier.apply(zipEntry.getName());
if (child == null) {
continue;
}
Path outputPath = targetDir.toPath().resolve(child);
Files.createDirectories(outputPath.getParent());
if (zipEntry.isDirectory()) {
outputPath.toFile().mkdirs();
chmod(outputPath, zipEntry.getUnixMode());
continue;
}
try (FileOutputStream outputStream = new FileOutputStream(outputPath.toFile())) {
IOUtils.copyLarge(zip.getInputStream(zipEntry), outputStream);
}
chmod(outputPath, zipEntry.getUnixMode());
if (asFiletreeOutput) {
outputs.file(outputPath.toFile());
}
}
} finally {
zip.close();
}
}
} | java | github | https://github.com/elastic/elasticsearch | build-tools/src/main/java/org/elasticsearch/gradle/transform/UnzipTransform.java |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: community-supported, "preview" status (the
# module interface may still change between releases).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_intf_policy_l2
short_description: Manage Layer 2 interface policies on Cisco ACI fabrics (l2:IfPol)
description:
- Manage Layer 2 interface policies on Cisco ACI fabrics.
- More information from the internal APIC class
I(l2:IfPol) at U(https://developer.cisco.com/media/mim-ref/MO-l2IfPol.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob McGill (@jmcgill298)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
options:
l2_policy:
description:
- The name of the Layer 2 interface policy.
required: yes
aliases: [ name ]
description:
description:
- The description of the Layer 2 interface policy.
aliases: [ descr ]
qinq:
description:
- Determines if QinQ is disabled or if the port should be considered a core or edge port.
choices: [ core, disabled, edge ]
default: disabled
vepa:
description:
- Determines if Virtual Ethernet Port Aggregator is disabled or enabled.
choices: [ disabled, enabled ]
default: disabled
vlan_scope:
description:
- The scope of the VLAN.
choices: [ global, portlocal ]
default: global
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- aci_intf_policy_l2:
hostname: '{{ hostname }}'
username: '{{ username }}'
password: '{{ password }}'
l2_policy: '{{ l2_policy }}'
vlan_scope: '{{ vlan_policy }}'
description: '{{ description }}'
'''
RETURN = r'''
#
'''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
# Mapping dicts are used to normalize the proposed data to what the APIC expects, which will keep diffs accurate
# (e.g. the user-facing keyword 'core' becomes the APIC value 'corePort').
QINQ_MAPPING = dict(core='corePort', disabled='disabled', edge='edgePort')
def main():
    """Entry point: create, update, delete or query a Layer 2 interface
    policy (l2:IfPol) on a Cisco ACI fabric via the APIC REST API."""
    # Copy the shared spec so repeated imports/invocations do not mutate the
    # module-level aci_argument_spec dict in place.
    argument_spec = aci_argument_spec.copy()
    argument_spec.update(
        l2_policy=dict(type='str', required=False, aliases=['name']),  # Not required for querying all policies
        description=dict(type='str', aliases=['descr']),
        vlan_scope=dict(type='str', choices=['global', 'portlocal']),  # No default provided on purpose
        qinq=dict(type='str', choices=['core', 'disabled', 'edge']),
        vepa=dict(type='str', choices=['disabled', 'enabled']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'),  # Deprecated starting from v2.6
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # l2_policy is mandatory except when querying all policies.
        required_if=[
            ['state', 'absent', ['l2_policy']],
            ['state', 'present', ['l2_policy']],
        ],
    )
    l2_policy = module.params['l2_policy']
    vlan_scope = module.params['vlan_scope']
    qinq = module.params['qinq']
    if qinq is not None:
        # Normalize to the literal values stored by the APIC, keeping diffs accurate.
        qinq = QINQ_MAPPING[qinq]
    vepa = module.params['vepa']
    description = module.params['description']
    state = module.params['state']
    aci = ACIModule(module)
    aci.construct_url(root_class='l2_policy')
    aci.get_existing()
    if state == 'present':
        # Filter out module parameters with null values
        aci.payload(
            aci_class='l2IfPol',
            class_config=dict(
                name=l2_policy,
                descr=description,
                vlanScope=vlan_scope,
                qinq=qinq,
                vepa=vepa,
            ),
        )
        # Generate config diff which will be used as POST request body
        aci.get_diff(aci_class='l2IfPol')
        # Submit changes if module not in check_mode and the proposed is different than existing
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()
    module.exit_json(**aci.result)


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
# -------------------------------------------------------------------------------
# Name: fffbuild
# Purpose:
#
# Author: Amir Geva
#
# Created: 07/04/2014
# Copyright: (c) Amir Geva 2014
# Licence: GPL V2
#-------------------------------------------------------------------------------
import sqlite3 as sq
import os
import sys
import time
def getWindowsDrives():
    """Return the fixed (non-removable) drive roots on a Windows host."""
    import win32api
    import win32file
    all_drives = win32api.GetLogicalDriveStrings().split('\000')[:-1]
    # Keep only fixed disks; skip CD-ROMs, network shares, removable media.
    return [d for d in all_drives
            if win32file.GetDriveType(d) == win32file.DRIVE_FIXED]
def getLinuxDrive():
    """Return the single root mount point scanned on Linux systems."""
    return ['/']
def getDrives():
    """Dispatch to the platform-specific drive enumeration; [] if unknown."""
    if sys.platform.startswith('linux'):
        return getLinuxDrive()
    if sys.platform == 'win32':
        return getWindowsDrives()
    print("Unknown platform '{}'".format(sys.platform))
    return []
def createTables(cur):
    """Create the t_dirs/t_files tables plus their indexes; if creation
    fails (tables survive from an earlier build) empty them instead."""
    try:
        cur.execute('CREATE TABLE t_dirs(id INTEGER PRIMARY KEY, path TEXT)')
        cur.execute('CREATE INDEX dirs_path_idx ON t_dirs(path);')
    except sq.Error:
        # Table already exists: wipe its rows and reuse it.
        cur.execute('DELETE from t_dirs')
    try:
        cur.execute(
            'CREATE TABLE t_files(file_id INTEGER PRIMARY KEY, name TEXT, dir INTEGER, ext TEXT, size INT, time INT)')
        for ddl in ('CREATE INDEX files_name_idx ON t_files(name);',
                    'CREATE INDEX files_ext_idx ON t_files(ext);',
                    'CREATE INDEX files_size_idx ON t_files(size);',
                    'CREATE INDEX files_time_idx ON t_files(time);'):
            cur.execute(ddl)
    except sq.Error:
        cur.execute('DELETE from t_files')
def writeDirs(cur, dirs):
    """Insert every scanned directory path into t_dirs.

    Uses DB-API parameter substitution instead of string formatting so
    paths containing quotes (e.g. /home/o'brien) can neither break the
    statement nor inject SQL. Row ids are the list positions, matching
    the dir index stored in t_files rows.
    """
    for idx, d in enumerate(dirs):
        try:
            cur.execute('INSERT INTO t_dirs (id,path) VALUES (?,?)', (idx, d))
        except sq.Error as e:
            print("Error : {}".format(e.args[0]))
def writeFiles(cur, files):
    """Insert file rows ([name, dir_idx, ext, size, mtime]) into t_files.

    Parameterized to survive quotes in file names; the original
    string-formatted SQL failed on any name containing a double quote.
    """
    for f in files:
        try:
            cur.execute(
                'INSERT INTO t_files (name,dir,ext,size,time) VALUES (?,?,?,?,?)',
                tuple(f))
        except sq.Error as e:
            print("Error : {}".format(e.args[0]))
def writeDB(dirs, files, dbDir):
    """Write the scanned dirs/files into sindex.db inside dbDir.

    The connection is now closed deterministically (the original leaked it
    on both the success and the error path).
    """
    con = None
    try:
        con = sq.connect(os.path.join(dbDir, 'sindex.db'))
        cur = con.cursor()
        createTables(cur)
        writeDirs(cur, dirs)
        writeFiles(cur, files)
        con.commit()
    except sq.Error as e:
        print("Error : {}".format(e.args[0]))
    finally:
        if con is not None:
            con.close()
def scan(excludes, dirs, files, base):
    """Walk `base`, appending each directory path to `dirs` and a
    [name, dir_index, ext, size, mtime] row per file to `files`.

    Fixes: removed the unused `total` counter, stopped shadowing the
    `base` parameter with the splitext result, and narrowed the bare
    `except Exception` to OSError (what getsize/getmtime raise).
    """
    for dirpath, dirnames, filelist in os.walk(base):
        # Prune excluded subtrees in place so os.walk never descends into them.
        dirnames[:] = [dn for dn in dirnames
                       if os.path.join(dirpath, dn) not in excludes]
        idx = len(dirs)
        # Progress line every 256 directories.
        if (idx & 255) == 0:
            print("{} {}".format(len(files), dirpath))
        dirs.append(dirpath)
        for f in filelist:
            fullpath = os.path.join(dirpath, f)
            ext = os.path.splitext(fullpath)[1]
            filesize = 0
            filetime = 0
            try:
                filesize = os.path.getsize(fullpath)
                filetime = os.path.getmtime(fullpath)
            except OSError:
                # Unreadable or vanished files are indexed with zero size/time.
                pass
            files.append([f, idx, ext, filesize, filetime])
def main():
    """Build the file index: read exclude paths from fffbuild.cfg (next to
    this script), scan every drive, and write the results to sindex.db."""
    start = time.time()
    drives = getDrives()
    excludes = []
    dbDir = os.path.dirname(os.path.abspath(__file__))
    # `with` closes the config file deterministically (the original leaked
    # the handle); lines look like "exclude=/some/path".
    with open(os.path.join(dbDir, "fffbuild.cfg")) as cfg:
        for line in cfg:
            if line.startswith("exclude="):
                excludes.append(line[8:].strip())
    print("Excluding:")
    print(excludes)
    dirs = []
    files = []
    for d in drives:
        if d not in excludes:
            scan(excludes, dirs, files, d)
    end = time.time()
    print("Scan took {} seconds".format(int(end - start)))
    start = end
    writeDB(dirs, files, dbDir)
    end = time.time()
    print("Database write took {} seconds".format(int(end - start)))


if __name__ == '__main__':
    main()
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !openbsd
package runtime
// osStackAlloc performs OS-specific initialization before s is used
// as stack memory.
func osStackAlloc(s *mspan) {
	// No-op: these platforms require no special preparation before a
	// span is used as stack memory.
}
// osStackFree undoes the effect of osStackAlloc before s is returned
// to the heap.
func osStackFree(s *mspan) {
	// No-op: nothing to undo because osStackAlloc did nothing.
}
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cache.aspectj;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.cache.config.CacheManagementConfigUtils;
import org.springframework.cache.jcache.config.AbstractJCacheConfiguration;
import org.springframework.cache.jcache.interceptor.JCacheOperationSource;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Role;
/**
* {@code @Configuration} class that registers the Spring infrastructure beans necessary
* to enable AspectJ-based annotation-driven cache management for standard JSR-107
* annotations.
*
* @author Stephane Nicoll
* @since 4.1
* @see org.springframework.cache.annotation.EnableCaching
* @see org.springframework.cache.annotation.CachingConfigurationSelector
*/
@Configuration(proxyBeanMethods = false)
@Role(BeanDefinition.ROLE_INFRASTRUCTURE)
public class AspectJJCacheConfiguration extends AbstractJCacheConfiguration {
	/**
	 * Expose the AspectJ-woven JSR-107 caching aspect as a Spring bean so its
	 * {@link JCacheOperationSource} can be injected.
	 * @param jCacheOperationSource the source of JSR-107 cache operation metadata
	 * @return the configured aspect singleton
	 */
	@Bean(name = CacheManagementConfigUtils.JCACHE_ASPECT_BEAN_NAME)
	@Role(BeanDefinition.ROLE_INFRASTRUCTURE)
	public JCacheCacheAspect cacheAspect(JCacheOperationSource jCacheOperationSource) {
		// The aspect instance is created by the AspectJ runtime, not by Spring:
		// retrieve the woven singleton via aspectOf() and configure it.
		JCacheCacheAspect cacheAspect = JCacheCacheAspect.aspectOf();
		cacheAspect.setCacheOperationSource(jCacheOperationSource);
		return cacheAspect;
	}
}
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.components.relationProvider;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.relationProvider.AbstractHasConflictingSignatureWithTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith")
@TestDataPath("$PROJECT_ROOT")
public class FirIdeNormalAnalysisSourceModuleHasConflictingSignatureWithTestGenerated extends AbstractHasConflictingSignatureWithTest {
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        // Fixed by the generator: FIR frontend, source module, normal session, IDE analysis mode.
        return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Ide
            )
        );
    }

    @Test
    public void testAllFilesPresentInHasConflictingSignatureWith() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }

    @Test
    @TestMetadata("contextParametersDifferentOrder.kt")
    public void testContextParametersDifferentOrder() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/contextParametersDifferentOrder.kt");
    }

    @Test
    @TestMetadata("differentFunctionNamesWithSameSignature.kt")
    public void testDifferentFunctionNamesWithSameSignature() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/differentFunctionNamesWithSameSignature.kt");
    }

    @Test
    @TestMetadata("differentNumberOfContextParameters.kt")
    public void testDifferentNumberOfContextParameters() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/differentNumberOfContextParameters.kt");
    }

    @Test
    @TestMetadata("dynamicParameterJS.kt")
    public void testDynamicParameterJS() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/dynamicParameterJS.kt");
    }

    @Test
    @TestMetadata("extensionAndRegularFunctions.kt")
    public void testExtensionAndRegularFunctions() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/extensionAndRegularFunctions.kt");
    }

    @Test
    @TestMetadata("functionAndMethod.kt")
    public void testFunctionAndMethod() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/functionAndMethod.kt");
    }

    @Test
    @TestMetadata("functionTypeErasure.kt")
    public void testFunctionTypeErasure() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/functionTypeErasure.kt");
    }

    @Test
    @TestMetadata("functionsWithTypeParameters.kt")
    public void testFunctionsWithTypeParameters() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/functionsWithTypeParameters.kt");
    }

    @Test
    @TestMetadata("functionsWithTypeParametersJS.kt")
    public void testFunctionsWithTypeParametersJS() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/functionsWithTypeParametersJS.kt");
    }

    @Test
    @TestMetadata("functionsWithTypeParametersNative.kt")
    public void testFunctionsWithTypeParametersNative() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/functionsWithTypeParametersNative.kt");
    }

    @Test
    @TestMetadata("genericsWithDifferentBounds.kt")
    public void testGenericsWithDifferentBounds() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/genericsWithDifferentBounds.kt");
    }

    @Test
    @TestMetadata("hiddenDeprecatedAndNonDeprecated.kt")
    public void testHiddenDeprecatedAndNonDeprecated() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/hiddenDeprecatedAndNonDeprecated.kt");
    }

    @Test
    @TestMetadata("localAndGlobalFunction.kt")
    public void testLocalAndGlobalFunction() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/localAndGlobalFunction.kt");
    }

    @Test
    @TestMetadata("lowPriorityAnnotation.kt")
    public void testLowPriorityAnnotation() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/lowPriorityAnnotation.kt");
    }

    @Test
    @TestMetadata("methodAndExtensionFunction.kt")
    public void testMethodAndExtensionFunction() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/methodAndExtensionFunction.kt");
    }

    @Test
    @TestMetadata("overloadedFunction.kt")
    public void testOverloadedFunction() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/overloadedFunction.kt");
    }

    @Test
    @TestMetadata("returnValueOverload.kt")
    public void testReturnValueOverload() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/returnValueOverload.kt");
    }

    @Test
    @TestMetadata("suspendAndNonSuspendContextParameter.kt")
    public void testSuspendAndNonSuspendContextParameter() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/suspendAndNonSuspendContextParameter.kt");
    }

    @Test
    @TestMetadata("suspendAndNonSuspendFunctions.kt")
    public void testSuspendAndNonSuspendFunctions() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/suspendAndNonSuspendFunctions.kt");
    }

    @Test
    @TestMetadata("suspendAndNonSuspendLambdaParameter.kt")
    public void testSuspendAndNonSuspendLambdaParameter() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/suspendAndNonSuspendLambdaParameter.kt");
    }

    @Test
    @TestMetadata("typealiasedContextParameter.kt")
    public void testTypealiasedContextParameter() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/typealiasedContextParameter.kt");
    }

    @Test
    @TestMetadata("varargParameterAndArrayParameter.kt")
    public void testVarargParameterAndArrayParameter() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/varargParameterAndArrayParameter.kt");
    }

    @Test
    @TestMetadata("varargParameterAndArrayParameterJS.kt")
    public void testVarargParameterAndArrayParameterJS() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/varargParameterAndArrayParameterJS.kt");
    }

    @Test
    @TestMetadata("varargParameterAndArrayParameterNative.kt")
    public void testVarargParameterAndArrayParameterNative() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/varargParameterAndArrayParameterNative.kt");
    }

    @Test
    @TestMetadata("withAndWithoutContextParameter.kt")
    public void testWithAndWithoutContextParameter() {
        runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/withAndWithoutContextParameter.kt");
    }

    // Test data that intentionally contains compilation errors.
    @Nested
    @TestMetadata("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/withErrors")
    @TestDataPath("$PROJECT_ROOT")
    public class WithErrors {
        @Test
        public void testAllFilesPresentInWithErrors() {
            KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/withErrors"), Pattern.compile("^(.+)\\.kt$"), null, true);
        }

        @Test
        @TestMetadata("errorValueParameters.kt")
        public void testErrorValueParameters() {
            runTest("analysis/analysis-api/testData/components/relationProvider/hasConflictingSignatureWith/withErrors/errorValueParameters.kt");
        }
    }
}
from __future__ import absolute_import, print_function, division
import itertools
from petl.compat import next, text_type
from petl.errors import ArgumentError
from petl.util.base import Table
def unpack(table, field, newfields=None, include_original=False, missing=None):
    """
    Unpack data values that are lists or tuples. E.g.::

        >>> import petl as etl
        >>> table1 = [['foo', 'bar'],
        ...           [1, ['a', 'b']],
        ...           [2, ['c', 'd']],
        ...           [3, ['e', 'f']]]
        >>> table2 = etl.unpack(table1, 'bar', ['baz', 'quux'])
        >>> table2
        +-----+-----+------+
        | foo | baz | quux |
        +=====+=====+======+
        |   1 | 'a' | 'b'  |
        +-----+-----+------+
        |   2 | 'c' | 'd'  |
        +-----+-----+------+
        |   3 | 'e' | 'f'  |
        +-----+-----+------+

    This function will attempt to unpack exactly the number of values as
    given by the number of new fields specified. If there are more values
    than new fields, remaining values will not be unpacked. If there are less
    values than new fields, `missing` values will be added.

    `field` may be a field name or a positional index; `newfields` may be a
    list/tuple of new field names or an int (number of values to unpack).
    Set `include_original` to keep the packed column in the output.

    See also :func:`petl.transform.unpacks.unpackdict`.

    """
    # Lazy: construction is cheap, work happens on iteration of the view.
    return UnpackView(table, field, newfields=newfields,
                      include_original=include_original, missing=missing)


# Expose as a method on Table so etl.unpack(t, ...) and t.unpack(...) agree.
Table.unpack = unpack
class UnpackView(Table):
    """Lazy table view implementing :func:`unpack`; each iteration re-reads
    the source table and expands the packed column via :func:`iterunpack`."""

    def __init__(self, source, field, newfields=None, include_original=False,
                 missing=None):
        self.source = source
        self.field = field
        self.newfields = newfields
        self.include_original = include_original
        self.missing = missing

    def __iter__(self):
        return iterunpack(self.source, self.field, self.newfields,
                          self.include_original, self.missing)
def iterunpack(source, field, newfields, include_original, missing):
    """Generator implementing :func:`unpack`: yields the output header then
    each data row with the packed value expanded into extra columns."""
    rows = iter(source)
    hdr = next(rows)
    fieldnames = [text_type(f) for f in hdr]

    # Resolve the target field (name or positional index) to an index.
    if field in fieldnames:
        fidx = fieldnames.index(field)
    elif isinstance(field, int) and field < len(fieldnames):
        fidx = field
        field = fieldnames[fidx]
    else:
        raise ArgumentError('field invalid: must be either field name or index')

    # Build the output header.
    outhdr = list(fieldnames)
    if not include_original:
        outhdr.remove(field)
    if isinstance(newfields, (list, tuple)):
        nunpack = len(newfields)
        outhdr.extend(newfields)
    elif isinstance(newfields, int):
        # Synthesize names like '<field>1', '<field>2', ...
        nunpack = newfields
        newfields = [text_type(field) + text_type(i + 1)
                     for i in range(newfields)]
        outhdr.extend(newfields)
    elif newfields is None:
        nunpack = 0
    else:
        raise ArgumentError('newfields argument must be list or tuple of field '
                            'names, or int (number of values to unpack)')
    yield tuple(outhdr)

    # Emit data rows, padding with `missing` when the packed value is short.
    for row in rows:
        packed = row[fidx]
        if include_original:
            outrow = list(row)
        else:
            outrow = [v for i, v in enumerate(row) if i != fidx]
        npacked = len(packed)
        if nunpack > 0:
            if npacked >= nunpack:
                extra = packed[:nunpack]
            else:
                extra = list(packed) + ([missing] * (nunpack - npacked))
            outrow.extend(extra)
        yield tuple(outrow)
def unpackdict(table, field, keys=None, includeoriginal=False,
               samplesize=1000, missing=None):
    """
    Unpack dictionary values into separate fields. E.g.::

        >>> import petl as etl
        >>> table1 = [['foo', 'bar'],
        ...           [1, {'baz': 'a', 'quux': 'b'}],
        ...           [2, {'baz': 'c', 'quux': 'd'}],
        ...           [3, {'baz': 'e', 'quux': 'f'}]]
        >>> table2 = etl.unpackdict(table1, 'bar')
        >>> table2
        +-----+-----+------+
        | foo | baz | quux |
        +=====+=====+======+
        |   1 | 'a' | 'b'  |
        +-----+-----+------+
        |   2 | 'c' | 'd'  |
        +-----+-----+------+
        |   3 | 'e' | 'f'  |
        +-----+-----+------+

    If `keys` is not given, the first `samplesize` rows are inspected to
    discover the union of dictionary keys (emitted in sorted order).
    Missing keys in a row are filled with `missing`.

    See also :func:`petl.transform.unpacks.unpack`.

    """
    # Lazy view; key discovery and row expansion happen on iteration.
    return UnpackDictView(table, field, keys=keys,
                          includeoriginal=includeoriginal,
                          samplesize=samplesize, missing=missing)


# Expose as a method on Table so etl.unpackdict(t, ...) and t.unpackdict(...) agree.
Table.unpackdict = unpackdict
class UnpackDictView(Table):
    """Lazy table view implementing :func:`unpackdict`; delegates the actual
    work to :func:`iterunpackdict` on each iteration."""

    def __init__(self, table, field, keys=None, includeoriginal=False,
                 samplesize=1000, missing=None):
        self.table = table
        self.field = field
        self.keys = keys
        self.includeoriginal = includeoriginal
        self.samplesize = samplesize
        self.missing = missing

    def __iter__(self):
        return iterunpackdict(self.table, self.field, self.keys,
                              self.includeoriginal, self.samplesize,
                              self.missing)
def iterunpackdict(table, field, keys, includeoriginal, samplesize, missing):
    """Generator implementing :func:`unpackdict`: yields the output header
    then each row with the dict in `field` expanded into key columns."""
    rows = iter(table)
    hdr = next(rows)
    fieldnames = [text_type(f) for f in hdr]
    fidx = fieldnames.index(field)
    outhdr = list(fieldnames)
    if not includeoriginal:
        del outhdr[fidx]

    if not keys:
        # No keys supplied: sniff them from a bounded sample of the data,
        # then push the sample back in front of the remaining rows.
        sample = list(itertools.islice(rows, samplesize))
        found = set()
        for row in sample:
            try:
                found |= set(row[fidx].keys())
            except AttributeError:
                pass
        rows = itertools.chain(sample, rows)
        keys = sorted(found)
    outhdr.extend(keys)
    yield tuple(outhdr)

    # Expand each row; anything that can't be indexed by key maps to `missing`.
    for row in rows:
        outrow = list(row)
        if not includeoriginal:
            del outrow[fidx]
        for key in keys:
            try:
                outrow.append(row[fidx][key])
            except (IndexError, KeyError, TypeError):
                outrow.append(missing)
        yield tuple(outrow)
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: hash.h
// -----------------------------------------------------------------------------
//
// This header file defines the Abseil `hash` library and the Abseil hashing
// framework. This framework consists of the following:
//
// * The `absl::Hash` functor, which is used to invoke the hasher within the
// Abseil hashing framework. `absl::Hash<T>` supports most basic types and
// a number of Abseil types out of the box.
// * `AbslHashValue`, an extension point that allows you to extend types to
// support Abseil hashing without requiring you to define a hashing
// algorithm.
// * `HashState`, a type-erased class which implements the manipulation of the
// hash state (H) itself; contains member functions `combine()`,
// `combine_contiguous()`, and `combine_unordered()`; and which you can use
// to contribute to an existing hash state when hashing your types.
//
// Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework
// provides most of its utility by abstracting away the hash algorithm (and its
// implementation) entirely. Instead, a type invokes the Abseil hashing
// framework by simply combining its state with the state of known, hashable
// types. Hashing of that combined state is separately done by `absl::Hash`.
//
// One should assume that a hash algorithm is chosen randomly at the start of
// each process. E.g., `absl::Hash<int>{}(9)` in one process and
// `absl::Hash<int>{}(9)` in another process are likely to differ.
//
// `absl::Hash` may also produce different values from different dynamically
// loaded libraries. For this reason, `absl::Hash` values must never cross
// boundaries in dynamically loaded libraries (including when used in types like
// hash containers.)
//
// `absl::Hash` is intended to strongly mix input bits with a target of passing
// an [Avalanche Test](https://en.wikipedia.org/wiki/Avalanche_effect).
//
// Example:
//
// // Suppose we have a class `Circle` for which we want to add hashing:
// class Circle {
// public:
// ...
// private:
// std::pair<int, int> center_;
// int radius_;
// };
//
// // To add hashing support to `Circle`, we simply need to add a free
// // (non-member) function `AbslHashValue()`, and return the combined hash
// // state of the existing hash state and the class state. You can add such a
// // free function using a friend declaration within the body of the class:
// class Circle {
// public:
// ...
// template <typename H>
// friend H AbslHashValue(H h, const Circle& c) {
// return H::combine(std::move(h), c.center_, c.radius_);
// }
// ...
// };
//
// For more information, see Adding Type Support to `absl::Hash` below.
//
#ifndef ABSL_HASH_HASH_H_
#define ABSL_HASH_HASH_H_
#include <tuple>
#include <utility>
#include "absl/functional/function_ref.h"
#include "absl/hash/internal/hash.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// -----------------------------------------------------------------------------
// `absl::Hash`
// -----------------------------------------------------------------------------
//
// `absl::Hash<T>` is a convenient general-purpose hash functor for any type `T`
// satisfying any of the following conditions (in order):
//
// * T is an arithmetic or pointer type
// * T defines an overload for `AbslHashValue(H, const T&)` for an arbitrary
// hash state `H`.
// - T defines a specialization of `std::hash<T>`
//
// `absl::Hash` intrinsically supports the following types:
//
// * All integral types (including bool)
// * All enum types
// * All floating-point types (although hashing them is discouraged)
// * All pointer types, including nullptr_t
// * std::pair<T1, T2>, if T1 and T2 are hashable
// * std::tuple<Ts...>, if all the Ts... are hashable
// * std::unique_ptr and std::shared_ptr
// * All string-like types including:
// * absl::Cord
// * std::string (as well as any instance of std::basic_string that
// uses one of {char, wchar_t, char16_t, char32_t} and its associated
// std::char_traits)
// * std::string_view (as well as any instance of std::basic_string_view
// that uses one of {char, wchar_t, char16_t, char32_t} and its associated
// std::char_traits)
// * All the standard sequence containers (provided the elements are hashable)
// * All the standard associative containers (provided the elements are
// hashable)
// * absl types such as the following:
// * absl::string_view
// * absl::uint128
// * absl::Time, absl::Duration, and absl::TimeZone
// * absl containers (provided the elements are hashable) such as the
// following:
// * absl::flat_hash_set, absl::node_hash_set, absl::btree_set
// * absl::flat_hash_map, absl::node_hash_map, absl::btree_map
// * absl::btree_multiset, absl::btree_multimap
// * absl::InlinedVector
// * absl::FixedArray
//
// When absl::Hash is used to hash an unordered container with a custom hash
// functor, the elements are hashed using default absl::Hash semantics, not
// the custom hash functor. This is consistent with the behavior of
// operator==() on unordered containers, which compares elements pairwise with
// operator==() rather than the custom equality functor. It is usually a
// mistake to use either operator==() or absl::Hash on unordered collections
// that use functors incompatible with operator==() equality.
//
// Note: the list above is not meant to be exhaustive. Additional type support
// may be added, in which case the above list will be updated.
//
// -----------------------------------------------------------------------------
// absl::Hash Invocation Evaluation
// -----------------------------------------------------------------------------
//
// When invoked, `absl::Hash<T>` searches for supplied hash functions in the
// following order:
//
// * Natively supported types out of the box (see above)
// * Types for which an `AbslHashValue()` overload is provided (such as
// user-defined types). See "Adding Type Support to `absl::Hash`" below.
// * Types which define a `std::hash<T>` specialization
//
// The fallback to legacy hash functions exists mainly for backwards
// compatibility. If you have a choice, prefer defining an `AbslHashValue`
// overload instead of specializing any legacy hash functors.
//
// -----------------------------------------------------------------------------
// The Hash State Concept, and using `HashState` for Type Erasure
// -----------------------------------------------------------------------------
//
// The `absl::Hash` framework relies on the Concept of a "hash state." Such a
// hash state is used in several places:
//
// * Within existing implementations of `absl::Hash<T>` to store the hashed
// state of an object. Note that it is up to the implementation how it stores
// such state. A hash table, for example, may mix the state to produce an
// integer value; a testing framework may simply hold a vector of that state.
// * Within implementations of `AbslHashValue()` used to extend user-defined
// types. (See "Adding Type Support to absl::Hash" below.)
// * Inside a `HashState`, providing type erasure for the concept of a hash
// state, which you can use to extend the `absl::Hash` framework for types
// that are otherwise difficult to extend using `AbslHashValue()`. (See the
// `HashState` class below.)
//
// The "hash state" concept contains three member functions for mixing hash
// state:
//
// * `H::combine(state, values...)`
//
// Combines an arbitrary number of values into a hash state, returning the
// updated state. Note that the existing hash state is move-only and must be
// passed by value.
//
// Each of the value types T must be hashable by H.
//
// NOTE:
//
// state = H::combine(std::move(state), value1, value2, value3);
//
// must be guaranteed to produce the same hash expansion as
//
// state = H::combine(std::move(state), value1);
// state = H::combine(std::move(state), value2);
// state = H::combine(std::move(state), value3);
//
// * `H::combine_contiguous(state, data, size)`
//
// Combines a contiguous array of `size` elements into a hash state,
// returning the updated state. Note that the existing hash state is
// move-only and must be passed by value.
//
// NOTE:
//
// state = H::combine_contiguous(std::move(state), data, size);
//
// need NOT be guaranteed to produce the same hash expansion as a loop
// (it may perform internal optimizations). If you need this guarantee, use a
// loop instead.
//
// * `H::combine_unordered(state, begin, end)`
//
// Combines a set of elements denoted by an iterator pair into a hash
// state, returning the updated state. Note that the existing hash
// state is move-only and must be passed by value.
//
// Unlike the other two methods, the hashing is order-independent.
// This can be used to hash unordered collections.
//
// -----------------------------------------------------------------------------
// Adding Type Support to `absl::Hash`
// -----------------------------------------------------------------------------
//
// To add support for your user-defined type, add a proper `AbslHashValue()`
// overload as a free (non-member) function. The overload will take an
// existing hash state and should combine that state with state from the type.
//
// Example:
//
// template <typename H>
// H AbslHashValue(H state, const MyType& v) {
// return H::combine(std::move(state), v.field1, ..., v.fieldN);
// }
//
// where `(field1, ..., fieldN)` are the members you would use on your
// `operator==` to define equality.
//
// Notice that `AbslHashValue` is not a class member, but an ordinary function.
// An `AbslHashValue` overload for a type should only be declared in the same
// file and namespace as said type. The proper `AbslHashValue` implementation
// for a given type will be discovered via ADL.
//
// Note: unlike `std::hash`, `absl::Hash` should never be specialized. It must
// only be extended by adding `AbslHashValue()` overloads.
//
template <typename T>
using Hash = absl::hash_internal::Hash<T>;
// HashOf
//
// absl::HashOf() is a helper that generates a hash from the values of its
// arguments. It dispatches to absl::Hash directly, as follows:
// * HashOf(t) == absl::Hash<T>{}(t)
// * HashOf(a, b, c) == HashOf(std::make_tuple(a, b, c))
//
// HashOf(a1, a2, ...) == HashOf(b1, b2, ...) is guaranteed when
// * The argument lists have pairwise identical C++ types
// * a1 == b1 && a2 == b2 && ...
//
// The requirement that the arguments match in both type and value is critical.
// It means that `a == b` does not necessarily imply `HashOf(a) == HashOf(b)` if
// `a` and `b` have different types. For example, `HashOf(2) != HashOf(2.0)`.
template <int&... ExplicitArgumentBarrier, typename... Types>
size_t HashOf(const Types&... values) {
auto tuple = std::tie(values...);
return absl::Hash<decltype(tuple)>{}(tuple);
}
// HashState
//
// A type erased version of the hash state concept, for use in user-defined
// `AbslHashValue` implementations that can't use templates (such as PImpl
// classes, virtual functions, etc.). The type erasure adds overhead so it
// should be avoided unless necessary.
//
// Note: This wrapper will only erase calls to
// combine_contiguous(H, const unsigned char*, size_t)
// RunCombineUnordered(H, CombinerF)
//
// All other calls will be handled internally and will not invoke overloads
// provided by the wrapped class.
//
// Users of this class should still define a template `AbslHashValue` function,
// but can use `absl::HashState::Create(&state)` to erase the type of the hash
// state and dispatch to their private hashing logic.
//
// This state can be used like any other hash state. In particular, you can call
// `HashState::combine()` and `HashState::combine_contiguous()` on it.
//
// Example:
//
// class Interface {
// public:
// template <typename H>
// friend H AbslHashValue(H state, const Interface& value) {
//       state = H::combine(std::move(state), std::type_index(typeid(value)));
// value.HashValue(absl::HashState::Create(&state));
// return state;
// }
// private:
// virtual void HashValue(absl::HashState state) const = 0;
// };
//
// class Impl : Interface {
// private:
// void HashValue(absl::HashState state) const override {
// absl::HashState::combine(std::move(state), v1_, v2_);
// }
// int v1_;
// std::string v2_;
// };
class HashState : public hash_internal::HashStateBase<HashState> {
public:
// HashState::Create()
//
// Create a new `HashState` instance that wraps `state`. All calls to
// `combine()` and `combine_contiguous()` on the new instance will be
// redirected to the original `state` object. The `state` object must outlive
// the `HashState` instance.
template <typename T>
static HashState Create(T* state) {
HashState s;
s.Init(state);
return s;
}
HashState(const HashState&) = delete;
HashState& operator=(const HashState&) = delete;
HashState(HashState&&) = default;
HashState& operator=(HashState&&) = default;
// HashState::combine()
//
// Combines an arbitrary number of values into a hash state, returning the
// updated state.
using HashState::HashStateBase::combine;
// HashState::combine_contiguous()
//
// Combines a contiguous array of `size` elements into a hash state, returning
// the updated state.
static HashState combine_contiguous(HashState hash_state,
const unsigned char* first, size_t size) {
hash_state.combine_contiguous_(hash_state.state_, first, size);
return hash_state;
}
using HashState::HashStateBase::combine_contiguous;
private:
HashState() = default;
friend class HashState::HashStateBase;
template <typename T>
static void CombineContiguousImpl(void* p, const unsigned char* first,
size_t size) {
T& state = *static_cast<T*>(p);
state = T::combine_contiguous(std::move(state), first, size);
}
template <typename T>
void Init(T* state) {
state_ = state;
combine_contiguous_ = &CombineContiguousImpl<T>;
run_combine_unordered_ = &RunCombineUnorderedImpl<T>;
}
template <typename HS>
struct CombineUnorderedInvoker {
template <typename T, typename ConsumerT>
void operator()(T inner_state, ConsumerT inner_cb) {
f(HashState::Create(&inner_state),
[&](HashState& inner_erased) { inner_cb(inner_erased.Real<T>()); });
}
absl::FunctionRef<void(HS, absl::FunctionRef<void(HS&)>)> f;
};
template <typename T>
static HashState RunCombineUnorderedImpl(
HashState state,
absl::FunctionRef<void(HashState, absl::FunctionRef<void(HashState&)>)>
f) {
// Note that this implementation assumes that inner_state and outer_state
// are the same type. This isn't true in the SpyHash case, but SpyHash
// types are move-convertible to each other, so this still works.
T& real_state = state.Real<T>();
real_state = T::RunCombineUnordered(
std::move(real_state), CombineUnorderedInvoker<HashState>{f});
return state;
}
template <typename CombinerT>
static HashState RunCombineUnordered(HashState state, CombinerT combiner) {
auto* run = state.run_combine_unordered_;
return run(std::move(state), std::ref(combiner));
}
// Do not erase an already erased state.
void Init(HashState* state) {
state_ = state->state_;
combine_contiguous_ = state->combine_contiguous_;
run_combine_unordered_ = state->run_combine_unordered_;
}
template <typename T>
T& Real() {
return *static_cast<T*>(state_);
}
void* state_;
void (*combine_contiguous_)(void*, const unsigned char*, size_t);
HashState (*run_combine_unordered_)(
HashState state,
absl::FunctionRef<void(HashState, absl::FunctionRef<void(HashState&)>)>);
};
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_HASH_HASH_H_ | c | github | https://github.com/mysql/mysql-server | extra/abseil/abseil-cpp-20230802.1/absl/hash/hash.h |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.