# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/bigtable_v2/proto/bigtable.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.bigtable_v2.proto import (
data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2,
)
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/bigtable_v2/proto/bigtable.proto",
package="google.bigtable.v2",
syntax="proto3",
serialized_options=_b(
"\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2"
),
serialized_pb=_b(
        '\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xaa\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"B\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\x80\x01\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"\x13\n\x11MutateRowResponse"\xc8\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xfd\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\x90\x01\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"D\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"E\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"F\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"M\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B\x9b\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR,
google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,
google_dot_rpc_dot_status__pb2.DESCRIPTOR,
],
)
_READROWSREQUEST = _descriptor.Descriptor(
name="ReadRowsRequest",
full_name="google.bigtable.v2.ReadRowsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.ReadRowsRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.ReadRowsRequest.app_profile_id",
index=1,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="rows",
full_name="google.bigtable.v2.ReadRowsRequest.rows",
index=2,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.bigtable.v2.ReadRowsRequest.filter",
index=3,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="rows_limit",
full_name="google.bigtable.v2.ReadRowsRequest.rows_limit",
index=4,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=200,
serialized_end=370,
)
_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor(
name="CellChunk",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_key",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="family_name",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.family_name",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="qualifier",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="timestamp_micros",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros",
index=3,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="labels",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.labels",
index=4,
number=5,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value",
index=5,
number=6,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value_size",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value_size",
index=6,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="reset_row",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row",
index=7,
number=8,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="commit_row",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row",
index=8,
number=9,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="row_status",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_status",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=488,
serialized_end=749,
)
_READROWSRESPONSE = _descriptor.Descriptor(
name="ReadRowsResponse",
full_name="google.bigtable.v2.ReadRowsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="chunks",
full_name="google.bigtable.v2.ReadRowsResponse.chunks",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_scanned_row_key",
full_name="google.bigtable.v2.ReadRowsResponse.last_scanned_row_key",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_READROWSRESPONSE_CELLCHUNK],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=373,
serialized_end=749,
)
_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor(
name="SampleRowKeysRequest",
full_name="google.bigtable.v2.SampleRowKeysRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.SampleRowKeysRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.SampleRowKeysRequest.app_profile_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=751,
serialized_end=817,
)
_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor(
name="SampleRowKeysResponse",
full_name="google.bigtable.v2.SampleRowKeysResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.SampleRowKeysResponse.row_key",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="offset_bytes",
full_name="google.bigtable.v2.SampleRowKeysResponse.offset_bytes",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=819,
serialized_end=881,
)
_MUTATEROWREQUEST = _descriptor.Descriptor(
name="MutateRowRequest",
full_name="google.bigtable.v2.MutateRowRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.MutateRowRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.MutateRowRequest.app_profile_id",
index=1,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.MutateRowRequest.row_key",
index=2,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mutations",
full_name="google.bigtable.v2.MutateRowRequest.mutations",
index=3,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=884,
serialized_end=1012,
)
_MUTATEROWRESPONSE = _descriptor.Descriptor(
name="MutateRowResponse",
full_name="google.bigtable.v2.MutateRowResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1014,
serialized_end=1033,
)
_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor(
name="Entry",
full_name="google.bigtable.v2.MutateRowsRequest.Entry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.MutateRowsRequest.Entry.row_key",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mutations",
full_name="google.bigtable.v2.MutateRowsRequest.Entry.mutations",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1163,
serialized_end=1236,
)
_MUTATEROWSREQUEST = _descriptor.Descriptor(
name="MutateRowsRequest",
full_name="google.bigtable.v2.MutateRowsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.MutateRowsRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.MutateRowsRequest.app_profile_id",
index=1,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="entries",
full_name="google.bigtable.v2.MutateRowsRequest.entries",
index=2,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_MUTATEROWSREQUEST_ENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1036,
serialized_end=1236,
)
_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor(
name="Entry",
full_name="google.bigtable.v2.MutateRowsResponse.Entry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="index",
full_name="google.bigtable.v2.MutateRowsResponse.Entry.index",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="status",
full_name="google.bigtable.v2.MutateRowsResponse.Entry.status",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1324,
serialized_end=1382,
)
_MUTATEROWSRESPONSE = _descriptor.Descriptor(
name="MutateRowsResponse",
full_name="google.bigtable.v2.MutateRowsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="entries",
full_name="google.bigtable.v2.MutateRowsResponse.entries",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[_MUTATEROWSRESPONSE_ENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1239,
serialized_end=1382,
)
_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor(
name="CheckAndMutateRowRequest",
full_name="google.bigtable.v2.CheckAndMutateRowRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id",
index=1,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.row_key",
index=2,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="predicate_filter",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter",
index=3,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="true_mutations",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.true_mutations",
index=4,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="false_mutations",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.false_mutations",
index=5,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1385,
serialized_end=1638,
)
_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor(
name="CheckAndMutateRowResponse",
full_name="google.bigtable.v2.CheckAndMutateRowResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="predicate_matched",
full_name="google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1640,
serialized_end=1694,
)
_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor(
name="ReadModifyWriteRowRequest",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id",
index=1,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest.row_key",
index=2,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="rules",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest.rules",
index=3,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1697,
serialized_end=1841,
)
_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor(
name="ReadModifyWriteRowResponse",
full_name="google.bigtable.v2.ReadModifyWriteRowResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="row",
full_name="google.bigtable.v2.ReadModifyWriteRowResponse.row",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1843,
serialized_end=1909,
)
_READROWSREQUEST.fields_by_name[
"rows"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET
_READROWSREQUEST.fields_by_name[
"filter"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER
_READROWSRESPONSE_CELLCHUNK.fields_by_name[
"family_name"
].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_READROWSRESPONSE_CELLCHUNK.fields_by_name[
"qualifier"
].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE
_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE
_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append(
_READROWSRESPONSE_CELLCHUNK.fields_by_name["reset_row"]
)
_READROWSRESPONSE_CELLCHUNK.fields_by_name[
"reset_row"
].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"]
_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append(
_READROWSRESPONSE_CELLCHUNK.fields_by_name["commit_row"]
)
_READROWSRESPONSE_CELLCHUNK.fields_by_name[
"commit_row"
].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"]
_READROWSRESPONSE.fields_by_name["chunks"].message_type = _READROWSRESPONSE_CELLCHUNK
_MUTATEROWREQUEST.fields_by_name[
"mutations"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION
_MUTATEROWSREQUEST_ENTRY.fields_by_name[
"mutations"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION
_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST
_MUTATEROWSREQUEST.fields_by_name["entries"].message_type = _MUTATEROWSREQUEST_ENTRY
_MUTATEROWSRESPONSE_ENTRY.fields_by_name[
"status"
].message_type = google_dot_rpc_dot_status__pb2._STATUS
_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE
_MUTATEROWSRESPONSE.fields_by_name["entries"].message_type = _MUTATEROWSRESPONSE_ENTRY
_CHECKANDMUTATEROWREQUEST.fields_by_name[
"predicate_filter"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER
_CHECKANDMUTATEROWREQUEST.fields_by_name[
"true_mutations"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION
_CHECKANDMUTATEROWREQUEST.fields_by_name[
"false_mutations"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION
_READMODIFYWRITEROWREQUEST.fields_by_name[
"rules"
].message_type = (
google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE
)
_READMODIFYWRITEROWRESPONSE.fields_by_name[
"row"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW
DESCRIPTOR.message_types_by_name["ReadRowsRequest"] = _READROWSREQUEST
DESCRIPTOR.message_types_by_name["ReadRowsResponse"] = _READROWSRESPONSE
DESCRIPTOR.message_types_by_name["SampleRowKeysRequest"] = _SAMPLEROWKEYSREQUEST
DESCRIPTOR.message_types_by_name["SampleRowKeysResponse"] = _SAMPLEROWKEYSRESPONSE
DESCRIPTOR.message_types_by_name["MutateRowRequest"] = _MUTATEROWREQUEST
DESCRIPTOR.message_types_by_name["MutateRowResponse"] = _MUTATEROWRESPONSE
DESCRIPTOR.message_types_by_name["MutateRowsRequest"] = _MUTATEROWSREQUEST
DESCRIPTOR.message_types_by_name["MutateRowsResponse"] = _MUTATEROWSRESPONSE
DESCRIPTOR.message_types_by_name["CheckAndMutateRowRequest"] = _CHECKANDMUTATEROWREQUEST
DESCRIPTOR.message_types_by_name[
"CheckAndMutateRowResponse"
] = _CHECKANDMUTATEROWRESPONSE
DESCRIPTOR.message_types_by_name[
"ReadModifyWriteRowRequest"
] = _READMODIFYWRITEROWREQUEST
DESCRIPTOR.message_types_by_name[
"ReadModifyWriteRowResponse"
] = _READMODIFYWRITEROWRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ReadRowsRequest = _reflection.GeneratedProtocolMessageType(
"ReadRowsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_READROWSREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.ReadRows.
Attributes:
table_name:
The unique name of the table from which to read. Values are of
the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
rows:
The row keys and/or ranges to read. If not specified, reads
from all rows.
filter:
The filter to apply to the contents of the specified row(s).
If unset, reads the entirety of each row.
rows_limit:
The read will terminate after committing to N rows' worth of
results. The default (zero) is to return all results.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest)
),
)
_sym_db.RegisterMessage(ReadRowsRequest)
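# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not emitted by protoc): a minimal
# ReadRowsRequest built from the fields documented in the docstring above.
# The table resource name is a hypothetical placeholder; constructing the
# message locally has no side effects.
# ---------------------------------------------------------------------------
_EXAMPLE_READ_ROWS_REQUEST = ReadRowsRequest(
    table_name="projects/<project>/instances/<instance>/tables/<table>",
    rows_limit=10,  # stop after committing 10 rows' worth of results
)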
ReadRowsResponse = _reflection.GeneratedProtocolMessageType(
"ReadRowsResponse",
(_message.Message,),
dict(
CellChunk=_reflection.GeneratedProtocolMessageType(
"CellChunk",
(_message.Message,),
dict(
DESCRIPTOR=_READROWSRESPONSE_CELLCHUNK,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Specifies a piece of a row's contents returned as part of the read
response stream.
Attributes:
row_key:
The row key for this chunk of data. If the row key is empty,
this CellChunk is a continuation of the same row as the
previous CellChunk in the response stream, even if that
CellChunk was in a previous ReadRowsResponse message.
family_name:
The column family name for this chunk of data. If this message
is not present this CellChunk is a continuation of the same
column family as the previous CellChunk. The empty string can
occur as a column family name in a response so clients must
check explicitly for the presence of this message, not just
for ``family_name.value`` being non-empty.
qualifier:
The column qualifier for this chunk of data. If this message
is not present, this CellChunk is a continuation of the same
column as the previous CellChunk. Column qualifiers may be
empty so clients must check for the presence of this message,
not just for ``qualifier.value`` being non-empty.
timestamp_micros:
The cell's stored timestamp, which also uniquely identifies it
within its column. Values are always expressed in
microseconds, but individual tables may set a coarser
granularity to further restrict the allowed values. For
example, a table which specifies millisecond granularity will
only allow values of ``timestamp_micros`` which are multiples
of 1000. Timestamps are only set in the first CellChunk per
cell (for cells split into multiple chunks).
labels:
Labels applied to the cell by a
[RowFilter][google.bigtable.v2.RowFilter]. Labels are only set
on the first CellChunk per cell.
value:
The value stored in the cell. Cell values can be split across
multiple CellChunks. In that case only the value field will be
set in CellChunks after the first: the timestamp and labels
will only be present in the first CellChunk, even if the first
CellChunk came in a previous ReadRowsResponse.
value_size:
If this CellChunk is part of a chunked cell value and this is
not the final chunk of that cell, value\_size will be set to
the total length of the cell value. The client can use this
size to pre-allocate memory to hold the full cell value.
reset_row:
Indicates that the client should drop all previous chunks for
``row_key``, as it will be re-read from the beginning.
commit_row:
Indicates that the client can safely process all previous
chunks for ``row_key``, as its data has been fully read.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk)
),
),
DESCRIPTOR=_READROWSRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.ReadRows.
Attributes:
last_scanned_row_key:
Optionally the server might return the row key of the last row
it has scanned. The client can use this to construct a more
efficient retry request if needed: any row keys or portions of
ranges less than this row key can be dropped from the request.
This is primarily useful for cases where the server has read a
lot of data that was filtered out since the last committed row
key, allowing the client to skip that work on a retry.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse)
),
)
_sym_db.RegisterMessage(ReadRowsResponse)
_sym_db.RegisterMessage(ReadRowsResponse.CellChunk)
SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType(
"SampleRowKeysRequest",
(_message.Message,),
dict(
DESCRIPTOR=_SAMPLEROWKEYSREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.SampleRowKeys.
Attributes:
table_name:
The unique name of the table from which to sample row keys.
Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest)
),
)
_sym_db.RegisterMessage(SampleRowKeysRequest)
SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType(
"SampleRowKeysResponse",
(_message.Message,),
dict(
DESCRIPTOR=_SAMPLEROWKEYSRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.SampleRowKeys.
Attributes:
row_key:
Sorted streamed sequence of sample row keys in the table. The
table might have contents before the first row key in the list
and after the last one, but a key containing the empty string
indicates "end of table" and will be the last response given,
if present. Note that row keys in this list may not have ever
been written to or read from, and users should therefore not
make any assumptions about the row key structure that are
specific to their use case.
offset_bytes:
Approximate total storage space used by all rows in the table
which precede ``row_key``. Buffering the contents of all rows
between two subsequent samples would require space roughly
equal to the difference in their ``offset_bytes`` fields.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse)
),
)
_sym_db.RegisterMessage(SampleRowKeysResponse)
MutateRowRequest = _reflection.GeneratedProtocolMessageType(
"MutateRowRequest",
(_message.Message,),
dict(
DESCRIPTOR=_MUTATEROWREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.MutateRow.
Attributes:
table_name:
The unique name of the table to which the mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
row_key:
The key of the row to which the mutation should be applied.
mutations:
Changes to be atomically applied to the specified row. Entries
are applied in order, meaning that earlier mutations can be
masked by later ones. Must contain at least one entry and at
most 100000.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest)
),
)
_sym_db.RegisterMessage(MutateRowRequest)
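# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not emitted by protoc): a
# MutateRowRequest carrying a single SetCell mutation, reusing the data_pb2
# module already imported above under its generated alias. The table path,
# column family, qualifier, and value are hypothetical placeholders.
# ---------------------------------------------------------------------------
_data_pb2 = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2
_EXAMPLE_MUTATE_ROW_REQUEST = MutateRowRequest(
    table_name="projects/<project>/instances/<instance>/tables/<table>",
    row_key=b"row-0001",
    mutations=[
        _data_pb2.Mutation(
            set_cell=_data_pb2.Mutation.SetCell(
                family_name="cf1",
                column_qualifier=b"greeting",
                timestamp_micros=-1,  # -1 lets the server assign the timestamp
                value=b"hello",
            )
        )
    ],
)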
MutateRowResponse = _reflection.GeneratedProtocolMessageType(
"MutateRowResponse",
(_message.Message,),
dict(
DESCRIPTOR=_MUTATEROWRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.MutateRow.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse)
),
)
_sym_db.RegisterMessage(MutateRowResponse)
MutateRowsRequest = _reflection.GeneratedProtocolMessageType(
"MutateRowsRequest",
(_message.Message,),
dict(
Entry=_reflection.GeneratedProtocolMessageType(
"Entry",
(_message.Message,),
dict(
DESCRIPTOR=_MUTATEROWSREQUEST_ENTRY,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Attributes:
row_key:
The key of the row to which the ``mutations`` should be
applied.
mutations:
Changes to be atomically applied to the specified row.
Mutations are applied in order, meaning that earlier mutations
can be masked by later ones. You must specify at least one
mutation.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry)
),
),
DESCRIPTOR=_MUTATEROWSREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for BigtableService.MutateRows.
Attributes:
table_name:
The unique name of the table to which the mutations should be
applied.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
entries:
The row keys and corresponding mutations to be applied in
bulk. Each entry is applied as an atomic mutation, but the
entries may be applied in arbitrary order (even between
entries for the same row). At least one entry must be
specified, and in total the entries can contain at most 100000
mutations.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest)
),
)
_sym_db.RegisterMessage(MutateRowsRequest)
_sym_db.RegisterMessage(MutateRowsRequest.Entry)
MutateRowsResponse = _reflection.GeneratedProtocolMessageType(
"MutateRowsResponse",
(_message.Message,),
dict(
Entry=_reflection.GeneratedProtocolMessageType(
"Entry",
(_message.Message,),
dict(
DESCRIPTOR=_MUTATEROWSRESPONSE_ENTRY,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Attributes:
index:
The index into the original request's ``entries`` list of the
Entry for which a result is being reported.
status:
The result of the request Entry identified by ``index``.
Depending on how requests are batched during execution, it is
possible for one Entry to fail due to an error with another
Entry. In the event that this occurs, the same error will be
reported for both entries.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry)
),
),
DESCRIPTOR=_MUTATEROWSRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for BigtableService.MutateRows.
Attributes:
entries:
One or more results for Entries from the batch request.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse)
),
)
_sym_db.RegisterMessage(MutateRowsResponse)
_sym_db.RegisterMessage(MutateRowsResponse.Entry)
CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType(
"CheckAndMutateRowRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CHECKANDMUTATEROWREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.CheckAndMutateRow.
Attributes:
table_name:
The unique name of the table to which the conditional mutation
should be applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
row_key:
The key of the row to which the conditional mutation should be
applied.
predicate_filter:
The filter to be applied to the contents of the specified row.
Depending on whether or not any results are yielded, either
``true_mutations`` or ``false_mutations`` will be executed. If
unset, checks that the row contains any values at all.
true_mutations:
Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that
earlier mutations can be masked by later ones. Must contain at
least one entry if ``false_mutations`` is empty, and at most
100000.
false_mutations:
Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that
earlier mutations can be masked by later ones. Must contain at
least one entry if ``true_mutations`` is empty, and at most
100000.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest)
),
)
_sym_db.RegisterMessage(CheckAndMutateRowRequest)
CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType(
"CheckAndMutateRowResponse",
(_message.Message,),
dict(
DESCRIPTOR=_CHECKANDMUTATEROWRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.CheckAndMutateRow.
Attributes:
predicate_matched:
Whether or not the request's ``predicate_filter`` yielded any
results for the specified row.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse)
),
)
_sym_db.RegisterMessage(CheckAndMutateRowResponse)
ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType(
"ReadModifyWriteRowRequest",
(_message.Message,),
dict(
DESCRIPTOR=_READMODIFYWRITEROWREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.ReadModifyWriteRow.
Attributes:
table_name:
The unique name of the table to which the read/modify/write
rules should be applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
row_key:
The key of the row to which the read/modify/write rules should
be applied.
rules:
Rules specifying how the specified row's contents are to be
transformed into writes. Entries are applied in order, meaning
that earlier rules will affect the results of later ones.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest)
),
)
_sym_db.RegisterMessage(ReadModifyWriteRowRequest)
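# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not emitted by protoc): a
# ReadModifyWriteRowRequest combining an increment rule and an append rule;
# as documented above, rules are applied in order. Cell coordinates are
# hypothetical placeholders, and _data_pb2 is the short alias introduced in
# the MutateRowRequest sketch earlier in this file.
# ---------------------------------------------------------------------------
_EXAMPLE_READ_MODIFY_WRITE_REQUEST = ReadModifyWriteRowRequest(
    table_name="projects/<project>/instances/<instance>/tables/<table>",
    row_key=b"row-0001",
    rules=[
        _data_pb2.ReadModifyWriteRule(
            family_name="cf1", column_qualifier=b"counter", increment_amount=1
        ),
        _data_pb2.ReadModifyWriteRule(
            family_name="cf1", column_qualifier=b"log", append_value=b"|event"
        ),
    ],
)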
ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType(
"ReadModifyWriteRowResponse",
(_message.Message,),
dict(
DESCRIPTOR=_READMODIFYWRITEROWRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.ReadModifyWriteRow.
Attributes:
row:
A Row containing the new contents of all cells modified by the
request.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse)
),
)
_sym_db.RegisterMessage(ReadModifyWriteRowResponse)
DESCRIPTOR._options = None
_BIGTABLE = _descriptor.ServiceDescriptor(
name="Bigtable",
full_name="google.bigtable.v2.Bigtable",
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=1912,
serialized_end=2981,
methods=[
_descriptor.MethodDescriptor(
name="ReadRows",
full_name="google.bigtable.v2.Bigtable.ReadRows",
index=0,
containing_service=None,
input_type=_READROWSREQUEST,
output_type=_READROWSRESPONSE,
serialized_options=_b(
'\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*'
),
),
_descriptor.MethodDescriptor(
name="SampleRowKeys",
full_name="google.bigtable.v2.Bigtable.SampleRowKeys",
index=1,
containing_service=None,
input_type=_SAMPLEROWKEYSREQUEST,
output_type=_SAMPLEROWKEYSRESPONSE,
serialized_options=_b(
"\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys"
),
),
_descriptor.MethodDescriptor(
name="MutateRow",
full_name="google.bigtable.v2.Bigtable.MutateRow",
index=2,
containing_service=None,
input_type=_MUTATEROWREQUEST,
output_type=_MUTATEROWRESPONSE,
serialized_options=_b(
'\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*'
),
),
_descriptor.MethodDescriptor(
name="MutateRows",
full_name="google.bigtable.v2.Bigtable.MutateRows",
index=3,
containing_service=None,
input_type=_MUTATEROWSREQUEST,
output_type=_MUTATEROWSRESPONSE,
serialized_options=_b(
'\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*'
),
),
_descriptor.MethodDescriptor(
name="CheckAndMutateRow",
full_name="google.bigtable.v2.Bigtable.CheckAndMutateRow",
index=4,
containing_service=None,
input_type=_CHECKANDMUTATEROWREQUEST,
output_type=_CHECKANDMUTATEROWRESPONSE,
serialized_options=_b(
'\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*'
),
),
_descriptor.MethodDescriptor(
name="ReadModifyWriteRow",
full_name="google.bigtable.v2.Bigtable.ReadModifyWriteRow",
index=5,
containing_service=None,
input_type=_READMODIFYWRITEROWREQUEST,
output_type=_READMODIFYWRITEROWRESPONSE,
serialized_options=_b(
'\202\323\344\223\002H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*'
),
),
],
)
_sym_db.RegisterServiceDescriptor(_BIGTABLE)
DESCRIPTOR.services_by_name["Bigtable"] = _BIGTABLE
# @@protoc_insertion_point(module_scope)
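# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not emitted by protoc): running this
# module directly performs a serialize/parse round trip with one of the
# generated message classes; importing the module is unaffected.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _request = ReadRowsRequest(
        table_name="projects/<project>/instances/<instance>/tables/<table>",
        rows_limit=10,
    )
    _wire_bytes = _request.SerializeToString()
    print(ReadRowsRequest.FromString(_wire_bytes))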
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value_size",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value_size",
index=6,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="reset_row",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row",
index=7,
number=8,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="commit_row",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row",
index=8,
number=9,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="row_status",
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_status",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=488,
serialized_end=749,
)
_READROWSRESPONSE = _descriptor.Descriptor(
name="ReadRowsResponse",
full_name="google.bigtable.v2.ReadRowsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="chunks",
full_name="google.bigtable.v2.ReadRowsResponse.chunks",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_scanned_row_key",
full_name="google.bigtable.v2.ReadRowsResponse.last_scanned_row_key",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_READROWSRESPONSE_CELLCHUNK],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=373,
serialized_end=749,
)
_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor(
name="SampleRowKeysRequest",
full_name="google.bigtable.v2.SampleRowKeysRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.SampleRowKeysRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.SampleRowKeysRequest.app_profile_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=751,
serialized_end=817,
)
_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor(
name="SampleRowKeysResponse",
full_name="google.bigtable.v2.SampleRowKeysResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.SampleRowKeysResponse.row_key",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="offset_bytes",
full_name="google.bigtable.v2.SampleRowKeysResponse.offset_bytes",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=819,
serialized_end=881,
)
_MUTATEROWREQUEST = _descriptor.Descriptor(
name="MutateRowRequest",
full_name="google.bigtable.v2.MutateRowRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.MutateRowRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.MutateRowRequest.app_profile_id",
index=1,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.MutateRowRequest.row_key",
index=2,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mutations",
full_name="google.bigtable.v2.MutateRowRequest.mutations",
index=3,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=884,
serialized_end=1012,
)
_MUTATEROWRESPONSE = _descriptor.Descriptor(
name="MutateRowResponse",
full_name="google.bigtable.v2.MutateRowResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1014,
serialized_end=1033,
)
_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor(
name="Entry",
full_name="google.bigtable.v2.MutateRowsRequest.Entry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.MutateRowsRequest.Entry.row_key",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mutations",
full_name="google.bigtable.v2.MutateRowsRequest.Entry.mutations",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1163,
serialized_end=1236,
)
_MUTATEROWSREQUEST = _descriptor.Descriptor(
name="MutateRowsRequest",
full_name="google.bigtable.v2.MutateRowsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.MutateRowsRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.MutateRowsRequest.app_profile_id",
index=1,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="entries",
full_name="google.bigtable.v2.MutateRowsRequest.entries",
index=2,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_MUTATEROWSREQUEST_ENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1036,
serialized_end=1236,
)
_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor(
name="Entry",
full_name="google.bigtable.v2.MutateRowsResponse.Entry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="index",
full_name="google.bigtable.v2.MutateRowsResponse.Entry.index",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="status",
full_name="google.bigtable.v2.MutateRowsResponse.Entry.status",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1324,
serialized_end=1382,
)
_MUTATEROWSRESPONSE = _descriptor.Descriptor(
name="MutateRowsResponse",
full_name="google.bigtable.v2.MutateRowsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="entries",
full_name="google.bigtable.v2.MutateRowsResponse.entries",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[_MUTATEROWSRESPONSE_ENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1239,
serialized_end=1382,
)
_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor(
name="CheckAndMutateRowRequest",
full_name="google.bigtable.v2.CheckAndMutateRowRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id",
index=1,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.row_key",
index=2,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="predicate_filter",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter",
index=3,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="true_mutations",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.true_mutations",
index=4,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="false_mutations",
full_name="google.bigtable.v2.CheckAndMutateRowRequest.false_mutations",
index=5,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1385,
serialized_end=1638,
)
_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor(
name="CheckAndMutateRowResponse",
full_name="google.bigtable.v2.CheckAndMutateRowResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="predicate_matched",
full_name="google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1640,
serialized_end=1694,
)
_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor(
name="ReadModifyWriteRowRequest",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="table_name",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest.table_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_profile_id",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id",
index=1,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="row_key",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest.row_key",
index=2,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="rules",
full_name="google.bigtable.v2.ReadModifyWriteRowRequest.rules",
index=3,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1697,
serialized_end=1841,
)
_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor(
name="ReadModifyWriteRowResponse",
full_name="google.bigtable.v2.ReadModifyWriteRowResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="row",
full_name="google.bigtable.v2.ReadModifyWriteRowResponse.row",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1843,
serialized_end=1909,
)
_READROWSREQUEST.fields_by_name[
"rows"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET
_READROWSREQUEST.fields_by_name[
"filter"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER
_READROWSRESPONSE_CELLCHUNK.fields_by_name[
"family_name"
].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_READROWSRESPONSE_CELLCHUNK.fields_by_name[
"qualifier"
].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE
_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE
_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append(
_READROWSRESPONSE_CELLCHUNK.fields_by_name["reset_row"]
)
_READROWSRESPONSE_CELLCHUNK.fields_by_name[
"reset_row"
].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"]
_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append(
_READROWSRESPONSE_CELLCHUNK.fields_by_name["commit_row"]
)
_READROWSRESPONSE_CELLCHUNK.fields_by_name[
"commit_row"
].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"]
_READROWSRESPONSE.fields_by_name["chunks"].message_type = _READROWSRESPONSE_CELLCHUNK
_MUTATEROWREQUEST.fields_by_name[
"mutations"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION
_MUTATEROWSREQUEST_ENTRY.fields_by_name[
"mutations"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION
_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST
_MUTATEROWSREQUEST.fields_by_name["entries"].message_type = _MUTATEROWSREQUEST_ENTRY
_MUTATEROWSRESPONSE_ENTRY.fields_by_name[
"status"
].message_type = google_dot_rpc_dot_status__pb2._STATUS
_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE
_MUTATEROWSRESPONSE.fields_by_name["entries"].message_type = _MUTATEROWSRESPONSE_ENTRY
_CHECKANDMUTATEROWREQUEST.fields_by_name[
"predicate_filter"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER
_CHECKANDMUTATEROWREQUEST.fields_by_name[
"true_mutations"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION
_CHECKANDMUTATEROWREQUEST.fields_by_name[
"false_mutations"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION
_READMODIFYWRITEROWREQUEST.fields_by_name[
"rules"
].message_type = (
google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE
)
_READMODIFYWRITEROWRESPONSE.fields_by_name[
"row"
].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW
DESCRIPTOR.message_types_by_name["ReadRowsRequest"] = _READROWSREQUEST
DESCRIPTOR.message_types_by_name["ReadRowsResponse"] = _READROWSRESPONSE
DESCRIPTOR.message_types_by_name["SampleRowKeysRequest"] = _SAMPLEROWKEYSREQUEST
DESCRIPTOR.message_types_by_name["SampleRowKeysResponse"] = _SAMPLEROWKEYSRESPONSE
DESCRIPTOR.message_types_by_name["MutateRowRequest"] = _MUTATEROWREQUEST
DESCRIPTOR.message_types_by_name["MutateRowResponse"] = _MUTATEROWRESPONSE
DESCRIPTOR.message_types_by_name["MutateRowsRequest"] = _MUTATEROWSREQUEST
DESCRIPTOR.message_types_by_name["MutateRowsResponse"] = _MUTATEROWSRESPONSE
DESCRIPTOR.message_types_by_name["CheckAndMutateRowRequest"] = _CHECKANDMUTATEROWREQUEST
DESCRIPTOR.message_types_by_name[
"CheckAndMutateRowResponse"
] = _CHECKANDMUTATEROWRESPONSE
DESCRIPTOR.message_types_by_name[
"ReadModifyWriteRowRequest"
] = _READMODIFYWRITEROWREQUEST
DESCRIPTOR.message_types_by_name[
"ReadModifyWriteRowResponse"
] = _READMODIFYWRITEROWRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ReadRowsRequest = _reflection.GeneratedProtocolMessageType(
"ReadRowsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_READROWSREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.ReadRows.
Attributes:
table_name:
The unique name of the table from which to read. Values are of
the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
rows:
The row keys and/or ranges to read. If not specified, reads
from all rows.
filter:
The filter to apply to the contents of the specified row(s).
If unset, reads the entirety of each row.
rows_limit:
The read will terminate after committing to N rows' worth of
results. The default (zero) is to return all results.
""",
),
)
_sym_db.RegisterMessage(ReadRowsRequest)
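# A minimal usage sketch for the request type registered above (the table path
# and limit are placeholders, not values taken from any real deployment):
#
#     request = ReadRowsRequest(
#         table_name="projects/<project>/instances/<instance>/tables/<table>",
#         rows_limit=10,
#     )
#
# Generated message classes accept their fields as keyword arguments, so the
# ``rows`` (RowSet) and ``filter`` (RowFilter) submessages can be supplied the
# same way when a full-table scan is not wanted.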
ReadRowsResponse = _reflection.GeneratedProtocolMessageType(
"ReadRowsResponse",
(_message.Message,),
dict(
CellChunk=_reflection.GeneratedProtocolMessageType(
"CellChunk",
(_message.Message,),
dict(
DESCRIPTOR=_READROWSRESPONSE_CELLCHUNK,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Specifies a piece of a row's contents returned as part of the read
response stream.
Attributes:
row_key:
The row key for this chunk of data. If the row key is empty,
this CellChunk is a continuation of the same row as the
previous CellChunk in the response stream, even if that
CellChunk was in a previous ReadRowsResponse message.
family_name:
The column family name for this chunk of data. If this message
is not present this CellChunk is a continuation of the same
column family as the previous CellChunk. The empty string can
occur as a column family name in a response so clients must
check explicitly for the presence of this message, not just
for ``family_name.value`` being non-empty.
qualifier:
The column qualifier for this chunk of data. If this message
is not present, this CellChunk is a continuation of the same
column as the previous CellChunk. Column qualifiers may be
empty so clients must check for the presence of this message,
not just for ``qualifier.value`` being non-empty.
timestamp_micros:
The cell's stored timestamp, which also uniquely identifies it
within its column. Values are always expressed in
microseconds, but individual tables may set a coarser
granularity to further restrict the allowed values. For
example, a table which specifies millisecond granularity will
only allow values of ``timestamp_micros`` which are multiples
of 1000. Timestamps are only set in the first CellChunk per
cell (for cells split into multiple chunks).
labels:
Labels applied to the cell by a
[RowFilter][google.bigtable.v2.RowFilter]. Labels are only set
on the first CellChunk per cell.
value:
The value stored in the cell. Cell values can be split across
multiple CellChunks. In that case only the value field will be
set in CellChunks after the first: the timestamp and labels
will only be present in the first CellChunk, even if the first
CellChunk came in a previous ReadRowsResponse.
value_size:
If this CellChunk is part of a chunked cell value and this is
not the final chunk of that cell, value\_size will be set to
the total length of the cell value. The client can use this
size to pre-allocate memory to hold the full cell value.
reset_row:
Indicates that the client should drop all previous chunks for
``row_key``, as it will be re-read from the beginning.
commit_row:
Indicates that the client can safely process all previous
chunks for ``row_key``, as its data has been fully read.
""",
),
),
DESCRIPTOR=_READROWSRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.ReadRows.
Attributes:
last_scanned_row_key:
Optionally the server might return the row key of the last row
it has scanned. The client can use this to construct a more
efficient retry request if needed: any row keys or portions of
ranges less than this row key can be dropped from the request.
This is primarily useful for cases where the server has read a
lot of data that was filtered out since the last committed row
key, allowing the client to skip that work on a retry.
""",
),
)
_sym_db.RegisterMessage(ReadRowsResponse)
_sym_db.RegisterMessage(ReadRowsResponse.CellChunk)
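# The CellChunk semantics documented above imply a small client-side reassembly
# loop; a rough, illustrative sketch (names and error handling are assumptions,
# not part of this module):
#
#     buffered = []
#     for response in read_rows_stream:
#         for chunk in response.chunks:
#             if chunk.reset_row:
#                 buffered = []              # drop the partially read row
#                 continue
#             buffered.append(chunk)
#             if chunk.commit_row:
#                 handle_row(buffered)       # row is now fully read
#                 buffered = []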
SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType(
"SampleRowKeysRequest",
(_message.Message,),
dict(
DESCRIPTOR=_SAMPLEROWKEYSREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.SampleRowKeys.
Attributes:
table_name:
The unique name of the table from which to sample row keys.
Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
""",
),
)
_sym_db.RegisterMessage(SampleRowKeysRequest)
SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType(
"SampleRowKeysResponse",
(_message.Message,),
dict(
DESCRIPTOR=_SAMPLEROWKEYSRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.SampleRowKeys.
Attributes:
row_key:
Sorted streamed sequence of sample row keys in the table. The
table might have contents before the first row key in the list
and after the last one, but a key containing the empty string
indicates "end of table" and will be the last response given,
if present. Note that row keys in this list may not have ever
been written to or read from, and users should therefore not
make any assumptions about the row key structure that are
specific to their use case.
offset_bytes:
Approximate total storage space used by all rows in the table
which precede ``row_key``. Buffering the contents of all rows
between two subsequent samples would require space roughly
equal to the difference in their ``offset_bytes`` fields.
""",
),
)
_sym_db.RegisterMessage(SampleRowKeysResponse)
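# Per the ``offset_bytes`` description above, the difference between two
# consecutive samples approximates how much data lies between their row keys,
# which is useful for sizing scan shards. Illustrative only:
#
#     approx_shard_bytes = samples[i + 1].offset_bytes - samples[i].offset_bytes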
MutateRowRequest = _reflection.GeneratedProtocolMessageType(
"MutateRowRequest",
(_message.Message,),
dict(
DESCRIPTOR=_MUTATEROWREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.MutateRow.
Attributes:
table_name:
The unique name of the table to which the mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
row_key:
The key of the row to which the mutation should be applied.
mutations:
Changes to be atomically applied to the specified row. Entries
are applied in order, meaning that earlier mutations can be
masked by later ones. Must contain at least one entry and at
most 100000.
""",
),
)
_sym_db.RegisterMessage(MutateRowRequest)
MutateRowResponse = _reflection.GeneratedProtocolMessageType(
"MutateRowResponse",
(_message.Message,),
dict(
DESCRIPTOR=_MUTATEROWRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.MutateRow.
""",
),
)
_sym_db.RegisterMessage(MutateRowResponse)
MutateRowsRequest = _reflection.GeneratedProtocolMessageType(
"MutateRowsRequest",
(_message.Message,),
dict(
Entry=_reflection.GeneratedProtocolMessageType(
"Entry",
(_message.Message,),
dict(
DESCRIPTOR=_MUTATEROWSREQUEST_ENTRY,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Attributes:
row_key:
The key of the row to which the ``mutations`` should be
applied.
mutations:
Changes to be atomically applied to the specified row.
Mutations are applied in order, meaning that earlier mutations
can be masked by later ones. You must specify at least one
mutation.
""",
),
),
DESCRIPTOR=_MUTATEROWSREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for BigtableService.MutateRows.
Attributes:
table_name:
The unique name of the table to which the mutations should be
applied.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
entries:
The row keys and corresponding mutations to be applied in
bulk. Each entry is applied as an atomic mutation, but the
entries may be applied in arbitrary order (even between
entries for the same row). At least one entry must be
specified, and in total the entries can contain at most 100000
mutations.
""",
),
)
_sym_db.RegisterMessage(MutateRowsRequest)
_sym_db.RegisterMessage(MutateRowsRequest.Entry)
MutateRowsResponse = _reflection.GeneratedProtocolMessageType(
"MutateRowsResponse",
(_message.Message,),
dict(
Entry=_reflection.GeneratedProtocolMessageType(
"Entry",
(_message.Message,),
dict(
DESCRIPTOR=_MUTATEROWSRESPONSE_ENTRY,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Attributes:
index:
The index into the original request's ``entries`` list of the
Entry for which a result is being reported.
status:
The result of the request Entry identified by ``index``.
Depending on how requests are batched during execution, it is
possible for one Entry to fail due to an error with another
Entry. In the event that this occurs, the same error will be
reported for both entries.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry)
),
),
DESCRIPTOR=_MUTATEROWSRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for BigtableService.MutateRows.
Attributes:
entries:
One or more results for Entries from the batch request.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse)
),
)
_sym_db.RegisterMessage(MutateRowsResponse)
_sym_db.RegisterMessage(MutateRowsResponse.Entry)
CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType(
"CheckAndMutateRowRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CHECKANDMUTATEROWREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.CheckAndMutateRow.
Attributes:
table_name:
The unique name of the table to which the conditional mutation
should be applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
row_key:
The key of the row to which the conditional mutation should be
applied.
predicate_filter:
The filter to be applied to the contents of the specified row.
Depending on whether or not any results are yielded, either
``true_mutations`` or ``false_mutations`` will be executed. If
unset, checks that the row contains any values at all.
true_mutations:
Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that
earlier mutations can be masked by later ones. Must contain at
least one entry if ``false_mutations`` is empty, and at most
100000.
false_mutations:
Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that
earlier mutations can be masked by later ones. Must contain at
least one entry if ``true_mutations`` is empty, and at most
100000.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest)
),
)
_sym_db.RegisterMessage(CheckAndMutateRowRequest)
CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType(
"CheckAndMutateRowResponse",
(_message.Message,),
dict(
DESCRIPTOR=_CHECKANDMUTATEROWRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.CheckAndMutateRow.
Attributes:
predicate_matched:
Whether or not the request's ``predicate_filter`` yielded any
results for the specified row.
""",
),
)
_sym_db.RegisterMessage(CheckAndMutateRowResponse)
ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType(
"ReadModifyWriteRowRequest",
(_message.Message,),
dict(
DESCRIPTOR=_READMODIFYWRITEROWREQUEST,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Request message for Bigtable.ReadModifyWriteRow.
Attributes:
table_name:
The unique name of the table to which the read/modify/write
rules should be applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id:
This value specifies routing for replication. If not
specified, the "default" application profile will be used.
row_key:
The key of the row to which the read/modify/write rules should
be applied.
rules:
Rules specifying how the specified row's contents are to be
transformed into writes. Entries are applied in order, meaning
that earlier rules will affect the results of later ones.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest)
),
)
_sym_db.RegisterMessage(ReadModifyWriteRowRequest)
ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType(
"ReadModifyWriteRowResponse",
(_message.Message,),
dict(
DESCRIPTOR=_READMODIFYWRITEROWRESPONSE,
__module__="google.cloud.bigtable_v2.proto.bigtable_pb2",
__doc__="""Response message for Bigtable.ReadModifyWriteRow.
Attributes:
row:
A Row containing the new contents of all cells modified by the
request.
""",
# @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse)
),
)
_sym_db.RegisterMessage(ReadModifyWriteRowResponse)
DESCRIPTOR._options = None
_BIGTABLE = _descriptor.ServiceDescriptor(
name="Bigtable",
full_name="google.bigtable.v2.Bigtable",
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=1912,
serialized_end=2981,
methods=[
_descriptor.MethodDescriptor(
name="ReadRows",
full_name="google.bigtable.v2.Bigtable.ReadRows",
index=0,
containing_service=None,
input_type=_READROWSREQUEST,
output_type=_READROWSRESPONSE,
serialized_options=_b(
'\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*'
),
),
_descriptor.MethodDescriptor(
name="SampleRowKeys",
full_name="google.bigtable.v2.Bigtable.SampleRowKeys",
index=1,
containing_service=None,
input_type=_SAMPLEROWKEYSREQUEST,
output_type=_SAMPLEROWKEYSRESPONSE,
serialized_options=_b(
"\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys"
),
),
_descriptor.MethodDescriptor(
name="MutateRow",
full_name="google.bigtable.v2.Bigtable.MutateRow",
index=2,
containing_service=None,
input_type=_MUTATEROWREQUEST,
output_type=_MUTATEROWRESPONSE,
serialized_options=_b(
'\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*'
),
),
_descriptor.MethodDescriptor(
name="MutateRows",
full_name="google.bigtable.v2.Bigtable.MutateRows",
index=3,
containing_service=None,
input_type=_MUTATEROWSREQUEST,
output_type=_MUTATEROWSRESPONSE,
serialized_options=_b(
'\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*'
),
),
_descriptor.MethodDescriptor(
name="CheckAndMutateRow",
full_name="google.bigtable.v2.Bigtable.CheckAndMutateRow",
index=4,
containing_service=None,
input_type=_CHECKANDMUTATEROWREQUEST,
output_type=_CHECKANDMUTATEROWRESPONSE,
serialized_options=_b(
'\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*'
),
),
_descriptor.MethodDescriptor(
name="ReadModifyWriteRow",
full_name="google.bigtable.v2.Bigtable.ReadModifyWriteRow",
index=5,
containing_service=None,
input_type=_READMODIFYWRITEROWREQUEST,
output_type=_READMODIFYWRITEROWRESPONSE,
serialized_options=_b(
'\202\323\344\223\002H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*'
),
),
],
)
_sym_db.RegisterServiceDescriptor(_BIGTABLE)
DESCRIPTOR.services_by_name["Bigtable"] = _BIGTABLE
# @@protoc_insertion_point(module_scope)
| true | true |
1c2b0f985ef8e35ab5fe8006ac361bf9a33ddac1 | 18,262 | py | Python | openpeerpower/components/xiaomi_miio/vacuum.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | ["Apache-2.0"] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | openpeerpower/components/xiaomi_miio/vacuum.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | ["Apache-2.0"] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | openpeerpower/components/xiaomi_miio/vacuum.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | ["Apache-2.0"] | null | null | null | """Support for the Xiaomi vacuum cleaner robot."""
from functools import partial
import logging
from miio import DeviceException, Vacuum
import voluptuous as vol
from openpeerpower.components.vacuum import (
ATTR_CLEANED_AREA,
PLATFORM_SCHEMA,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
)
from openpeerpower.config_entries import SOURCE_IMPORT
from openpeerpower.const import CONF_HOST, CONF_NAME, CONF_TOKEN, STATE_OFF, STATE_ON
from openpeerpower.helpers import config_validation as cv, entity_platform
from openpeerpower.util.dt import as_utc
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
DOMAIN,
SERVICE_CLEAN_SEGMENT,
SERVICE_CLEAN_ZONE,
SERVICE_GOTO,
SERVICE_MOVE_REMOTE_CONTROL,
SERVICE_MOVE_REMOTE_CONTROL_STEP,
SERVICE_START_REMOTE_CONTROL,
SERVICE_STOP_REMOTE_CONTROL,
)
from .device import XiaomiMiioEntity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Vacuum cleaner"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
ATTR_CLEAN_START = "clean_start"
ATTR_CLEAN_STOP = "clean_stop"
ATTR_CLEANING_TIME = "cleaning_time"
ATTR_DO_NOT_DISTURB = "do_not_disturb"
ATTR_DO_NOT_DISTURB_START = "do_not_disturb_start"
ATTR_DO_NOT_DISTURB_END = "do_not_disturb_end"
ATTR_MAIN_BRUSH_LEFT = "main_brush_left"
ATTR_SIDE_BRUSH_LEFT = "side_brush_left"
ATTR_FILTER_LEFT = "filter_left"
ATTR_SENSOR_DIRTY_LEFT = "sensor_dirty_left"
ATTR_CLEANING_COUNT = "cleaning_count"
ATTR_CLEANED_TOTAL_AREA = "total_cleaned_area"
ATTR_CLEANING_TOTAL_TIME = "total_cleaning_time"
ATTR_ERROR = "error"
ATTR_RC_DURATION = "duration"
ATTR_RC_ROTATION = "rotation"
ATTR_RC_VELOCITY = "velocity"
ATTR_STATUS = "status"
ATTR_ZONE_ARRAY = "zone"
ATTR_ZONE_REPEATER = "repeats"
ATTR_TIMERS = "timers"
ATTR_MOP_ATTACHED = "mop_attached"
SUPPORT_XIAOMI = (
SUPPORT_STATE
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
| SUPPORT_START
)
STATE_CODE_TO_STATE = {
1: STATE_IDLE, # "Starting"
2: STATE_IDLE, # "Charger disconnected"
3: STATE_IDLE, # "Idle"
4: STATE_CLEANING, # "Remote control active"
5: STATE_CLEANING, # "Cleaning"
6: STATE_RETURNING, # "Returning home"
7: STATE_CLEANING, # "Manual mode"
8: STATE_DOCKED, # "Charging"
9: STATE_ERROR, # "Charging problem"
10: STATE_PAUSED, # "Paused"
11: STATE_CLEANING, # "Spot cleaning"
12: STATE_ERROR, # "Error"
13: STATE_IDLE, # "Shutting down"
14: STATE_DOCKED, # "Updating"
15: STATE_RETURNING, # "Docking"
16: STATE_CLEANING, # "Going to target"
17: STATE_CLEANING, # "Zoned cleaning"
18: STATE_CLEANING, # "Segment cleaning"
100: STATE_DOCKED, # "Charging complete"
101: STATE_ERROR, # "Device offline"
}
async def async_setup_platform(opp, config, async_add_entities, discovery_info=None):
"""Import Miio configuration from YAML."""
_LOGGER.warning(
"Loading Xiaomi Miio Vacuum via platform setup is deprecated; Please remove it from your configuration"
)
opp.async_create_task(
opp.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config,
)
)
async def async_setup_entry(opp, config_entry, async_add_entities):
"""Set up the Xiaomi vacuum cleaner robot from a config entry."""
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_DEVICE:
host = config_entry.data[CONF_HOST]
token = config_entry.data[CONF_TOKEN]
name = config_entry.title
unique_id = config_entry.unique_id
# Create handler
_LOGGER.debug("Initializing with host %s (token %s...)", host, token[:5])
vacuum = Vacuum(host, token)
mirobo = MiroboVacuum(name, vacuum, config_entry, unique_id)
entities.append(mirobo)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_START_REMOTE_CONTROL,
{},
MiroboVacuum.async_remote_control_start.__name__,
)
platform.async_register_entity_service(
SERVICE_STOP_REMOTE_CONTROL,
{},
MiroboVacuum.async_remote_control_stop.__name__,
)
platform.async_register_entity_service(
SERVICE_MOVE_REMOTE_CONTROL,
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
},
MiroboVacuum.async_remote_control_move.__name__,
)
platform.async_register_entity_service(
SERVICE_MOVE_REMOTE_CONTROL_STEP,
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
},
MiroboVacuum.async_remote_control_move_step.__name__,
)
platform.async_register_entity_service(
SERVICE_CLEAN_ZONE,
{
vol.Required(ATTR_ZONE_ARRAY): vol.All(
list,
[
vol.ExactSequence(
[
vol.Coerce(int),
vol.Coerce(int),
vol.Coerce(int),
vol.Coerce(int),
]
)
],
),
vol.Required(ATTR_ZONE_REPEATER): vol.All(
vol.Coerce(int), vol.Clamp(min=1, max=3)
),
},
MiroboVacuum.async_clean_zone.__name__,
)
platform.async_register_entity_service(
SERVICE_GOTO,
{
vol.Required("x_coord"): vol.Coerce(int),
vol.Required("y_coord"): vol.Coerce(int),
},
MiroboVacuum.async_goto.__name__,
)
platform.async_register_entity_service(
SERVICE_CLEAN_SEGMENT,
{vol.Required("segments"): vol.Any(vol.Coerce(int), [vol.Coerce(int)])},
MiroboVacuum.async_clean_segment.__name__,
)
async_add_entities(entities, update_before_add=True)
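# The zone-cleaning service registered above expects data matching its schema;
# an illustrative payload (coordinates are placeholders in the vacuum's own
# coordinate system) could look like:
#
#     {"zone": [[25000, 25000, 26500, 26500]], "repeats": 2}
#
# and the goto service likewise takes integer "x_coord" / "y_coord" values.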
class MiroboVacuum(XiaomiMiioEntity, StateVacuumEntity):
"""Representation of a Xiaomi Vacuum cleaner robot."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the Xiaomi vacuum cleaner robot handler."""
super().__init__(name, device, entry, unique_id)
self.vacuum_state = None
self._available = False
self.consumable_state = None
self.clean_history = None
self.dnd_state = None
self.last_clean = None
self._fan_speeds = None
self._fan_speeds_reverse = None
self._timers = None
@property
def state(self):
"""Return the status of the vacuum cleaner."""
if self.vacuum_state is not None:
# The vacuum reverts back to an idle state after erroring out.
# We want to keep returning an error until it has been cleared.
if self.vacuum_state.got_error:
return STATE_ERROR
try:
return STATE_CODE_TO_STATE[int(self.vacuum_state.state_code)]
except KeyError:
_LOGGER.error(
"STATE not supported: %s, state_code: %s",
self.vacuum_state.state,
self.vacuum_state.state_code,
)
return None
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
if self.vacuum_state is not None:
return self.vacuum_state.battery
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
if self.vacuum_state is not None:
speed = self.vacuum_state.fanspeed
if speed in self._fan_speeds_reverse:
return self._fan_speeds_reverse[speed]
_LOGGER.debug("Unable to find reverse for %s", speed)
return speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return list(self._fan_speeds) if self._fan_speeds else []
@property
def timers(self):
"""Get the list of added timers of the vacuum cleaner."""
return [
{
"enabled": timer.enabled,
"cron": timer.cron,
"next_schedule": as_utc(timer.next_schedule),
}
for timer in self._timers
]
@property
def extra_state_attributes(self):
"""Return the specific state attributes of this vacuum cleaner."""
attrs = {}
if self.vacuum_state is not None:
attrs.update(
{
ATTR_DO_NOT_DISTURB: STATE_ON
if self.dnd_state.enabled
else STATE_OFF,
ATTR_DO_NOT_DISTURB_START: str(self.dnd_state.start),
ATTR_DO_NOT_DISTURB_END: str(self.dnd_state.end),
# Not working --> 'Cleaning mode':
# STATE_ON if self.vacuum_state.in_cleaning else STATE_OFF,
ATTR_CLEANING_TIME: int(
self.vacuum_state.clean_time.total_seconds() / 60
),
ATTR_CLEANED_AREA: int(self.vacuum_state.clean_area),
ATTR_CLEANING_COUNT: int(self.clean_history.count),
ATTR_CLEANED_TOTAL_AREA: int(self.clean_history.total_area),
ATTR_CLEANING_TOTAL_TIME: int(
self.clean_history.total_duration.total_seconds() / 60
),
ATTR_MAIN_BRUSH_LEFT: int(
self.consumable_state.main_brush_left.total_seconds() / 3600
),
ATTR_SIDE_BRUSH_LEFT: int(
self.consumable_state.side_brush_left.total_seconds() / 3600
),
ATTR_FILTER_LEFT: int(
self.consumable_state.filter_left.total_seconds() / 3600
),
ATTR_SENSOR_DIRTY_LEFT: int(
self.consumable_state.sensor_dirty_left.total_seconds() / 3600
),
ATTR_STATUS: str(self.vacuum_state.state),
ATTR_MOP_ATTACHED: self.vacuum_state.is_water_box_attached,
}
)
if self.last_clean:
attrs[ATTR_CLEAN_START] = self.last_clean.start
attrs[ATTR_CLEAN_STOP] = self.last_clean.end
if self.vacuum_state.got_error:
attrs[ATTR_ERROR] = self.vacuum_state.error
if self.timers:
attrs[ATTR_TIMERS] = self.timers
return attrs
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_XIAOMI
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a vacuum command handling error messages."""
try:
await self.opp.async_add_executor_job(partial(func, *args, **kwargs))
return True
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
return False
async def async_start(self):
"""Start or resume the cleaning task."""
await self._try_command(
"Unable to start the vacuum: %s", self._device.resume_or_start
)
async def async_pause(self):
"""Pause the cleaning task."""
await self._try_command("Unable to set start/pause: %s", self._device.pause)
async def async_stop(self, **kwargs):
"""Stop the vacuum cleaner."""
await self._try_command("Unable to stop: %s", self._device.stop)
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if fan_speed in self._fan_speeds:
fan_speed = self._fan_speeds[fan_speed]
else:
try:
fan_speed = int(fan_speed)
except ValueError as exc:
_LOGGER.error(
"Fan speed step not recognized (%s). Valid speeds are: %s",
exc,
self.fan_speed_list,
)
return
await self._try_command(
"Unable to set fan speed: %s", self._device.set_fan_speed, fan_speed
)
async def async_return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
await self._try_command("Unable to return home: %s", self._device.home)
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
await self._try_command(
"Unable to start the vacuum for a spot clean-up: %s", self._device.spot
)
async def async_locate(self, **kwargs):
"""Locate the vacuum cleaner."""
await self._try_command("Unable to locate the botvac: %s", self._device.find)
async def async_send_command(self, command, params=None, **kwargs):
"""Send raw command."""
await self._try_command(
"Unable to send command to the vacuum: %s",
self._device.raw_command,
command,
params,
)
async def async_remote_control_start(self):
"""Start remote control mode."""
await self._try_command(
"Unable to start remote control the vacuum: %s", self._device.manual_start
)
async def async_remote_control_stop(self):
"""Stop remote control mode."""
await self._try_command(
"Unable to stop remote control the vacuum: %s", self._device.manual_stop
)
async def async_remote_control_move(
self, rotation: int = 0, velocity: float = 0.3, duration: int = 1500
):
"""Move vacuum with remote control mode."""
await self._try_command(
"Unable to move with remote control the vacuum: %s",
self._device.manual_control,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_remote_control_move_step(
self, rotation: int = 0, velocity: float = 0.2, duration: int = 1500
):
"""Move vacuum one step with remote control mode."""
await self._try_command(
"Unable to remote control the vacuum: %s",
self._device.manual_control_once,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_goto(self, x_coord: int, y_coord: int):
"""Goto the specified coordinates."""
await self._try_command(
"Unable to send the vacuum cleaner to the specified coordinates: %s",
self._device.goto,
x_coord=x_coord,
y_coord=y_coord,
)
async def async_clean_segment(self, segments):
"""Clean the specified segments(s)."""
if isinstance(segments, int):
segments = [segments]
await self._try_command(
"Unable to start cleaning of the specified segments: %s",
self._device.segment_clean,
segments=segments,
)
def update(self):
"""Fetch state from the device."""
try:
state = self._device.status()
self.vacuum_state = state
self._fan_speeds = self._device.fan_speed_presets()
self._fan_speeds_reverse = {v: k for k, v in self._fan_speeds.items()}
self.consumable_state = self._device.consumable_status()
self.clean_history = self._device.clean_history()
self.last_clean = self._device.last_clean_details()
self.dnd_state = self._device.dnd_status()
self._available = True
except (OSError, DeviceException) as exc:
if self._available:
self._available = False
_LOGGER.warning("Got exception while fetching the state: %s", exc)
# Fetch timers separately, see #38285
try:
self._timers = self._device.timer()
except DeviceException as exc:
_LOGGER.debug(
"Unable to fetch timers, this may happen on some devices: %s", exc
)
self._timers = []
async def async_clean_zone(self, zone, repeats=1):
"""Clean selected area for the number of repeats indicated."""
for _zone in zone:
_zone.append(repeats)
_LOGGER.debug("Zone with repeats: %s", zone)
try:
await self.opp.async_add_executor_job(self._device.zoned_clean, zone)
except (OSError, DeviceException) as exc:
_LOGGER.error("Unable to send zoned_clean command to the vacuum: %s", exc)
| 34.718631 | 111 | 0.59966 | from functools import partial
import logging
from miio import DeviceException, Vacuum
import voluptuous as vol
from openpeerpower.components.vacuum import (
ATTR_CLEANED_AREA,
PLATFORM_SCHEMA,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
)
from openpeerpower.config_entries import SOURCE_IMPORT
from openpeerpower.const import CONF_HOST, CONF_NAME, CONF_TOKEN, STATE_OFF, STATE_ON
from openpeerpower.helpers import config_validation as cv, entity_platform
from openpeerpower.util.dt import as_utc
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
DOMAIN,
SERVICE_CLEAN_SEGMENT,
SERVICE_CLEAN_ZONE,
SERVICE_GOTO,
SERVICE_MOVE_REMOTE_CONTROL,
SERVICE_MOVE_REMOTE_CONTROL_STEP,
SERVICE_START_REMOTE_CONTROL,
SERVICE_STOP_REMOTE_CONTROL,
)
from .device import XiaomiMiioEntity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Vacuum cleaner"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
ATTR_CLEAN_START = "clean_start"
ATTR_CLEAN_STOP = "clean_stop"
ATTR_CLEANING_TIME = "cleaning_time"
ATTR_DO_NOT_DISTURB = "do_not_disturb"
ATTR_DO_NOT_DISTURB_START = "do_not_disturb_start"
ATTR_DO_NOT_DISTURB_END = "do_not_disturb_end"
ATTR_MAIN_BRUSH_LEFT = "main_brush_left"
ATTR_SIDE_BRUSH_LEFT = "side_brush_left"
ATTR_FILTER_LEFT = "filter_left"
ATTR_SENSOR_DIRTY_LEFT = "sensor_dirty_left"
ATTR_CLEANING_COUNT = "cleaning_count"
ATTR_CLEANED_TOTAL_AREA = "total_cleaned_area"
ATTR_CLEANING_TOTAL_TIME = "total_cleaning_time"
ATTR_ERROR = "error"
ATTR_RC_DURATION = "duration"
ATTR_RC_ROTATION = "rotation"
ATTR_RC_VELOCITY = "velocity"
ATTR_STATUS = "status"
ATTR_ZONE_ARRAY = "zone"
ATTR_ZONE_REPEATER = "repeats"
ATTR_TIMERS = "timers"
ATTR_MOP_ATTACHED = "mop_attached"
SUPPORT_XIAOMI = (
SUPPORT_STATE
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
| SUPPORT_START
)
STATE_CODE_TO_STATE = {
1: STATE_IDLE,
2: STATE_IDLE,
3: STATE_IDLE,
4: STATE_CLEANING,
5: STATE_CLEANING,
6: STATE_RETURNING,
7: STATE_CLEANING,
8: STATE_DOCKED,
9: STATE_ERROR,
10: STATE_PAUSED,
11: STATE_CLEANING,
12: STATE_ERROR,
13: STATE_IDLE,
14: STATE_DOCKED,
15: STATE_RETURNING,
16: STATE_CLEANING,
17: STATE_CLEANING,
18: STATE_CLEANING,
100: STATE_DOCKED,
101: STATE_ERROR,
}
async def async_setup_platform(opp, config, async_add_entities, discovery_info=None):
_LOGGER.warning(
"Loading Xiaomi Miio Vacuum via platform setup is deprecated; Please remove it from your configuration"
)
opp.async_create_task(
opp.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config,
)
)
async def async_setup_entry(opp, config_entry, async_add_entities):
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_DEVICE:
host = config_entry.data[CONF_HOST]
token = config_entry.data[CONF_TOKEN]
name = config_entry.title
unique_id = config_entry.unique_id
_LOGGER.debug("Initializing with host %s (token %s...)", host, token[:5])
vacuum = Vacuum(host, token)
mirobo = MiroboVacuum(name, vacuum, config_entry, unique_id)
entities.append(mirobo)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_START_REMOTE_CONTROL,
{},
MiroboVacuum.async_remote_control_start.__name__,
)
platform.async_register_entity_service(
SERVICE_STOP_REMOTE_CONTROL,
{},
MiroboVacuum.async_remote_control_stop.__name__,
)
platform.async_register_entity_service(
SERVICE_MOVE_REMOTE_CONTROL,
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
},
MiroboVacuum.async_remote_control_move.__name__,
)
platform.async_register_entity_service(
SERVICE_MOVE_REMOTE_CONTROL_STEP,
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
},
MiroboVacuum.async_remote_control_move_step.__name__,
)
platform.async_register_entity_service(
SERVICE_CLEAN_ZONE,
{
vol.Required(ATTR_ZONE_ARRAY): vol.All(
list,
[
vol.ExactSequence(
[
vol.Coerce(int),
vol.Coerce(int),
vol.Coerce(int),
vol.Coerce(int),
]
)
],
),
vol.Required(ATTR_ZONE_REPEATER): vol.All(
vol.Coerce(int), vol.Clamp(min=1, max=3)
),
},
MiroboVacuum.async_clean_zone.__name__,
)
platform.async_register_entity_service(
SERVICE_GOTO,
{
vol.Required("x_coord"): vol.Coerce(int),
vol.Required("y_coord"): vol.Coerce(int),
},
MiroboVacuum.async_goto.__name__,
)
platform.async_register_entity_service(
SERVICE_CLEAN_SEGMENT,
{vol.Required("segments"): vol.Any(vol.Coerce(int), [vol.Coerce(int)])},
MiroboVacuum.async_clean_segment.__name__,
)
async_add_entities(entities, update_before_add=True)
class MiroboVacuum(XiaomiMiioEntity, StateVacuumEntity):
def __init__(self, name, device, entry, unique_id):
super().__init__(name, device, entry, unique_id)
self.vacuum_state = None
self._available = False
self.consumable_state = None
self.clean_history = None
self.dnd_state = None
self.last_clean = None
self._fan_speeds = None
self._fan_speeds_reverse = None
self._timers = None
@property
def state(self):
if self.vacuum_state is not None:
if self.vacuum_state.got_error:
return STATE_ERROR
try:
return STATE_CODE_TO_STATE[int(self.vacuum_state.state_code)]
except KeyError:
_LOGGER.error(
"STATE not supported: %s, state_code: %s",
self.vacuum_state.state,
self.vacuum_state.state_code,
)
return None
@property
def battery_level(self):
if self.vacuum_state is not None:
return self.vacuum_state.battery
@property
def fan_speed(self):
if self.vacuum_state is not None:
speed = self.vacuum_state.fanspeed
if speed in self._fan_speeds_reverse:
return self._fan_speeds_reverse[speed]
_LOGGER.debug("Unable to find reverse for %s", speed)
return speed
@property
def fan_speed_list(self):
return list(self._fan_speeds) if self._fan_speeds else []
@property
def timers(self):
return [
{
"enabled": timer.enabled,
"cron": timer.cron,
"next_schedule": as_utc(timer.next_schedule),
}
for timer in self._timers
]
@property
def extra_state_attributes(self):
attrs = {}
if self.vacuum_state is not None:
attrs.update(
{
ATTR_DO_NOT_DISTURB: STATE_ON
if self.dnd_state.enabled
else STATE_OFF,
ATTR_DO_NOT_DISTURB_START: str(self.dnd_state.start),
ATTR_DO_NOT_DISTURB_END: str(self.dnd_state.end),
ATTR_CLEANING_TIME: int(
self.vacuum_state.clean_time.total_seconds() / 60
),
ATTR_CLEANED_AREA: int(self.vacuum_state.clean_area),
ATTR_CLEANING_COUNT: int(self.clean_history.count),
ATTR_CLEANED_TOTAL_AREA: int(self.clean_history.total_area),
ATTR_CLEANING_TOTAL_TIME: int(
self.clean_history.total_duration.total_seconds() / 60
),
ATTR_MAIN_BRUSH_LEFT: int(
self.consumable_state.main_brush_left.total_seconds() / 3600
),
ATTR_SIDE_BRUSH_LEFT: int(
self.consumable_state.side_brush_left.total_seconds() / 3600
),
ATTR_FILTER_LEFT: int(
self.consumable_state.filter_left.total_seconds() / 3600
),
ATTR_SENSOR_DIRTY_LEFT: int(
self.consumable_state.sensor_dirty_left.total_seconds() / 3600
),
ATTR_STATUS: str(self.vacuum_state.state),
ATTR_MOP_ATTACHED: self.vacuum_state.is_water_box_attached,
}
)
if self.last_clean:
attrs[ATTR_CLEAN_START] = self.last_clean.start
attrs[ATTR_CLEAN_STOP] = self.last_clean.end
if self.vacuum_state.got_error:
attrs[ATTR_ERROR] = self.vacuum_state.error
if self.timers:
attrs[ATTR_TIMERS] = self.timers
return attrs
@property
def available(self) -> bool:
return self._available
@property
def supported_features(self):
return SUPPORT_XIAOMI
async def _try_command(self, mask_error, func, *args, **kwargs):
try:
await self.opp.async_add_executor_job(partial(func, *args, **kwargs))
return True
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
return False
async def async_start(self):
await self._try_command(
"Unable to start the vacuum: %s", self._device.resume_or_start
)
async def async_pause(self):
await self._try_command("Unable to set start/pause: %s", self._device.pause)
async def async_stop(self, **kwargs):
await self._try_command("Unable to stop: %s", self._device.stop)
async def async_set_fan_speed(self, fan_speed, **kwargs):
if fan_speed in self._fan_speeds:
fan_speed = self._fan_speeds[fan_speed]
else:
try:
fan_speed = int(fan_speed)
except ValueError as exc:
_LOGGER.error(
"Fan speed step not recognized (%s). Valid speeds are: %s",
exc,
self.fan_speed_list,
)
return
await self._try_command(
"Unable to set fan speed: %s", self._device.set_fan_speed, fan_speed
)
async def async_return_to_base(self, **kwargs):
await self._try_command("Unable to return home: %s", self._device.home)
async def async_clean_spot(self, **kwargs):
await self._try_command(
"Unable to start the vacuum for a spot clean-up: %s", self._device.spot
)
async def async_locate(self, **kwargs):
await self._try_command("Unable to locate the botvac: %s", self._device.find)
async def async_send_command(self, command, params=None, **kwargs):
await self._try_command(
"Unable to send command to the vacuum: %s",
self._device.raw_command,
command,
params,
)
async def async_remote_control_start(self):
await self._try_command(
"Unable to start remote control the vacuum: %s", self._device.manual_start
)
async def async_remote_control_stop(self):
await self._try_command(
"Unable to stop remote control the vacuum: %s", self._device.manual_stop
)
async def async_remote_control_move(
self, rotation: int = 0, velocity: float = 0.3, duration: int = 1500
):
await self._try_command(
"Unable to move with remote control the vacuum: %s",
self._device.manual_control,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_remote_control_move_step(
self, rotation: int = 0, velocity: float = 0.2, duration: int = 1500
):
await self._try_command(
"Unable to remote control the vacuum: %s",
self._device.manual_control_once,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_goto(self, x_coord: int, y_coord: int):
await self._try_command(
"Unable to send the vacuum cleaner to the specified coordinates: %s",
self._device.goto,
x_coord=x_coord,
y_coord=y_coord,
)
async def async_clean_segment(self, segments):
if isinstance(segments, int):
segments = [segments]
await self._try_command(
"Unable to start cleaning of the specified segments: %s",
self._device.segment_clean,
segments=segments,
)
def update(self):
try:
state = self._device.status()
self.vacuum_state = state
self._fan_speeds = self._device.fan_speed_presets()
self._fan_speeds_reverse = {v: k for k, v in self._fan_speeds.items()}
self.consumable_state = self._device.consumable_status()
self.clean_history = self._device.clean_history()
self.last_clean = self._device.last_clean_details()
self.dnd_state = self._device.dnd_status()
self._available = True
except (OSError, DeviceException) as exc:
if self._available:
self._available = False
_LOGGER.warning("Got exception while fetching the state: %s", exc)
try:
self._timers = self._device.timer()
except DeviceException as exc:
_LOGGER.debug(
"Unable to fetch timers, this may happen on some devices: %s", exc
)
self._timers = []
async def async_clean_zone(self, zone, repeats=1):
for _zone in zone:
_zone.append(repeats)
_LOGGER.debug("Zone with repeats: %s", zone)
try:
await self.opp.async_add_executor_job(self._device.zoned_clean, zone)
except (OSError, DeviceException) as exc:
_LOGGER.error("Unable to send zoned_clean command to the vacuum: %s", exc)
| true | true |
1c2b101cc49648ec3fb251fd993a7676d6e524ba | 110 | py | Python | gui/theme.py | YannThorimbert/FantasyStrategia | e9e26cbd95faba6f1223aaa34bc0b2c6e60cf5f5 | [
"MIT"
] | null | null | null | gui/theme.py | YannThorimbert/FantasyStrategia | e9e26cbd95faba6f1223aaa34bc0b2c6e60cf5f5 | [
"MIT"
] | null | null | null | gui/theme.py | YannThorimbert/FantasyStrategia | e9e26cbd95faba6f1223aaa34bc0b2c6e60cf5f5 | [
"MIT"
] | null | null | null | import thorpy
def set_theme(theme):
thorpy.set_theme(theme)
thorpy.style.DEF_COLOR = (150,150,150)
| 13.75 | 42 | 0.709091 | import thorpy
def set_theme(theme):
thorpy.set_theme(theme)
thorpy.style.DEF_COLOR = (150,150,150)
| true | true |
1c2b10e1d2d7618f869f5dc83857e9caf1174723 | 3,877 | py | Python | nipype/interfaces/fsl/possum.py | mfalkiewicz/nipype | 775e21b78fb1ffa2ff9cb12e6f052868bd44d052 | [
"Apache-2.0"
] | 1 | 2015-01-19T13:12:27.000Z | 2015-01-19T13:12:27.000Z | nipype/interfaces/fsl/possum.py | bpinsard/nipype | 373bdddba9f675ef153951afa368729e2d8950d2 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/fsl/possum.py | bpinsard/nipype | 373bdddba9f675ef153951afa368729e2d8950d2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The possum module provides classes for interfacing with `POSSUM
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/POSSUM>`_ command line tools.
Please check out the link for pertinent citations when using POSSUM.
.. Note:: This was written to work with FSL version 5.0.6.
.. testsetup::
# Change directory to provide relative paths for doctests
import os
filepath = os.path.dirname( os.path.realpath( __file__ ) )
datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
os.chdir(datadir)
"""
from .base import FSLCommand, FSLCommandInputSpec
from ..base import TraitedSpec, File, traits
class B0CalcInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, mandatory=True, argstr='-i %s', position=0,
desc='filename of input image (usually a tissue/air segmentation)')
out_file = File(argstr='-o %s', position=1, name_source=['in_file'],
name_template='%s_b0field', output_name='out_file',
desc='filename of B0 output volume')
x_grad = traits.Float(0.0, argstr='--gx=%0.4f',
desc='Value for zeroth-order x-gradient field (per mm)')
y_grad = traits.Float(0.0, argstr='--gy=%0.4f',
desc='Value for zeroth-order y-gradient field (per mm)')
z_grad = traits.Float(0.0, argstr='--gz=%0.4f',
desc='Value for zeroth-order z-gradient field (per mm)')
x_b0 = traits.Float(0.0, argstr='--b0x=%0.2f', xor=['xyz_b0'],
desc='Value for zeroth-order b0 field (x-component), in Tesla')
y_b0 = traits.Float(0.0, argstr='--b0y=%0.2f', xor=['xyz_b0'],
desc='Value for zeroth-order b0 field (y-component), in Tesla')
z_b0 = traits.Float(1.0, argstr='--b0=%0.2f', xor=['xyz_b0'],
desc='Value for zeroth-order b0 field (z-component), in Tesla')
xyz_b0 = traits.Tuple(
traits.Float, traits.Float, traits.Float,
argstr='--b0x=%0.2f --b0y=%0.2f --b0=%0.2f', xor=['x_b0', 'y_b0', 'z_b0'],
desc='Zeroth-order B0 field in Tesla')
delta = traits.Float(-9.45e-6, argstr='-d %e',
desc='Delta value (chi_tissue - chi_air)')
chi_air = traits.Float(
4.0e-7, argstr='--chi0=%e', desc='susceptibility of air')
compute_xyz = traits.Bool(False, argstr='--xyz',
desc='calculate and save all 3 field components (i.e. x,y,z)')
extendboundary = traits.Float(1.0, argstr='--extendboundary=%0.2f',
desc='Relative proportion to extend voxels at boundary')
directconv = traits.Bool(False, argstr='--directconv',
desc='use direct (image space) convolution, not FFT')
class B0CalcOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='filename of B0 output volume')
class B0Calc(FSLCommand):
"""
B0 inhomogeneities occur at interfaces of materials with different magnetic susceptibilities,
such as tissue-air interfaces. These differences lead to distortion in the local magnetic field,
as Maxwell’s equations need to be satisfied. An example of B0 inhomogeneity is the first volume
of the 4D volume ```$FSLDIR/data/possum/b0_ppm.nii.gz```.
Examples
--------
>>> from nipype.interfaces.fsl import B0Calc
>>> b0calc = B0Calc()
>>> b0calc.inputs.in_file = 'tissue+air_map.nii'
>>> b0calc.inputs.z_b0 = 3.0
>>> b0calc.inputs.output_type = "NIFTI_GZ"
>>> b0calc.cmdline
'b0calc -i tissue+air_map.nii -o tissue+air_map_b0field.nii.gz --b0=3.00'
"""
_cmd = 'b0calc'
input_spec = B0CalcInputSpec
output_spec = B0CalcOutputSpec
| 42.604396 | 100 | 0.622904 |
from .base import FSLCommand, FSLCommandInputSpec
from ..base import TraitedSpec, File, traits
class B0CalcInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, mandatory=True, argstr='-i %s', position=0,
desc='filename of input image (usually a tissue/air segmentation)')
out_file = File(argstr='-o %s', position=1, name_source=['in_file'],
name_template='%s_b0field', output_name='out_file',
desc='filename of B0 output volume')
x_grad = traits.Float(0.0, argstr='--gx=%0.4f',
desc='Value for zeroth-order x-gradient field (per mm)')
y_grad = traits.Float(0.0, argstr='--gy=%0.4f',
desc='Value for zeroth-order y-gradient field (per mm)')
z_grad = traits.Float(0.0, argstr='--gz=%0.4f',
desc='Value for zeroth-order z-gradient field (per mm)')
x_b0 = traits.Float(0.0, argstr='--b0x=%0.2f', xor=['xyz_b0'],
desc='Value for zeroth-order b0 field (x-component), in Tesla')
y_b0 = traits.Float(0.0, argstr='--b0y=%0.2f', xor=['xyz_b0'],
desc='Value for zeroth-order b0 field (y-component), in Tesla')
z_b0 = traits.Float(1.0, argstr='--b0=%0.2f', xor=['xyz_b0'],
desc='Value for zeroth-order b0 field (z-component), in Tesla')
xyz_b0 = traits.Tuple(
traits.Float, traits.Float, traits.Float,
argstr='--b0x=%0.2f --b0y=%0.2f --b0=%0.2f', xor=['x_b0', 'y_b0', 'z_b0'],
desc='Zeroth-order B0 field in Tesla')
delta = traits.Float(-9.45e-6, argstr='-d %e',
desc='Delta value (chi_tissue - chi_air)')
chi_air = traits.Float(
4.0e-7, argstr='--chi0=%e', desc='susceptibility of air')
compute_xyz = traits.Bool(False, argstr='--xyz',
desc='calculate and save all 3 field components (i.e. x,y,z)')
extendboundary = traits.Float(1.0, argstr='--extendboundary=%0.2f',
desc='Relative proportion to extend voxels at boundary')
directconv = traits.Bool(False, argstr='--directconv',
desc='use direct (image space) convolution, not FFT')
class B0CalcOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='filename of B0 output volume')
class B0Calc(FSLCommand):
_cmd = 'b0calc'
input_spec = B0CalcInputSpec
output_spec = B0CalcOutputSpec
| true | true |
1c2b112f87c0b3e81cdf00f95790f71463ad4e52 | 269 | py | Python | tester_web/tables/__init__.py | Ma-Jun-a/TMW_backend | 32e15d18ee826c8b4167041690b33c417076b0d7 | [
"MIT"
] | null | null | null | tester_web/tables/__init__.py | Ma-Jun-a/TMW_backend | 32e15d18ee826c8b4167041690b33c417076b0d7 | [
"MIT"
] | null | null | null | tester_web/tables/__init__.py | Ma-Jun-a/TMW_backend | 32e15d18ee826c8b4167041690b33c417076b0d7 | [
"MIT"
] | null | null | null | #执行此方法依据模型建表
from sqlalchemy.orm import sessionmaker
from tester_web.tables.user import Model, engine
def init_db():
Model.metadata.create_all(bind=engine)
# Initialize the session only after the model migration is complete
# init_db()
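# Usage sketch (an assumption, not stated by the original author): run
# init_db() once after the model migration is done, then import db_session
# elsewhere to issue queries against the engine.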
Session = sessionmaker()
db_session = Session(bind=engine) | 20.692308 | 49 | 0.750929 |
from sqlalchemy.orm import sessionmaker
from tester_web.tables.user import Model, engine
def init_db():
Model.metadata.create_all(bind=engine)
Session = sessionmaker()
db_session = Session(bind=engine) | true | true |
1c2b1196866d2ab7c2c40f6972b59e183b485d0e | 417 | py | Python | tests/functional/config_files/config.py | elementechemlyn/pdssandpit | 0c3eff557b00ca721919135d804878a6ab583016 | [
"MIT"
] | null | null | null | tests/functional/config_files/config.py | elementechemlyn/pdssandpit | 0c3eff557b00ca721919135d804878a6ab583016 | [
"MIT"
] | null | null | null | tests/functional/config_files/config.py | elementechemlyn/pdssandpit | 0c3eff557b00ca721919135d804878a6ab583016 | [
"MIT"
] | null | null | null | from .environment import ENV
# Apigee Details
ENVIRONMENT = ENV["environment"]
BASE_URL = f"https://{ENVIRONMENT}.api.service.nhs.uk"
# Unattended access details
APPLICATION_RESTRICTED_API_KEY = ENV["application_restricted_api_key"]
SIGNING_KEY = ENV["signing_key"]
KEY_ID = ENV["key_id"]
# PDS
PDS_BASE_PATH = ENV["pds_base_path"]
# App details
CLIENT_ID = ENV['client_id']
CLIENT_SECRET = ENV['client_secret']
| 21.947368 | 70 | 0.767386 | from .environment import ENV
ENVIRONMENT = ENV["environment"]
BASE_URL = f"https://{ENVIRONMENT}.api.service.nhs.uk"
APPLICATION_RESTRICTED_API_KEY = ENV["application_restricted_api_key"]
SIGNING_KEY = ENV["signing_key"]
KEY_ID = ENV["key_id"]
PDS_BASE_PATH = ENV["pds_base_path"]
CLIENT_ID = ENV['client_id']
CLIENT_SECRET = ENV['client_secret']
| true | true |
1c2b11f5ebeb7193ca31b4b1407ed0dbc832c795 | 502 | py | Python | js_events/migrations/0026_eventsconfig_allow_post.py | evgeny-dmi3ev/js-events | 766fea79591ad0c4d2c0fcd5580f9699aa232c29 | [
"BSD-3-Clause"
] | null | null | null | js_events/migrations/0026_eventsconfig_allow_post.py | evgeny-dmi3ev/js-events | 766fea79591ad0c4d2c0fcd5580f9699aa232c29 | [
"BSD-3-Clause"
] | null | null | null | js_events/migrations/0026_eventsconfig_allow_post.py | evgeny-dmi3ev/js-events | 766fea79591ad0c4d2c0fcd5580f9699aa232c29 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-28 15:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('js_events', '0025_create_placeholders'),
]
operations = [
migrations.AddField(
model_name='eventsconfig',
name='allow_post',
field=models.BooleanField(default=False, verbose_name='Allow POST requests'),
),
]
| 23.904762 | 89 | 0.641434 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('js_events', '0025_create_placeholders'),
]
operations = [
migrations.AddField(
model_name='eventsconfig',
name='allow_post',
field=models.BooleanField(default=False, verbose_name='Allow POST requests'),
),
]
| true | true |
1c2b12c18e1552b07dc7dbe9e5f32552a7e7e8b6 | 3,073 | py | Python | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/jira_object_dataset.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/jira_object_dataset.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/jira_object_dataset.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .dataset import Dataset
class JiraObjectDataset(Dataset):
"""Jira Service dataset.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
deserialized to this collection
:type additional_properties: dict[str, object]
:param description: Dataset description.
:type description: str
:param structure: Columns that define the structure of the dataset. Type:
array (or Expression with resultType array), itemType: DatasetDataElement.
:type structure: object
:param schema: Columns that define the physical type schema of the
dataset. Type: array (or Expression with resultType array), itemType:
DatasetSchemaDataElement.
:type schema: object
:param linked_service_name: Required. Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param parameters: Parameters for dataset.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param folder: The folder that this Dataset is in. If not specified,
Dataset will appear at the root level.
:type folder: ~azure.mgmt.datafactory.models.DatasetFolder
:param type: Required. Constant filled by server.
:type type: str
:param table_name: The table name. Type: string (or Expression with
resultType string).
:type table_name: object
"""
_validation = {
'linked_service_name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'description': {'key': 'description', 'type': 'str'},
'structure': {'key': 'structure', 'type': 'object'},
'schema': {'key': 'schema', 'type': 'object'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'folder': {'key': 'folder', 'type': 'DatasetFolder'},
'type': {'key': 'type', 'type': 'str'},
'table_name': {'key': 'typeProperties.tableName', 'type': 'object'},
}
def __init__(self, **kwargs):
super(JiraObjectDataset, self).__init__(**kwargs)
self.table_name = kwargs.get('table_name', None)
self.type = 'JiraObject'
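# Illustrative construction only (a sketch: LinkedServiceReference would need
# to be imported from the same models package, and the names below are
# placeholders, not values from this file):
#
# dataset = JiraObjectDataset(
#     linked_service_name=LinkedServiceReference(reference_name="ExampleJiraLinkedService"),
#     table_name="issues",
# )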
| 42.09589 | 94 | 0.645298 |
from .dataset import Dataset
class JiraObjectDataset(Dataset):
_validation = {
'linked_service_name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'description': {'key': 'description', 'type': 'str'},
'structure': {'key': 'structure', 'type': 'object'},
'schema': {'key': 'schema', 'type': 'object'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'folder': {'key': 'folder', 'type': 'DatasetFolder'},
'type': {'key': 'type', 'type': 'str'},
'table_name': {'key': 'typeProperties.tableName', 'type': 'object'},
}
def __init__(self, **kwargs):
super(JiraObjectDataset, self).__init__(**kwargs)
self.table_name = kwargs.get('table_name', None)
self.type = 'JiraObject'
| true | true |
1c2b12ca1a7d5f6be3a49a9439835fd39b13bb6f | 39,095 | py | Python | tests/test_optimize.py | rmoyard/pennylane | 14fdee89d8c3673840708b002c304aee4b31c507 | [
"Apache-2.0"
] | 3 | 2021-02-22T18:30:55.000Z | 2021-02-23T10:54:58.000Z | tests/test_optimize.py | rmoyard/pennylane | 14fdee89d8c3673840708b002c304aee4b31c507 | [
"Apache-2.0"
] | null | null | null | tests/test_optimize.py | rmoyard/pennylane | 14fdee89d8c3673840708b002c304aee4b31c507 | [
"Apache-2.0"
] | 1 | 2021-03-27T09:03:15.000Z | 2021-03-27T09:03:15.000Z | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane` optimizers.
"""
# pylint: disable=redefined-outer-name
import itertools as it
import numpy as onp
import pytest
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import (
GradientDescentOptimizer,
MomentumOptimizer,
NesterovMomentumOptimizer,
AdagradOptimizer,
RMSPropOptimizer,
AdamOptimizer,
RotoselectOptimizer,
RotosolveOptimizer,
)
x_vals = np.linspace(-10, 10, 16, endpoint=False)
# Hyperparameters for optimizers
stepsize = 0.1
gamma = 0.5
delta = 0.8
# function arguments in various formats
mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
multid_list = [[0.1, 0.2], [-0.1, -0.4]]
# functions and their gradients
fnames = ["test_function_1", "test_function_2", "test_function_3"]
univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x ** 2]
grad_uni_fns = [lambda x: (np.cos(x),), lambda x: (np.exp(x / 10.0) / 10.0,), lambda x: (2 * x,)]
multivariate_funcs = [
lambda x: np.sin(x[0]) + np.cos(x[1]),
lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
lambda x: np.sum([x_ ** 2 for x_ in x]),
]
grad_multi_funcs = [
lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
lambda x: (
np.array(
[np.exp(x[0] / 3) / 3 * np.tanh(x[1]), np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2)]
),
),
lambda x: (np.array([2 * x_ for x_ in x]),),
]
mvar_mdim_funcs = [
lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
lambda x: np.sum([x_[0] ** 2 for x_ in x]),
]
grad_mvar_mdim_funcs = [
lambda x: (np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])], [-np.sin(x[1, 0]), 1.0]]),),
lambda x: (
np.array(
[
[
np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1]) ** 2),
],
[0.0, 0.0],
]
),
),
lambda x: (np.array([[2 * x_[0], 0.0] for x_ in x]),),
]
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun(*variables):
qml.RX(variables[0][1], wires=[0])
qml.RY(variables[1][2], wires=[0])
qml.RY(variables[2], wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun_flat(var):
qml.RX(var[0], wires=[0])
qml.RY(var[1], wires=[0])
qml.RY(var[2], wires=[0])
qml.RX(var[3], wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun_mdarr(var):
qml.RX(var[0, 1], wires=[0])
qml.RY(var[1, 0], wires=[0])
qml.RY(var[1, 1], wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun_mdlist(var):
qml.RX(var[0][1], wires=[0])
qml.RY(var[1][0], wires=[0])
qml.RY(var[1][1], wires=[0])
return qml.expval(qml.PauliZ(0))
@pytest.fixture(scope="function")
def bunch():
class A:
sgd_opt = GradientDescentOptimizer(stepsize)
mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
adag_opt = AdagradOptimizer(stepsize)
rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)
rotosolve_opt = RotosolveOptimizer()
rotoselect_opt = RotoselectOptimizer()
return A()
class TestOptimizer:
"""Basic optimizer tests."""
def test_mixed_inputs_for_hybrid_optimization(self, bunch, tol):
"""Tests that gradient descent optimizer treats parameters of mixed types the same
for hybrid optimization tasks."""
def hybrid_fun(variables):
return quant_fun(*variables) + variables[0][1]
hybrid_list = bunch.sgd_opt.step(hybrid_fun, mixed_list)
hybrid_tuple = bunch.sgd_opt.step(hybrid_fun, mixed_tuple)
assert hybrid_list[0] == pytest.approx(hybrid_tuple[0], abs=tol)
assert hybrid_list[1] == pytest.approx(hybrid_tuple[1], abs=tol)
assert hybrid_list[2] == pytest.approx(hybrid_tuple[2], abs=tol)
def test_mixed_inputs_for_classical_optimization(self, bunch, tol):
"""Tests that gradient descent optimizer treats parameters of mixed types the same
for purely classical optimization tasks."""
def class_fun(var):
return var[0][1] * 2.0 + var[1][2] + var[2]
class_list = bunch.sgd_opt.step(class_fun, mixed_list)
class_tuple = bunch.sgd_opt.step(class_fun, mixed_tuple)
assert class_list[0] == pytest.approx(class_tuple[0], abs=tol)
assert class_list[1] == pytest.approx(class_tuple[1], abs=tol)
assert class_list[2] == pytest.approx(class_tuple[2], abs=tol)
def test_mixed_inputs_for_quantum_optimization(self, bunch, tol):
"""Tests that gradient descent optimizer treats parameters of mixed types the same
for purely quantum optimization tasks."""
quant_list = bunch.sgd_opt.step(quant_fun, *mixed_list)
quant_tuple = bunch.sgd_opt.step(quant_fun, *mixed_tuple)
assert quant_list[0] == pytest.approx(quant_tuple[0], abs=tol)
assert quant_list[1] == pytest.approx(quant_tuple[1], abs=tol)
assert quant_list[2] == pytest.approx(quant_tuple[2], abs=tol)
def test_array_and_list_return_same_update(self, bunch, tol):
"""Tests that gradient descent optimizer has the same output for
lists and arrays."""
def hybrid_fun_mdarr(var):
return quant_fun_mdarr(var) + var[0, 0]
def hybrid_fun_mdlist(var):
return quant_fun_mdlist(var) + var[0][0]
array = bunch.sgd_opt.step(hybrid_fun_mdarr, multid_array)
ls = bunch.sgd_opt.step(hybrid_fun_mdlist, multid_list)
assert array == pytest.approx(np.asarray(ls), abs=tol)
def test_step_and_cost_autograd_sgd_mixed_list(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
gradient-descent optimizer"""
_, res = bunch.sgd_opt.step_and_cost(quant_fun, *mixed_list)
expected = quant_fun(*mixed_list)
assert np.all(res == expected)
def test_step_and_cost_autograd_sgd_multid_array(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
gradient-descent optimizer"""
_, res = bunch.sgd_opt.step_and_cost(quant_fun_mdarr, multid_array)
expected = quant_fun_mdarr(multid_array)
assert np.all(res == expected)
def test_step_and_cost_autograd_nesterov_mixed_list(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
Nesterov momentum optimizer"""
_, res = bunch.nesmom_opt.step_and_cost(quant_fun, *mixed_list)
expected = quant_fun(*mixed_list)
assert np.all(res == expected)
def test_step_and_cost_autograd_nesterov_multid_array(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
Nesterov momentum optimizer"""
_, res = bunch.nesmom_opt.step_and_cost(quant_fun_mdarr, multid_array)
expected = quant_fun_mdarr(multid_array)
assert np.all(res == expected)
def test_step_and_cost_autograd_rotosolve_mixed_list(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
Rotosolve optimizer"""
_, res = bunch.rotosolve_opt.step_and_cost(quant_fun, *mixed_list)
expected = quant_fun(*mixed_list)
assert np.all(res == expected)
def test_step_and_cost_autograd_rotosolve_multid_array(self, bunch):
"""Test that the correct cost is returned via the step_and_cost method for the
Rotosolve optimizer"""
_, res = bunch.rotosolve_opt.step_and_cost(quant_fun_mdarr, multid_array)
expected = quant_fun_mdarr(multid_array)
assert np.all(res == expected)
@pytest.mark.parametrize("params", [[1.7, 2.2], [-1.42, 0.1], [0.05, -0.8]])
def test_step_and_cost_autograd_rotoselect(self, bunch, params):
"""Test that the correct cost is returned via the step_and_cost method for the
Rotoselect momentum optimizer"""
generators = [qml.RY, qml.RX]
possible_generators = [qml.RX, qml.RY, qml.RZ]
bunch.rotoselect_opt.possible_generators = possible_generators
dev = qml.device("default.qubit", shots=None, wires=2)
def ansatz(params, generators):
generators[0](params[0], wires=0)
generators[1](params[1], wires=1)
qml.CNOT(wires=[0, 1])
@qml.qnode(dev)
def circuit_1(params, generators=None): # generators will be passed as a keyword arg
ansatz(params, generators)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(dev)
def circuit_2(params, generators=None): # generators will be passed as a keyword arg
ansatz(params, generators)
return qml.expval(qml.PauliX(0))
def cost_fn(params, generators):
Z_1, Y_2 = circuit_1(params, generators=generators)
X_1 = circuit_2(params, generators=generators)
return 0.5 * Y_2 + 0.8 * Z_1 - 0.2 * X_1
_, _, res = bunch.rotoselect_opt.step_and_cost(cost_fn, params, generators)
expected = cost_fn(params, generators)
assert np.all(res == expected)
@pytest.mark.parametrize("func, f_grad", list(zip(univariate_funcs, grad_uni_fns)))
@pytest.mark.parametrize("var", [0, -3, 42])
def test_step_and_cost_supplied_grad(self, bunch, func, var, f_grad):
"""Test that returned cost is correct if gradient function is supplied"""
_, res = bunch.sgd_opt.step_and_cost(func, var, grad_fn=f_grad)
expected = func(var)
assert np.all(res == expected)
@pytest.mark.parametrize("x_start", x_vals)
def test_gradient_descent_optimizer_univar(self, x_start, bunch, tol):
"""Tests that basic stochastic gradient descent takes gradient-descent steps correctly
for uni-variate functions."""
# TODO: parametrize this test as well
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
x_new = bunch.sgd_opt.step(f, x_start)
x_correct = x_start - gradf(x_start)[0] * stepsize
assert x_new == pytest.approx(x_correct, abs=tol)
def test_gradient_descent_optimizer_multivar(self, bunch, tol):
"""Tests that basic stochastic gradient descent takes gradient-descent steps correctly
for multi-variate functions."""
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
x_vec = x_vals[jdx : jdx + 2]
x_new = bunch.sgd_opt.step(f, x_vec)
x_correct = x_vec - gradf(x_vec)[0] * stepsize
assert x_new == pytest.approx(x_correct, abs=tol)
def test_gradient_descent_optimizer_multivar_multidim(self, bunch, tol):
"""Tests that basic stochastic gradient descent takes gradient-descent steps correctly
for multi-variate functions and with higher dimensional inputs."""
for gradf, f, name in zip(grad_mvar_mdim_funcs, mvar_mdim_funcs, fnames):
for jdx in range(len(x_vals[:-3])):
x_vec = x_vals[jdx : jdx + 4]
x_vec_multidim = np.reshape(x_vec, (2, 2))
x_new = bunch.sgd_opt.step(f, x_vec_multidim)
x_correct = x_vec_multidim - gradf(x_vec_multidim)[0] * stepsize
x_new_flat = x_new.flatten()
x_correct_flat = x_correct.flatten()
assert x_new_flat == pytest.approx(x_correct_flat, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_gradient_descent_optimizer_usergrad(self, x_start, bunch, tol):
"""Tests that basic stochastic gradient descent takes gradient-descent steps correctly
using user-provided gradients."""
for gradf, f, name in zip(grad_uni_fns[::-1], univariate_funcs, fnames):
x_new = bunch.sgd_opt.step(f, x_start, grad_fn=gradf)
x_correct = x_start - gradf(x_start)[0] * stepsize
assert x_new == pytest.approx(x_correct, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_momentum_optimizer_univar(self, x_start, bunch, tol):
"""Tests that momentum optimizer takes one and two steps correctly
for uni-variate functions."""
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.mom_opt.reset()
x_onestep = bunch.mom_opt.step(f, x_start)
x_onestep_target = x_start - gradf(x_start)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.mom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_start)[0]
x_twosteps_target = x_onestep - (gradf(x_onestep)[0] + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_momentum_optimizer_multivar(self, bunch, tol):
"""Tests that momentum optimizer takes one and two steps correctly
for multi-variate functions."""
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.mom_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.mom_opt.step(f, x_vec)
x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.mom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_vec)[0]
x_twosteps_target = x_onestep - (gradf(x_onestep)[0] + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_nesterovmomentum_optimizer_univar(self, x_start, bunch, tol):
"""Tests that nesterov momentum optimizer takes one and two steps correctly
for uni-variate functions."""
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.nesmom_opt.reset()
x_onestep = bunch.nesmom_opt.step(f, x_start)
x_onestep_target = x_start - gradf(x_start)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.nesmom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_start)[0]
shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_nesterovmomentum_optimizer_multivar(self, bunch, tol):
"""Tests that nesterov momentum optimizer takes one and two steps correctly
for multi-variate functions."""
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.nesmom_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.nesmom_opt.step(f, x_vec)
x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.nesmom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_vec)[0]
shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_nesterovmomentum_optimizer_usergrad(self, x_start, bunch, tol):
"""Tests that nesterov momentum optimizer takes gradient-descent steps correctly
using user-provided gradients."""
for gradf, f, name in zip(grad_uni_fns[::-1], univariate_funcs, fnames):
bunch.nesmom_opt.reset()
x_onestep = bunch.nesmom_opt.step(f, x_start, grad_fn=gradf)
x_onestep_target = x_start - gradf(x_start)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.nesmom_opt.step(f, x_onestep, grad_fn=gradf)
momentum_term = gamma * gradf(x_start)[0]
shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_adagrad_optimizer_univar(self, x_start, bunch, tol):
"""Tests that adagrad optimizer takes one and two steps correctly
for uni-variate functions."""
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.adag_opt.reset()
x_onestep = bunch.adag_opt.step(f, x_start)
past_grads = gradf(x_start)[0] * gradf(x_start)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_onestep_target = x_start - gradf(x_start)[0] * adapt_stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.adag_opt.step(f, x_onestep)
past_grads = (
gradf(x_start)[0] * gradf(x_start)[0] + gradf(x_onestep)[0] * gradf(x_onestep)[0]
)
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_adagrad_optimizer_multivar(self, bunch, tol):
"""Tests that adagrad optimizer takes one and two steps correctly
for multi-variate functions."""
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.adag_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.adag_opt.step(f, x_vec)
past_grads = gradf(x_vec)[0] * gradf(x_vec)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_onestep_target = x_vec - gradf(x_vec)[0] * adapt_stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.adag_opt.step(f, x_onestep)
past_grads = (
gradf(x_vec)[0] * gradf(x_vec)[0] + gradf(x_onestep)[0] * gradf(x_onestep)[0]
)
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_rmsprop_optimizer_univar(self, x_start, bunch, tol):
"""Tests that rmsprop optimizer takes one and two steps correctly
for uni-variate functions."""
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.rms_opt.reset()
x_onestep = bunch.rms_opt.step(f, x_start)
past_grads = (1 - gamma) * gradf(x_start)[0] * gradf(x_start)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_onestep_target = x_start - gradf(x_start)[0] * adapt_stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.rms_opt.step(f, x_onestep)
past_grads = (1 - gamma) * gamma * gradf(x_start)[0] * gradf(x_start)[0] + (
1 - gamma
) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_rmsprop_optimizer_multivar(self, bunch, tol):
"""Tests that rmsprop optimizer takes one and two steps correctly
for multi-variate functions."""
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.rms_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.rms_opt.step(f, x_vec)
past_grads = (1 - gamma) * gradf(x_vec)[0] * gradf(x_vec)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_onestep_target = x_vec - gradf(x_vec)[0] * adapt_stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.rms_opt.step(f, x_onestep)
past_grads = (1 - gamma) * gamma * gradf(x_vec)[0] * gradf(x_vec)[0] + (
1 - gamma
) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_adam_optimizer_univar(self, x_start, bunch, tol):
"""Tests that adam optimizer takes one and two steps correctly
for uni-variate functions."""
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.adam_opt.reset()
x_onestep = bunch.adam_opt.step(f, x_start)
adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
firstmoment = gradf(x_start)[0]
secondmoment = gradf(x_start)[0] * gradf(x_start)[0]
x_onestep_target = x_start - adapted_stepsize * firstmoment / (
np.sqrt(secondmoment) + 1e-8
)
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.adam_opt.step(f, x_onestep)
adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
firstmoment = gamma * gradf(x_start)[0] + (1 - gamma) * gradf(x_onestep)[0]
secondmoment = (
delta * gradf(x_start)[0] * gradf(x_start)[0]
+ (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
)
x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
np.sqrt(secondmoment) + 1e-8
)
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_adam_optimizer_multivar(self, bunch, tol):
"""Tests that adam optimizer takes one and two steps correctly
for multi-variate functions."""
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.adam_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.adam_opt.step(f, x_vec)
adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
firstmoment = gradf(x_vec)[0]
secondmoment = gradf(x_vec)[0] * gradf(x_vec)[0]
x_onestep_target = x_vec - adapted_stepsize * firstmoment / (
np.sqrt(secondmoment) + 1e-8
)
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.adam_opt.step(f, x_onestep)
adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
firstmoment = gamma * gradf(x_vec)[0] + (1 - gamma) * gradf(x_onestep)[0]
secondmoment = (
delta * gradf(x_vec)[0] * gradf(x_vec)[0]
+ (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
)
x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
np.sqrt(secondmoment) + 1e-8
)
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@staticmethod
def rotosolve_step(f, x):
"""Helper function to test the Rotosolve and Rotoselect optimizers"""
# make sure that x is an array
if np.ndim(x) == 0:
x = np.array([x])
# helper function for x[d] = theta
def insert(xf, d, theta):
xf[d] = theta
return xf
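# Closed-form Rotosolve update (a sketch of the reasoning): along each
# coordinate the cost is assumed sinusoidal, C(theta) = A*sin(theta + phi) + B,
# so the three evaluations at 0 and +/-pi/2 determine phi via
# phi = arctan2(2*C(0) - C(pi/2) - C(-pi/2), C(pi/2) - C(-pi/2)),
# and the per-coordinate minimizer is theta* = -pi/2 - phi (shifted back into
# (-pi, pi] below), which is exactly what this loop computes.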
for d, _ in enumerate(x):
H_0 = float(f(insert(x, d, 0)))
H_p = float(f(insert(x, d, np.pi / 2)))
H_m = float(f(insert(x, d, -np.pi / 2)))
a = onp.arctan2(2 * H_0 - H_p - H_m, H_p - H_m)
x[d] = -np.pi / 2 - a
if x[d] <= -np.pi:
x[d] += 2 * np.pi
return x
@pytest.mark.parametrize("x_start", x_vals)
def test_rotosolve_optimizer_univar(self, x_start, bunch, tol):
"""Tests that rotosolve optimizer takes one and two steps correctly
for uni-variate functions."""
for f in univariate_funcs:
x_onestep = bunch.rotosolve_opt.step(f, x_start)
x_onestep_target = self.rotosolve_step(f, x_start)
assert np.allclose(x_onestep, x_onestep_target, atol=tol, rtol=0)
x_twosteps = bunch.rotosolve_opt.step(f, x_onestep)
x_twosteps_target = self.rotosolve_step(f, x_onestep_target)
assert np.allclose(x_twosteps, x_twosteps_target, atol=tol, rtol=0)
@pytest.mark.parametrize(
"x_start",
[
[1.2, 0.2],
[-0.62, -2.1],
[0.05, 0.8],
[[0.3], [0.25]],
[[-0.6], [0.45]],
[[1.3], [-0.9]],
],
)
def test_rotosolve_optimizer_multivar(self, x_start, bunch, tol):
"""Tests that rotosolve optimizer takes one and two steps correctly
for multi-variate functions."""
for func in multivariate_funcs:
# alter multivariate_func to accept nested lists of parameters
f = lambda x: func(np.ravel(x))
x_onestep = bunch.rotosolve_opt.step(f, x_start)
x_onestep_target = self.rotosolve_step(f, x_start)
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.rotosolve_opt.step(f, x_onestep)
x_twosteps_target = self.rotosolve_step(f, x_onestep_target)
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", [[1.2, 0.2], [-0.62, -2.1], [0.05, 0.8]])
@pytest.mark.parametrize(
"generators", [list(tup) for tup in it.product([qml.RX, qml.RY, qml.RZ], repeat=2)]
)
def test_rotoselect_optimizer(self, x_start, generators, bunch, tol):
"""Tests that rotoselect optimizer finds the optimal generators and parameters for the VQE circuit
defined in `this rotoselect tutorial <https://pennylane.ai/qml/demos/tutorial_rotoselect.html>`_."""
# the optimal generators for the 2-qubit VQE circuit
# H = 0.5 * Y_2 + 0.8 * Z_1 - 0.2 * X_1
optimal_generators = [qml.RY, qml.RX]
possible_generators = [qml.RX, qml.RY, qml.RZ]
bunch.rotoselect_opt.possible_generators = possible_generators
dev = qml.device("default.qubit", shots=None, wires=2)
def ansatz(params, generators):
generators[0](params[0], wires=0)
generators[1](params[1], wires=1)
qml.CNOT(wires=[0, 1])
@qml.qnode(dev)
def circuit_1(params, generators=None): # generators will be passed as a keyword arg
ansatz(params, generators)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(dev)
def circuit_2(params, generators=None): # generators will be passed as a keyword arg
ansatz(params, generators)
return qml.expval(qml.PauliX(0))
def cost_fn(params, generators):
Z_1, Y_2 = circuit_1(params, generators=generators)
X_1 = circuit_2(params, generators=generators)
return 0.5 * Y_2 + 0.8 * Z_1 - 0.2 * X_1
f_best_gen = lambda x: cost_fn(x, optimal_generators)
optimal_x_start = x_start.copy()
# after four steps the optimizer should find the optimal generators/x_start values
for _ in range(4):
x_start, generators = bunch.rotoselect_opt.step(cost_fn, x_start, generators)
optimal_x_start = self.rotosolve_step(f_best_gen, optimal_x_start)
assert x_start == pytest.approx(optimal_x_start, abs=tol)
assert generators == optimal_generators
@pytest.mark.parametrize("x_start", [[1.2, 0.2], [-0.62, -2.1], [0.05, 0.8]])
def test_keywords_rotoselect(self, bunch, x_start, tol):
"""test rotoselect accepts keywords"""
generators = [qml.RY, qml.RX]
possible_generators = [qml.RX, qml.RY, qml.RZ]
bunch.rotoselect_opt.possible_generators = possible_generators
dev = qml.device("default.qubit", shots=None, wires=2)
def ansatz(params, generators):
generators[0](params[0], wires=0)
generators[1](params[1], wires=1)
qml.CNOT(wires=[0, 1])
@qml.qnode(dev)
def circuit_1(params, generators=None): # generators will be passed as a keyword arg
ansatz(params, generators)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(dev)
def circuit_2(params, generators=None): # generators will be passed as a keyword arg
ansatz(params, generators)
return qml.expval(qml.PauliX(0))
def cost_fn(params, generators, shift=0.0):
Z_1, Y_2 = circuit_1(params, generators=generators)
X_1 = circuit_2(params, generators=generators)
return 0.5 * (Y_2 - shift) ** 2 + 0.8 * (Z_1 - shift) ** 2 - 0.2 * (X_1 - shift) ** 2
params_new, _, res_new = bunch.rotoselect_opt.step_and_cost(
cost_fn, x_start, generators, shift=0.0
)
params_new2, _, res_new2 = bunch.rotoselect_opt.step_and_cost(
cost_fn, x_start, generators, shift=1.0
)
assert params_new != pytest.approx(params_new2, abs=tol)
assert res_new2 == pytest.approx(cost_fn(x_start, generators, shift=1.0), abs=tol)
def test_update_stepsize(self):
"""Tests that the stepsize correctly updates"""
eta = 0.5
opt = AdamOptimizer(eta)
assert opt._stepsize == eta
eta2 = 0.1
opt.update_stepsize(eta2)
assert opt._stepsize == eta2
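# The stateful optimizers exercised below (momentum, Nesterov, Adagrad,
# RMSProp, Adam) carry accumulators between calls and expose ``reset()``;
# plain gradient descent and Rotosolve do not, hence the getattr guard.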
def reset(opt):
if getattr(opt, "reset", None):
opt.reset()
@pytest.fixture
def opt(opt_name):
if opt_name == "gd":
return GradientDescentOptimizer(stepsize)
if opt_name == "nest":
return NesterovMomentumOptimizer(stepsize, momentum=gamma)
if opt_name == "moment":
return MomentumOptimizer(stepsize, momentum=gamma)
if opt_name == "ada":
return AdagradOptimizer(stepsize)
if opt_name == "rms":
return RMSPropOptimizer(stepsize, decay=gamma)
if opt_name == "adam":
return AdamOptimizer(stepsize, beta1=gamma, beta2=delta)
if opt_name == "roto":
return RotosolveOptimizer()
@pytest.mark.parametrize(
"opt_name",
[
"gd",
"moment",
"nest",
"ada",
"rms",
"adam",
"roto",
],
)
class TestOverOpts:
"""Tests keywords, multiple arguements, and non-training arguments in relevent optimizers"""
def test_kwargs(self, mocker, opt, opt_name, tol):
"""Test that the keywords get passed and alter the function"""
class func_wrapper:
@staticmethod
def func(x, c=1.0):
return (x - c) ** 2
x = 1.0
wrapper = func_wrapper()
spy = mocker.spy(wrapper, "func")
x_new_two = opt.step(wrapper.func, x, c=2.0)
reset(opt)
args2, kwargs2 = spy.call_args_list[-1]
x_new_three_wc, cost_three = opt.step_and_cost(wrapper.func, x, c=3.0)
reset(opt)
args3, kwargs3 = spy.call_args_list[-1]
if opt_name != "roto":
assert args2 == (x,)
assert args3 == (x,)
else:
assert x_new_two != pytest.approx(x, abs=tol)
assert x_new_three_wc != pytest.approx(x, abs=tol)
assert kwargs2 == {"c": 2.0}
assert kwargs3 == {"c": 3.0}
assert cost_three == pytest.approx(wrapper.func(x, c=3.0), abs=tol)
def test_multi_args(self, mocker, opt, opt_name, tol):
"""Test passing multiple arguments to function"""
class func_wrapper:
@staticmethod
def func(x, y, z):
return x[0] * y[0] + z[0]
wrapper = func_wrapper()
spy = mocker.spy(wrapper, "func")
x = np.array([1.0])
y = np.array([2.0])
z = np.array([3.0])
(x_new, y_new, z_new), cost = opt.step_and_cost(wrapper.func, x, y, z)
reset(opt)
args_called1, kwargs1 = spy.call_args_list[-1] # just take last call
x_new2, y_new2, z_new2 = opt.step(wrapper.func, x_new, y_new, z_new)
reset(opt)
args_called2, kwargs2 = spy.call_args_list[-1] # just take last call
if opt_name != "roto":
assert args_called1 == (x, y, z)
assert args_called2 == (x_new, y_new, z_new)
else:
assert x_new != pytest.approx(x, abs=tol)
assert y_new != pytest.approx(y, abs=tol)
assert z_new != pytest.approx(z, abs=tol)
assert kwargs1 == {}
assert kwargs2 == {}
assert cost == pytest.approx(wrapper.func(x, y, z), abs=tol)
def test_nontrainable_data(self, opt, opt_name, tol):
"""Check non-trainable argument does not get updated"""
def func(x, data):
return x[0] * data[0]
x = np.array([1.0])
data = np.array([1.0], requires_grad=False)
args_new = opt.step(func, x, data)
reset(opt)
args_new_wc, cost = opt.step_and_cost(func, *args_new)
reset(opt)
assert len(args_new) == pytest.approx(2, abs=tol)
assert args_new[0] != pytest.approx(x, abs=tol)
assert args_new[1] == pytest.approx(data, abs=tol)
assert cost == pytest.approx(func(*args_new), abs=tol)
def test_steps_the_same(self, opt, opt_name, tol):
"""Tests whether separating the args into different inputs affects their
optimization step. Assumes single argument optimization is correct, as tested elsewhere."""
def func1(x, y, z):
return x[0] * y[0] * z[0]
def func2(args):
return args[0][0] * args[1][0] * args[2][0]
x = np.array([1.0])
y = np.array([2.0])
z = np.array([3.0])
args = (x, y, z)
x_seperate, y_seperate, z_seperate = opt.step(func1, x, y, z)
reset(opt)
args_new = opt.step(func2, args)
reset(opt)
assert x_seperate == pytest.approx(args_new[0], abs=tol)
assert y_seperate == pytest.approx(args_new[1], abs=tol)
assert z_seperate == pytest.approx(args_new[2], abs=tol)
def test_one_trainable_one_non_trainable(self, opt, opt_name, tol):
"""Tests that a cost function that takes one trainable and one
non-trainable parameter executes well."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
def cost(x, target):
return (circuit(x) - target) ** 2
ev = np.tensor(0.7781, requires_grad=False)
x = np.tensor(0.0, requires_grad=True)
original_ev = ev
(x, ev), cost = opt.step_and_cost(cost, x, ev)
# check that the argument to RX doesn't change, as the X rotation doesn't influence <Z>
assert x == 0
assert ev == original_ev
def test_one_non_trainable_one_trainable(self, opt, opt_name, tol):
"""Tests that a cost function that takes one non-trainable and one
trainable parameter executes well."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
def cost(target, x): # Note: the order of the arguments has been swapped
return (circuit(x) - target) ** 2
ev = np.tensor(0.7781, requires_grad=False)
x = np.tensor(0.0, requires_grad=True)
original_ev = ev
(ev, x), cost = opt.step_and_cost(cost, ev, x)
# check that the argument to RX doesn't change, as the X rotation doesn't influence <Z>
assert x == 0
assert ev == original_ev
def test_two_trainable_args(self, opt, opt_name, tol):
"""Tests that a cost function that takes at least two trainable
arguments executes well."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x, y):
qml.RX(x, wires=0)
qml.RX(y, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
def cost(x, y, target):
return (circuit(x, y) - target) ** 2
ev = np.tensor(0.7781, requires_grad=False)
x = np.tensor(0.0, requires_grad=True)
y = np.tensor(0.0, requires_grad=True)
original_ev = ev
(x, y, ev), cost = opt.step_and_cost(cost, x, y, ev)
# check that the trainable arguments are not updated: (x, y) = (0, 0) is a stationary point of the cost, since <Z0 Z1> = cos(x + y) has zero gradient there
assert x == 0
assert ev == original_ev
| 40.015353 | 108 | 0.614503 |
import itertools as it
import numpy as onp
import pytest
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import (
GradientDescentOptimizer,
MomentumOptimizer,
NesterovMomentumOptimizer,
AdagradOptimizer,
RMSPropOptimizer,
AdamOptimizer,
RotoselectOptimizer,
RotosolveOptimizer,
)
x_vals = np.linspace(-10, 10, 16, endpoint=False)
stepsize = 0.1
gamma = 0.5
delta = 0.8
mixed_list = [(0.2, 0.3), np.array([0.4, 0.2, 0.4]), 0.1]
mixed_tuple = (np.array([0.2, 0.3]), [0.4, 0.2, 0.4], 0.1)
flat_list = [0.2, 0.3, 0.1, 0.4, -0.1]
multid_array = np.array([[0.1, 0.2], [-0.1, -0.4]])
multid_list = [[0.1, 0.2], [-0.1, -0.4]]
fnames = ["test_function_1", "test_function_2", "test_function_3"]
univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x ** 2]
grad_uni_fns = [lambda x: (np.cos(x),), lambda x: (np.exp(x / 10.0) / 10.0,), lambda x: (2 * x,)]
multivariate_funcs = [
lambda x: np.sin(x[0]) + np.cos(x[1]),
lambda x: np.exp(x[0] / 3) * np.tanh(x[1]),
lambda x: np.sum([x_ ** 2 for x_ in x]),
]
grad_multi_funcs = [
lambda x: (np.array([np.cos(x[0]), -np.sin(x[1])]),),
lambda x: (
np.array(
[np.exp(x[0] / 3) / 3 * np.tanh(x[1]), np.exp(x[0] / 3) * (1 - np.tanh(x[1]) ** 2)]
),
),
lambda x: (np.array([2 * x_ for x_ in x]),),
]
mvar_mdim_funcs = [
lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0]) - np.sin(x[0, 1]) + x[1, 1],
lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[0, 1]),
lambda x: np.sum([x_[0] ** 2 for x_ in x]),
]
grad_mvar_mdim_funcs = [
lambda x: (np.array([[np.cos(x[0, 0]), -np.cos(x[0, 1])], [-np.sin(x[1, 0]), 1.0]]),),
lambda x: (
np.array(
[
[
np.exp(x[0, 0] / 3) / 3 * np.tanh(x[0, 1]),
np.exp(x[0, 0] / 3) * (1 - np.tanh(x[0, 1]) ** 2),
],
[0.0, 0.0],
]
),
),
lambda x: (np.array([[2 * x_[0], 0.0] for x_ in x]),),
]
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun(*variables):
qml.RX(variables[0][1], wires=[0])
qml.RY(variables[1][2], wires=[0])
qml.RY(variables[2], wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun_flat(var):
qml.RX(var[0], wires=[0])
qml.RY(var[1], wires=[0])
qml.RY(var[2], wires=[0])
qml.RX(var[3], wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun_mdarr(var):
qml.RX(var[0, 1], wires=[0])
qml.RY(var[1, 0], wires=[0])
qml.RY(var[1, 1], wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(qml.device("default.qubit", wires=1))
def quant_fun_mdlist(var):
qml.RX(var[0][1], wires=[0])
qml.RY(var[1][0], wires=[0])
qml.RY(var[1][1], wires=[0])
return qml.expval(qml.PauliZ(0))
@pytest.fixture(scope="function")
def bunch():
class A:
sgd_opt = GradientDescentOptimizer(stepsize)
mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
nesmom_opt = NesterovMomentumOptimizer(stepsize, momentum=gamma)
adag_opt = AdagradOptimizer(stepsize)
rms_opt = RMSPropOptimizer(stepsize, decay=gamma)
adam_opt = AdamOptimizer(stepsize, beta1=gamma, beta2=delta)
rotosolve_opt = RotosolveOptimizer()
rotoselect_opt = RotoselectOptimizer()
return A()
class TestOptimizer:
def test_mixed_inputs_for_hybrid_optimization(self, bunch, tol):
def hybrid_fun(variables):
return quant_fun(*variables) + variables[0][1]
hybrid_list = bunch.sgd_opt.step(hybrid_fun, mixed_list)
hybrid_tuple = bunch.sgd_opt.step(hybrid_fun, mixed_tuple)
assert hybrid_list[0] == pytest.approx(hybrid_tuple[0], abs=tol)
assert hybrid_list[1] == pytest.approx(hybrid_tuple[1], abs=tol)
assert hybrid_list[2] == pytest.approx(hybrid_tuple[2], abs=tol)
def test_mixed_inputs_for_classical_optimization(self, bunch, tol):
def class_fun(var):
return var[0][1] * 2.0 + var[1][2] + var[2]
class_list = bunch.sgd_opt.step(class_fun, mixed_list)
class_tuple = bunch.sgd_opt.step(class_fun, mixed_tuple)
assert class_list[0] == pytest.approx(class_tuple[0], abs=tol)
assert class_list[1] == pytest.approx(class_tuple[1], abs=tol)
assert class_list[2] == pytest.approx(class_tuple[2], abs=tol)
def test_mixed_inputs_for_quantum_optimization(self, bunch, tol):
quant_list = bunch.sgd_opt.step(quant_fun, *mixed_list)
quant_tuple = bunch.sgd_opt.step(quant_fun, *mixed_tuple)
assert quant_list[0] == pytest.approx(quant_tuple[0], abs=tol)
assert quant_list[1] == pytest.approx(quant_tuple[1], abs=tol)
assert quant_list[2] == pytest.approx(quant_tuple[2], abs=tol)
def test_array_and_list_return_same_update(self, bunch, tol):
def hybrid_fun_mdarr(var):
return quant_fun_mdarr(var) + var[0, 0]
def hybrid_fun_mdlist(var):
return quant_fun_mdlist(var) + var[0][0]
array = bunch.sgd_opt.step(hybrid_fun_mdarr, multid_array)
ls = bunch.sgd_opt.step(hybrid_fun_mdlist, multid_list)
assert array == pytest.approx(np.asarray(ls), abs=tol)
def test_step_and_cost_autograd_sgd_mixed_list(self, bunch):
_, res = bunch.sgd_opt.step_and_cost(quant_fun, *mixed_list)
expected = quant_fun(*mixed_list)
assert np.all(res == expected)
def test_step_and_cost_autograd_sgd_multid_array(self, bunch):
_, res = bunch.sgd_opt.step_and_cost(quant_fun_mdarr, multid_array)
expected = quant_fun_mdarr(multid_array)
assert np.all(res == expected)
def test_step_and_cost_autograd_nesterov_mixed_list(self, bunch):
_, res = bunch.nesmom_opt.step_and_cost(quant_fun, *mixed_list)
expected = quant_fun(*mixed_list)
assert np.all(res == expected)
def test_step_and_cost_autograd_nesterov_multid_array(self, bunch):
_, res = bunch.nesmom_opt.step_and_cost(quant_fun_mdarr, multid_array)
expected = quant_fun_mdarr(multid_array)
assert np.all(res == expected)
def test_step_and_cost_autograd_rotosolve_mixed_list(self, bunch):
_, res = bunch.rotosolve_opt.step_and_cost(quant_fun, *mixed_list)
expected = quant_fun(*mixed_list)
assert np.all(res == expected)
def test_step_and_cost_autograd_rotosolve_multid_array(self, bunch):
_, res = bunch.rotosolve_opt.step_and_cost(quant_fun_mdarr, multid_array)
expected = quant_fun_mdarr(multid_array)
assert np.all(res == expected)
@pytest.mark.parametrize("params", [[1.7, 2.2], [-1.42, 0.1], [0.05, -0.8]])
def test_step_and_cost_autograd_rotoselect(self, bunch, params):
generators = [qml.RY, qml.RX]
possible_generators = [qml.RX, qml.RY, qml.RZ]
bunch.rotoselect_opt.possible_generators = possible_generators
dev = qml.device("default.qubit", shots=None, wires=2)
def ansatz(params, generators):
generators[0](params[0], wires=0)
generators[1](params[1], wires=1)
qml.CNOT(wires=[0, 1])
@qml.qnode(dev)
def circuit_1(params, generators=None):
ansatz(params, generators)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(dev)
def circuit_2(params, generators=None):
ansatz(params, generators)
return qml.expval(qml.PauliX(0))
def cost_fn(params, generators):
Z_1, Y_2 = circuit_1(params, generators=generators)
X_1 = circuit_2(params, generators=generators)
return 0.5 * Y_2 + 0.8 * Z_1 - 0.2 * X_1
_, _, res = bunch.rotoselect_opt.step_and_cost(cost_fn, params, generators)
expected = cost_fn(params, generators)
assert np.all(res == expected)
@pytest.mark.parametrize("func, f_grad", list(zip(univariate_funcs, grad_uni_fns)))
@pytest.mark.parametrize("var", [0, -3, 42])
def test_step_and_cost_supplied_grad(self, bunch, func, var, f_grad):
_, res = bunch.sgd_opt.step_and_cost(func, var, grad_fn=f_grad)
expected = func(var)
assert np.all(res == expected)
@pytest.mark.parametrize("x_start", x_vals)
def test_gradient_descent_optimizer_univar(self, x_start, bunch, tol):
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
x_new = bunch.sgd_opt.step(f, x_start)
x_correct = x_start - gradf(x_start)[0] * stepsize
assert x_new == pytest.approx(x_correct, abs=tol)
def test_gradient_descent_optimizer_multivar(self, bunch, tol):
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
x_vec = x_vals[jdx : jdx + 2]
x_new = bunch.sgd_opt.step(f, x_vec)
x_correct = x_vec - gradf(x_vec)[0] * stepsize
assert x_new == pytest.approx(x_correct, abs=tol)
def test_gradient_descent_optimizer_multivar_multidim(self, bunch, tol):
for gradf, f, name in zip(grad_mvar_mdim_funcs, mvar_mdim_funcs, fnames):
for jdx in range(len(x_vals[:-3])):
x_vec = x_vals[jdx : jdx + 4]
x_vec_multidim = np.reshape(x_vec, (2, 2))
x_new = bunch.sgd_opt.step(f, x_vec_multidim)
x_correct = x_vec_multidim - gradf(x_vec_multidim)[0] * stepsize
x_new_flat = x_new.flatten()
x_correct_flat = x_correct.flatten()
assert x_new_flat == pytest.approx(x_correct_flat, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_gradient_descent_optimizer_usergrad(self, x_start, bunch, tol):
for gradf, f, name in zip(grad_uni_fns[::-1], univariate_funcs, fnames):
x_new = bunch.sgd_opt.step(f, x_start, grad_fn=gradf)
x_correct = x_start - gradf(x_start)[0] * stepsize
assert x_new == pytest.approx(x_correct, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_momentum_optimizer_univar(self, x_start, bunch, tol):
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.mom_opt.reset()
x_onestep = bunch.mom_opt.step(f, x_start)
x_onestep_target = x_start - gradf(x_start)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.mom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_start)[0]
x_twosteps_target = x_onestep - (gradf(x_onestep)[0] + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_momentum_optimizer_multivar(self, bunch, tol):
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.mom_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.mom_opt.step(f, x_vec)
x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.mom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_vec)[0]
x_twosteps_target = x_onestep - (gradf(x_onestep)[0] + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_nesterovmomentum_optimizer_univar(self, x_start, bunch, tol):
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.nesmom_opt.reset()
x_onestep = bunch.nesmom_opt.step(f, x_start)
x_onestep_target = x_start - gradf(x_start)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.nesmom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_start)[0]
shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_nesterovmomentum_optimizer_multivar(self, bunch, tol):
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.nesmom_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.nesmom_opt.step(f, x_vec)
x_onestep_target = x_vec - gradf(x_vec)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.nesmom_opt.step(f, x_onestep)
momentum_term = gamma * gradf(x_vec)[0]
shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_nesterovmomentum_optimizer_usergrad(self, x_start, bunch, tol):
for gradf, f, name in zip(grad_uni_fns[::-1], univariate_funcs, fnames):
bunch.nesmom_opt.reset()
x_onestep = bunch.nesmom_opt.step(f, x_start, grad_fn=gradf)
x_onestep_target = x_start - gradf(x_start)[0] * stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.nesmom_opt.step(f, x_onestep, grad_fn=gradf)
momentum_term = gamma * gradf(x_start)[0]
shifted_grad_term = gradf(x_onestep - stepsize * momentum_term)[0]
x_twosteps_target = x_onestep - (shifted_grad_term + momentum_term) * stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_adagrad_optimizer_univar(self, x_start, bunch, tol):
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.adag_opt.reset()
x_onestep = bunch.adag_opt.step(f, x_start)
past_grads = gradf(x_start)[0] * gradf(x_start)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_onestep_target = x_start - gradf(x_start)[0] * adapt_stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.adag_opt.step(f, x_onestep)
past_grads = (
gradf(x_start)[0] * gradf(x_start)[0] + gradf(x_onestep)[0] * gradf(x_onestep)[0]
)
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_adagrad_optimizer_multivar(self, bunch, tol):
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.adag_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.adag_opt.step(f, x_vec)
past_grads = gradf(x_vec)[0] * gradf(x_vec)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_onestep_target = x_vec - gradf(x_vec)[0] * adapt_stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.adag_opt.step(f, x_onestep)
past_grads = (
gradf(x_vec)[0] * gradf(x_vec)[0] + gradf(x_onestep)[0] * gradf(x_onestep)[0]
)
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_rmsprop_optimizer_univar(self, x_start, bunch, tol):
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.rms_opt.reset()
x_onestep = bunch.rms_opt.step(f, x_start)
past_grads = (1 - gamma) * gradf(x_start)[0] * gradf(x_start)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_onestep_target = x_start - gradf(x_start)[0] * adapt_stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.rms_opt.step(f, x_onestep)
past_grads = (1 - gamma) * gamma * gradf(x_start)[0] * gradf(x_start)[0] + (
1 - gamma
) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_rmsprop_optimizer_multivar(self, bunch, tol):
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.rms_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.rms_opt.step(f, x_vec)
past_grads = (1 - gamma) * gradf(x_vec)[0] * gradf(x_vec)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_onestep_target = x_vec - gradf(x_vec)[0] * adapt_stepsize
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.rms_opt.step(f, x_onestep)
past_grads = (1 - gamma) * gamma * gradf(x_vec)[0] * gradf(x_vec)[0] + (
1 - gamma
) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
adapt_stepsize = stepsize / np.sqrt(past_grads + 1e-8)
x_twosteps_target = x_onestep - gradf(x_onestep)[0] * adapt_stepsize
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", x_vals)
def test_adam_optimizer_univar(self, x_start, bunch, tol):
for gradf, f, name in zip(grad_uni_fns, univariate_funcs, fnames):
bunch.adam_opt.reset()
x_onestep = bunch.adam_opt.step(f, x_start)
adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
firstmoment = gradf(x_start)[0]
secondmoment = gradf(x_start)[0] * gradf(x_start)[0]
x_onestep_target = x_start - adapted_stepsize * firstmoment / (
np.sqrt(secondmoment) + 1e-8
)
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.adam_opt.step(f, x_onestep)
adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
firstmoment = gamma * gradf(x_start)[0] + (1 - gamma) * gradf(x_onestep)[0]
secondmoment = (
delta * gradf(x_start)[0] * gradf(x_start)[0]
+ (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
)
x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
np.sqrt(secondmoment) + 1e-8
)
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
def test_adam_optimizer_multivar(self, bunch, tol):
for gradf, f, name in zip(grad_multi_funcs, multivariate_funcs, fnames):
for jdx in range(len(x_vals[:-1])):
bunch.adam_opt.reset()
x_vec = x_vals[jdx : jdx + 2]
x_onestep = bunch.adam_opt.step(f, x_vec)
adapted_stepsize = stepsize * np.sqrt(1 - delta) / (1 - gamma)
firstmoment = gradf(x_vec)[0]
secondmoment = gradf(x_vec)[0] * gradf(x_vec)[0]
x_onestep_target = x_vec - adapted_stepsize * firstmoment / (
np.sqrt(secondmoment) + 1e-8
)
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.adam_opt.step(f, x_onestep)
adapted_stepsize = stepsize * np.sqrt(1 - delta ** 2) / (1 - gamma ** 2)
firstmoment = gamma * gradf(x_vec)[0] + (1 - gamma) * gradf(x_onestep)[0]
secondmoment = (
delta * gradf(x_vec)[0] * gradf(x_vec)[0]
+ (1 - delta) * gradf(x_onestep)[0] * gradf(x_onestep)[0]
)
x_twosteps_target = x_onestep - adapted_stepsize * firstmoment / (
np.sqrt(secondmoment) + 1e-8
)
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@staticmethod
def rotosolve_step(f, x):
if np.ndim(x) == 0:
x = np.array([x])
def insert(xf, d, theta):
xf[d] = theta
return xf
for d, _ in enumerate(x):
H_0 = float(f(insert(x, d, 0)))
H_p = float(f(insert(x, d, np.pi / 2)))
H_m = float(f(insert(x, d, -np.pi / 2)))
a = onp.arctan2(2 * H_0 - H_p - H_m, H_p - H_m)
x[d] = -np.pi / 2 - a
if x[d] <= -np.pi:
x[d] += 2 * np.pi
return x
@pytest.mark.parametrize("x_start", x_vals)
def test_rotosolve_optimizer_univar(self, x_start, bunch, tol):
for f in univariate_funcs:
x_onestep = bunch.rotosolve_opt.step(f, x_start)
x_onestep_target = self.rotosolve_step(f, x_start)
assert np.allclose(x_onestep, x_onestep_target, atol=tol, rtol=0)
x_twosteps = bunch.rotosolve_opt.step(f, x_onestep)
x_twosteps_target = self.rotosolve_step(f, x_onestep_target)
assert np.allclose(x_twosteps, x_twosteps_target, atol=tol, rtol=0)
@pytest.mark.parametrize(
"x_start",
[
[1.2, 0.2],
[-0.62, -2.1],
[0.05, 0.8],
[[0.3], [0.25]],
[[-0.6], [0.45]],
[[1.3], [-0.9]],
],
)
def test_rotosolve_optimizer_multivar(self, x_start, bunch, tol):
for func in multivariate_funcs:
f = lambda x: func(np.ravel(x))
x_onestep = bunch.rotosolve_opt.step(f, x_start)
x_onestep_target = self.rotosolve_step(f, x_start)
assert x_onestep == pytest.approx(x_onestep_target, abs=tol)
x_twosteps = bunch.rotosolve_opt.step(f, x_onestep)
x_twosteps_target = self.rotosolve_step(f, x_onestep_target)
assert x_twosteps == pytest.approx(x_twosteps_target, abs=tol)
@pytest.mark.parametrize("x_start", [[1.2, 0.2], [-0.62, -2.1], [0.05, 0.8]])
@pytest.mark.parametrize(
"generators", [list(tup) for tup in it.product([qml.RX, qml.RY, qml.RZ], repeat=2)]
)
def test_rotoselect_optimizer(self, x_start, generators, bunch, tol):
optimal_generators = [qml.RY, qml.RX]
possible_generators = [qml.RX, qml.RY, qml.RZ]
bunch.rotoselect_opt.possible_generators = possible_generators
dev = qml.device("default.qubit", shots=None, wires=2)
def ansatz(params, generators):
generators[0](params[0], wires=0)
generators[1](params[1], wires=1)
qml.CNOT(wires=[0, 1])
@qml.qnode(dev)
def circuit_1(params, generators=None):
ansatz(params, generators)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(dev)
def circuit_2(params, generators=None):
ansatz(params, generators)
return qml.expval(qml.PauliX(0))
def cost_fn(params, generators):
Z_1, Y_2 = circuit_1(params, generators=generators)
X_1 = circuit_2(params, generators=generators)
return 0.5 * Y_2 + 0.8 * Z_1 - 0.2 * X_1
f_best_gen = lambda x: cost_fn(x, optimal_generators)
optimal_x_start = x_start.copy()
for _ in range(4):
x_start, generators = bunch.rotoselect_opt.step(cost_fn, x_start, generators)
optimal_x_start = self.rotosolve_step(f_best_gen, optimal_x_start)
assert x_start == pytest.approx(optimal_x_start, abs=tol)
assert generators == optimal_generators
@pytest.mark.parametrize("x_start", [[1.2, 0.2], [-0.62, -2.1], [0.05, 0.8]])
def test_keywords_rotoselect(self, bunch, x_start, tol):
generators = [qml.RY, qml.RX]
possible_generators = [qml.RX, qml.RY, qml.RZ]
bunch.rotoselect_opt.possible_generators = possible_generators
dev = qml.device("default.qubit", shots=None, wires=2)
def ansatz(params, generators):
generators[0](params[0], wires=0)
generators[1](params[1], wires=1)
qml.CNOT(wires=[0, 1])
@qml.qnode(dev)
def circuit_1(params, generators=None):
ansatz(params, generators)
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(dev)
def circuit_2(params, generators=None):
ansatz(params, generators)
return qml.expval(qml.PauliX(0))
def cost_fn(params, generators, shift=0.0):
Z_1, Y_2 = circuit_1(params, generators=generators)
X_1 = circuit_2(params, generators=generators)
return 0.5 * (Y_2 - shift) ** 2 + 0.8 * (Z_1 - shift) ** 2 - 0.2 * (X_1 - shift) ** 2
params_new, _, res_new = bunch.rotoselect_opt.step_and_cost(
cost_fn, x_start, generators, shift=0.0
)
params_new2, _, res_new2 = bunch.rotoselect_opt.step_and_cost(
cost_fn, x_start, generators, shift=1.0
)
assert params_new != pytest.approx(params_new2, abs=tol)
assert res_new2 == pytest.approx(cost_fn(x_start, generators, shift=1.0), abs=tol)
def test_update_stepsize(self):
eta = 0.5
opt = AdamOptimizer(eta)
assert opt._stepsize == eta
eta2 = 0.1
opt.update_stepsize(eta2)
assert opt._stepsize == eta2
def reset(opt):
if getattr(opt, "reset", None):
opt.reset()
@pytest.fixture
def opt(opt_name):
if opt_name == "gd":
return GradientDescentOptimizer(stepsize)
if opt_name == "nest":
return NesterovMomentumOptimizer(stepsize, momentum=gamma)
if opt_name == "moment":
return MomentumOptimizer(stepsize, momentum=gamma)
if opt_name == "ada":
return AdagradOptimizer(stepsize)
if opt_name == "rms":
return RMSPropOptimizer(stepsize, decay=gamma)
if opt_name == "adam":
return AdamOptimizer(stepsize, beta1=gamma, beta2=delta)
if opt_name == "roto":
return RotosolveOptimizer()
@pytest.mark.parametrize(
"opt_name",
[
"gd",
"moment",
"nest",
"ada",
"rms",
"adam",
"roto",
],
)
class TestOverOpts:
def test_kwargs(self, mocker, opt, opt_name, tol):
class func_wrapper:
@staticmethod
def func(x, c=1.0):
return (x - c) ** 2
x = 1.0
wrapper = func_wrapper()
spy = mocker.spy(wrapper, "func")
x_new_two = opt.step(wrapper.func, x, c=2.0)
reset(opt)
args2, kwargs2 = spy.call_args_list[-1]
x_new_three_wc, cost_three = opt.step_and_cost(wrapper.func, x, c=3.0)
reset(opt)
args3, kwargs3 = spy.call_args_list[-1]
if opt_name != "roto":
assert args2 == (x,)
assert args3 == (x,)
else:
assert x_new_two != pytest.approx(x, abs=tol)
assert x_new_three_wc != pytest.approx(x, abs=tol)
assert kwargs2 == {"c": 2.0}
assert kwargs3 == {"c": 3.0}
assert cost_three == pytest.approx(wrapper.func(x, c=3.0), abs=tol)
def test_multi_args(self, mocker, opt, opt_name, tol):
class func_wrapper:
@staticmethod
def func(x, y, z):
return x[0] * y[0] + z[0]
wrapper = func_wrapper()
spy = mocker.spy(wrapper, "func")
x = np.array([1.0])
y = np.array([2.0])
z = np.array([3.0])
(x_new, y_new, z_new), cost = opt.step_and_cost(wrapper.func, x, y, z)
reset(opt)
args_called1, kwargs1 = spy.call_args_list[-1]
x_new2, y_new2, z_new2 = opt.step(wrapper.func, x_new, y_new, z_new)
reset(opt)
args_called2, kwargs2 = spy.call_args_list[-1]
if opt_name != "roto":
assert args_called1 == (x, y, z)
assert args_called2 == (x_new, y_new, z_new)
else:
assert x_new != pytest.approx(x, abs=tol)
assert y_new != pytest.approx(y, abs=tol)
assert z_new != pytest.approx(z, abs=tol)
assert kwargs1 == {}
assert kwargs2 == {}
assert cost == pytest.approx(wrapper.func(x, y, z), abs=tol)
def test_nontrainable_data(self, opt, opt_name, tol):
def func(x, data):
return x[0] * data[0]
x = np.array([1.0])
data = np.array([1.0], requires_grad=False)
args_new = opt.step(func, x, data)
reset(opt)
args_new_wc, cost = opt.step_and_cost(func, *args_new)
reset(opt)
assert len(args_new) == pytest.approx(2, abs=tol)
assert args_new[0] != pytest.approx(x, abs=tol)
assert args_new[1] == pytest.approx(data, abs=tol)
assert cost == pytest.approx(func(*args_new), abs=tol)
def test_steps_the_same(self, opt, opt_name, tol):
def func1(x, y, z):
return x[0] * y[0] * z[0]
def func2(args):
return args[0][0] * args[1][0] * args[2][0]
x = np.array([1.0])
y = np.array([2.0])
z = np.array([3.0])
args = (x, y, z)
x_separate, y_separate, z_separate = opt.step(func1, x, y, z)
reset(opt)
args_new = opt.step(func2, args)
reset(opt)
assert x_separate == pytest.approx(args_new[0], abs=tol)
assert y_separate == pytest.approx(args_new[1], abs=tol)
assert z_separate == pytest.approx(args_new[2], abs=tol)
def test_one_trainable_one_non_trainable(self, opt, opt_name, tol):
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
def cost(x, target):
return (circuit(x) - target) ** 2
ev = np.tensor(0.7781, requires_grad=False)
x = np.tensor(0.0, requires_grad=True)
original_ev = ev
(x, ev), cost = opt.step_and_cost(cost, x, ev)
assert x == 0
assert ev == original_ev
def test_one_non_trainable_one_trainable(self, opt, opt_name, tol):
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
def cost(target, x):
return (circuit(x) - target) ** 2
ev = np.tensor(0.7781, requires_grad=False)
x = np.tensor(0.0, requires_grad=True)
original_ev = ev
(ev, x), cost = opt.step_and_cost(cost, ev, x)
assert x == 0
assert ev == original_ev
def test_two_trainable_args(self, opt, opt_name, tol):
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x, y):
qml.RX(x, wires=0)
qml.RX(y, wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
def cost(x, y, target):
return (circuit(x, y) - target) ** 2
ev = np.tensor(0.7781, requires_grad=False)
x = np.tensor(0.0, requires_grad=True)
y = np.tensor(0.0, requires_grad=True)
original_ev = ev
(x, y, ev), cost = opt.step_and_cost(cost, x, y, ev)
assert x == 0
assert ev == original_ev
| true | true |
1c2b137919800c11b9af9def7b77d30093b844cf | 3,324 | py | Python | lib/carbon/tests/benchmark_cache.py | readevalprint/carbon | 5264d53a3ed6f97721ae76ae3821ca8ce4950a66 | ["Apache-2.0"] | 961 | 2015-01-01T14:20:35.000Z | 2022-03-29T22:15:35.000Z | lib/carbon/tests/benchmark_cache.py | readevalprint/carbon | 5264d53a3ed6f97721ae76ae3821ca8ce4950a66 | ["Apache-2.0"] | 611 | 2015-01-03T20:31:23.000Z | 2022-03-31T21:30:23.000Z | lib/carbon/tests/benchmark_cache.py | readevalprint/carbon | 5264d53a3ed6f97721ae76ae3821ca8ce4950a66 | ["Apache-2.0"] | 326 | 2015-01-03T14:55:33.000Z | 2022-03-31T01:43:49.000Z |
import timeit
from carbon.cache import _MetricCache, DrainStrategy, \
NaiveStrategy, MaxStrategy, RandomStrategy, SortedStrategy, \
TimeSortedStrategy, BucketMaxStrategy
metric_cache = _MetricCache(DrainStrategy)
count = 0
strategies = {
'naive': NaiveStrategy,
'max': MaxStrategy,
'random': RandomStrategy,
'sorted': SortedStrategy,
'timesorted': TimeSortedStrategy,
'bucketmax': BucketMaxStrategy,
}
def command_store_foo():
global count
count = count + 1
return metric_cache.store('foo', (count, 1.0))
def command_store_foo_n():
global count
count = count + 1
return metric_cache.store("foo.%d" % count, (count, 1.0))
def command_drain():
while metric_cache:
metric_cache.drain_metric()
return metric_cache.size
def print_stats(n, t):
usec = t * 1e6
if usec < 1000:
print(" datapoints: %-10d usecs: %d" % (n, int(usec)))
else:
msec = usec / 1000
if msec < 1000:
print(" datapoints: %-10d msecs: %d" % (n, int(msec)))
else:
sec = msec / 1000
print(" datapoints: %-10d secs: %3g" % (n, sec))
if __name__ == '__main__':
print("Benchmarking single metric MetricCache store...")
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(DrainStrategy)
t = timeit.timeit(command_store_foo, number=n)
print_stats(n, t)
print("Benchmarking unique metric MetricCache store...")
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(DrainStrategy)
t = timeit.timeit(command_store_foo_n, number=n)
print_stats(n, t)
print("Benchmarking single metric MetricCache store..., BucketMax")
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(BucketMaxStrategy)
t = timeit.timeit(command_store_foo, number=n)
print_stats(n, t)
print("Benchmarking unique metric MetricCache store..., BucketMax")
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(BucketMaxStrategy)
t = timeit.timeit(command_store_foo_n, number=n)
print_stats(n, t)
print("Benchmarking single metric MetricCache drain...")
for name, strategy in sorted(strategies.items()):
print("CACHE_WRITE_STRATEGY: %s" % name)
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(strategy)
timeit.timeit(command_store_foo, number=n)
t = timeit.timeit(command_drain, number=1)
print_stats(n, t)
print("Benchmarking unique metric MetricCache drain...")
for name, strategy in sorted(strategies.items()):
print("CACHE_WRITE_STRATEGY: %s" % name)
for n in [1000, 10000, 100000, 1000000]:
# remove me when strategy is fast
if (name == 'max' and n > 10000) or (name == 'random' and n > 10000):
print(" datapoints: %-10d [skipped]" % n)
continue
count = 0
metric_cache = _MetricCache(strategy)
timeit.timeit(command_store_foo_n, number=n)
t = timeit.timeit(command_drain, number=1)
print_stats(n, t)
| 32.271845 | 81 | 0.620638 |
import timeit
from carbon.cache import _MetricCache, DrainStrategy, \
NaiveStrategy, MaxStrategy, RandomStrategy, SortedStrategy, \
TimeSortedStrategy, BucketMaxStrategy
metric_cache = _MetricCache(DrainStrategy)
count = 0
strategies = {
'naive': NaiveStrategy,
'max': MaxStrategy,
'random': RandomStrategy,
'sorted': SortedStrategy,
'timesorted': TimeSortedStrategy,
'bucketmax': BucketMaxStrategy,
}
def command_store_foo():
global count
count = count + 1
return metric_cache.store('foo', (count, 1.0))
def command_store_foo_n():
global count
count = count + 1
return metric_cache.store("foo.%d" % count, (count, 1.0))
def command_drain():
while metric_cache:
metric_cache.drain_metric()
return metric_cache.size
def print_stats(n, t):
usec = t * 1e6
if usec < 1000:
print(" datapoints: %-10d usecs: %d" % (n, int(usec)))
else:
msec = usec / 1000
if msec < 1000:
print(" datapoints: %-10d msecs: %d" % (n, int(msec)))
else:
sec = msec / 1000
print(" datapoints: %-10d secs: %3g" % (n, sec))
if __name__ == '__main__':
print("Benchmarking single metric MetricCache store...")
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(DrainStrategy)
t = timeit.timeit(command_store_foo, number=n)
print_stats(n, t)
print("Benchmarking unique metric MetricCache store...")
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(DrainStrategy)
t = timeit.timeit(command_store_foo_n, number=n)
print_stats(n, t)
print("Benchmarking single metric MetricCache store..., BucketMax")
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(BucketMaxStrategy)
t = timeit.timeit(command_store_foo, number=n)
print_stats(n, t)
print("Benchmarking unique metric MetricCache store..., BucketMax")
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(BucketMaxStrategy)
t = timeit.timeit(command_store_foo_n, number=n)
print_stats(n, t)
print("Benchmarking single metric MetricCache drain...")
for name, strategy in sorted(strategies.items()):
print("CACHE_WRITE_STRATEGY: %s" % name)
for n in [1000, 10000, 100000, 1000000]:
count = 0
metric_cache = _MetricCache(strategy)
timeit.timeit(command_store_foo, number=n)
t = timeit.timeit(command_drain, number=1)
print_stats(n, t)
print("Benchmarking unique metric MetricCache drain...")
for name, strategy in sorted(strategies.items()):
print("CACHE_WRITE_STRATEGY: %s" % name)
for n in [1000, 10000, 100000, 1000000]:
if (name == 'max' and n > 10000) or (name == 'random' and n > 10000):
print(" datapoints: %-10d [skipped]" % n)
continue
count = 0
metric_cache = _MetricCache(strategy)
timeit.timeit(command_store_foo_n, number=n)
t = timeit.timeit(command_drain, number=1)
print_stats(n, t)
| true | true |
1c2b14c77c1356483a7ac7cfe0ef500bc57ab76f | 10,555 | py | Python | rllib/env/atari_wrappers.py | fbudrowski/ray | 4853aa96cbbea76e69c3e48802ce7408f08669ee | ["Apache-2.0"] | null | null | null | rllib/env/atari_wrappers.py | fbudrowski/ray | 4853aa96cbbea76e69c3e48802ce7408f08669ee | ["Apache-2.0"] | 5 | 2021-08-25T16:17:15.000Z | 2022-03-12T01:00:29.000Z | rllib/env/atari_wrappers.py | fbudrowski/ray | 4853aa96cbbea76e69c3e48802ce7408f08669ee | ["Apache-2.0"] | 2 | 2020-05-22T15:36:27.000Z | 2020-05-22T15:52:03.000Z |
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
def is_atari(env):
if (hasattr(env.observation_space, "shape")
and env.observation_space.shape is not None
and len(env.observation_space.shape) <= 2):
return False
return hasattr(env, "unwrapped") and hasattr(env.unwrapped, "ale")
def get_wrapper_by_cls(env, cls):
"""Returns the gym env wrapper of the given class, or None."""
currentenv = env
while True:
if isinstance(currentenv, cls):
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
return None
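# Hedged usage sketch (illustrative, not part of the original module): walk a
# wrapped env to reach the stats recorder added by wrap_deepmind() below, e.g.
#
#   monitor = get_wrapper_by_cls(env, MonitorEnv)
#   if monitor is not None:
#       episode_rewards = monitor.get_episode_rewards()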
class MonitorEnv(gym.Wrapper):
def __init__(self, env=None):
"""Record episodes stats prior to EpisodicLifeEnv, etc."""
gym.Wrapper.__init__(self, env)
self._current_reward = None
self._num_steps = None
self._total_steps = None
self._episode_rewards = []
self._episode_lengths = []
self._num_episodes = 0
self._num_returned = 0
def reset(self, **kwargs):
obs = self.env.reset(**kwargs)
if self._total_steps is None:
self._total_steps = sum(self._episode_lengths)
if self._current_reward is not None:
self._episode_rewards.append(self._current_reward)
self._episode_lengths.append(self._num_steps)
self._num_episodes += 1
self._current_reward = 0
self._num_steps = 0
return obs
def step(self, action):
obs, rew, done, info = self.env.step(action)
self._current_reward += rew
self._num_steps += 1
self._total_steps += 1
return (obs, rew, done, info)
def get_episode_rewards(self):
return self._episode_rewards
def get_episode_lengths(self):
return self._episode_lengths
def get_total_steps(self):
return self._total_steps
def next_episode_results(self):
for i in range(self._num_returned, len(self._episode_rewards)):
yield (self._episode_rewards[i], self._episode_lengths[i])
self._num_returned = len(self._episode_rewards)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset.
For environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == "FIRE"
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert we sometimes stay in the lives == 0 condition for a few
# frames, so it's important to keep lives > 0 so that we only reset
# once the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros(
(2, ) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, dim):
"""Warp frames to the specified size (dim x dim)."""
gym.ObservationWrapper.__init__(self, env)
self.width = dim
self.height = dim
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(self.height, self.width, 1),
dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
# TODO: (sven) Deprecated class. Remove once traj. view is the norm.
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[0], shp[1], shp[2] * k),
dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return np.concatenate(self.frames, axis=2)
class FrameStackTrajectoryView(gym.ObservationWrapper):
def __init__(self, env):
"""No stacking. Trajectory View API takes care of this."""
gym.Wrapper.__init__(self, env)
shp = env.observation_space.shape
assert shp[2] == 1
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[0], shp[1]),
dtype=env.observation_space.dtype)
def observation(self, observation):
return np.squeeze(observation, axis=-1)
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(
low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
def wrap_deepmind(
env,
dim=84,
# TODO: (sven) Remove once traj. view is norm.
framestack=True,
framestack_via_traj_view_api=False):
"""Configure environment for DeepMind-style Atari.
Note that we assume reward clipping is done outside the wrapper.
Args:
dim (int): Dimension to resize observations to (dim x dim).
framestack (bool): Whether to framestack observations.
"""
env = MonitorEnv(env)
env = NoopResetEnv(env, noop_max=30)
if env.spec is not None and "NoFrameskip" in env.spec.id:
env = MaxAndSkipEnv(env, skip=4)
env = EpisodicLifeEnv(env)
if "FIRE" in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env, dim)
# env = ScaledFloatFrame(env) # TODO: use for dqn?
# env = ClipRewardEnv(env) # reward clipping is handled by policy eval
# New way of frame stacking via the trajectory view API (model config key:
# `num_framestacks=[int]`.
if framestack_via_traj_view_api:
env = FrameStackTrajectoryView(env)
# Old way (w/o traj. view API) via model config key: `framestack=True`.
# TODO: (sven) Remove once traj. view is norm.
elif framestack is True:
env = FrameStack(env, 4)
return env
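# Hedged usage sketch (illustrative only; "PongNoFrameskip-v4" is just an
# example env id and is not referenced elsewhere in this module):
#
#   env = wrap_deepmind(gym.make("PongNoFrameskip-v4"), dim=84,
#                       framestack=True)
#   obs = env.reset()   # grayscale 84x84 frames, stacked to shape (84, 84, 4)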
| 32.984375 | 80 | 0.61099 |
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
def is_atari(env):
if (hasattr(env.observation_space, "shape")
and env.observation_space.shape is not None
and len(env.observation_space.shape) <= 2):
return False
return hasattr(env, "unwrapped") and hasattr(env.unwrapped, "ale")
def get_wrapper_by_cls(env, cls):
currentenv = env
while True:
if isinstance(currentenv, cls):
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
return None
class MonitorEnv(gym.Wrapper):
def __init__(self, env=None):
gym.Wrapper.__init__(self, env)
self._current_reward = None
self._num_steps = None
self._total_steps = None
self._episode_rewards = []
self._episode_lengths = []
self._num_episodes = 0
self._num_returned = 0
def reset(self, **kwargs):
obs = self.env.reset(**kwargs)
if self._total_steps is None:
self._total_steps = sum(self._episode_lengths)
if self._current_reward is not None:
self._episode_rewards.append(self._current_reward)
self._episode_lengths.append(self._num_steps)
self._num_episodes += 1
self._current_reward = 0
self._num_steps = 0
return obs
def step(self, action):
obs, rew, done, info = self.env.step(action)
self._current_reward += rew
self._num_steps += 1
self._total_steps += 1
return (obs, rew, done, info)
def get_episode_rewards(self):
return self._episode_rewards
def get_episode_lengths(self):
return self._episode_lengths
def get_total_steps(self):
return self._total_steps
def next_episode_results(self):
for i in range(self._num_returned, len(self._episode_rewards)):
yield (self._episode_rewards[i], self._episode_lengths[i])
self._num_returned = len(self._episode_rewards)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
def reset(self, **kwargs):
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
return np.sign(reward)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == "FIRE"
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
gym.Wrapper.__init__(self, env)
self._obs_buffer = np.zeros(
(2, ) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, dim):
gym.ObservationWrapper.__init__(self, env)
self.width = dim
self.height = dim
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(self.height, self.width, 1),
dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
# TODO: (sven) Deprecated class. Remove once traj. view is the norm.
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[0], shp[1], shp[2] * k),
dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return np.concatenate(self.frames, axis=2)
class FrameStackTrajectoryView(gym.ObservationWrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
shp = env.observation_space.shape
assert shp[2] == 1
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[0], shp[1]),
dtype=env.observation_space.dtype)
def observation(self, observation):
return np.squeeze(observation, axis=-1)
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(
low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
def wrap_deepmind(
env,
dim=84,
# TODO: (sven) Remove once traj. view is norm.
framestack=True,
framestack_via_traj_view_api=False):
env = MonitorEnv(env)
env = NoopResetEnv(env, noop_max=30)
if env.spec is not None and "NoFrameskip" in env.spec.id:
env = MaxAndSkipEnv(env, skip=4)
env = EpisodicLifeEnv(env)
if "FIRE" in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env, dim)
# env = ScaledFloatFrame(env) # TODO: use for dqn?
# env = ClipRewardEnv(env) # reward clipping is handled by policy eval
# New way of frame stacking via the trajectory view API (model config key:
# `num_framestacks=[int]`.
if framestack_via_traj_view_api:
env = FrameStackTrajectoryView(env)
# Old way (w/o traj. view API) via model config key: `framestack=True`.
# TODO: (sven) Remove once traj. view is norm.
elif framestack is True:
env = FrameStack(env, 4)
return env
| true | true |
1c2b16be4e7a1c043617782a7bd0de87fc9ab569 | 2,934 | py | Python | examples/reproducing/hunziker2015.py | prisae/empymod | c01eae0ac51b37864c0b68bf0c207c1bd7c7e585 | ["Apache-2.0"] | 31 | 2017-06-07T00:47:10.000Z | 2020-11-02T13:45:29.000Z | examples/reproducing/hunziker2015.py | prisae/empymod | c01eae0ac51b37864c0b68bf0c207c1bd7c7e585 | ["Apache-2.0"] | 97 | 2017-06-05T08:19:27.000Z | 2020-11-30T15:25:07.000Z | examples/reproducing/hunziker2015.py | prisae/empymod | c01eae0ac51b37864c0b68bf0c207c1bd7c7e585 | ["Apache-2.0"] | 14 | 2017-11-05T13:24:29.000Z | 2020-09-25T19:25:18.000Z |
"""
Hunziker et al., 2015, Geophysics
=================================
Reproducing Figure 3 of the manual of `EMmod`. As such, this example does not
reproduce a figure from Hunziker et al., 2015, itself, but one from the manual
that comes with the software accompanying the paper. The software ships with an
example input file named `simplemod.scr`, and the corresponding result is shown
in Figure 3 of the code's manual.
If you are interested in reproducing the figures of the actual paper have a
look at the notebooks in the repo `article-geo2017
<https://github.com/emsig/article-geo2017>`_.
**Reference**
- **Hunziker, J., J. Thorbecke, and E. Slob, 2015**, The electromagnetic
response in a layered vertical transverse isotropic medium: A new look at an
old problem: Geophysics, 80(1), F1–F18; DOI: `10.1190/geo2013-0411.1
<https://doi.org/10.1190/geo2013-0411.1>`_; Software:
`software.seg.org/2015/0001 <https://software.seg.org/2015/0001>`_.
"""
import empymod
import numpy as np
import matplotlib.pyplot as plt
###############################################################################
# Compute the data
# ----------------
#
# Compute the electric field with the parameters defined in `simplemod.scr`.
# x- and y-offsets
x = np.arange(4000)*7-1999.5*7
y = np.arange(1500)*10-749.5*10
# Create 2D arrays of them
rx = np.repeat([x, ], np.size(y), axis=0)
ry = np.repeat([y, ], np.size(x), axis=0)
ry = ry.transpose()
# Compute the electric field
efield = empymod.dipole(
src=[0, 0, 150],
rec=[rx.ravel(), ry.ravel(), 200],
depth=[0, 200, 1000, 1200],
res=[2e14, 1/3, 1, 50, 1],
aniso=[1, 1, np.sqrt(10), 1, 1],
freqtime=0.5,
epermH=[1, 80, 17, 2.1, 17],
epermV=[1, 80, 17, 2.1, 17],
mpermH=[1, 1, 1, 1, 1],
mpermV=[1, 1, 1, 1, 1],
ab=11,
htarg={'pts_per_dec': -1},
).reshape(np.shape(rx))
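# Added note: the receiver positions above form a regular (y, x) grid of
# 1500 x 4000 points, so the flat result is reshaped back to
# np.shape(rx) == (1500, 4000) for plotting.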
###############################################################################
# Plot
# ----
# Create a similar colormap as Hunziker et al., 2015.
cmap = plt.cm.get_cmap("jet", 61)
plt.figure(figsize=(9, 8))
# 1. Amplitude
plt.subplot(211)
plt.title('Amplitude (V/m)')
plt.xlabel('Offset (km)')
plt.ylabel('Offset (km)')
plt.pcolormesh(x/1e3, y/1e3, np.log10(efield.amp()),
cmap=cmap, vmin=-16, vmax=-7, shading='nearest')
plt.colorbar()
# 2. Phase
plt.subplot(212)
plt.title('Phase (°)')
plt.xlabel('Offset (km)')
plt.ylabel('Offset (km)')
plt.pcolormesh(x/1e3, y/1e3, efield.pha(deg=False, unwrap=False, lag=True),
cmap=cmap, vmin=-np.pi, vmax=np.pi, shading='nearest')
plt.colorbar()
plt.tight_layout()
plt.show()
###############################################################################
# Original Figure
# ---------------
#
# Figure 3 of the manual of `EMmod`.
#
# .. image:: ../../_static/figures/Hunziker2015.png
###############################################################################
empymod.Report()
| 28.764706 | 79 | 0.578391 |
import empymod
import numpy as np
import matplotlib.pyplot as plt
| true | true |
1c2b187d41fee478aa7d74702fd8de6b79deee2c | 49,508 | py | Python | lib/coins.py | lancehall123/electrumx | b1cf21900a4fc45a821d920f1ce0a36950577d0c | ["MIT"] | null | null | null | lib/coins.py | lancehall123/electrumx | b1cf21900a4fc45a821d920f1ce0a36950577d0c | ["MIT"] | null | null | null | lib/coins.py | lancehall123/electrumx | b1cf21900a4fc45a821d920f1ce0a36950577d0c | ["MIT"] | null | null | null |
# Copyright (c) 2016-2017, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''Module providing coin abstraction.
Anything coin-specific should go in this file and be subclassed where
necessary for appropriate handling.
'''
from collections import namedtuple
import re
import struct
from decimal import Decimal
from hashlib import sha256
from functools import partial
import base64
import lib.util as util
from lib.hash import Base58, hash160, double_sha256, hash_to_str
from lib.script import ScriptPubKey, OpCodes
import lib.tx as lib_tx
from server.block_processor import BlockProcessor
import server.daemon as daemon
from server.session import ElectrumX, DashElectrumX
Block = namedtuple("Block", "raw header transactions")
OP_RETURN = OpCodes.OP_RETURN
class CoinError(Exception):
'''Exception raised for coin-related errors.'''
class Coin(object):
'''Base class of coin hierarchy.'''
REORG_LIMIT = 200
# Not sure if these are coin-specific
RPC_URL_REGEX = re.compile('.+@(\[[0-9a-fA-F:]+\]|[^:]+)(:[0-9]+)?')
VALUE_PER_COIN = 100000000
CHUNK_SIZE = 2016
HASHX_LEN = 11
BASIC_HEADER_SIZE = 80
STATIC_BLOCK_HEADERS = True
SESSIONCLS = ElectrumX
DESERIALIZER = lib_tx.Deserializer
DAEMON = daemon.Daemon
BLOCK_PROCESSOR = BlockProcessor
XPUB_VERBYTES = bytes('????', 'utf-8')
XPRV_VERBYTES = bytes('????', 'utf-8')
ENCODE_CHECK = Base58.encode_check
DECODE_CHECK = Base58.decode_check
# Peer discovery
PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
PEERS = []
@classmethod
def lookup_coin_class(cls, name, net):
'''Return a coin class given name and network.
Raise an exception if unrecognised.'''
req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK']
for coin in util.subclasses(Coin):
if (coin.NAME.lower() == name.lower() and
coin.NET.lower() == net.lower()):
coin_req_attrs = req_attrs.copy()
missing = [attr for attr in coin_req_attrs
if not hasattr(coin, attr)]
if missing:
raise CoinError('coin {} missing {} attributes'
.format(name, missing))
return coin
raise CoinError('unknown coin {} and network {} combination'
.format(name, net))
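# Hedged usage sketch (illustrative; the name/net strings are examples and the
# concrete coin classes live further down this file, outside this excerpt):
#
#   coin = Coin.lookup_coin_class('Bitcoin', 'mainnet')
#   urls = coin.daemon_urls('user:pass@localhost')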
@classmethod
def sanitize_url(cls, url):
# Remove surrounding ws and trailing /s
url = url.strip().rstrip('/')
match = cls.RPC_URL_REGEX.match(url)
if not match:
raise CoinError('invalid daemon URL: "{}"'.format(url))
if match.groups()[1] is None:
url += ':{:d}'.format(cls.RPC_PORT)
if not url.startswith('http://') and not url.startswith('https://'):
url = 'http://' + url
return url + '/'
@classmethod
def daemon_urls(cls, urls):
return [cls.sanitize_url(url) for url in urls.split(',')]
@classmethod
def genesis_block(cls, block):
'''Check the Genesis block is the right one for this coin.
Return the block less its unspendable coinbase.
'''
header = cls.block_header(block, 0)
header_hex_hash = hash_to_str(cls.header_hash(header))
if header_hex_hash != cls.GENESIS_HASH:
raise CoinError('genesis block has hash {} expected {}'
.format(header_hex_hash, cls.GENESIS_HASH))
return header + bytes(1)
@classmethod
def hashX_from_script(cls, script):
'''Returns a hashX from a script, or None if the script is provably
unspendable so the output can be dropped.
'''
if script and script[0] == OP_RETURN:
return None
return sha256(script).digest()[:cls.HASHX_LEN]
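    # Illustrative sketch for hashX_from_script (p2pkh_script_bytes is a
    # placeholder for a standard pay-to-pubkey-hash script): a hashX is the
    # first HASHX_LEN (11) bytes of the script's SHA256 digest, while
    # provably unspendable OP_RETURN outputs map to None so they are dropped.
    #
    #     Coin.hashX_from_script(bytes([OP_RETURN]))   # -> None
    #     Coin.hashX_from_script(p2pkh_script_bytes)   # -> an 11-byte hashX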
@util.cachedproperty
def address_handlers(cls):
return ScriptPubKey.PayToHandlers(
address=cls.P2PKH_address_from_hash160,
script_hash=cls.P2SH_address_from_hash160,
pubkey=cls.P2PKH_address_from_pubkey,
unspendable=lambda: None,
strange=lambda script: None,
)
@classmethod
def address_from_script(cls, script):
        '''Given a pk_script, return the address it pays to, or None.'''
return ScriptPubKey.pay_to(cls.address_handlers, script)
@staticmethod
def lookup_xverbytes(verbytes):
'''Return a (is_xpub, coin_class) pair given xpub/xprv verbytes.'''
# Order means BTC testnet will override NMC testnet
for coin in util.subclasses(Coin):
if verbytes == coin.XPUB_VERBYTES:
return True, coin
if verbytes == coin.XPRV_VERBYTES:
return False, coin
raise CoinError('version bytes unrecognised')
@classmethod
def address_to_hashX(cls, address):
'''Return a hashX given a coin address.'''
return cls.hashX_from_script(cls.pay_to_address_script(address))
@classmethod
def P2PKH_address_from_hash160(cls, hash160):
        '''Return a P2PKH address given a public key hash (hash160).'''
assert len(hash160) == 20
return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160)
@classmethod
def P2PKH_address_from_pubkey(cls, pubkey):
'''Return a coin address given a public key.'''
return cls.P2PKH_address_from_hash160(hash160(pubkey))
@classmethod
def P2SH_address_from_hash160(cls, hash160):
        '''Return a P2SH address given a script hash160.'''
assert len(hash160) == 20
return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[0] + hash160)
@classmethod
def multisig_address(cls, m, pubkeys):
'''Return the P2SH address for an M of N multisig transaction.
Pass the N pubkeys of which M are needed to sign it. If
generating an address for a wallet, it is the caller's
responsibility to sort them to ensure order does not matter
for, e.g., wallet recovery.
'''
script = cls.pay_to_multisig_script(m, pubkeys)
return cls.P2SH_address_from_hash160(hash160(script))
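    # Illustrative sketch for multisig_address (SomeCoin stands for any
    # concrete subclass; pubkey1..pubkey3 are placeholder public key bytes):
    # a 2-of-3 address is the P2SH address of the multisig redeem script, so
    # all three pubkeys must be passed.
    #
    #     address = SomeCoin.multisig_address(2, [pubkey1, pubkey2, pubkey3])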
@classmethod
def pay_to_multisig_script(cls, m, pubkeys):
'''Return a P2SH script for an M of N multisig transaction.'''
return ScriptPubKey.multisig_script(m, pubkeys)
@classmethod
def pay_to_pubkey_script(cls, pubkey):
'''Return a pubkey script that pays to a pubkey.
Pass the raw pubkey bytes (length 33 or 65).
'''
return ScriptPubKey.P2PK_script(pubkey)
@classmethod
def pay_to_address_script(cls, address):
'''Return a pubkey script that pays to a pubkey hash.
Pass the address (either P2PKH or P2SH) in base58 form.
'''
raw = cls.DECODE_CHECK(address)
# Require version byte(s) plus hash160.
verbyte = -1
verlen = len(raw) - 20
if verlen > 0:
verbyte, hash_bytes = raw[:verlen], raw[verlen:]
if verbyte == cls.P2PKH_VERBYTE:
return ScriptPubKey.P2PKH_script(hash_bytes)
if verbyte in cls.P2SH_VERBYTES:
return ScriptPubKey.P2SH_script(hash_bytes)
raise CoinError('invalid address: {}'.format(address))
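    # Illustrative round trip for pay_to_address_script (p2pkh_address is a
    # placeholder for a valid base58 address): the decoded payload is split
    # into version byte(s) and hash160, then rebuilt into a P2PKH or P2SH
    # output script.
    #
    #     script = BitcoinSegwit.pay_to_address_script(p2pkh_address)
    #     hashX = BitcoinSegwit.address_to_hashX(p2pkh_address)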
@classmethod
def privkey_WIF(cls, privkey_bytes, compressed):
'''Return the private key encoded in Wallet Import Format.'''
payload = bytearray(cls.WIF_BYTE) + privkey_bytes
if compressed:
payload.append(0x01)
return cls.ENCODE_CHECK(payload)
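    # Illustrative sketch for privkey_WIF (the 32 zero bytes are a dummy key,
    # not a usable private key): WIF is base58check(WIF_BYTE + key), with a
    # trailing 0x01 when the corresponding public key is compressed.
    #
    #     wif = BitcoinSegwit.privkey_WIF(bytes(32), compressed=True)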
@classmethod
def header_hash(cls, header):
        '''Given a header return the hash.'''
return double_sha256(header)
@classmethod
def header_prevhash(cls, header):
        '''Given a header return the previous block hash.'''
return header[4:36]
@classmethod
def static_header_offset(cls, height):
'''Given a header height return its offset in the headers file.
If header sizes change at some point, this is the only code
that needs updating.'''
assert cls.STATIC_BLOCK_HEADERS
return height * cls.BASIC_HEADER_SIZE
@classmethod
def static_header_len(cls, height):
'''Given a header height return its length.'''
return cls.static_header_offset(height + 1) \
- cls.static_header_offset(height)
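    # Illustrative sketch for the static header helpers: with fixed-size
    # headers the offset is simply height * BASIC_HEADER_SIZE, so with the
    # default 80-byte headers:
    #
    #     Coin.static_header_offset(100)   # -> 8000
    #     Coin.static_header_len(100)      # -> 80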
@classmethod
def block_header(cls, block, height):
'''Returns the block header given a block and its height.'''
return block[:cls.static_header_len(height)]
@classmethod
def block(cls, raw_block, height):
'''Return a Block namedtuple given a raw block and its height.'''
header = cls.block_header(raw_block, height)
txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
return Block(raw_block, header, txs)
@classmethod
def decimal_value(cls, value):
'''Return the number of standard coin units as a Decimal given a
quantity of smallest units.
For example 1 BTC is returned for 100 million satoshis.
'''
return Decimal(value) / cls.VALUE_PER_COIN
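    # Illustrative sketch for decimal_value: VALUE_PER_COIN is 100,000,000
    # for Bitcoin-like coins, so smallest units scale down accordingly.
    #
    #     Coin.decimal_value(150000000)    # -> Decimal('1.5')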
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, bits, nonce = struct.unpack('<III', header[68:80])
return {
'block_height': height,
'version': version,
'prev_block_hash': hash_to_str(header[4:36]),
'merkle_root': hash_to_str(header[36:68]),
'timestamp': timestamp,
'bits': bits,
'nonce': nonce,
}
class AuxPowMixin(object):
STATIC_BLOCK_HEADERS = False
DESERIALIZER = lib_tx.DeserializerAuxPow
@classmethod
def header_hash(cls, header):
        '''Given a header return the hash.'''
return double_sha256(header[:cls.BASIC_HEADER_SIZE])
@classmethod
def block_header(cls, block, height):
'''Return the AuxPow block header bytes'''
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
class EquihashMixin(object):
STATIC_BLOCK_HEADERS = False
BASIC_HEADER_SIZE = 140 # Excluding Equihash solution
DESERIALIZER = lib_tx.DeserializerEquihash
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, bits = struct.unpack('<II', header[100:108])
return {
'block_height': height,
'version': version,
'prev_block_hash': hash_to_str(header[4:36]),
'merkle_root': hash_to_str(header[36:68]),
'timestamp': timestamp,
'bits': bits,
'nonce': hash_to_str(header[108:140]),
}
@classmethod
def block_header(cls, block, height):
'''Return the block header bytes'''
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
class ScryptMixin(object):
DESERIALIZER = lib_tx.DeserializerTxTime
HEADER_HASH = None
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
if cls.HEADER_HASH is None:
import scrypt
cls.HEADER_HASH = lambda x: scrypt.hash(x, x, 1024, 1, 1, 32)
version, = struct.unpack('<I', header[:4])
if version > 6:
return super().header_hash(header)
else:
return cls.HEADER_HASH(header)
class KomodoMixin(object):
P2PKH_VERBYTE = bytes.fromhex("3C")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("BC")
GENESIS_HASH = ('027e3758c3a65b12aa1046462b486d0a'
'63bfa1beae327897f56c5cfb7daaae71')
DESERIALIZER = lib_tx.DeserializerZcash
class BitcoinMixin(object):
SHORTNAME = "BTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("00")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('000000000019d6689c085ae165831e93'
'4ff763ae46a2a6c172b3f1b60a8ce26f')
RPC_PORT = 8332
class HOdlcoin(Coin):
NAME = "HOdlcoin"
SHORTNAME = "HODLC"
NET = "mainnet"
BASIC_HEADER_SIZE = 88
P2PKH_VERBYTE = bytes.fromhex("28")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("a8")
GENESIS_HASH = ('008872e5582924544e5c707ee4b839bb'
'82c28a9e94e917c94b40538d5658c04b')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 258858
TX_COUNT_HEIGHT = 382138
TX_PER_BLOCK = 5
class BitcoinCash(BitcoinMixin, Coin):
NAME = "BitcoinCash"
SHORTNAME = "BCC"
TX_COUNT = 243631085
TX_COUNT_HEIGHT = 479636
TX_PER_BLOCK = 50
PEERS = [
'electrum-abc.criptolayer.net s50012',
'electroncash.cascharia.com s50002',
'bch.arihanc.com t52001 s52002',
'bccarihace4jdcnt.onion t52001 s52002',
'jelectrum-cash.1209k.com s t',
'abc.vom-stausee.de t52001 s52002',
'abc1.hsmiths.com t60001 s60002',
'electroncash.checksum0.com s t',
]
class BitcoinSegwit(BitcoinMixin, Coin):
NAME = "BitcoinSegwit"
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 217380620
TX_COUNT_HEIGHT = 464000
TX_PER_BLOCK = 1800
PEERS = [
'btc.smsys.me s995',
'E-X.not.fyi s t',
'elec.luggs.co s443',
'electrum.vom-stausee.de s t',
'electrum3.hachre.de p10000 s t',
'electrum.hsmiths.com s t',
'erbium1.sytes.net s t',
'helicarrier.bauerj.eu s t',
'hsmiths4fyqlw5xw.onion s t',
'luggscoqbymhvnkp.onion t80',
'ozahtqwp25chjdjd.onion s t',
'us11.einfachmalnettsein.de s t',
'ELEX01.blackpole.online s t',
'node.arihanc.com s t',
'arihancckjge66iv.onion s t',
]
class BitcoinGold(EquihashMixin, BitcoinMixin, Coin):
CHUNK_SIZE = 252
NAME = "BitcoinGold"
SHORTNAME = "BTG"
FORK_HEIGHT = 491407
P2PKH_VERBYTE = bytes.fromhex("26")
P2SH_VERBYTES = [bytes.fromhex("17")]
DESERIALIZER = lib_tx.DeserializerEquihashSegWit
TX_COUNT = 265026255
TX_COUNT_HEIGHT = 499923
TX_PER_BLOCK = 50
REORG_LIMIT = 1000
RPC_PORT = 8338
@classmethod
def header_hash(cls, header):
        '''Given a header return the hash.'''
height, = struct.unpack('<I', header[68:72])
if height >= cls.FORK_HEIGHT:
return double_sha256(header)
else:
return double_sha256(header[:68] + header[100:112])
@classmethod
def electrum_header(cls, header, height):
h = dict(
block_height=height,
version=struct.unpack('<I', header[:4])[0],
prev_block_hash=hash_to_str(header[4:36]),
merkle_root=hash_to_str(header[36:68]),
timestamp=struct.unpack('<I', header[100:104])[0],
reserved=hash_to_str(header[72:100]),
bits=struct.unpack('<I', header[104:108])[0],
nonce=hash_to_str(header[108:140]),
solution=hash_to_str(header[140:])
)
return h
class BitcoinGoldTestnet(BitcoinGold):
FORK_HEIGHT = 1
SHORTNAME = "TBTG"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
TX_COUNT = 0
TX_COUNT_HEIGHT = 1
NET = 'testnet'
RPC_PORT = 18338
GENESIS_HASH = ('00000000e0781ebe24b91eedc293adfe'
'a2f557b53ec379e78959de3853e6f9f6')
class BitcoinGoldRegtest(BitcoinGold):
FORK_HEIGHT = 2000
SHORTNAME = "TBTG"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
TX_COUNT = 0
TX_COUNT_HEIGHT = 1
NET = 'regtest'
RPC_PORT = 18444
GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
'bf5beb436012afca590b1a11466e2206')
class Emercoin(Coin):
NAME = "Emercoin"
SHORTNAME = "EMC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("21")
P2SH_VERBYTES = [bytes.fromhex("5c")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00000000bcccd459d036a588d1008fce'
'8da3754b205736f32ddfd35350e84c2d')
TX_COUNT = 217380620
TX_COUNT_HEIGHT = 464000
TX_PER_BLOCK = 1700
VALUE_PER_COIN = 1000000
RPC_PORT = 6662
DESERIALIZER = lib_tx.DeserializerTxTimeAuxPow
PEERS = []
@classmethod
def block_header(cls, block, height):
'''Returns the block header given a block and its height.'''
deserializer = cls.DESERIALIZER(block)
if deserializer.is_merged_block():
return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
return block[:cls.static_header_len(height)]
@classmethod
def header_hash(cls, header):
'''Given a header return hash'''
return double_sha256(header[:cls.BASIC_HEADER_SIZE])
class BitcoinTestnetMixin(object):
SHORTNAME = "XTN"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('000000000933ea01ad0ee984209779ba'
'aec3ced90fa3f408719526f8d77f4943')
REORG_LIMIT = 8000
TX_COUNT = 12242438
TX_COUNT_HEIGHT = 1035428
TX_PER_BLOCK = 21
RPC_PORT = 18332
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
class BitcoinCashTestnet(BitcoinTestnetMixin, Coin):
'''Bitcoin Testnet for Bitcoin Cash daemons.'''
NAME = "BitcoinCash"
PEERS = [
'electrum-testnet-abc.criptolayer.net s50112',
'bchtestnet.arihanc.com t53001 s53002',
'ciiattqkgzebpp6jofjbrkhvhwmgnsfoayljdcrve2p3qmkbv3duaoyd.onion t53001 s53002',
]
class BitcoinSegwitTestnet(BitcoinTestnetMixin, Coin):
'''Bitcoin Testnet for Core bitcoind >= 0.13.1.'''
NAME = "BitcoinSegwit"
DESERIALIZER = lib_tx.DeserializerSegWit
PEERS = [
'electrum.akinbo.org s t',
'he36kyperp3kbuxu.onion s t',
'testnet.hsmiths.com t53011 s53012',
'hsmithsxurybd7uh.onion t53011 s53012',
'testnetnode.arihanc.com s t',
'w3e2orjpiiv2qwem3dw66d7c4krink4nhttngkylglpqe5r22n6n5wid.onion s t',
]
class BitcoinSegwitRegtest(BitcoinSegwitTestnet):
NAME = "BitcoinSegwit"
NET = "regtest"
GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
'bf5beb436012afca590b1a11466e2206')
    PEERS = []
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
class BitcoinNolnet(BitcoinCash):
'''Bitcoin Unlimited nolimit testnet.'''
NET = "nolnet"
GENESIS_HASH = ('0000000057e31bd2066c939a63b7b862'
'3bd0f10d8c001304bdfc1a7902ae6d35')
PEERS = []
REORG_LIMIT = 8000
TX_COUNT = 583589
TX_COUNT_HEIGHT = 8617
TX_PER_BLOCK = 50
RPC_PORT = 28332
PEER_DEFAULT_PORTS = {'t': '52001', 's': '52002'}
class Litecoin(Coin):
NAME = "Litecoin"
SHORTNAME = "LTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("30")
P2SH_VERBYTES = [bytes.fromhex("32"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
'c99d9714d334efa41a195a7e7e04bfe2')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 8908766
TX_COUNT_HEIGHT = 1105256
TX_PER_BLOCK = 10
RPC_PORT = 9332
REORG_LIMIT = 800
PEERS = [
'elec.luggs.co s444',
'electrum-ltc.bysh.me s t',
'electrum-ltc.ddns.net s t',
'electrum-ltc.wilv.in s t',
'electrum.cryptomachine.com p1000 s t',
'electrum.ltc.xurious.com s t',
'eywr5eubdbbe2laq.onion s50008 t50007',
]
class LitecoinTestnet(Litecoin):
SHORTNAME = "XLT"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("3a"), bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('4966625a4b2851d9fdee139e56211a0d'
'88575f59ed816ff5e6a63deb4e3e29a0')
TX_COUNT = 21772
TX_COUNT_HEIGHT = 20800
TX_PER_BLOCK = 2
RPC_PORT = 19332
REORG_LIMIT = 4000
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrum-ltc.bysh.me s t',
'electrum.ltc.xurious.com s t',
]
class Viacoin(AuxPowMixin, Coin):
NAME="Viacoin"
SHORTNAME = "VIA"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("47")
P2SH_VERBYTES = [bytes.fromhex("21")]
WIF_BYTE = bytes.fromhex("c7")
GENESIS_HASH = ('4e9b54001f9976049830128ec0331515'
'eaabe35a70970d79971da1539a400ba1')
TX_COUNT = 113638
TX_COUNT_HEIGHT = 3473674
TX_PER_BLOCK = 30
RPC_PORT = 5222
REORG_LIMIT = 5000
DESERIALIZER = lib_tx.DeserializerAuxPowSegWit
PEERS = [
'vialectrum.bitops.me s t',
'server.vialectrum.org s t',
'vialectrum.viacoin.net s t',
'viax1.bitops.me s t',
]
class ViacoinTestnet(Viacoin):
SHORTNAME = "TVI"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("7f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ff")
GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
'a4cccff2a4767a8eee39c11db367b008')
RPC_PORT = 25222
REORG_LIMIT = 2500
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'vialectrum.bysh.me s t',
]
class ViacoinTestnetSegWit(ViacoinTestnet):
NET = "testnet-segwit"
DESERIALIZER = lib_tx.DeserializerSegWit
# Source: namecoin.org
class Namecoin(AuxPowMixin, Coin):
NAME = "Namecoin"
SHORTNAME = "NMC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("d7dd6370")
XPRV_VERBYTES = bytes.fromhex("d7dc6e31")
P2PKH_VERBYTE = bytes.fromhex("34")
P2SH_VERBYTES = [bytes.fromhex("0d")]
WIF_BYTE = bytes.fromhex("e4")
GENESIS_HASH = ('000000000062b72c5e2ceb45fbc8587e'
'807c155b0da735e6483dfba2f0a9c770')
TX_COUNT = 4415768
TX_COUNT_HEIGHT = 329065
TX_PER_BLOCK = 10
PEERS = [
'elec.luggs.co s446',
]
class NamecoinTestnet(Namecoin):
NAME = "Namecoin"
SHORTNAME = "XNM"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
'a4cccff2a4767a8eee39c11db367b008')
class Dogecoin(AuxPowMixin, Coin):
NAME = "Dogecoin"
SHORTNAME = "DOGE"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("02facafd")
XPRV_VERBYTES = bytes.fromhex("02fac398")
P2PKH_VERBYTE = bytes.fromhex("1e")
P2SH_VERBYTES = [bytes.fromhex("16")]
WIF_BYTE = bytes.fromhex("9e")
GENESIS_HASH = ('1a91e3dace36e2be3bf030a65679fe82'
'1aa1d6ef92e7c9902eb318182c355691')
TX_COUNT = 27583427
TX_COUNT_HEIGHT = 1604979
TX_PER_BLOCK = 20
REORG_LIMIT = 2000
class DogecoinTestnet(Dogecoin):
NAME = "Dogecoin"
SHORTNAME = "XDT"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("71")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("f1")
GENESIS_HASH = ('bb0a78264637406b6360aad926284d54'
'4d7049f45189db5664f3c4d07350559e')
# Source: https://github.com/dashpay/dash
class Dash(Coin):
NAME = "Dash"
SHORTNAME = "DASH"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("02fe52cc")
XPRV_VERBYTES = bytes.fromhex("02fe52f8")
GENESIS_HASH = ('00000ffd590b1485b3caadc19b22e637'
'9c733355108f107a430458cdf3407ab6')
P2PKH_VERBYTE = bytes.fromhex("4c")
P2SH_VERBYTES = [bytes.fromhex("10")]
WIF_BYTE = bytes.fromhex("cc")
TX_COUNT_HEIGHT = 569399
TX_COUNT = 2157510
TX_PER_BLOCK = 4
RPC_PORT = 9998
PEERS = [
'electrum.dash.org s t',
'electrum.masternode.io s t',
'electrum-drk.club s t',
'dashcrypto.space s t',
'electrum.dash.siampm.com s t',
'wl4sfwq2hwxnodof.onion s t',
]
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import x11_hash
return x11_hash.getPoWHash(header)
class DashTestnet(Dash):
SHORTNAME = "tDASH"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("3a805837")
XPRV_VERBYTES = bytes.fromhex("3a8061a0")
GENESIS_HASH = ('00000bafbc94add76cb75e2ec9289483'
'7288a481e5c005f6563d91623bf8bc2c')
P2PKH_VERBYTE = bytes.fromhex("8c")
P2SH_VERBYTES = [bytes.fromhex("13")]
WIF_BYTE = bytes.fromhex("ef")
TX_COUNT_HEIGHT = 101619
TX_COUNT = 132681
TX_PER_BLOCK = 1
RPC_PORT = 19998
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrum.dash.siampm.com s t',
]
class Argentum(AuxPowMixin, Coin):
NAME = "Argentum"
SHORTNAME = "ARG"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("17")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("97")
GENESIS_HASH = ('88c667bc63167685e4e4da058fffdfe8'
'e007e5abffd6855de52ad59df7bb0bb2')
TX_COUNT = 2263089
TX_COUNT_HEIGHT = 2050260
TX_PER_BLOCK = 2000
RPC_PORT = 13581
class ArgentumTestnet(Argentum):
SHORTNAME = "XRG"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
REORG_LIMIT = 2000
class DigiByte(Coin):
NAME = "DigiByte"
SHORTNAME = "DGB"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1E")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('7497ea1b465eb39f1c8f507bc877078f'
'e016d6fcb6dfad3a64c98dcc6e1e8496')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1046018
TX_COUNT_HEIGHT = 1435000
TX_PER_BLOCK = 1000
RPC_PORT = 12022
class DigiByteTestnet(DigiByte):
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('b5dca8039e300198e5fe7cd23bdd1728'
'e2a444af34c447dbd0916fa3430a68c2')
RPC_PORT = 15022
REORG_LIMIT = 2000
class FairCoin(Coin):
NAME = "FairCoin"
SHORTNAME = "FAIR"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("5f")
P2SH_VERBYTES = [bytes.fromhex("24")]
WIF_BYTE = bytes.fromhex("df")
GENESIS_HASH = ('beed44fa5e96150d95d56ebd5d262578'
'1825a9407a5215dd7eda723373a0a1d7')
BASIC_HEADER_SIZE = 108
TX_COUNT = 505
TX_COUNT_HEIGHT = 470
TX_PER_BLOCK = 1
RPC_PORT = 40405
PEER_DEFAULT_PORTS = {'t': '51811', 's': '51812'}
PEERS = [
'electrum.faircoin.world s',
'electrumfair.punto0.org s',
]
@classmethod
def block(cls, raw_block, height):
'''Return a Block namedtuple given a raw block and its height.'''
if height > 0:
return super().block(raw_block, height)
else:
return Block(raw_block, cls.block_header(raw_block, height), [])
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, creatorId = struct.unpack('<II', header[100:108])
return {
'block_height': height,
'version': version,
'prev_block_hash': hash_to_str(header[4:36]),
'merkle_root': hash_to_str(header[36:68]),
'payload_hash': hash_to_str(header[68:100]),
'timestamp': timestamp,
'creatorId': creatorId,
}
class Zcash(EquihashMixin, Coin):
NAME = "Zcash"
SHORTNAME = "ZEC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00040fe8ec8471911baa1db1266ea15d'
'd06b4a8a5c453883c000b031973dce08')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
RPC_PORT = 8232
REORG_LIMIT = 800
class SnowGem(EquihashMixin, Coin):
NAME = "SnowGem"
SHORTNAME = "SNG"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1C28")
P2SH_VERBYTES = [bytes.fromhex("1C2D")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00068b35729d9d2b0c294ff1fe9af009'
'4740524311a131de40e7f705e4c29a5b')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 140698
TX_COUNT_HEIGHT = 102802
TX_PER_BLOCK = 2
RPC_PORT = 16112
REORG_LIMIT = 800
CHUNK_SIZE = 200
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, bits = struct.unpack('<II', header[100:108])
return {
'block_height': height,
'version': version,
'prev_block_hash': hash_to_str(header[4:36]),
'merkle_root': hash_to_str(header[36:68]),
'hash_reserved': hash_to_str(header[68:100]),
'timestamp': timestamp,
'bits': bits,
'nonce': hash_to_str(header[108:140]),
'n_solution': base64.b64encode(lib_tx.Deserializer(header, start=140)._read_varbytes()).decode('utf8')
}
class BitcoinZ(EquihashMixin, Coin):
NAME = "BitcoinZ"
SHORTNAME = "BTCZ"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('f499ee3d498b4298ac6a64205b8addb7'
'c43197e2a660229be65db8a4534d75c1')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 171976
TX_COUNT_HEIGHT = 81323
TX_PER_BLOCK = 3
RPC_PORT = 1979
REORG_LIMIT = 800
class Hush(EquihashMixin, Coin):
NAME = "Hush"
SHORTNAME = "HUSH"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
WIF_BYTE = bytes.fromhex("80")
    GENESIS_HASH = ('0003a67bc26fe564b75daf11186d3606'
'52eb435a35ba3d9d3e7e5d5f8e62dc17')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
RPC_PORT = 8822
REORG_LIMIT = 800
class Zclassic(EquihashMixin, Coin):
NAME = "Zclassic"
SHORTNAME = "ZCL"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
WIF_BYTE = bytes.fromhex("80")
    GENESIS_HASH = ('0007104ccda289427919efc39dc9e4d4'
'99804b7bebc22df55f8b834301260602')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
RPC_PORT = 8023
REORG_LIMIT = 800
class Koto(Coin):
NAME = "Koto"
SHORTNAME = "KOTO"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1836")
P2SH_VERBYTES = [bytes.fromhex("183B")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('6d424c350729ae633275d51dc3496e16'
'cd1b1d195c164da00f39c499a2e9959e')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 158914
TX_COUNT_HEIGHT = 67574
TX_PER_BLOCK = 3
RPC_PORT = 8432
REORG_LIMIT = 800
PEERS = [
'fr.kotocoin.info s t',
'electrum.kotocoin.info s t',
]
class Komodo(KomodoMixin, EquihashMixin, Coin):
NAME = "Komodo"
SHORTNAME = "KMD"
NET = "mainnet"
TX_COUNT = 693629
TX_COUNT_HEIGHT = 491777
TX_PER_BLOCK = 2
RPC_PORT = 7771
REORG_LIMIT = 800
PEERS = []
class Monaize(KomodoMixin, EquihashMixin, Coin):
NAME = "Monaize"
SHORTNAME = "MNZ"
NET = "mainnet"
TX_COUNT = 256
TX_COUNT_HEIGHT = 128
TX_PER_BLOCK = 2
RPC_PORT = 14337
REORG_LIMIT = 800
PEERS = []
class Einsteinium(Coin):
NAME = "Einsteinium"
SHORTNAME = "EMC2"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("21")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('4e56204bb7b8ac06f860ff1c845f03f9'
'84303b5b97eb7b42868f714611aed94b')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 2087559
TX_COUNT_HEIGHT = 1358517
TX_PER_BLOCK = 2
RPC_PORT = 41879
REORG_LIMIT = 2000
class Blackcoin(ScryptMixin, Coin):
NAME = "Blackcoin"
SHORTNAME = "BLK"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('000001faef25dec4fbcf906e6242621d'
'f2c183bf232f263d0ba5b101911e4563')
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 4594999
TX_COUNT_HEIGHT = 1667070
TX_PER_BLOCK = 3
RPC_PORT = 15715
REORG_LIMIT = 5000
class Bitbay(ScryptMixin, Coin):
NAME = "Bitbay"
SHORTNAME = "BAY"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('0000075685d3be1f253ce777174b1594'
'354e79954d2a32a6f77fe9cba00e6467')
TX_COUNT = 4594999
TX_COUNT_HEIGHT = 1667070
TX_PER_BLOCK = 3
RPC_PORT = 19914
REORG_LIMIT = 5000
class Peercoin(Coin):
NAME = "Peercoin"
SHORTNAME = "PPC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("37")
P2SH_VERBYTES = [bytes.fromhex("75")]
WIF_BYTE = bytes.fromhex("b7")
GENESIS_HASH = ('0000000032fe677166d54963b62a4677'
'd8957e87c508eaa4fd7eb1c880cd27e3')
DESERIALIZER = lib_tx.DeserializerTxTime
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 1207356
TX_COUNT_HEIGHT = 306425
TX_PER_BLOCK = 4
RPC_PORT = 9902
REORG_LIMIT = 5000
class Reddcoin(Coin):
NAME = "Reddcoin"
SHORTNAME = "RDD"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3d")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("bd")
GENESIS_HASH = ('b868e0d95a3c3c0e0dadc67ee587aaf9'
'dc8acbf99e3b4b3110fad4eb74c1decc')
DESERIALIZER = lib_tx.DeserializerReddcoin
TX_COUNT = 5413508
TX_COUNT_HEIGHT = 1717382
TX_PER_BLOCK = 3
RPC_PORT = 45443
class Vertcoin(Coin):
NAME = "Vertcoin"
SHORTNAME = "VTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("47")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('4d96a915f49d40b1e5c2844d1ee2dccb'
'90013a990ccea12c492d22110489f0c4')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 2383423
TX_COUNT_HEIGHT = 759076
TX_PER_BLOCK = 3
RPC_PORT = 5888
REORG_LIMIT = 1000
class Monacoin(Coin):
NAME = "Monacoin"
SHORTNAME = "MONA"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("32")
P2SH_VERBYTES = [bytes.fromhex("37"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("B0")
GENESIS_HASH = ('ff9f1c0116d19de7c9963845e129f9ed'
'1bfc0b376eb54fd7afa42e0d418c8bb6')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 2568580
TX_COUNT_HEIGHT = 1029766
TX_PER_BLOCK = 2
RPC_PORT = 9402
REORG_LIMIT = 1000
PEERS = [
'electrumx.tamami-foundation.org s t',
'electrumx2.tamami-foundation.org s t',
'electrumx3.tamami-foundation.org s t',
'electrumx1.monacoin.nl s t',
'electrumx2.monacoin.nl s t',
'electrumx1.monacoin.ninja s t',
'electrumx2.monacoin.ninja s t',
'electrumx1.movsign.info t',
'electrumx2.movsign.info s t',
'electrum-mona.bitbank.cc s t',
]
class MonacoinTestnet(Monacoin):
SHORTNAME = "XMN"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("75"), bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
GENESIS_HASH = ('a2b106ceba3be0c6d097b2a6a6aacf9d'
'638ba8258ae478158f449c321061e0b2')
TX_COUNT = 83602
TX_COUNT_HEIGHT = 83252
TX_PER_BLOCK = 1
RPC_PORT = 19402
REORG_LIMIT = 1000
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrumx1.testnet.monacoin.ninja s t',
'electrumx1.testnet.monacoin.nl s t',
]
class Crown(AuxPowMixin, Coin):
NAME = "Crown"
SHORTNAME = "CRW"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("00")
P2SH_VERBYTES = [bytes.fromhex("1c")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('0000000085370d5e122f64f4ab19c686'
'14ff3df78c8d13cb814fd7e69a1dc6da')
TX_COUNT = 13336629
TX_COUNT_HEIGHT = 1268206
TX_PER_BLOCK = 10
RPC_PORT = 9341
REORG_LIMIT = 1000
PEERS = [
'sgp-crwseed.crowndns.info s t',
'blr-crwseed.crowndns.info s t',
'sfo-crwseed.crowndns.info s t',
'nyc-crwseed.crowndns.info s t',
'ams-crwseed.crowndns.info s t',
'tor-crwseed.crowndns.info s t',
'lon-crwseed.crowndns.info s t',
'fra-crwseed.crowndns.info s t',
]
class Fujicoin(Coin):
NAME = "Fujicoin"
SHORTNAME = "FJC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("24")
P2SH_VERBYTES = [bytes.fromhex("10")]
WIF_BYTE = bytes.fromhex("a4")
GENESIS_HASH = ('adb6d9cfd74075e7f91608add4bd2a2e'
'a636f70856183086842667a1597714a0')
ESTIMATE_FEE = 0.001
RELAY_FEE = 0.001
TX_COUNT = 170478
TX_COUNT_HEIGHT = 1521676
TX_PER_BLOCK = 1
RPC_PORT = 3776
REORG_LIMIT = 1000
class Neblio(ScryptMixin, Coin):
NAME = "Neblio"
SHORTNAME = "NEBL"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("35")
P2SH_VERBYTES = [bytes.fromhex("70")]
WIF_BYTE = bytes.fromhex("b5")
GENESIS_HASH = ('7286972be4dbc1463d256049b7471c25'
'2e6557e222cab9be73181d359cd28bcc')
TX_COUNT = 23675
TX_COUNT_HEIGHT = 22785
TX_PER_BLOCK = 1
RPC_PORT = 6326
REORG_LIMIT = 1000
class Bitzeny(Coin):
NAME = "Bitzeny"
SHORTNAME = "ZNY"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("51")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('000009f7e55e9e3b4781e22bd87a7cfa'
'4acada9e4340d43ca738bf4e9fb8f5ce')
ESTIMATE_FEE = 0.001
RELAY_FEE = 0.001
DAEMON = daemon.FakeEstimateFeeDaemon
TX_COUNT = 1000
TX_COUNT_HEIGHT = 10000
TX_PER_BLOCK = 1
RPC_PORT = 9252
REORG_LIMIT = 1000
class CanadaeCoin(AuxPowMixin, Coin):
NAME = "CanadaeCoin"
SHORTNAME = "CDN"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("1C")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("9c")
GENESIS_HASH = ('863626dadaef221e2e2f30ff3dacae44'
'cabdae9e0028058072181b3fb675d94a')
ESTIMATE_FEE = 0.0001
RELAY_FEE = 0.0001
DAEMON = daemon.FakeEstimateFeeDaemon
TX_COUNT = 3455905
TX_COUNT_HEIGHT = 3645419
TX_PER_BLOCK = 1
RPC_PORT = 34330
REORG_LIMIT = 1000
class Denarius(Coin):
NAME = "Denarius"
SHORTNAME = "DNR"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("1E") #Address starts with a D
P2SH_VERBYTES = [bytes.fromhex("5A")]
WIF_BYTE = bytes.fromhex("9E") #WIF starts with a 6
GENESIS_HASH = ('00000d5dbbda01621cfc16bbc1f9bf32'
'64d641a5dbf0de89fd0182c2c4828fcd')
DESERIALIZER = lib_tx.DeserializerTxTime
TX_COUNT = 4230
RPC_PORT = 32339
ESTIMATE_FEE = 0.00001
RELAY_FEE = 0.00001
DAEMON = daemon.FakeEstimateFeeDaemon
TX_COUNT_HEIGHT = 306187
TX_PER_BLOCK = 4000
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
import tribus_hash
return tribus_hash.getPoWHash(header)
class DenariusTestnet(Denarius):
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("12")
P2SH_VERBYTES = [bytes.fromhex("74")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('000086bfe8264d241f7f8e5393f74778'
'4b8ca2aa98bdd066278d590462a4fdb4')
RPC_PORT = 32338
REORG_LIMIT = 2000
class Sibcoin(Dash):
NAME = "Sibcoin"
SHORTNAME = "SIB"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("3F")
P2SH_VERBYTES = [bytes.fromhex("28")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00000c492bf73490420868bc577680bf'
'c4c60116e7e85343bc624787c21efa4c')
DAEMON = daemon.DashDaemon
TX_COUNT = 1000
TX_COUNT_HEIGHT = 10000
TX_PER_BLOCK = 1
RPC_PORT = 1944
REORG_LIMIT = 1000
PEERS = []
@classmethod
def header_hash(cls, header):
        '''Given a header return the hash for Sibcoin.
        Requires the `x11_gost_hash` module; source code:
        https://github.com/ivansib/x11_gost_hash
        '''
import x11_gost_hash
return x11_gost_hash.getPoWHash(header)
class Chips(Coin):
NAME = "Chips"
SHORTNAME = "CHIPS"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3c")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("bc")
GENESIS_HASH = ('0000006e75f6aa0efdbf7db03132aa4e'
'4d0c84951537a6f5a7c39a0a9d30e1e7')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 145290
TX_COUNT_HEIGHT = 318637
TX_PER_BLOCK = 2
RPC_PORT = 57776
REORG_LIMIT = 800
class Feathercoin(Coin):
NAME = "Feathercoin"
SHORTNAME = "FTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488BC26")
XPRV_VERBYTES = bytes.fromhex("0488DAEE")
P2PKH_VERBYTE = bytes.fromhex("0E")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("8E")
GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
'c99d9714d334efa41a195a7e7e04bfe2')
TX_COUNT = 3170843
TX_COUNT_HEIGHT = 1981777
TX_PER_BLOCK = 2
RPC_PORT = 9337
REORG_LIMIT = 2000
PEERS = [
'electrumx-ch-1.feathercoin.ch s t',
]
class Newyorkcoin(AuxPowMixin, Coin):
NAME = "Newyorkcoin"
SHORTNAME = "NYC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3c")
P2SH_VERBYTES = [bytes.fromhex("16")]
WIF_BYTE = bytes.fromhex("bc")
GENESIS_HASH = ('5597f25c062a3038c7fd815fe46c67de'
'dfcb3c839fbc8e01ed4044540d08fe48')
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 5161944
TX_COUNT_HEIGHT = 3948743
TX_PER_BLOCK = 2
REORG_LIMIT = 2000
class Bitcore(BitcoinMixin, Coin):
NAME = "Bitcore"
SHORTNAME = "BTX"
DESERIALIZER = lib_tx.DeserializerSegWit
GENESIS_HASH = ('604148281e5c4b7f2487e5d03cd60d8e'
'6f69411d613f6448034508cea52e9574')
TX_COUNT = 126979
TX_COUNT_HEIGHT = 126946
TX_PER_BLOCK = 2
RPC_PORT = 8556
# Source: https://github.com/obsidianplatform
class Obsidian(Coin):
NAME = "Obsidian"
SHORTNAME = "ODN"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488c21e")
XPRV_VERBYTES = bytes.fromhex("0488b2dd")
P2PKH_VERBYTE = bytes.fromhex("4b")
P2SH_VERBYTES = [bytes.fromhex("7d")]
WIF_BYTE = bytes.fromhex("cb")
GENESIS_HASH = ('0000006dd8a92f58e952fa61c9402b74'
'a381a69d1930fb5cc12c73273fab5f0a')
RPC_PORT = 56661
TX_COUNT = 1067887
TX_PER_BLOCK = 2
TX_COUNT_HEIGHT = 500000
DAEMON = daemon.LegacyRPCDaemon
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
from hashlib import sha512
return sha512(header).digest()[:32]
class BitcoinAtom(Coin):
NAME = "BitcoinAtom"
SHORTNAME = "BCA"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("17")
P2SH_VERBYTES = [bytes.fromhex("0a")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('000000000019d6689c085ae165831e93'
'4ff763ae46a2a6c172b3f1b60a8ce26f')
STATIC_BLOCK_HEADERS = False
DESERIALIZER = lib_tx.DeserializerBitcoinAtom
HEADER_SIZE_POST_FORK = 84
BLOCK_PROOF_OF_STAKE = 0x01
BLOCK_PROOF_OF_STAKE_FLAGS = b'\x01\x00\x00\x00'
TX_COUNT = 295158744
TX_COUNT_HEIGHT = 589197
TX_PER_BLOCK = 10
RPC_PORT = 9136
REORG_LIMIT = 5000
@classmethod
def header_hash(cls, header):
        '''Given a header return the hash.'''
header_to_be_hashed = header[:cls.BASIC_HEADER_SIZE]
        # The post-fork block header format has extra flags at the end
if len(header) == cls.HEADER_SIZE_POST_FORK:
flags, = struct.unpack('<I', header[-4:])
        # Proof-of-stake blocks have special serialization
if flags & cls.BLOCK_PROOF_OF_STAKE != 0:
header_to_be_hashed += cls.BLOCK_PROOF_OF_STAKE_FLAGS
return double_sha256(header_to_be_hashed)
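    # Illustrative note for header_hash: post-fork 84-byte headers carry a
    # 4-byte flags word; when the proof-of-stake bit is set, the serialized
    # flags are appended to the 80-byte base header before double-SHA256.
    # A plain 80-byte (pre-fork) header is hashed unchanged:
    #
    #     BitcoinAtom.header_hash(bytes(80))   # double_sha256 of the header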
@classmethod
def block_header(cls, block, height):
'''Return the block header bytes'''
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
class Decred(Coin):
NAME = "Decred"
SHORTNAME = "DCR"
NET = "mainnet"
XPUB_VERBYTES = bytes('dpub', 'utf-8')
XPRV_VERBYTES = bytes('dprv', 'utf-8')
P2PKH_VERBYTE = bytes('Ds', 'utf-8')
P2SH_VERBYTES = [bytes('Dc', 'utf-8')]
WIF_BYTE = bytes('Pm', 'utf-8')
GENESIS_HASH = ('298e5cc3d985bfe7f81dc135f360abe089edd4396b86d2de66b0cef42b21d980')
DESERIALIZER = lib_tx.DeserializerDecred
ENCODE_CHECK = partial(Base58.encode_check, hash_fn=lib_tx.DeserializerDecred.blake256)
DECODE_CHECK = partial(Base58.decode_check, hash_fn=lib_tx.DeserializerDecred.blake256)
HEADER_HASH = lib_tx.DeserializerDecred.blake256
BASIC_HEADER_SIZE = 180
ALLOW_ADVANCING_ERRORS = True
TX_COUNT = 217380620
TX_COUNT_HEIGHT = 218875
TX_PER_BLOCK = 1000
RPC_PORT = 9109
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
return cls.HEADER_HASH(header)
@classmethod
def block(cls, raw_block, height):
'''Return a Block namedtuple given a raw block and its height.'''
if height > 0:
return super().block(raw_block, height)
else:
return Block(raw_block, cls.block_header(raw_block, height), [])
class DecredTestnet(Decred):
NAME = "Decred"
NET = "testnet"
XPUB_VERBYTES = bytes('tpub', 'utf-8')
XPRV_VERBYTES = bytes('tprv', 'utf-8')
P2PKH_VERBYTE = bytes('Ts', 'utf-8')
P2SH_VERBYTES = [bytes('Tc', 'utf-8')]
WIF_BYTE = bytes('Pt', 'utf-8')
GENESIS_HASH = ('4261602a9d07d80ad47621a64ba6a07754902e496777edc4ff581946bd7bc29c')
TX_COUNT = 3176305
TX_COUNT_HEIGHT = 254198
TX_PER_BLOCK = 1000
RPC_PORT = 19109
| 31.533758 | 114 | 0.648723 |
from collections import namedtuple
import re
import struct
from decimal import Decimal
from hashlib import sha256
from functools import partial
import base64
import lib.util as util
from lib.hash import Base58, hash160, double_sha256, hash_to_str
from lib.script import ScriptPubKey, OpCodes
import lib.tx as lib_tx
from server.block_processor import BlockProcessor
import server.daemon as daemon
from server.session import ElectrumX, DashElectrumX
Block = namedtuple("Block", "raw header transactions")
OP_RETURN = OpCodes.OP_RETURN
class CoinError(Exception):
class Coin(object):
REORG_LIMIT = 200
RPC_URL_REGEX = re.compile('.+@(\[[0-9a-fA-F:]+\]|[^:]+)(:[0-9]+)?')
VALUE_PER_COIN = 100000000
CHUNK_SIZE = 2016
HASHX_LEN = 11
BASIC_HEADER_SIZE = 80
STATIC_BLOCK_HEADERS = True
SESSIONCLS = ElectrumX
DESERIALIZER = lib_tx.Deserializer
DAEMON = daemon.Daemon
BLOCK_PROCESSOR = BlockProcessor
XPUB_VERBYTES = bytes('????', 'utf-8')
XPRV_VERBYTES = bytes('????', 'utf-8')
ENCODE_CHECK = Base58.encode_check
DECODE_CHECK = Base58.decode_check
PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
PEERS = []
@classmethod
def lookup_coin_class(cls, name, net):
req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK']
for coin in util.subclasses(Coin):
if (coin.NAME.lower() == name.lower() and
coin.NET.lower() == net.lower()):
coin_req_attrs = req_attrs.copy()
missing = [attr for attr in coin_req_attrs
if not hasattr(coin, attr)]
if missing:
raise CoinError('coin {} missing {} attributes'
.format(name, missing))
return coin
raise CoinError('unknown coin {} and network {} combination'
.format(name, net))
@classmethod
def sanitize_url(cls, url):
url = url.strip().rstrip('/')
match = cls.RPC_URL_REGEX.match(url)
if not match:
raise CoinError('invalid daemon URL: "{}"'.format(url))
if match.groups()[1] is None:
url += ':{:d}'.format(cls.RPC_PORT)
if not url.startswith('http://') and not url.startswith('https://'):
url = 'http://' + url
return url + '/'
@classmethod
def daemon_urls(cls, urls):
return [cls.sanitize_url(url) for url in urls.split(',')]
@classmethod
def genesis_block(cls, block):
header = cls.block_header(block, 0)
header_hex_hash = hash_to_str(cls.header_hash(header))
if header_hex_hash != cls.GENESIS_HASH:
raise CoinError('genesis block has hash {} expected {}'
.format(header_hex_hash, cls.GENESIS_HASH))
return header + bytes(1)
@classmethod
def hashX_from_script(cls, script):
if script and script[0] == OP_RETURN:
return None
return sha256(script).digest()[:cls.HASHX_LEN]
@util.cachedproperty
def address_handlers(cls):
return ScriptPubKey.PayToHandlers(
address=cls.P2PKH_address_from_hash160,
script_hash=cls.P2SH_address_from_hash160,
pubkey=cls.P2PKH_address_from_pubkey,
unspendable=lambda: None,
strange=lambda script: None,
)
@classmethod
def address_from_script(cls, script):
return ScriptPubKey.pay_to(cls.address_handlers, script)
@staticmethod
def lookup_xverbytes(verbytes):
for coin in util.subclasses(Coin):
if verbytes == coin.XPUB_VERBYTES:
return True, coin
if verbytes == coin.XPRV_VERBYTES:
return False, coin
raise CoinError('version bytes unrecognised')
@classmethod
def address_to_hashX(cls, address):
return cls.hashX_from_script(cls.pay_to_address_script(address))
@classmethod
def P2PKH_address_from_hash160(cls, hash160):
assert len(hash160) == 20
return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160)
@classmethod
def P2PKH_address_from_pubkey(cls, pubkey):
return cls.P2PKH_address_from_hash160(hash160(pubkey))
@classmethod
def P2SH_address_from_hash160(cls, hash160):
assert len(hash160) == 20
return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[0] + hash160)
@classmethod
def multisig_address(cls, m, pubkeys):
script = cls.pay_to_multisig_script(m, pubkeys)
return cls.P2SH_address_from_hash160(hash160(script))
@classmethod
def pay_to_multisig_script(cls, m, pubkeys):
return ScriptPubKey.multisig_script(m, pubkeys)
@classmethod
def pay_to_pubkey_script(cls, pubkey):
return ScriptPubKey.P2PK_script(pubkey)
@classmethod
def pay_to_address_script(cls, address):
raw = cls.DECODE_CHECK(address)
verbyte = -1
verlen = len(raw) - 20
if verlen > 0:
verbyte, hash_bytes = raw[:verlen], raw[verlen:]
if verbyte == cls.P2PKH_VERBYTE:
return ScriptPubKey.P2PKH_script(hash_bytes)
if verbyte in cls.P2SH_VERBYTES:
return ScriptPubKey.P2SH_script(hash_bytes)
raise CoinError('invalid address: {}'.format(address))
@classmethod
def privkey_WIF(cls, privkey_bytes, compressed):
payload = bytearray(cls.WIF_BYTE) + privkey_bytes
if compressed:
payload.append(0x01)
return cls.ENCODE_CHECK(payload)
@classmethod
def header_hash(cls, header):
return double_sha256(header)
@classmethod
def header_prevhash(cls, header):
return header[4:36]
@classmethod
def static_header_offset(cls, height):
assert cls.STATIC_BLOCK_HEADERS
return height * cls.BASIC_HEADER_SIZE
@classmethod
def static_header_len(cls, height):
return cls.static_header_offset(height + 1) \
- cls.static_header_offset(height)
@classmethod
def block_header(cls, block, height):
return block[:cls.static_header_len(height)]
@classmethod
def block(cls, raw_block, height):
header = cls.block_header(raw_block, height)
txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
return Block(raw_block, header, txs)
@classmethod
def decimal_value(cls, value):
return Decimal(value) / cls.VALUE_PER_COIN
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, bits, nonce = struct.unpack('<III', header[68:80])
return {
'block_height': height,
'version': version,
'prev_block_hash': hash_to_str(header[4:36]),
'merkle_root': hash_to_str(header[36:68]),
'timestamp': timestamp,
'bits': bits,
'nonce': nonce,
}
class AuxPowMixin(object):
STATIC_BLOCK_HEADERS = False
DESERIALIZER = lib_tx.DeserializerAuxPow
@classmethod
def header_hash(cls, header):
return double_sha256(header[:cls.BASIC_HEADER_SIZE])
@classmethod
def block_header(cls, block, height):
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
class EquihashMixin(object):
STATIC_BLOCK_HEADERS = False
BASIC_HEADER_SIZE = 140
DESERIALIZER = lib_tx.DeserializerEquihash
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, bits = struct.unpack('<II', header[100:108])
return {
'block_height': height,
'version': version,
'prev_block_hash': hash_to_str(header[4:36]),
'merkle_root': hash_to_str(header[36:68]),
'timestamp': timestamp,
'bits': bits,
'nonce': hash_to_str(header[108:140]),
}
@classmethod
def block_header(cls, block, height):
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
class ScryptMixin(object):
DESERIALIZER = lib_tx.DeserializerTxTime
HEADER_HASH = None
@classmethod
def header_hash(cls, header):
if cls.HEADER_HASH is None:
import scrypt
cls.HEADER_HASH = lambda x: scrypt.hash(x, x, 1024, 1, 1, 32)
version, = struct.unpack('<I', header[:4])
if version > 6:
return super().header_hash(header)
else:
return cls.HEADER_HASH(header)
class KomodoMixin(object):
P2PKH_VERBYTE = bytes.fromhex("3C")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("BC")
GENESIS_HASH = ('027e3758c3a65b12aa1046462b486d0a'
'63bfa1beae327897f56c5cfb7daaae71')
DESERIALIZER = lib_tx.DeserializerZcash
class BitcoinMixin(object):
SHORTNAME = "BTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("00")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('000000000019d6689c085ae165831e93'
'4ff763ae46a2a6c172b3f1b60a8ce26f')
RPC_PORT = 8332
class HOdlcoin(Coin):
NAME = "HOdlcoin"
SHORTNAME = "HODLC"
NET = "mainnet"
BASIC_HEADER_SIZE = 88
P2PKH_VERBYTE = bytes.fromhex("28")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("a8")
GENESIS_HASH = ('008872e5582924544e5c707ee4b839bb'
'82c28a9e94e917c94b40538d5658c04b')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 258858
TX_COUNT_HEIGHT = 382138
TX_PER_BLOCK = 5
class BitcoinCash(BitcoinMixin, Coin):
NAME = "BitcoinCash"
SHORTNAME = "BCC"
TX_COUNT = 243631085
TX_COUNT_HEIGHT = 479636
TX_PER_BLOCK = 50
PEERS = [
'electrum-abc.criptolayer.net s50012',
'electroncash.cascharia.com s50002',
'bch.arihanc.com t52001 s52002',
'bccarihace4jdcnt.onion t52001 s52002',
'jelectrum-cash.1209k.com s t',
'abc.vom-stausee.de t52001 s52002',
'abc1.hsmiths.com t60001 s60002',
'electroncash.checksum0.com s t',
]
class BitcoinSegwit(BitcoinMixin, Coin):
NAME = "BitcoinSegwit"
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 217380620
TX_COUNT_HEIGHT = 464000
TX_PER_BLOCK = 1800
PEERS = [
'btc.smsys.me s995',
'E-X.not.fyi s t',
'elec.luggs.co s443',
'electrum.vom-stausee.de s t',
'electrum3.hachre.de p10000 s t',
'electrum.hsmiths.com s t',
'erbium1.sytes.net s t',
'helicarrier.bauerj.eu s t',
'hsmiths4fyqlw5xw.onion s t',
'luggscoqbymhvnkp.onion t80',
'ozahtqwp25chjdjd.onion s t',
'us11.einfachmalnettsein.de s t',
'ELEX01.blackpole.online s t',
'node.arihanc.com s t',
'arihancckjge66iv.onion s t',
]
class BitcoinGold(EquihashMixin, BitcoinMixin, Coin):
CHUNK_SIZE = 252
NAME = "BitcoinGold"
SHORTNAME = "BTG"
FORK_HEIGHT = 491407
P2PKH_VERBYTE = bytes.fromhex("26")
P2SH_VERBYTES = [bytes.fromhex("17")]
DESERIALIZER = lib_tx.DeserializerEquihashSegWit
TX_COUNT = 265026255
TX_COUNT_HEIGHT = 499923
TX_PER_BLOCK = 50
REORG_LIMIT = 1000
RPC_PORT = 8338
@classmethod
def header_hash(cls, header):
height, = struct.unpack('<I', header[68:72])
if height >= cls.FORK_HEIGHT:
return double_sha256(header)
else:
return double_sha256(header[:68] + header[100:112])
@classmethod
def electrum_header(cls, header, height):
h = dict(
block_height=height,
version=struct.unpack('<I', header[:4])[0],
prev_block_hash=hash_to_str(header[4:36]),
merkle_root=hash_to_str(header[36:68]),
timestamp=struct.unpack('<I', header[100:104])[0],
reserved=hash_to_str(header[72:100]),
bits=struct.unpack('<I', header[104:108])[0],
nonce=hash_to_str(header[108:140]),
solution=hash_to_str(header[140:])
)
return h
class BitcoinGoldTestnet(BitcoinGold):
FORK_HEIGHT = 1
SHORTNAME = "TBTG"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
TX_COUNT = 0
TX_COUNT_HEIGHT = 1
NET = 'testnet'
RPC_PORT = 18338
GENESIS_HASH = ('00000000e0781ebe24b91eedc293adfe'
'a2f557b53ec379e78959de3853e6f9f6')
class BitcoinGoldRegtest(BitcoinGold):
FORK_HEIGHT = 2000
SHORTNAME = "TBTG"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
TX_COUNT = 0
TX_COUNT_HEIGHT = 1
NET = 'regtest'
RPC_PORT = 18444
GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
'bf5beb436012afca590b1a11466e2206')
class Emercoin(Coin):
NAME = "Emercoin"
SHORTNAME = "EMC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("21")
P2SH_VERBYTES = [bytes.fromhex("5c")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00000000bcccd459d036a588d1008fce'
'8da3754b205736f32ddfd35350e84c2d')
TX_COUNT = 217380620
TX_COUNT_HEIGHT = 464000
TX_PER_BLOCK = 1700
VALUE_PER_COIN = 1000000
RPC_PORT = 6662
DESERIALIZER = lib_tx.DeserializerTxTimeAuxPow
PEERS = []
@classmethod
def block_header(cls, block, height):
deserializer = cls.DESERIALIZER(block)
if deserializer.is_merged_block():
return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
return block[:cls.static_header_len(height)]
@classmethod
def header_hash(cls, header):
return double_sha256(header[:cls.BASIC_HEADER_SIZE])
class BitcoinTestnetMixin(object):
SHORTNAME = "XTN"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('000000000933ea01ad0ee984209779ba'
'aec3ced90fa3f408719526f8d77f4943')
REORG_LIMIT = 8000
TX_COUNT = 12242438
TX_COUNT_HEIGHT = 1035428
TX_PER_BLOCK = 21
RPC_PORT = 18332
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
class BitcoinCashTestnet(BitcoinTestnetMixin, Coin):
NAME = "BitcoinCash"
PEERS = [
'electrum-testnet-abc.criptolayer.net s50112',
'bchtestnet.arihanc.com t53001 s53002',
'ciiattqkgzebpp6jofjbrkhvhwmgnsfoayljdcrve2p3qmkbv3duaoyd.onion t53001 s53002',
]
class BitcoinSegwitTestnet(BitcoinTestnetMixin, Coin):
NAME = "BitcoinSegwit"
DESERIALIZER = lib_tx.DeserializerSegWit
PEERS = [
'electrum.akinbo.org s t',
'he36kyperp3kbuxu.onion s t',
'testnet.hsmiths.com t53011 s53012',
'hsmithsxurybd7uh.onion t53011 s53012',
'testnetnode.arihanc.com s t',
'w3e2orjpiiv2qwem3dw66d7c4krink4nhttngkylglpqe5r22n6n5wid.onion s t',
]
class BitcoinSegwitRegtest(BitcoinSegwitTestnet):
NAME = "BitcoinSegwit"
NET = "regtest"
GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328'
'bf5beb436012afca590b1a11466e2206')
PEERS= []
TX_COUNT = 1
TX_COUNT_HEIGHT = 1
class BitcoinNolnet(BitcoinCash):
NET = "nolnet"
GENESIS_HASH = ('0000000057e31bd2066c939a63b7b862'
'3bd0f10d8c001304bdfc1a7902ae6d35')
PEERS = []
REORG_LIMIT = 8000
TX_COUNT = 583589
TX_COUNT_HEIGHT = 8617
TX_PER_BLOCK = 50
RPC_PORT = 28332
PEER_DEFAULT_PORTS = {'t': '52001', 's': '52002'}
class Litecoin(Coin):
NAME = "Litecoin"
SHORTNAME = "LTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("30")
P2SH_VERBYTES = [bytes.fromhex("32"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
'c99d9714d334efa41a195a7e7e04bfe2')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 8908766
TX_COUNT_HEIGHT = 1105256
TX_PER_BLOCK = 10
RPC_PORT = 9332
REORG_LIMIT = 800
PEERS = [
'elec.luggs.co s444',
'electrum-ltc.bysh.me s t',
'electrum-ltc.ddns.net s t',
'electrum-ltc.wilv.in s t',
'electrum.cryptomachine.com p1000 s t',
'electrum.ltc.xurious.com s t',
'eywr5eubdbbe2laq.onion s50008 t50007',
]
class LitecoinTestnet(Litecoin):
SHORTNAME = "XLT"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("3a"), bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('4966625a4b2851d9fdee139e56211a0d'
'88575f59ed816ff5e6a63deb4e3e29a0')
TX_COUNT = 21772
TX_COUNT_HEIGHT = 20800
TX_PER_BLOCK = 2
RPC_PORT = 19332
REORG_LIMIT = 4000
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrum-ltc.bysh.me s t',
'electrum.ltc.xurious.com s t',
]
class Viacoin(AuxPowMixin, Coin):
NAME="Viacoin"
SHORTNAME = "VIA"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("47")
P2SH_VERBYTES = [bytes.fromhex("21")]
WIF_BYTE = bytes.fromhex("c7")
GENESIS_HASH = ('4e9b54001f9976049830128ec0331515'
'eaabe35a70970d79971da1539a400ba1')
TX_COUNT = 113638
TX_COUNT_HEIGHT = 3473674
TX_PER_BLOCK = 30
RPC_PORT = 5222
REORG_LIMIT = 5000
DESERIALIZER = lib_tx.DeserializerAuxPowSegWit
PEERS = [
'vialectrum.bitops.me s t',
'server.vialectrum.org s t',
'vialectrum.viacoin.net s t',
'viax1.bitops.me s t',
]
class ViacoinTestnet(Viacoin):
SHORTNAME = "TVI"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("7f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ff")
GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
'a4cccff2a4767a8eee39c11db367b008')
RPC_PORT = 25222
REORG_LIMIT = 2500
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'vialectrum.bysh.me s t',
]
class ViacoinTestnetSegWit(ViacoinTestnet):
NET = "testnet-segwit"
DESERIALIZER = lib_tx.DeserializerSegWit
class Namecoin(AuxPowMixin, Coin):
NAME = "Namecoin"
SHORTNAME = "NMC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("d7dd6370")
XPRV_VERBYTES = bytes.fromhex("d7dc6e31")
P2PKH_VERBYTE = bytes.fromhex("34")
P2SH_VERBYTES = [bytes.fromhex("0d")]
WIF_BYTE = bytes.fromhex("e4")
GENESIS_HASH = ('000000000062b72c5e2ceb45fbc8587e'
'807c155b0da735e6483dfba2f0a9c770')
TX_COUNT = 4415768
TX_COUNT_HEIGHT = 329065
TX_PER_BLOCK = 10
PEERS = [
'elec.luggs.co s446',
]
class NamecoinTestnet(Namecoin):
NAME = "Namecoin"
SHORTNAME = "XNM"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
'a4cccff2a4767a8eee39c11db367b008')
class Dogecoin(AuxPowMixin, Coin):
NAME = "Dogecoin"
SHORTNAME = "DOGE"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("02facafd")
XPRV_VERBYTES = bytes.fromhex("02fac398")
P2PKH_VERBYTE = bytes.fromhex("1e")
P2SH_VERBYTES = [bytes.fromhex("16")]
WIF_BYTE = bytes.fromhex("9e")
GENESIS_HASH = ('1a91e3dace36e2be3bf030a65679fe82'
'1aa1d6ef92e7c9902eb318182c355691')
TX_COUNT = 27583427
TX_COUNT_HEIGHT = 1604979
TX_PER_BLOCK = 20
REORG_LIMIT = 2000
class DogecoinTestnet(Dogecoin):
NAME = "Dogecoin"
SHORTNAME = "XDT"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("71")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("f1")
GENESIS_HASH = ('bb0a78264637406b6360aad926284d54'
'4d7049f45189db5664f3c4d07350559e')
class Dash(Coin):
NAME = "Dash"
SHORTNAME = "DASH"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("02fe52cc")
XPRV_VERBYTES = bytes.fromhex("02fe52f8")
GENESIS_HASH = ('00000ffd590b1485b3caadc19b22e637'
'9c733355108f107a430458cdf3407ab6')
P2PKH_VERBYTE = bytes.fromhex("4c")
P2SH_VERBYTES = [bytes.fromhex("10")]
WIF_BYTE = bytes.fromhex("cc")
TX_COUNT_HEIGHT = 569399
TX_COUNT = 2157510
TX_PER_BLOCK = 4
RPC_PORT = 9998
PEERS = [
'electrum.dash.org s t',
'electrum.masternode.io s t',
'electrum-drk.club s t',
'dashcrypto.space s t',
'electrum.dash.siampm.com s t',
'wl4sfwq2hwxnodof.onion s t',
]
SESSIONCLS = DashElectrumX
DAEMON = daemon.DashDaemon
@classmethod
def header_hash(cls, header):
import x11_hash
return x11_hash.getPoWHash(header)
class DashTestnet(Dash):
SHORTNAME = "tDASH"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("3a805837")
XPRV_VERBYTES = bytes.fromhex("3a8061a0")
GENESIS_HASH = ('00000bafbc94add76cb75e2ec9289483'
'7288a481e5c005f6563d91623bf8bc2c')
P2PKH_VERBYTE = bytes.fromhex("8c")
P2SH_VERBYTES = [bytes.fromhex("13")]
WIF_BYTE = bytes.fromhex("ef")
TX_COUNT_HEIGHT = 101619
TX_COUNT = 132681
TX_PER_BLOCK = 1
RPC_PORT = 19998
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrum.dash.siampm.com s t',
]
class Argentum(AuxPowMixin, Coin):
NAME = "Argentum"
SHORTNAME = "ARG"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("17")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("97")
GENESIS_HASH = ('88c667bc63167685e4e4da058fffdfe8'
'e007e5abffd6855de52ad59df7bb0bb2')
TX_COUNT = 2263089
TX_COUNT_HEIGHT = 2050260
TX_PER_BLOCK = 2000
RPC_PORT = 13581
class ArgentumTestnet(Argentum):
SHORTNAME = "XRG"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
REORG_LIMIT = 2000
class DigiByte(Coin):
NAME = "DigiByte"
SHORTNAME = "DGB"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1E")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('7497ea1b465eb39f1c8f507bc877078f'
'e016d6fcb6dfad3a64c98dcc6e1e8496')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 1046018
TX_COUNT_HEIGHT = 1435000
TX_PER_BLOCK = 1000
RPC_PORT = 12022
class DigiByteTestnet(DigiByte):
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('b5dca8039e300198e5fe7cd23bdd1728'
'e2a444af34c447dbd0916fa3430a68c2')
RPC_PORT = 15022
REORG_LIMIT = 2000
class FairCoin(Coin):
NAME = "FairCoin"
SHORTNAME = "FAIR"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("5f")
P2SH_VERBYTES = [bytes.fromhex("24")]
WIF_BYTE = bytes.fromhex("df")
GENESIS_HASH = ('beed44fa5e96150d95d56ebd5d262578'
'1825a9407a5215dd7eda723373a0a1d7')
BASIC_HEADER_SIZE = 108
TX_COUNT = 505
TX_COUNT_HEIGHT = 470
TX_PER_BLOCK = 1
RPC_PORT = 40405
PEER_DEFAULT_PORTS = {'t': '51811', 's': '51812'}
PEERS = [
'electrum.faircoin.world s',
'electrumfair.punto0.org s',
]
@classmethod
def block(cls, raw_block, height):
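        # Height 0 (the FairCoin genesis block) is special-cased below: it is
        # returned with an empty transaction list instead of being deserialized.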
if height > 0:
return super().block(raw_block, height)
else:
return Block(raw_block, cls.block_header(raw_block, height), [])
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, creatorId = struct.unpack('<II', header[100:108])
return {
'block_height': height,
'version': version,
'prev_block_hash': hash_to_str(header[4:36]),
'merkle_root': hash_to_str(header[36:68]),
'payload_hash': hash_to_str(header[68:100]),
'timestamp': timestamp,
'creatorId': creatorId,
}
class Zcash(EquihashMixin, Coin):
NAME = "Zcash"
SHORTNAME = "ZEC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00040fe8ec8471911baa1db1266ea15d'
'd06b4a8a5c453883c000b031973dce08')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
RPC_PORT = 8232
REORG_LIMIT = 800
class SnowGem(EquihashMixin, Coin):
NAME = "SnowGem"
SHORTNAME = "SNG"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1C28")
P2SH_VERBYTES = [bytes.fromhex("1C2D")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00068b35729d9d2b0c294ff1fe9af009'
'4740524311a131de40e7f705e4c29a5b')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 140698
TX_COUNT_HEIGHT = 102802
TX_PER_BLOCK = 2
RPC_PORT = 16112
REORG_LIMIT = 800
CHUNK_SIZE = 200
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, bits = struct.unpack('<II', header[100:108])
return {
'block_height': height,
'version': version,
'prev_block_hash': hash_to_str(header[4:36]),
'merkle_root': hash_to_str(header[36:68]),
'hash_reserved': hash_to_str(header[68:100]),
'timestamp': timestamp,
'bits': bits,
'nonce': hash_to_str(header[108:140]),
'n_solution': base64.b64encode(lib_tx.Deserializer(header, start=140)._read_varbytes()).decode('utf8')
}
class BitcoinZ(EquihashMixin, Coin):
NAME = "BitcoinZ"
SHORTNAME = "BTCZ"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('f499ee3d498b4298ac6a64205b8addb7'
'c43197e2a660229be65db8a4534d75c1')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 171976
TX_COUNT_HEIGHT = 81323
TX_PER_BLOCK = 3
RPC_PORT = 1979
REORG_LIMIT = 800
class Hush(EquihashMixin, Coin):
NAME = "Hush"
SHORTNAME = "HUSH"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
WIF_BYTE = bytes.fromhex("80")
    GENESIS_HASH = ('0003a67bc26fe564b75daf11186d3606'
'52eb435a35ba3d9d3e7e5d5f8e62dc17')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
RPC_PORT = 8822
REORG_LIMIT = 800
class Zclassic(EquihashMixin, Coin):
NAME = "Zclassic"
SHORTNAME = "ZCL"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
WIF_BYTE = bytes.fromhex("80")
    GENESIS_HASH = ('0007104ccda289427919efc39dc9e4d4'
'99804b7bebc22df55f8b834301260602')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
RPC_PORT = 8023
REORG_LIMIT = 800
class Koto(Coin):
NAME = "Koto"
SHORTNAME = "KOTO"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1836")
P2SH_VERBYTES = [bytes.fromhex("183B")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('6d424c350729ae633275d51dc3496e16'
'cd1b1d195c164da00f39c499a2e9959e')
DESERIALIZER = lib_tx.DeserializerZcash
TX_COUNT = 158914
TX_COUNT_HEIGHT = 67574
TX_PER_BLOCK = 3
RPC_PORT = 8432
REORG_LIMIT = 800
PEERS = [
'fr.kotocoin.info s t',
'electrum.kotocoin.info s t',
]
class Komodo(KomodoMixin, EquihashMixin, Coin):
NAME = "Komodo"
SHORTNAME = "KMD"
NET = "mainnet"
TX_COUNT = 693629
TX_COUNT_HEIGHT = 491777
TX_PER_BLOCK = 2
RPC_PORT = 7771
REORG_LIMIT = 800
PEERS = []
class Monaize(KomodoMixin, EquihashMixin, Coin):
NAME = "Monaize"
SHORTNAME = "MNZ"
NET = "mainnet"
TX_COUNT = 256
TX_COUNT_HEIGHT = 128
TX_PER_BLOCK = 2
RPC_PORT = 14337
REORG_LIMIT = 800
PEERS = []
class Einsteinium(Coin):
NAME = "Einsteinium"
SHORTNAME = "EMC2"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("21")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('4e56204bb7b8ac06f860ff1c845f03f9'
'84303b5b97eb7b42868f714611aed94b')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 2087559
TX_COUNT_HEIGHT = 1358517
TX_PER_BLOCK = 2
RPC_PORT = 41879
REORG_LIMIT = 2000
class Blackcoin(ScryptMixin, Coin):
NAME = "Blackcoin"
SHORTNAME = "BLK"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('000001faef25dec4fbcf906e6242621d'
'f2c183bf232f263d0ba5b101911e4563')
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 4594999
TX_COUNT_HEIGHT = 1667070
TX_PER_BLOCK = 3
RPC_PORT = 15715
REORG_LIMIT = 5000
class Bitbay(ScryptMixin, Coin):
NAME = "Bitbay"
SHORTNAME = "BAY"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('0000075685d3be1f253ce777174b1594'
'354e79954d2a32a6f77fe9cba00e6467')
TX_COUNT = 4594999
TX_COUNT_HEIGHT = 1667070
TX_PER_BLOCK = 3
RPC_PORT = 19914
REORG_LIMIT = 5000
class Peercoin(Coin):
NAME = "Peercoin"
SHORTNAME = "PPC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("37")
P2SH_VERBYTES = [bytes.fromhex("75")]
WIF_BYTE = bytes.fromhex("b7")
GENESIS_HASH = ('0000000032fe677166d54963b62a4677'
'd8957e87c508eaa4fd7eb1c880cd27e3')
DESERIALIZER = lib_tx.DeserializerTxTime
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 1207356
TX_COUNT_HEIGHT = 306425
TX_PER_BLOCK = 4
RPC_PORT = 9902
REORG_LIMIT = 5000
class Reddcoin(Coin):
NAME = "Reddcoin"
SHORTNAME = "RDD"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3d")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("bd")
GENESIS_HASH = ('b868e0d95a3c3c0e0dadc67ee587aaf9'
'dc8acbf99e3b4b3110fad4eb74c1decc')
DESERIALIZER = lib_tx.DeserializerReddcoin
TX_COUNT = 5413508
TX_COUNT_HEIGHT = 1717382
TX_PER_BLOCK = 3
RPC_PORT = 45443
class Vertcoin(Coin):
NAME = "Vertcoin"
SHORTNAME = "VTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("47")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('4d96a915f49d40b1e5c2844d1ee2dccb'
'90013a990ccea12c492d22110489f0c4')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 2383423
TX_COUNT_HEIGHT = 759076
TX_PER_BLOCK = 3
RPC_PORT = 5888
REORG_LIMIT = 1000
class Monacoin(Coin):
NAME = "Monacoin"
SHORTNAME = "MONA"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488B21E")
XPRV_VERBYTES = bytes.fromhex("0488ADE4")
P2PKH_VERBYTE = bytes.fromhex("32")
P2SH_VERBYTES = [bytes.fromhex("37"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("B0")
GENESIS_HASH = ('ff9f1c0116d19de7c9963845e129f9ed'
'1bfc0b376eb54fd7afa42e0d418c8bb6')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 2568580
TX_COUNT_HEIGHT = 1029766
TX_PER_BLOCK = 2
RPC_PORT = 9402
REORG_LIMIT = 1000
PEERS = [
'electrumx.tamami-foundation.org s t',
'electrumx2.tamami-foundation.org s t',
'electrumx3.tamami-foundation.org s t',
'electrumx1.monacoin.nl s t',
'electrumx2.monacoin.nl s t',
'electrumx1.monacoin.ninja s t',
'electrumx2.monacoin.ninja s t',
'electrumx1.movsign.info t',
'electrumx2.movsign.info s t',
'electrum-mona.bitbank.cc s t',
]
class MonacoinTestnet(Monacoin):
SHORTNAME = "XMN"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587CF")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6F")
P2SH_VERBYTES = [bytes.fromhex("75"), bytes.fromhex("C4")]
WIF_BYTE = bytes.fromhex("EF")
GENESIS_HASH = ('a2b106ceba3be0c6d097b2a6a6aacf9d'
'638ba8258ae478158f449c321061e0b2')
TX_COUNT = 83602
TX_COUNT_HEIGHT = 83252
TX_PER_BLOCK = 1
RPC_PORT = 19402
REORG_LIMIT = 1000
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrumx1.testnet.monacoin.ninja s t',
'electrumx1.testnet.monacoin.nl s t',
]
class Crown(AuxPowMixin, Coin):
NAME = "Crown"
SHORTNAME = "CRW"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("00")
P2SH_VERBYTES = [bytes.fromhex("1c")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('0000000085370d5e122f64f4ab19c686'
'14ff3df78c8d13cb814fd7e69a1dc6da')
TX_COUNT = 13336629
TX_COUNT_HEIGHT = 1268206
TX_PER_BLOCK = 10
RPC_PORT = 9341
REORG_LIMIT = 1000
PEERS = [
'sgp-crwseed.crowndns.info s t',
'blr-crwseed.crowndns.info s t',
'sfo-crwseed.crowndns.info s t',
'nyc-crwseed.crowndns.info s t',
'ams-crwseed.crowndns.info s t',
'tor-crwseed.crowndns.info s t',
'lon-crwseed.crowndns.info s t',
'fra-crwseed.crowndns.info s t',
]
class Fujicoin(Coin):
NAME = "Fujicoin"
SHORTNAME = "FJC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("24")
P2SH_VERBYTES = [bytes.fromhex("10")]
WIF_BYTE = bytes.fromhex("a4")
GENESIS_HASH = ('adb6d9cfd74075e7f91608add4bd2a2e'
'a636f70856183086842667a1597714a0')
ESTIMATE_FEE = 0.001
RELAY_FEE = 0.001
TX_COUNT = 170478
TX_COUNT_HEIGHT = 1521676
TX_PER_BLOCK = 1
RPC_PORT = 3776
REORG_LIMIT = 1000
class Neblio(ScryptMixin, Coin):
NAME = "Neblio"
SHORTNAME = "NEBL"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("35")
P2SH_VERBYTES = [bytes.fromhex("70")]
WIF_BYTE = bytes.fromhex("b5")
GENESIS_HASH = ('7286972be4dbc1463d256049b7471c25'
'2e6557e222cab9be73181d359cd28bcc')
TX_COUNT = 23675
TX_COUNT_HEIGHT = 22785
TX_PER_BLOCK = 1
RPC_PORT = 6326
REORG_LIMIT = 1000
class Bitzeny(Coin):
NAME = "Bitzeny"
SHORTNAME = "ZNY"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("51")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('000009f7e55e9e3b4781e22bd87a7cfa'
'4acada9e4340d43ca738bf4e9fb8f5ce')
ESTIMATE_FEE = 0.001
RELAY_FEE = 0.001
DAEMON = daemon.FakeEstimateFeeDaemon
TX_COUNT = 1000
TX_COUNT_HEIGHT = 10000
TX_PER_BLOCK = 1
RPC_PORT = 9252
REORG_LIMIT = 1000
class CanadaeCoin(AuxPowMixin, Coin):
NAME = "CanadaeCoin"
SHORTNAME = "CDN"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("1C")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("9c")
GENESIS_HASH = ('863626dadaef221e2e2f30ff3dacae44'
'cabdae9e0028058072181b3fb675d94a')
ESTIMATE_FEE = 0.0001
RELAY_FEE = 0.0001
DAEMON = daemon.FakeEstimateFeeDaemon
TX_COUNT = 3455905
TX_COUNT_HEIGHT = 3645419
TX_PER_BLOCK = 1
RPC_PORT = 34330
REORG_LIMIT = 1000
class Denarius(Coin):
NAME = "Denarius"
SHORTNAME = "DNR"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("1E")
P2SH_VERBYTES = [bytes.fromhex("5A")]
WIF_BYTE = bytes.fromhex("9E")
GENESIS_HASH = ('00000d5dbbda01621cfc16bbc1f9bf32'
'64d641a5dbf0de89fd0182c2c4828fcd')
DESERIALIZER = lib_tx.DeserializerTxTime
TX_COUNT = 4230
RPC_PORT = 32339
ESTIMATE_FEE = 0.00001
RELAY_FEE = 0.00001
DAEMON = daemon.FakeEstimateFeeDaemon
TX_COUNT_HEIGHT = 306187
TX_PER_BLOCK = 4000
@classmethod
def header_hash(cls, header):
import tribus_hash
return tribus_hash.getPoWHash(header)
class DenariusTestnet(Denarius):
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("12")
P2SH_VERBYTES = [bytes.fromhex("74")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('000086bfe8264d241f7f8e5393f74778'
'4b8ca2aa98bdd066278d590462a4fdb4')
RPC_PORT = 32338
REORG_LIMIT = 2000
class Sibcoin(Dash):
NAME = "Sibcoin"
SHORTNAME = "SIB"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("3F")
P2SH_VERBYTES = [bytes.fromhex("28")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00000c492bf73490420868bc577680bf'
'c4c60116e7e85343bc624787c21efa4c')
DAEMON = daemon.DashDaemon
TX_COUNT = 1000
TX_COUNT_HEIGHT = 10000
TX_PER_BLOCK = 1
RPC_PORT = 1944
REORG_LIMIT = 1000
PEERS = []
@classmethod
def header_hash(cls, header):
import x11_gost_hash
return x11_gost_hash.getPoWHash(header)
class Chips(Coin):
NAME = "Chips"
SHORTNAME = "CHIPS"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3c")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("bc")
GENESIS_HASH = ('0000006e75f6aa0efdbf7db03132aa4e'
'4d0c84951537a6f5a7c39a0a9d30e1e7')
DESERIALIZER = lib_tx.DeserializerSegWit
TX_COUNT = 145290
TX_COUNT_HEIGHT = 318637
TX_PER_BLOCK = 2
RPC_PORT = 57776
REORG_LIMIT = 800
class Feathercoin(Coin):
NAME = "Feathercoin"
SHORTNAME = "FTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488BC26")
XPRV_VERBYTES = bytes.fromhex("0488DAEE")
P2PKH_VERBYTE = bytes.fromhex("0E")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("8E")
GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
'c99d9714d334efa41a195a7e7e04bfe2')
TX_COUNT = 3170843
TX_COUNT_HEIGHT = 1981777
TX_PER_BLOCK = 2
RPC_PORT = 9337
REORG_LIMIT = 2000
PEERS = [
'electrumx-ch-1.feathercoin.ch s t',
]
class Newyorkcoin(AuxPowMixin, Coin):
NAME = "Newyorkcoin"
SHORTNAME = "NYC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3c")
P2SH_VERBYTES = [bytes.fromhex("16")]
WIF_BYTE = bytes.fromhex("bc")
GENESIS_HASH = ('5597f25c062a3038c7fd815fe46c67de'
'dfcb3c839fbc8e01ed4044540d08fe48')
DAEMON = daemon.LegacyRPCDaemon
TX_COUNT = 5161944
TX_COUNT_HEIGHT = 3948743
TX_PER_BLOCK = 2
REORG_LIMIT = 2000
class Bitcore(BitcoinMixin, Coin):
NAME = "Bitcore"
SHORTNAME = "BTX"
DESERIALIZER = lib_tx.DeserializerSegWit
GENESIS_HASH = ('604148281e5c4b7f2487e5d03cd60d8e'
'6f69411d613f6448034508cea52e9574')
TX_COUNT = 126979
TX_COUNT_HEIGHT = 126946
TX_PER_BLOCK = 2
RPC_PORT = 8556
class Obsidian(Coin):
NAME = "Obsidian"
SHORTNAME = "ODN"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488c21e")
XPRV_VERBYTES = bytes.fromhex("0488b2dd")
P2PKH_VERBYTE = bytes.fromhex("4b")
P2SH_VERBYTES = [bytes.fromhex("7d")]
WIF_BYTE = bytes.fromhex("cb")
GENESIS_HASH = ('0000006dd8a92f58e952fa61c9402b74'
'a381a69d1930fb5cc12c73273fab5f0a')
RPC_PORT = 56661
TX_COUNT = 1067887
TX_PER_BLOCK = 2
TX_COUNT_HEIGHT = 500000
DAEMON = daemon.LegacyRPCDaemon
@classmethod
def header_hash(cls, header):
from hashlib import sha512
return sha512(header).digest()[:32]
class BitcoinAtom(Coin):
NAME = "BitcoinAtom"
SHORTNAME = "BCA"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("17")
P2SH_VERBYTES = [bytes.fromhex("0a")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('000000000019d6689c085ae165831e93'
'4ff763ae46a2a6c172b3f1b60a8ce26f')
STATIC_BLOCK_HEADERS = False
DESERIALIZER = lib_tx.DeserializerBitcoinAtom
HEADER_SIZE_POST_FORK = 84
BLOCK_PROOF_OF_STAKE = 0x01
BLOCK_PROOF_OF_STAKE_FLAGS = b'\x01\x00\x00\x00'
TX_COUNT = 295158744
TX_COUNT_HEIGHT = 589197
TX_PER_BLOCK = 10
RPC_PORT = 9136
REORG_LIMIT = 5000
@classmethod
def header_hash(cls, header):
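        # Post-fork headers are HEADER_SIZE_POST_FORK (84) bytes long, with the
        # final 4 bytes holding block flags. For proof-of-stake blocks the flag
        # bytes are appended to the BASIC_HEADER_SIZE-byte base header before
        # double-SHA256 hashing; otherwise only the base header is hashed.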
header_to_be_hashed = header[:cls.BASIC_HEADER_SIZE]
if len(header) == cls.HEADER_SIZE_POST_FORK:
flags, = struct.unpack('<I', header[-4:])
if flags & cls.BLOCK_PROOF_OF_STAKE != 0:
header_to_be_hashed += cls.BLOCK_PROOF_OF_STAKE_FLAGS
return double_sha256(header_to_be_hashed)
@classmethod
def block_header(cls, block, height):
deserializer = cls.DESERIALIZER(block)
return deserializer.read_header(height, cls.BASIC_HEADER_SIZE)
class Decred(Coin):
NAME = "Decred"
SHORTNAME = "DCR"
NET = "mainnet"
XPUB_VERBYTES = bytes('dpub', 'utf-8')
XPRV_VERBYTES = bytes('dprv', 'utf-8')
P2PKH_VERBYTE = bytes('Ds', 'utf-8')
P2SH_VERBYTES = [bytes('Dc', 'utf-8')]
WIF_BYTE = bytes('Pm', 'utf-8')
GENESIS_HASH = ('298e5cc3d985bfe7f81dc135f360abe089edd4396b86d2de66b0cef42b21d980')
DESERIALIZER = lib_tx.DeserializerDecred
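    # Decred uses BLAKE-256 in place of double-SHA256, both for the Base58
    # address checksum and for hashing block headers.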
ENCODE_CHECK = partial(Base58.encode_check, hash_fn=lib_tx.DeserializerDecred.blake256)
DECODE_CHECK = partial(Base58.decode_check, hash_fn=lib_tx.DeserializerDecred.blake256)
HEADER_HASH = lib_tx.DeserializerDecred.blake256
BASIC_HEADER_SIZE = 180
ALLOW_ADVANCING_ERRORS = True
TX_COUNT = 217380620
TX_COUNT_HEIGHT = 218875
TX_PER_BLOCK = 1000
RPC_PORT = 9109
@classmethod
def header_hash(cls, header):
return cls.HEADER_HASH(header)
@classmethod
def block(cls, raw_block, height):
if height > 0:
return super().block(raw_block, height)
else:
return Block(raw_block, cls.block_header(raw_block, height), [])
class DecredTestnet(Decred):
NAME = "Decred"
NET = "testnet"
XPUB_VERBYTES = bytes('tpub', 'utf-8')
XPRV_VERBYTES = bytes('tprv', 'utf-8')
P2PKH_VERBYTE = bytes('Ts', 'utf-8')
P2SH_VERBYTES = [bytes('Tc', 'utf-8')]
WIF_BYTE = bytes('Pt', 'utf-8')
GENESIS_HASH = ('4261602a9d07d80ad47621a64ba6a07754902e496777edc4ff581946bd7bc29c')
TX_COUNT = 3176305
TX_COUNT_HEIGHT = 254198
TX_PER_BLOCK = 1000
RPC_PORT = 19109
| true | true |
1c2b194566a9e96dba834338ec915a2289eb1837 | 682 | py | Python | functions/markdown-to-html/markdown2html.py | truls/faas-profiler | d54ca0d9926f38c693f616ba4d08414aea823f51 | ["MIT"] | null | null | null | functions/markdown-to-html/markdown2html.py | truls/faas-profiler | d54ca0d9926f38c693f616ba4d08414aea823f51 | ["MIT"] | null | null | null | functions/markdown-to-html/markdown2html.py | truls/faas-profiler | d54ca0d9926f38c693f616ba4d08414aea823f51 | ["MIT"] | null | null | null |
# Copyright (c) 2019 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from markdown import markdown
import base64
import json
def main(params):
try:
md = json.loads(base64.decodebytes(params["__ow_body"].encode("utf-8")))["markdown"].encode("utf-8")
md_text = base64.decodebytes(md).decode("utf-8")
except KeyError:
return {'Error' : 'Possibly lacking markdown parameter in request.'}
test_id = params["__ow_query"].split("&")[0]
html = markdown(md_text)
return {"result": "ok", "html_response": html, "testid": test_id}
| 29.652174 | 108 | 0.690616 |
from markdown import markdown
import base64
import json
def main(params):
try:
md = json.loads(base64.decodebytes(params["__ow_body"].encode("utf-8")))["markdown"].encode("utf-8")
md_text = base64.decodebytes(md).decode("utf-8")
except KeyError:
return {'Error' : 'Possibly lacking markdown parameter in request.'}
test_id = params["__ow_query"].split("&")[0]
html = markdown(md_text)
return {"result": "ok", "html_response": html, "testid": test_id}
| true | true |
1c2b1a1f7735b02e97f5b6e0193d9fd6cf1a373c | 6,205 | py | Python | 09_Recurrent_Neural_Networks/02_Implementing_RNN_for_Spam_Prediction/02_implementing_rnn.py | dolaameng/tensorflow_cookbook | ca9bcb892239e9276e9348689e06cd6d1edd19ef | ["MIT"] | null | null | null | 09_Recurrent_Neural_Networks/02_Implementing_RNN_for_Spam_Prediction/02_implementing_rnn.py | dolaameng/tensorflow_cookbook | ca9bcb892239e9276e9348689e06cd6d1edd19ef | ["MIT"] | null | null | null | 09_Recurrent_Neural_Networks/02_Implementing_RNN_for_Spam_Prediction/02_implementing_rnn.py | dolaameng/tensorflow_cookbook | ca9bcb892239e9276e9348689e06cd6d1edd19ef | ["MIT"] | 1 | 2018-04-25T17:10:22.000Z | 2018-04-25T17:10:22.000Z |
# Implementing an RNN in TensorFlow
#----------------------------------
#
# We implement an RNN in TensorFlow to predict spam/ham from texts
#
import os
import re
import io
import requests
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from zipfile import ZipFile
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph
sess = tf.Session()
# Set RNN parameters
epochs = 20
batch_size = 250
max_sequence_length = 25
rnn_size = 10
embedding_size = 50
min_word_frequency = 10
learning_rate = 0.0005
dropout_keep_prob = tf.placeholder(tf.float32)
# Download or open data
data_dir = 'temp'
data_file = 'text_data.txt'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.isfile(os.path.join(data_dir, data_file)):
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('SMSSpamCollection')
# Format Data
text_data = file.decode()
text_data = text_data.encode('ascii',errors='ignore')
text_data = text_data.decode().split('\n')
# Save data to text file
with open(os.path.join(data_dir, data_file), 'w') as file_conn:
for text in text_data:
file_conn.write("{}\n".format(text))
else:
# Open data from text file
text_data = []
with open(os.path.join(data_dir, data_file), 'r') as file_conn:
for row in file_conn:
text_data.append(row)
text_data = text_data[:-1]
text_data = [x.split('\t') for x in text_data if len(x)>=1]
[text_data_target, text_data_train] = [list(x) for x in zip(*text_data)]
# Create a text cleaning function
def clean_text(text_string):
text_string = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string)
text_string = " ".join(text_string.split())
text_string = text_string.lower()
return(text_string)
# Clean texts
text_data_train = [clean_text(x) for x in text_data_train]
# Change texts into numeric vectors
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(max_sequence_length,
min_frequency=min_word_frequency)
text_processed = np.array(list(vocab_processor.fit_transform(text_data_train)))
# Shuffle and split data
text_processed = np.array(text_processed)
text_data_target = np.array([1 if x=='ham' else 0 for x in text_data_target])
shuffled_ix = np.random.permutation(np.arange(len(text_data_target)))
x_shuffled = text_processed[shuffled_ix]
y_shuffled = text_data_target[shuffled_ix]
# Split train/test set
ix_cutoff = int(len(y_shuffled)*0.80)
x_train, x_test = x_shuffled[:ix_cutoff], x_shuffled[ix_cutoff:]
y_train, y_test = y_shuffled[:ix_cutoff], y_shuffled[ix_cutoff:]
vocab_size = len(vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(vocab_size))
print("80-20 Train Test split: {:d} -- {:d}".format(len(y_train), len(y_test)))
# Create placeholders
x_data = tf.placeholder(tf.int32, [None, max_sequence_length])
y_output = tf.placeholder(tf.int32, [None])
# Create embedding
embedding_mat = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
embedding_output = tf.nn.embedding_lookup(embedding_mat, x_data)
#embedding_output_expanded = tf.expand_dims(embedding_output, -1)
# Define the RNN cell
cell = tf.nn.rnn_cell.BasicRNNCell(num_units = rnn_size)
output, state = tf.nn.dynamic_rnn(cell, embedding_output, dtype=tf.float32)
output = tf.nn.dropout(output, dropout_keep_prob)
# Get output of RNN sequence
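# dynamic_rnn returns outputs shaped [batch_size, time, rnn_size]; transposing
# to [time, batch_size, rnn_size] and gathering index (time - 1) keeps only the
# final time step's output for each sequence as input to the softmax layer.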
output = tf.transpose(output, [1, 0, 2])
last = tf.gather(output, int(output.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([rnn_size, 2], stddev=0.1))
bias = tf.Variable(tf.constant(0.1, shape=[2]))
logits_out = tf.nn.softmax(tf.matmul(last, weight) + bias)
# Loss function
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_out, labels=y_output)  # logits=float32, labels=int32
loss = tf.reduce_mean(losses)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits_out, 1), tf.cast(y_output, tf.int64)), tf.float32))
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train_step = optimizer.minimize(loss)
init = tf.initialize_all_variables()
sess.run(init)
train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []
# Start training
for epoch in range(epochs):
# Shuffle training data
shuffled_ix = np.random.permutation(np.arange(len(x_train)))
x_train = x_train[shuffled_ix]
y_train = y_train[shuffled_ix]
num_batches = int(len(x_train)/batch_size) + 1
    # TODO: calculate the number of generations exactly
for i in range(num_batches):
# Select train data
min_ix = i * batch_size
max_ix = np.min([len(x_train), ((i+1) * batch_size)])
x_train_batch = x_train[min_ix:max_ix]
y_train_batch = y_train[min_ix:max_ix]
# Run train step
train_dict = {x_data: x_train_batch, y_output: y_train_batch, dropout_keep_prob:0.5}
sess.run(train_step, feed_dict=train_dict)
# Run loss and accuracy for training
temp_train_loss, temp_train_acc = sess.run([loss, accuracy], feed_dict=train_dict)
train_loss.append(temp_train_loss)
train_accuracy.append(temp_train_acc)
# Run Eval Step
test_dict = {x_data: x_test, y_output: y_test, dropout_keep_prob:1.0}
temp_test_loss, temp_test_acc = sess.run([loss, accuracy], feed_dict=test_dict)
test_loss.append(temp_test_loss)
test_accuracy.append(temp_test_acc)
print('Epoch: {}, Test Loss: {:.2}, Test Acc: {:.2}'.format(epoch+1, temp_test_loss, temp_test_acc))
# Plot loss over time
epoch_seq = np.arange(1, epochs+1)
plt.plot(epoch_seq, train_loss, 'k--', label='Train Set')
plt.plot(epoch_seq, test_loss, 'r-', label='Test Set')
plt.title('Softmax Loss')
plt.xlabel('Epochs')
plt.ylabel('Softmax Loss')
plt.legend(loc='upper left')
plt.show()
# Plot accuracy over time
plt.plot(epoch_seq, train_accuracy, 'k--', label='Train Set')
plt.plot(epoch_seq, test_accuracy, 'r-', label='Test Set')
plt.title('Test Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.show()
 | 33.907104 | 111 | 0.714424 |
import os
import re
import io
import requests
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from zipfile import ZipFile
from tensorflow.python.framework import ops
ops.reset_default_graph()
sess = tf.Session()
epochs = 20
batch_size = 250
max_sequence_length = 25
rnn_size = 10
embedding_size = 50
min_word_frequency = 10
learning_rate = 0.0005
dropout_keep_prob = tf.placeholder(tf.float32)
data_dir = 'temp'
data_file = 'text_data.txt'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.isfile(os.path.join(data_dir, data_file)):
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('SMSSpamCollection')
text_data = file.decode()
text_data = text_data.encode('ascii',errors='ignore')
text_data = text_data.decode().split('\n')
with open(os.path.join(data_dir, data_file), 'w') as file_conn:
for text in text_data:
file_conn.write("{}\n".format(text))
else:
text_data = []
with open(os.path.join(data_dir, data_file), 'r') as file_conn:
for row in file_conn:
text_data.append(row)
text_data = text_data[:-1]
text_data = [x.split('\t') for x in text_data if len(x)>=1]
[text_data_target, text_data_train] = [list(x) for x in zip(*text_data)]
def clean_text(text_string):
text_string = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string)
text_string = " ".join(text_string.split())
text_string = text_string.lower()
return(text_string)
text_data_train = [clean_text(x) for x in text_data_train]
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(max_sequence_length,
min_frequency=min_word_frequency)
text_processed = np.array(list(vocab_processor.fit_transform(text_data_train)))
text_processed = np.array(text_processed)
text_data_target = np.array([1 if x=='ham' else 0 for x in text_data_target])
shuffled_ix = np.random.permutation(np.arange(len(text_data_target)))
x_shuffled = text_processed[shuffled_ix]
y_shuffled = text_data_target[shuffled_ix]
ix_cutoff = int(len(y_shuffled)*0.80)
x_train, x_test = x_shuffled[:ix_cutoff], x_shuffled[ix_cutoff:]
y_train, y_test = y_shuffled[:ix_cutoff], y_shuffled[ix_cutoff:]
vocab_size = len(vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(vocab_size))
print("80-20 Train Test split: {:d} -- {:d}".format(len(y_train), len(y_test)))
x_data = tf.placeholder(tf.int32, [None, max_sequence_length])
y_output = tf.placeholder(tf.int32, [None])
embedding_mat = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
embedding_output = tf.nn.embedding_lookup(embedding_mat, x_data)
cell = tf.nn.rnn_cell.BasicRNNCell(num_units = rnn_size)
output, state = tf.nn.dynamic_rnn(cell, embedding_output, dtype=tf.float32)
output = tf.nn.dropout(output, dropout_keep_prob)
output = tf.transpose(output, [1, 0, 2])
last = tf.gather(output, int(output.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([rnn_size, 2], stddev=0.1))
bias = tf.Variable(tf.constant(0.1, shape=[2]))
logits_out = tf.nn.softmax(tf.matmul(last, weight) + bias)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_out, labels=y_output)
loss = tf.reduce_mean(losses)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits_out, 1), tf.cast(y_output, tf.int64)), tf.float32))
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train_step = optimizer.minimize(loss)
init = tf.initialize_all_variables()
sess.run(init)
train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []
for epoch in range(epochs):
shuffled_ix = np.random.permutation(np.arange(len(x_train)))
x_train = x_train[shuffled_ix]
y_train = y_train[shuffled_ix]
num_batches = int(len(x_train)/batch_size) + 1
for i in range(num_batches):
min_ix = i * batch_size
max_ix = np.min([len(x_train), ((i+1) * batch_size)])
x_train_batch = x_train[min_ix:max_ix]
y_train_batch = y_train[min_ix:max_ix]
train_dict = {x_data: x_train_batch, y_output: y_train_batch, dropout_keep_prob:0.5}
sess.run(train_step, feed_dict=train_dict)
temp_train_loss, temp_train_acc = sess.run([loss, accuracy], feed_dict=train_dict)
train_loss.append(temp_train_loss)
train_accuracy.append(temp_train_acc)
test_dict = {x_data: x_test, y_output: y_test, dropout_keep_prob:1.0}
temp_test_loss, temp_test_acc = sess.run([loss, accuracy], feed_dict=test_dict)
test_loss.append(temp_test_loss)
test_accuracy.append(temp_test_acc)
print('Epoch: {}, Test Loss: {:.2}, Test Acc: {:.2}'.format(epoch+1, temp_test_loss, temp_test_acc))
epoch_seq = np.arange(1, epochs+1)
plt.plot(epoch_seq, train_loss, 'k--', label='Train Set')
plt.plot(epoch_seq, test_loss, 'r-', label='Test Set')
plt.title('Softmax Loss')
plt.xlabel('Epochs')
plt.ylabel('Softmax Loss')
plt.legend(loc='upper left')
plt.show()
plt.plot(epoch_seq, train_accuracy, 'k--', label='Train Set')
plt.plot(epoch_seq, test_accuracy, 'r-', label='Test Set')
plt.title('Test Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.show()
 | true | true |
1c2b1a59f09f774fe08b5b66ca33edcbb2313f3a | 925 | py | Python | carbon/client/metrics/timer.py | mosquito/carbonate | 5eca69602b9fc03dc0b982f9104c7ebb04159059 | ["MIT"] | 2 | 2017-12-21T15:40:12.000Z | 2018-02-07T10:00:14.000Z | carbon/client/metrics/timer.py | mosquito/carbonate | 5eca69602b9fc03dc0b982f9104c7ebb04159059 | ["MIT"] | 2 | 2016-12-02T08:53:48.000Z | 2016-12-05T21:46:04.000Z | carbon/client/metrics/timer.py | mosquito/carbonate | 5eca69602b9fc03dc0b982f9104c7ebb04159059 | ["MIT"] | 5 | 2015-07-22T14:31:28.000Z | 2020-09-30T08:20:29.000Z |
# encoding: utf-8
from time import time
from threading import RLock
from carbon.client.metrics.base import MeasurerBase, Metric
class StopWatch(object):
__slots__ = '_lock', '_current'
def __init__(self):
self._lock = RLock()
self._current = None
def start(self):
with self._lock:
self._current = time()
def stop(self):
assert self._current, "StopWatch not running"
with self._lock:
return time() - self._current
class Timer(MeasurerBase):
__slots__ = '_current',
def __init__(self, cleanup=None):
MeasurerBase.__init__(self, cleanup)
self._current = None
@classmethod
def start(cls):
watch = StopWatch()
watch.start()
return watch
def stop(self, stop_watch):
assert isinstance(stop_watch, StopWatch)
self.add(Metric(name=self.name, value=stop_watch.stop()))
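# Illustrative usage sketch (assumes the surrounding metrics registry wires up
# the measurer's name and add() via MeasurerBase; not part of this module):
#
#     timer = Timer()
#     watch = Timer.start()
#     ...                     # timed section of code
#     timer.stop(watch)       # records the elapsed seconds as a Metric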
| 23.125 | 65 | 0.632432 |
from time import time
from threading import RLock
from carbon.client.metrics.base import MeasurerBase, Metric
class StopWatch(object):
__slots__ = '_lock', '_current'
def __init__(self):
self._lock = RLock()
self._current = None
def start(self):
with self._lock:
self._current = time()
def stop(self):
assert self._current, "StopWatch not running"
with self._lock:
return time() - self._current
class Timer(MeasurerBase):
__slots__ = '_current',
def __init__(self, cleanup=None):
MeasurerBase.__init__(self, cleanup)
self._current = None
@classmethod
def start(cls):
watch = StopWatch()
watch.start()
return watch
def stop(self, stop_watch):
assert isinstance(stop_watch, StopWatch)
self.add(Metric(name=self.name, value=stop_watch.stop()))
| true | true |
1c2b1a6f4bbfe3d44da0ea4bdb00ceaaf3fb1cd7 | 181 | py | Python | test_data/parse/unexpected/class_definitions/unexpected_double_description_for_a_property/meta_model.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | 5 | 2021-12-29T12:55:34.000Z | 2022-03-01T17:57:21.000Z | test_data/parse/unexpected/class_definitions/unexpected_double_description_for_a_property/meta_model.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | 10 | 2021-12-29T02:15:55.000Z | 2022-03-09T11:04:22.000Z | test_data/parse/unexpected/class_definitions/unexpected_double_description_for_a_property/meta_model.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | 2 | 2021-12-29T01:42:12.000Z | 2022-02-15T13:46:33.000Z | class Something:
"""Represent something."""
"""unexpected description"""
some_property: int
"""some property"""
__book_url__ = "dummy"
__book_version__ = "dummy"
| 16.454545 | 32 | 0.657459 | class Something:
some_property: int
__book_url__ = "dummy"
__book_version__ = "dummy"
| true | true |
1c2b1df7891e280c81f4a7da992ea6b335bd7a32 | 3,501 | py | Python | scripts/automation/trex_control_plane/interactive/trex/console/plugins/plugin_bird.py | MassimoGirondi/trex-core | 404f2ce95db249bbf11c959a530f33bb5d10f94c | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/interactive/trex/console/plugins/plugin_bird.py | MassimoGirondi/trex-core | 404f2ce95db249bbf11c959a530f33bb5d10f94c | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/interactive/trex/console/plugins/plugin_bird.py | MassimoGirondi/trex-core | 404f2ce95db249bbf11c959a530f33bb5d10f94c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
from trex.console.plugins import *
from trex.stl.api import *
from trex.pybird.bird_cfg_creator import *
from trex.pybird.bird_zmq_client import *
'''
Bird plugin
'''
class Bird_Plugin(ConsolePlugin):
def plugin_description(self):
return 'Bird plugin for simple communication with PyBirdserver'
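    # Note: arguments registered in plugin_load() below are matched by name to
    # the parameters of the do_* command methods when a console command runs
    # (e.g. -p/--port fills the "port" parameter of do_add_bird_node).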
def plugin_load(self):
self.add_argument("-p", "--port", type = int,
dest = 'port',
required = True,
help = 'port to use')
self.add_argument("-m", "--mac", type = str,
dest = 'mac',
required = True,
help = 'mac address to use')
self.add_argument("--ipv4", type = str,
dest = 'ipv4',
help = 'src ip to use')
self.add_argument("--ipv4-subnet", type = int,
dest = 'ipv4_subnet',
help = 'ipv4 subnet to use')
self.add_argument("--ipv6-enable", action = "store_true",
dest = 'ipv6_enabled',
default = False,
help = 'ipv6 enable, default False')
self.add_argument("--ipv6-subnet", type = int,
dest = 'ipv6_subnet',
default = 127,
help = 'ipv6 subnet ip to use, default 127')
self.add_argument("--vlans", type = list,
dest = 'vlans',
help = 'vlans for bird node')
self.add_argument("--tpids", type = list,
dest = 'tpids',
help = 'tpids for bird node')
self.c = STLClient()
self.pybird = PyBirdClient()
self.pybird.connect()
self.pybird.acquire()
def plugin_unload(self):
try:
            self.pybird.release()
self.pybird.disconnect()
except Exception as e:
print('Error while unloading bird plugin: \n' + str(e))
def do_add_bird_node(self, port, mac, ipv4, ipv4_subnet, ipv6_enabled, ipv6_subnet, vlans, tpids):
''' Simple adding bird node with arguments. '''
self.c.connect()
self.c.acquire(force = True)
self.c.set_bird_node(node_port = port,
mac = mac,
ipv4 = ipv4,
ipv4_subnet = ipv4_subnet,
ipv6_enabled = ipv6_enabled,
ipv6_subnet = ipv6_subnet,
vlans = vlans,
tpids = tpids)
def do_add_rip(self):
''' Adding rip protocol to bird configuration file. '''
curr_conf = self.pybird.get_config()
cfg_creator = BirdCFGCreator(curr_conf)
cfg_creator.add_simple_rip()
self.pybird.set_config(cfg_creator.build_config())
def do_add_bgp(self):
''' Adding bgp protocol to bird configuration file. '''
curr_conf = self.pybird.get_config()
cfg_creator = BirdCFGCreator(curr_conf)
cfg_creator.add_simple_bgp()
self.pybird.set_config(cfg_creator.build_config())
def do_add_ospf(self):
''' Adding ospf protocol to bird configuration file. '''
curr_conf = self.pybird.get_config()
cfg_creator = BirdCFGCreator(curr_conf)
cfg_creator.add_simple_ospf()
self.pybird.set_config(cfg_creator.build_config())
| 37.645161 | 102 | 0.52585 |
from trex.console.plugins import *
from trex.stl.api import *
from trex.pybird.bird_cfg_creator import *
from trex.pybird.bird_zmq_client import *
class Bird_Plugin(ConsolePlugin):
def plugin_description(self):
return 'Bird plugin for simple communication with PyBirdserver'
def plugin_load(self):
self.add_argument("-p", "--port", type = int,
dest = 'port',
required = True,
help = 'port to use')
self.add_argument("-m", "--mac", type = str,
dest = 'mac',
required = True,
help = 'mac address to use')
self.add_argument("--ipv4", type = str,
dest = 'ipv4',
help = 'src ip to use')
self.add_argument("--ipv4-subnet", type = int,
dest = 'ipv4_subnet',
help = 'ipv4 subnet to use')
self.add_argument("--ipv6-enable", action = "store_true",
dest = 'ipv6_enabled',
default = False,
help = 'ipv6 enable, default False')
self.add_argument("--ipv6-subnet", type = int,
dest = 'ipv6_subnet',
default = 127,
help = 'ipv6 subnet ip to use, default 127')
self.add_argument("--vlans", type = list,
dest = 'vlans',
help = 'vlans for bird node')
self.add_argument("--tpids", type = list,
dest = 'tpids',
help = 'tpids for bird node')
self.c = STLClient()
self.pybird = PyBirdClient()
self.pybird.connect()
self.pybird.acquire()
def plugin_unload(self):
try:
            self.pybird.release()
self.pybird.disconnect()
except Exception as e:
print('Error while unloading bird plugin: \n' + str(e))
def do_add_bird_node(self, port, mac, ipv4, ipv4_subnet, ipv6_enabled, ipv6_subnet, vlans, tpids):
self.c.connect()
self.c.acquire(force = True)
self.c.set_bird_node(node_port = port,
mac = mac,
ipv4 = ipv4,
ipv4_subnet = ipv4_subnet,
ipv6_enabled = ipv6_enabled,
ipv6_subnet = ipv6_subnet,
vlans = vlans,
tpids = tpids)
def do_add_rip(self):
curr_conf = self.pybird.get_config()
cfg_creator = BirdCFGCreator(curr_conf)
cfg_creator.add_simple_rip()
self.pybird.set_config(cfg_creator.build_config())
def do_add_bgp(self):
curr_conf = self.pybird.get_config()
cfg_creator = BirdCFGCreator(curr_conf)
cfg_creator.add_simple_bgp()
self.pybird.set_config(cfg_creator.build_config())
def do_add_ospf(self):
curr_conf = self.pybird.get_config()
cfg_creator = BirdCFGCreator(curr_conf)
cfg_creator.add_simple_ospf()
self.pybird.set_config(cfg_creator.build_config())
| true | true |
1c2b1e7e48eede273a5b9885d4ebbc36fdb68f2b | 1,004 | py | Python | launch/link-intersection-brute-force.py | balazs-bamer/link-intersection-brute-force | 1098d5555ebaa9c23c326f75c493b855199ff6bf | ["MIT"] | 1 | 2021-04-27T10:40:50.000Z | 2021-04-27T10:40:50.000Z | launch/link-intersection-brute-force.py | balazs-bamer/link-intersection-brute-force | 1098d5555ebaa9c23c326f75c493b855199ff6bf | ["MIT"] | 1 | 2021-04-27T16:05:38.000Z | 2021-04-28T11:52:39.000Z | launch/link-intersection-brute-force.py | balazs-bamer/link-intersection-brute-force | 1098d5555ebaa9c23c326f75c493b855199ff6bf | ["MIT"] | null | null | null |
import os
from launch import LaunchDescription
from ament_index_python.packages import get_package_share_directory
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
packageName = 'link-intersection-brute-force'
nodeName = 'linkIntersectionBruteForce'
urdfFilename = 'px150_coll.urdf'
urdf = os.path.join(
get_package_share_directory(packageName),
urdfFilename)
forbiddenLinksFilename = 'forbidden-links.txt'
forbiddenLinks = os.path.join(
get_package_share_directory(packageName),
forbiddenLinksFilename)
return LaunchDescription([
Node(
package = packageName,
namespace = 'cudaTrajectoryPlanner',
executable = nodeName,
name = nodeName,
arguments = [urdf, forbiddenLinks, '/home/balazs/munka/cuda-trajectory-planner/ros-workspace/src/link-intersection-brute-force/']
)
])
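# This launch description is typically started with (assuming the launch file
# is installed with the package):
#     ros2 launch link-intersection-brute-force link-intersection-brute-force.py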
 | 34.62069 | 135 | 0.77988 |
import os
from launch import LaunchDescription
from ament_index_python.packages import get_package_share_directory
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
packageName = 'link-intersection-brute-force'
nodeName = 'linkIntersectionBruteForce'
urdfFilename = 'px150_coll.urdf'
urdf = os.path.join(
get_package_share_directory(packageName),
urdfFilename)
forbiddenLinksFilename = 'forbidden-links.txt'
forbiddenLinks = os.path.join(
get_package_share_directory(packageName),
forbiddenLinksFilename)
return LaunchDescription([
Node(
package = packageName,
namespace = 'cudaTrajectoryPlanner',
executable = nodeName,
name = nodeName,
arguments = [urdf, forbiddenLinks, '/home/balazs/munka/cuda-trajectory-planner/ros-workspace/src/link-intersection-brute-force/']
)
])
| true | true |
1c2b1ea4f2fc78cf05b77d8b3ecf42626a31e9a5 | 2,161 | py | Python | monarch/s3.py | LaurEars/monarch | 0554df50edab6ccb67480038b8db72197d36783a | ["MIT"] | null | null | null | monarch/s3.py | LaurEars/monarch | 0554df50edab6ccb67480038b8db72197d36783a | ["MIT"] | null | null | null | monarch/s3.py | LaurEars/monarch | 0554df50edab6ccb67480038b8db72197d36783a | ["MIT"] | null | null | null |
import os
from datetime import datetime
# 3rd Party Imports
import boto
from boto.s3.key import Key
from click import echo
from .utils import temp_directory, zipdir
from .local import local_restore
from .mongo import dump_db
def get_s3_bucket(s3_settings):
conn = boto.connect_s3(s3_settings['aws_access_key_id'], s3_settings['aws_secret_access_key'])
bucket = conn.get_bucket(s3_settings['bucket_name'])
return bucket
def generate_uniqueish_key(s3_settings, environment, name_prefix):
bucket = get_s3_bucket(s3_settings)
if name_prefix and name_prefix != '':
name_base = name_prefix
else:
name_base = environment['db_name']
name_attempt = "{}__{}.dmp.zip".format(name_base, datetime.utcnow().strftime("%Y_%m_%d"))
key = bucket.get_key(name_attempt)
if not key:
key = Key(bucket)
key.key = name_attempt
return key
else:
counter = 1
while True:
counter += 1
name_attempt = "{}__{}_{}.dmp.zip".format(name_base,
datetime.utcnow().strftime("%Y_%m_%d"), counter)
if bucket.get_key(name_attempt):
continue
else:
key = Key(bucket)
key.key = name_attempt
return key
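# Generated key names look like "<name>__YYYY_MM_DD.dmp.zip"; if that key already
# exists in the bucket, a numeric suffix is appended starting at 2, e.g.
# "<name>__YYYY_MM_DD_2.dmp.zip".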
def backup_to_s3(environment, s3_settings, name, query_set_class):
dump_path = dump_db(environment, QuerySet=query_set_class)
zipf = zipdir(dump_path)
key = generate_uniqueish_key(s3_settings, environment, name)
bytes_written = key.set_contents_from_filename(zipf.filename)
    # Report how many bytes were written to S3.
echo("Wrote {} btyes to s3".format(bytes_written))
def s3_restore(key, to_enviornment):
with temp_directory() as temp_dir:
zip_path = os.path.join(temp_dir, 'MongoDump.zip')
key.get_contents_to_filename(zip_path)
local_restore(zip_path, to_enviornment)
def s3_backups(s3_config):
""" a dict of key.name: key
"""
bucket = get_s3_bucket(s3_config)
buckets = {}
for key in bucket.get_all_keys():
buckets[key.name] = key
return buckets
 | 26.353659 | 98 | 0.654327 |
import os
from datetime import datetime
import boto
from boto.s3.key import Key
from click import echo
from .utils import temp_directory, zipdir
from .local import local_restore
from .mongo import dump_db
def get_s3_bucket(s3_settings):
conn = boto.connect_s3(s3_settings['aws_access_key_id'], s3_settings['aws_secret_access_key'])
bucket = conn.get_bucket(s3_settings['bucket_name'])
return bucket
def generate_uniqueish_key(s3_settings, environment, name_prefix):
bucket = get_s3_bucket(s3_settings)
if name_prefix and name_prefix != '':
name_base = name_prefix
else:
name_base = environment['db_name']
name_attempt = "{}__{}.dmp.zip".format(name_base, datetime.utcnow().strftime("%Y_%m_%d"))
key = bucket.get_key(name_attempt)
if not key:
key = Key(bucket)
key.key = name_attempt
return key
else:
counter = 1
while True:
counter += 1
name_attempt = "{}__{}_{}.dmp.zip".format(name_base,
datetime.utcnow().strftime("%Y_%m_%d"), counter)
if bucket.get_key(name_attempt):
continue
else:
key = Key(bucket)
key.key = name_attempt
return key
def backup_to_s3(environment, s3_settings, name, query_set_class):
dump_path = dump_db(environment, QuerySet=query_set_class)
zipf = zipdir(dump_path)
key = generate_uniqueish_key(s3_settings, environment, name)
bytes_written = key.set_contents_from_filename(zipf.filename)
echo("Wrote {} btyes to s3".format(bytes_written))
def s3_restore(key, to_enviornment):
with temp_directory() as temp_dir:
zip_path = os.path.join(temp_dir, 'MongoDump.zip')
key.get_contents_to_filename(zip_path)
local_restore(zip_path, to_enviornment)
def s3_backups(s3_config):
bucket = get_s3_bucket(s3_config)
buckets = {}
for key in bucket.get_all_keys():
buckets[key.name] = key
return buckets
| true | true |
1c2b1f78a9f0d57e00fbd23e8fd849fef5b60c1f | 3,062 | py | Python | gqp_mc/fm.py | changhoonhahn/GQP_mock_challenge | 831d5423edd9955ee1bda8d41e44d30cd3c6bd4b | ["MIT"] | 3 | 2019-12-18T20:51:45.000Z | 2021-12-11T05:59:24.000Z | gqp_mc/fm.py | changhoonhahn/GQP_mock_challenge | 831d5423edd9955ee1bda8d41e44d30cd3c6bd4b | ["MIT"] | 44 | 2020-02-20T06:02:00.000Z | 2021-04-13T20:00:50.000Z | gqp_mc/fm.py | changhoonhahn/GQP_mock_challenge | 831d5423edd9955ee1bda8d41e44d30cd3c6bd4b | ["MIT"] | 7 | 2019-10-04T22:25:44.000Z | 2020-07-20T02:05:03.000Z |
'''
submodule for forward modeling spectrophotometry
'''
import os
import numpy as np
from speclite import filters as specFilter
def Photo_DESI(wave, spectra, bands=['g', 'r', 'z']):
''' generate photometry by convolving the input spectrum with DECAM and WISE
bandpasses: g, r, z, W1, W2, W3, W4 filters.
:param wave:
wavelength of input spectra in Angstroms. 2D array Nspec x Nwave.
    :param spectra:
        fluxes of the input spectra. This should be noiseless source spectra.
        2D array Nspec x Nwave, in units of 1e-17 erg/s/cm2/A.
'''
wave = np.atleast_2d(wave)
assert wave.shape[1] == spectra.shape[1]
n_spec = spectra.shape[0] # number of spectra
if wave.shape[0] == 1: wave = np.tile(wave, (n_spec, 1))
from astropy import units as U
filter_dict = {'g': 'decam2014-g', 'r': 'decam2014-r', 'z': 'decam2014-z',
'w1': 'wise2010-W1', 'w2': 'wise2010-W2', 'w3': 'wise2010-W3',
'w4': 'wise2010-W4'}
# load DECAM g, r, z and WISE W1-4
filter_response = specFilter.load_filters(
*tuple([filter_dict[b] for b in bands]))
# apply filters
fluxes = np.zeros((n_spec, len(bands))) # photometric flux in nanomaggies
for i in range(n_spec):
spectrum = spectra[i]
# apply filters
flux = np.array(filter_response.get_ab_maggies(
np.atleast_2d(spectrum) * 1e-17 * U.erg/U.s/U.cm**2/U.Angstrom,
wave[i,:]*U.Angstrom))
# convert to nanomaggies
fluxes[i,:] = 1e9 * np.array([flux[0][i] for i in range(len(bands))])
# calculate magnitudes (not advised due to NaNs)
mags = 22.5 - 2.5 * np.log10(fluxes)
return fluxes, mags
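# Minimal illustrative call (synthetic flat spectrum, not from the original
# module); the wavelength grid must span the DECam g/r/z bandpasses:
#
#     wave = np.linspace(3000., 11000., 2000)
#     spectra = np.ones((1, wave.size))   # in 1e-17 erg/s/cm2/A, as assumed above
#     fluxes, mags = Photo_DESI(wave, spectra, bands=['g', 'r', 'z'])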
def Spec_BGS(wave, flux, exptime, airmass, Isky, filename=None):
''' Given noiseless spectra, simulate noisy BGS spectra with Isky
sky brightness, exptime sec exposure time, and airmass. Wrapper for
FM.fakeDESIspec().simExposure
:param wave:
wavelength of spectra. Nwave
:param flux:
noiseless spectra in units of 1e-17 erg/s/cm2/A. Nspec x Nwave
:param exptime:
exposure time
:param airmass:
airmass
:param Isky:
[wave_sky, sky_brightness]. sky brightness is in units of
1e-17 erg / Ang / arcsec^2 / cm^2 / sec
:param filename:
If specified, the output fits file. (default: None)
:return bgs_spec:
data structure with all BGS data from the DESI spectrographs:
bgs.wave['b'], bgs.wave['r'], bgs.wave['z']
bgs.flux['b'], bgs.flux['r'], bgs.flux['z']
bgs.ivar['b'], bgs.ivar['r'], bgs.ivar['z']
'''
# requires desiutil, desimodel, desisim, desispec, desitarget,
    # also requires numba, fitsio, healpy, pandas, astroplan, among others
from feasibgs import forwardmodel as FM
fdesi = FM.fakeDESIspec()
bgs_spec = fdesi.simExposure(wave, flux, exptime=exptime, airmass=airmass, Isky=Isky, filename=filename)
return bgs_spec
 | 34.795455 | 109 | 0.621489 |
import os
import numpy as np
from speclite import filters as specFilter
def Photo_DESI(wave, spectra, bands=['g', 'r', 'z']):
wave = np.atleast_2d(wave)
assert wave.shape[1] == spectra.shape[1]
n_spec = spectra.shape[0]
if wave.shape[0] == 1: wave = np.tile(wave, (n_spec, 1))
from astropy import units as U
filter_dict = {'g': 'decam2014-g', 'r': 'decam2014-r', 'z': 'decam2014-z',
'w1': 'wise2010-W1', 'w2': 'wise2010-W2', 'w3': 'wise2010-W3',
'w4': 'wise2010-W4'}
filter_response = specFilter.load_filters(
*tuple([filter_dict[b] for b in bands]))
fluxes = np.zeros((n_spec, len(bands)))
for i in range(n_spec):
spectrum = spectra[i]
flux = np.array(filter_response.get_ab_maggies(
np.atleast_2d(spectrum) * 1e-17 * U.erg/U.s/U.cm**2/U.Angstrom,
wave[i,:]*U.Angstrom))
fluxes[i,:] = 1e9 * np.array([flux[0][i] for i in range(len(bands))])
mags = 22.5 - 2.5 * np.log10(fluxes)
return fluxes, mags
def Spec_BGS(wave, flux, exptime, airmass, Isky, filename=None):
from feasibgs import forwardmodel as FM
fdesi = FM.fakeDESIspec()
bgs_spec = fdesi.simExposure(wave, flux, exptime=exptime, airmass=airmass, Isky=Isky, filename=filename)
return bgs_spec
| true | true |
1c2b1fbfb14ddd021cc54211238b66fa242fea79 | 14,369 | py | Python | cfgov/v1/migrations/0219_move_directors_notebook.py | flacoman91/consumerfinance.gov | 64e3d68d1c023ae944baf66a99e54236e5976097 | ["CC0-1.0"] | 37 | 2020-08-18T19:52:39.000Z | 2022-03-23T08:08:41.000Z | cfgov/v1/migrations/0219_move_directors_notebook.py | flacoman91/consumerfinance.gov | 64e3d68d1c023ae944baf66a99e54236e5976097 | ["CC0-1.0"] | 338 | 2020-08-14T20:46:36.000Z | 2022-03-31T20:49:32.000Z | cfgov/v1/migrations/0219_move_directors_notebook.py | raft-tech/cfgov-refresh | 7c63c31fd6bb95ed4f7d368f1e1252175f0c71ca | ["CC0-1.0"] | 14 | 2020-10-21T15:27:03.000Z | 2022-03-17T03:16:36.000Z |
# Generated by Django 2.2.12 on 2020-06-02 16:41
from django.db import migrations, models
import v1.atomic_elements.molecules
import v1.blocks
import v1.models.snippets
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
import wagtail.snippets.blocks
class Migration(migrations.Migration):
dependencies = [
('v1', '0218_add_force_breadcrumbs'),
]
operations = [
migrations.AlterField(
model_name='cfgovpage',
name='sidefoot',
field=wagtail.core.fields.StreamField([('call_to_action', wagtail.core.blocks.StructBlock([('slug_text', wagtail.core.blocks.CharBlock(required=False)), ('paragraph_text', wagtail.core.blocks.RichTextBlock(required=False)), ('button', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False)), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('regular', 'Regular'), ('large', 'Large Primary')]))]))])), ('related_links', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))])), ('related_posts', wagtail.core.blocks.StructBlock([('limit', wagtail.core.blocks.CharBlock(default='3', help_text='This limit applies to EACH TYPE of post this module retrieves, not the total number of retrieved posts.')), ('show_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='This toggles the heading and icon for the related types.', label='Show Heading and Icon?', required=False)), ('header_title', wagtail.core.blocks.CharBlock(default='Further reading', label='Slug Title')), ('relate_posts', wagtail.core.blocks.BooleanBlock(default=True, editable=False, label='Blog Posts', required=False)), ('relate_newsroom', wagtail.core.blocks.BooleanBlock(default=True, editable=False, label='Newsroom', required=False)), ('relate_events', wagtail.core.blocks.BooleanBlock(default=True, label='Events', required=False)), ('specific_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.ChoiceBlock(choices=[('Blog', (('At the CFPB', 'At the CFPB'), ("Director's notebook", "Director's notebook"), ('Policy & Compliance', 'Policy and compliance'), ('Data, Research & Reports', 'Data, research, and reports'), ('Info for Consumers', 'Info for consumers'))), ('Newsroom', (('Op-Ed', 'Op-ed'), ('Press Release', 'Press release'), ('Speech', 'Speech'), ('Testimony', 'Testimony')))], required=False), required=False)), ('and_filtering', wagtail.core.blocks.BooleanBlock(default=False, help_text='If checked, related posts will only be pulled in if they match ALL topic tags set on this page. Otherwise, related posts can match any one topic tag.', label='Match all topic tags', required=False)), ('alternate_view_more_url', wagtail.core.blocks.CharBlock(help_text='By default, the "View more" link will go to the Activity Log, filtered based on the above parameters. 
Enter a URL in this field to override that link destination.', label='Alternate "View more" URL', required=False))])), ('related_metadata', wagtail.core.blocks.StructBlock([('slug', wagtail.core.blocks.CharBlock(max_length=100)), ('content', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('blob', wagtail.core.blocks.RichTextBlock())], icon='pilcrow')), ('list', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))], icon='list-ul')), ('date', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('date', wagtail.core.blocks.DateBlock())], icon='date')), ('topics', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Topics', max_length=100)), ('show_topics', wagtail.core.blocks.BooleanBlock(default=True, required=False))], icon='tag')), ('categories', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Categories', max_length=100)), ('show_categories', wagtail.core.blocks.BooleanBlock(default=True, required=False))], icon='list-ul'))])), ('is_half_width', wagtail.core.blocks.BooleanBlock(default=False, required=False))])), ('email_signup', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Stay informed', required=False)), ('default_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='If selected, heading will be styled as an H5 with green top rule. Deselect to style header as H3.', label='Default heading style', required=False)), ('text', wagtail.core.blocks.CharBlock(help_text='Write a sentence or two about what kinds of emails the user is signing up for, how frequently they will be sent, etc.', required=False)), ('gd_code', wagtail.core.blocks.CharBlock(help_text='Code for the topic (i.e., mailing list) you want people who submit this form to subscribe to. Format: USCFPB_###', label='GovDelivery code', required=False)), ('disclaimer_page', wagtail.core.blocks.PageChooserBlock(help_text='Choose the page that the "See Privacy Act statement" link should go to. If in doubt, use "Generic Email Sign-Up Privacy Act Statement".', label='Privacy Act statement', required=False))])), ('sidebar_contact', wagtail.core.blocks.StructBlock([('contact', wagtail.snippets.blocks.SnippetChooserBlock('v1.Contact')), ('has_top_rule_line', wagtail.core.blocks.BooleanBlock(default=False, help_text='Add a horizontal rule line to top of contact block.', required=False))])), ('rss_feed', v1.atomic_elements.molecules.RSSFeed()), ('social_media', wagtail.core.blocks.StructBlock([('is_share_view', wagtail.core.blocks.BooleanBlock(default=True, help_text='If unchecked, social media icons will link users to official CFPB accounts. Do not fill in any further fields.', label='Desired action: share this page', required=False)), ('blurb', wagtail.core.blocks.CharBlock(default="Look what I found on the CFPB's site!", help_text='Sets the tweet text, email subject line, and LinkedIn post text.', required=False)), ('twitter_text', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom text for Twitter shares. 
If blank, will default to value of blurb field above.', max_length=100, required=False)), ('twitter_related', wagtail.core.blocks.CharBlock(help_text='(Optional) A comma-separated list of accounts related to the content of the shared URL. Do not enter the @ symbol. If blank, it will default to just "cfpb".', required=False)), ('twitter_hashtags', wagtail.core.blocks.CharBlock(help_text='(Optional) A comma-separated list of hashtags to be appended to default tweet text.', required=False)), ('twitter_lang', wagtail.core.blocks.CharBlock(help_text='(Optional) Loads text components in the specified language, if other than English. E.g., use "es" for Spanish. See https://dev.twitter.com/web/overview/languages for a list of supported language codes.', required=False)), ('email_title', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom subject for email shares. If blank, will default to value of blurb field above.', required=False)), ('email_text', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom text for email shares. If blank, will default to "Check out this page from the CFPB".', required=False)), ('email_signature', wagtail.core.blocks.CharBlock(help_text='(Optional) Adds a custom signature line to email shares. ', required=False)), ('linkedin_title', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom title for LinkedIn shares. If blank, will default to value of blurb field above.', required=False)), ('linkedin_text', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom text for LinkedIn shares.', required=False))])), ('reusable_text', v1.blocks.ReusableTextChooserBlock(v1.models.snippets.ReusableText))], blank=True),
),
migrations.AlterField(
model_name='cfgovpagecategory',
name='name',
field=models.CharField(choices=[('Administrative adjudication docket', (('administrative-adjudication', 'Administrative adjudication'), ('stipulation-and-constent-order', 'Stipulation and consent order'))), ('Amicus Brief', (('us-supreme-court', 'U.S. Supreme Court'), ('fed-circuit-court', 'Federal Circuit Court'), ('fed-district-court', 'Federal District Court'), ('state-court', 'State Court'))), ('Blog', (('at-the-cfpb', 'At the CFPB'), ('directors-notebook', "Director's notebook"), ('policy_compliance', 'Policy and compliance'), ('data-research-reports', 'Data, research, and reports'), ('info-for-consumers', 'Info for consumers'))), ('Consumer Reporting Companies', (('nationwide', 'Nationwide'), ('employment-screening', 'Employment screening'), ('tenant-screening', 'Tenant screening'), ('check-bank-screening', 'Check and bank screening'), ('personal-property-insurance', 'Personal property insurance'), ('medical', 'Medical'), ('low-income-and-subprime', 'Low-income and subprime'), ('supplementary-reports', 'Supplementary reports'), ('utilities', 'Utilities'), ('retail', 'Retail'), ('gaming', 'Gaming'))), ('Enforcement Action', (('civil-action', 'Civil Action'), ('administrative-proceeding', 'Administrative Proceeding'))), ('Final rule', (('interim-final-rule', 'Interim final rule'), ('final-rule', 'Final rule'))), ('FOIA Frequently Requested Record', (('report', 'Report'), ('log', 'Log'), ('record', 'Record'))), ('Implementation Resource', (('compliance-aid', 'Compliance aid'), ('official-guidance', 'Official guidance'))), ('Newsroom', (('op-ed', 'Op-ed'), ('press-release', 'Press release'), ('speech', 'Speech'), ('testimony', 'Testimony'))), ('Notice and Opportunity for Comment', (('notice-proposed-rule', 'Advance notice of proposed rulemaking'), ('proposed-rule', 'Proposed rule'), ('interim-final-rule-2', 'Interim final rule'), ('request-comment-info', 'Request for comment or information'), ('proposed-policy', 'Proposed policy'), ('intent-preempt-determ', 'Intent to make preemption determination'), ('info-collect-activity', 'Information collection activities'), ('notice-privacy-act', 'Notice related to Privacy Act'))), ('Research Report', (('consumer-complaint', 'Consumer complaint'), ('super-highlight', 'Supervisory Highlights'), ('data-point', 'Data point'), ('industry-markets', 'Industry and markets'), ('consumer-edu-empower', 'Consumer education and empowerment'), ('to-congress', 'To Congress'))), ('Rule Under Development', (('notice-proposed-rule-2', 'Advance notice of proposed rulemaking'), ('proposed-rule-2', 'Proposed rule'))), ('Story', (('auto-loans', 'Auto loans'), ('bank-accts-services', 'Bank accounts and services'), ('credit-cards', 'Credit cards'), ('credit-reports-scores', 'Credit reports and scores'), ('debt-collection', 'Debt collection'), ('money-transfers', 'Money transfers'), ('mortgages', 'Mortgages'), ('payday-loans', 'Payday loans'), ('prepaid-cards', 'Prepaid cards'), ('student-loans', 'Student loans')))], max_length=255),
),
migrations.AlterField(
model_name='sublandingpage',
name='sidebar_breakout',
field=wagtail.core.fields.StreamField([('slug', wagtail.core.blocks.CharBlock(icon='title')), ('heading', wagtail.core.blocks.CharBlock(icon='title')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='edit')), ('breakout_image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('is_round', wagtail.core.blocks.BooleanBlock(default=True, label='Round?', required=False)), ('icon', wagtail.core.blocks.CharBlock(help_text='Enter icon class name.')), ('heading', wagtail.core.blocks.CharBlock(label='Introduction Heading', required=False)), ('body', wagtail.core.blocks.TextBlock(label='Introduction Body', required=False))], heading='Breakout Image', icon='image')), ('related_posts', wagtail.core.blocks.StructBlock([('limit', wagtail.core.blocks.CharBlock(default='3', help_text='This limit applies to EACH TYPE of post this module retrieves, not the total number of retrieved posts.')), ('show_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='This toggles the heading and icon for the related types.', label='Show Heading and Icon?', required=False)), ('header_title', wagtail.core.blocks.CharBlock(default='Further reading', label='Slug Title')), ('relate_posts', wagtail.core.blocks.BooleanBlock(default=True, editable=False, label='Blog Posts', required=False)), ('relate_newsroom', wagtail.core.blocks.BooleanBlock(default=True, editable=False, label='Newsroom', required=False)), ('relate_events', wagtail.core.blocks.BooleanBlock(default=True, label='Events', required=False)), ('specific_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.ChoiceBlock(choices=[('Blog', (('At the CFPB', 'At the CFPB'), ("Director's notebook", "Director's notebook"), ('Policy & Compliance', 'Policy and compliance'), ('Data, Research & Reports', 'Data, research, and reports'), ('Info for Consumers', 'Info for consumers'))), ('Newsroom', (('Op-Ed', 'Op-ed'), ('Press Release', 'Press release'), ('Speech', 'Speech'), ('Testimony', 'Testimony')))], required=False), required=False)), ('and_filtering', wagtail.core.blocks.BooleanBlock(default=False, help_text='If checked, related posts will only be pulled in if they match ALL topic tags set on this page. Otherwise, related posts can match any one topic tag.', label='Match all topic tags', required=False)), ('alternate_view_more_url', wagtail.core.blocks.CharBlock(help_text='By default, the "View more" link will go to the Activity Log, filtered based on the above parameters. Enter a URL in this field to override that link destination.', label='Alternate "View more" URL', required=False))])), ('job_listing_list', wagtail.core.blocks.StructBlock([('more_jobs_page', wagtail.core.blocks.PageChooserBlock(help_text='Link to full list of jobs'))]))], blank=True),
),
]
| 399.138889 | 7,782 | 0.737351 |
from django.db import migrations, models
import v1.atomic_elements.molecules
import v1.blocks
import v1.models.snippets
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
import wagtail.snippets.blocks
class Migration(migrations.Migration):
dependencies = [
('v1', '0218_add_force_breadcrumbs'),
]
operations = [
migrations.AlterField(
model_name='cfgovpage',
name='sidefoot',
field=wagtail.core.fields.StreamField([('call_to_action', wagtail.core.blocks.StructBlock([('slug_text', wagtail.core.blocks.CharBlock(required=False)), ('paragraph_text', wagtail.core.blocks.RichTextBlock(required=False)), ('button', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False)), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('regular', 'Regular'), ('large', 'Large Primary')]))]))])), ('related_links', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))])), ('related_posts', wagtail.core.blocks.StructBlock([('limit', wagtail.core.blocks.CharBlock(default='3', help_text='This limit applies to EACH TYPE of post this module retrieves, not the total number of retrieved posts.')), ('show_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='This toggles the heading and icon for the related types.', label='Show Heading and Icon?', required=False)), ('header_title', wagtail.core.blocks.CharBlock(default='Further reading', label='Slug Title')), ('relate_posts', wagtail.core.blocks.BooleanBlock(default=True, editable=False, label='Blog Posts', required=False)), ('relate_newsroom', wagtail.core.blocks.BooleanBlock(default=True, editable=False, label='Newsroom', required=False)), ('relate_events', wagtail.core.blocks.BooleanBlock(default=True, label='Events', required=False)), ('specific_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.ChoiceBlock(choices=[('Blog', (('At the CFPB', 'At the CFPB'), ("Director's notebook", "Director's notebook"), ('Policy & Compliance', 'Policy and compliance'), ('Data, Research & Reports', 'Data, research, and reports'), ('Info for Consumers', 'Info for consumers'))), ('Newsroom', (('Op-Ed', 'Op-ed'), ('Press Release', 'Press release'), ('Speech', 'Speech'), ('Testimony', 'Testimony')))], required=False), required=False)), ('and_filtering', wagtail.core.blocks.BooleanBlock(default=False, help_text='If checked, related posts will only be pulled in if they match ALL topic tags set on this page. Otherwise, related posts can match any one topic tag.', label='Match all topic tags', required=False)), ('alternate_view_more_url', wagtail.core.blocks.CharBlock(help_text='By default, the "View more" link will go to the Activity Log, filtered based on the above parameters. 
Enter a URL in this field to override that link destination.', label='Alternate "View more" URL', required=False))])), ('related_metadata', wagtail.core.blocks.StructBlock([('slug', wagtail.core.blocks.CharBlock(max_length=100)), ('content', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('blob', wagtail.core.blocks.RichTextBlock())], icon='pilcrow')), ('list', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))], icon='list-ul')), ('date', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('date', wagtail.core.blocks.DateBlock())], icon='date')), ('topics', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Topics', max_length=100)), ('show_topics', wagtail.core.blocks.BooleanBlock(default=True, required=False))], icon='tag')), ('categories', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Categories', max_length=100)), ('show_categories', wagtail.core.blocks.BooleanBlock(default=True, required=False))], icon='list-ul'))])), ('is_half_width', wagtail.core.blocks.BooleanBlock(default=False, required=False))])), ('email_signup', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Stay informed', required=False)), ('default_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='If selected, heading will be styled as an H5 with green top rule. Deselect to style header as H3.', label='Default heading style', required=False)), ('text', wagtail.core.blocks.CharBlock(help_text='Write a sentence or two about what kinds of emails the user is signing up for, how frequently they will be sent, etc.', required=False)), ('gd_code', wagtail.core.blocks.CharBlock(help_text='Code for the topic (i.e., mailing list) you want people who submit this form to subscribe to. Format: USCFPB_###', label='GovDelivery code', required=False)), ('disclaimer_page', wagtail.core.blocks.PageChooserBlock(help_text='Choose the page that the "See Privacy Act statement" link should go to. If in doubt, use "Generic Email Sign-Up Privacy Act Statement".', label='Privacy Act statement', required=False))])), ('sidebar_contact', wagtail.core.blocks.StructBlock([('contact', wagtail.snippets.blocks.SnippetChooserBlock('v1.Contact')), ('has_top_rule_line', wagtail.core.blocks.BooleanBlock(default=False, help_text='Add a horizontal rule line to top of contact block.', required=False))])), ('rss_feed', v1.atomic_elements.molecules.RSSFeed()), ('social_media', wagtail.core.blocks.StructBlock([('is_share_view', wagtail.core.blocks.BooleanBlock(default=True, help_text='If unchecked, social media icons will link users to official CFPB accounts. Do not fill in any further fields.', label='Desired action: share this page', required=False)), ('blurb', wagtail.core.blocks.CharBlock(default="Look what I found on the CFPB's site!", help_text='Sets the tweet text, email subject line, and LinkedIn post text.', required=False)), ('twitter_text', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom text for Twitter shares. 
If blank, will default to value of blurb field above.', max_length=100, required=False)), ('twitter_related', wagtail.core.blocks.CharBlock(help_text='(Optional) A comma-separated list of accounts related to the content of the shared URL. Do not enter the @ symbol. If blank, it will default to just "cfpb".', required=False)), ('twitter_hashtags', wagtail.core.blocks.CharBlock(help_text='(Optional) A comma-separated list of hashtags to be appended to default tweet text.', required=False)), ('twitter_lang', wagtail.core.blocks.CharBlock(help_text='(Optional) Loads text components in the specified language, if other than English. E.g., use "es" for Spanish. See https://dev.twitter.com/web/overview/languages for a list of supported language codes.', required=False)), ('email_title', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom subject for email shares. If blank, will default to value of blurb field above.', required=False)), ('email_text', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom text for email shares. If blank, will default to "Check out this page from the CFPB".', required=False)), ('email_signature', wagtail.core.blocks.CharBlock(help_text='(Optional) Adds a custom signature line to email shares. ', required=False)), ('linkedin_title', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom title for LinkedIn shares. If blank, will default to value of blurb field above.', required=False)), ('linkedin_text', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom text for LinkedIn shares.', required=False))])), ('reusable_text', v1.blocks.ReusableTextChooserBlock(v1.models.snippets.ReusableText))], blank=True),
),
migrations.AlterField(
model_name='cfgovpagecategory',
name='name',
field=models.CharField(choices=[('Administrative adjudication docket', (('administrative-adjudication', 'Administrative adjudication'), ('stipulation-and-constent-order', 'Stipulation and consent order'))), ('Amicus Brief', (('us-supreme-court', 'U.S. Supreme Court'), ('fed-circuit-court', 'Federal Circuit Court'), ('fed-district-court', 'Federal District Court'), ('state-court', 'State Court'))), ('Blog', (('at-the-cfpb', 'At the CFPB'), ('directors-notebook', "Director's notebook"), ('policy_compliance', 'Policy and compliance'), ('data-research-reports', 'Data, research, and reports'), ('info-for-consumers', 'Info for consumers'))), ('Consumer Reporting Companies', (('nationwide', 'Nationwide'), ('employment-screening', 'Employment screening'), ('tenant-screening', 'Tenant screening'), ('check-bank-screening', 'Check and bank screening'), ('personal-property-insurance', 'Personal property insurance'), ('medical', 'Medical'), ('low-income-and-subprime', 'Low-income and subprime'), ('supplementary-reports', 'Supplementary reports'), ('utilities', 'Utilities'), ('retail', 'Retail'), ('gaming', 'Gaming'))), ('Enforcement Action', (('civil-action', 'Civil Action'), ('administrative-proceeding', 'Administrative Proceeding'))), ('Final rule', (('interim-final-rule', 'Interim final rule'), ('final-rule', 'Final rule'))), ('FOIA Frequently Requested Record', (('report', 'Report'), ('log', 'Log'), ('record', 'Record'))), ('Implementation Resource', (('compliance-aid', 'Compliance aid'), ('official-guidance', 'Official guidance'))), ('Newsroom', (('op-ed', 'Op-ed'), ('press-release', 'Press release'), ('speech', 'Speech'), ('testimony', 'Testimony'))), ('Notice and Opportunity for Comment', (('notice-proposed-rule', 'Advance notice of proposed rulemaking'), ('proposed-rule', 'Proposed rule'), ('interim-final-rule-2', 'Interim final rule'), ('request-comment-info', 'Request for comment or information'), ('proposed-policy', 'Proposed policy'), ('intent-preempt-determ', 'Intent to make preemption determination'), ('info-collect-activity', 'Information collection activities'), ('notice-privacy-act', 'Notice related to Privacy Act'))), ('Research Report', (('consumer-complaint', 'Consumer complaint'), ('super-highlight', 'Supervisory Highlights'), ('data-point', 'Data point'), ('industry-markets', 'Industry and markets'), ('consumer-edu-empower', 'Consumer education and empowerment'), ('to-congress', 'To Congress'))), ('Rule Under Development', (('notice-proposed-rule-2', 'Advance notice of proposed rulemaking'), ('proposed-rule-2', 'Proposed rule'))), ('Story', (('auto-loans', 'Auto loans'), ('bank-accts-services', 'Bank accounts and services'), ('credit-cards', 'Credit cards'), ('credit-reports-scores', 'Credit reports and scores'), ('debt-collection', 'Debt collection'), ('money-transfers', 'Money transfers'), ('mortgages', 'Mortgages'), ('payday-loans', 'Payday loans'), ('prepaid-cards', 'Prepaid cards'), ('student-loans', 'Student loans')))], max_length=255),
),
migrations.AlterField(
model_name='sublandingpage',
name='sidebar_breakout',
field=wagtail.core.fields.StreamField([('slug', wagtail.core.blocks.CharBlock(icon='title')), ('heading', wagtail.core.blocks.CharBlock(icon='title')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='edit')), ('breakout_image', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('is_round', wagtail.core.blocks.BooleanBlock(default=True, label='Round?', required=False)), ('icon', wagtail.core.blocks.CharBlock(help_text='Enter icon class name.')), ('heading', wagtail.core.blocks.CharBlock(label='Introduction Heading', required=False)), ('body', wagtail.core.blocks.TextBlock(label='Introduction Body', required=False))], heading='Breakout Image', icon='image')), ('related_posts', wagtail.core.blocks.StructBlock([('limit', wagtail.core.blocks.CharBlock(default='3', help_text='This limit applies to EACH TYPE of post this module retrieves, not the total number of retrieved posts.')), ('show_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='This toggles the heading and icon for the related types.', label='Show Heading and Icon?', required=False)), ('header_title', wagtail.core.blocks.CharBlock(default='Further reading', label='Slug Title')), ('relate_posts', wagtail.core.blocks.BooleanBlock(default=True, editable=False, label='Blog Posts', required=False)), ('relate_newsroom', wagtail.core.blocks.BooleanBlock(default=True, editable=False, label='Newsroom', required=False)), ('relate_events', wagtail.core.blocks.BooleanBlock(default=True, label='Events', required=False)), ('specific_categories', wagtail.core.blocks.ListBlock(wagtail.core.blocks.ChoiceBlock(choices=[('Blog', (('At the CFPB', 'At the CFPB'), ("Director's notebook", "Director's notebook"), ('Policy & Compliance', 'Policy and compliance'), ('Data, Research & Reports', 'Data, research, and reports'), ('Info for Consumers', 'Info for consumers'))), ('Newsroom', (('Op-Ed', 'Op-ed'), ('Press Release', 'Press release'), ('Speech', 'Speech'), ('Testimony', 'Testimony')))], required=False), required=False)), ('and_filtering', wagtail.core.blocks.BooleanBlock(default=False, help_text='If checked, related posts will only be pulled in if they match ALL topic tags set on this page. Otherwise, related posts can match any one topic tag.', label='Match all topic tags', required=False)), ('alternate_view_more_url', wagtail.core.blocks.CharBlock(help_text='By default, the "View more" link will go to the Activity Log, filtered based on the above parameters. Enter a URL in this field to override that link destination.', label='Alternate "View more" URL', required=False))])), ('job_listing_list', wagtail.core.blocks.StructBlock([('more_jobs_page', wagtail.core.blocks.PageChooserBlock(help_text='Link to full list of jobs'))]))], blank=True),
),
]
| true | true |
1c2b20126cfa1deb4d78e265e26c98ff9951782a | 1,369 | py | Python | ooobuild/dyn/frame/x_layout_manager2.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/frame/x_layout_manager2.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/frame/x_layout_manager2.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file created by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.frame
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
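# Descriptive note (added for clarity; not part of the generated file): the block below
# selects the implementation at import time. Inside a live LibreOffice/UNO process
# (UNO_RUNTIME and UNO_ENVIRONMENT both truthy, and not type checking) the real
# com.sun.star.frame.XLayoutManager2 interface is imported and tagged with ooo metadata;
# otherwise the static stub from ...lo.frame.x_layout_manager2 is used so type checkers
# and plain Python imports keep working.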
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.frame import XLayoutManager2 as XLayoutManager2
setattr(XLayoutManager2, '__ooo_ns__', 'com.sun.star.frame')
setattr(XLayoutManager2, '__ooo_full_ns__', 'com.sun.star.frame.XLayoutManager2')
setattr(XLayoutManager2, '__ooo_type_name__', 'interface')
else:
from ...lo.frame.x_layout_manager2 import XLayoutManager2 as XLayoutManager2
__all__ = ['XLayoutManager2']
| 37 | 85 | 0.769905 |
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.frame import XLayoutManager2 as XLayoutManager2
setattr(XLayoutManager2, '__ooo_ns__', 'com.sun.star.frame')
setattr(XLayoutManager2, '__ooo_full_ns__', 'com.sun.star.frame.XLayoutManager2')
setattr(XLayoutManager2, '__ooo_type_name__', 'interface')
else:
from ...lo.frame.x_layout_manager2 import XLayoutManager2 as XLayoutManager2
__all__ = ['XLayoutManager2']
| true | true |
1c2b22346e3ef10d78afbd239821aac46d72fd58 | 711 | py | Python | main/service/type_service.py | emhayusa/4d_api | cbf380c150d6f7a01954346492fe9a7751a6603b | [
"MIT"
] | null | null | null | main/service/type_service.py | emhayusa/4d_api | cbf380c150d6f7a01954346492fe9a7751a6603b | [
"MIT"
] | null | null | null | main/service/type_service.py | emhayusa/4d_api | cbf380c150d6f7a01954346492fe9a7751a6603b | [
"MIT"
] | null | null | null | import uuid
import datetime
from app.main import db
from app.main.model.type import Type
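# Descriptive note (added for clarity; not part of the original module): these are thin
# service-layer helpers around the Type model. save_new() inserts a row only when the
# type_name is unused and returns a (payload, HTTP status) tuple -- 201 on success,
# 409 when the name already exists; get_all() and get_by_id() are plain queries, and
# save_changes() adds the instance to the session and commits it.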
def save_new(data):
row = Type.query.filter_by(type_name=data['type_name']).first()
if not row:
new = Type(
type_name=data['type_name'],
)
save_changes(new)
response_object = {
'status': 'success',
'message': 'Successfully inserted.'
}
return response_object, 201
else:
response_object = {
'status': 'fail',
'message': 'Type already exists.',
}
return response_object, 409
def get_all():
return Type.query.order_by('id').all()
def get_by_id(id):
return Type.query.filter_by(id=id).first()
def save_changes(data):
db.session.add(data)
db.session.commit() | 20.911765 | 64 | 0.673699 | import uuid
import datetime
from app.main import db
from app.main.model.type import Type
def save_new(data):
row = Type.query.filter_by(type_name=data['type_name']).first()
if not row:
new = Type(
type_name=data['type_name'],
)
save_changes(new)
response_object = {
'status': 'success',
'message': 'Successfully inserted.'
}
return response_object, 201
else:
response_object = {
'status': 'fail',
'message': 'Type already exists.',
}
return response_object, 409
def get_all():
return Type.query.order_by('id').all()
def get_by_id(id):
return Type.query.filter_by(id=id).first()
def save_changes(data):
db.session.add(data)
db.session.commit() | true | true |
1c2b22572104c711e6f9f2ddfd62d842121e2ebe | 261 | py | Python | config.py | Mainakkundu/titanic-gcp-kubernet | d3d0cc9969f237aada4572ef8e627eb3aafb5fc9 | [
"MIT"
] | 6 | 2020-11-26T23:14:36.000Z | 2021-04-16T03:21:34.000Z | config.py | Mainakkundu/titanic-gcp-kubernet | d3d0cc9969f237aada4572ef8e627eb3aafb5fc9 | [
"MIT"
] | 5 | 2020-04-22T01:58:38.000Z | 2022-03-12T00:23:40.000Z | config.py | Mainakkundu/titanic-gcp-kubernet | d3d0cc9969f237aada4572ef8e627eb3aafb5fc9 | [
"MIT"
] | null | null | null | from os import environ as env
import multiprocessing
PORT = int(env.get("PORT", 8080))
DEBUG_MODE = int(env.get("DEBUG_MODE", 1))
# Gunicorn config
bind = ":" + str(PORT)
workers = multiprocessing.cpu_count() * 2 + 1
threads = 2 * multiprocessing.cpu_count()
| 23.727273 | 45 | 0.712644 | from os import environ as env
import multiprocessing
PORT = int(env.get("PORT", 8080))
DEBUG_MODE = int(env.get("DEBUG_MODE", 1))
bind = ":" + str(PORT)
workers = multiprocessing.cpu_count() * 2 + 1
threads = 2 * multiprocessing.cpu_count()
| true | true |
1c2b22c206bc71ab4eeddd556a1edcdc75ff2aa4 | 10,120 | py | Python | application/games/crosswordcreator/data/board.py | Tyler-Yates/game-box | dc838270c3777372c3eeaf1e09fb1962c36fc2a8 | [
"MIT"
] | 1 | 2020-12-13T02:41:19.000Z | 2020-12-13T02:41:19.000Z | application/games/crosswordcreator/data/board.py | Tyler-Yates/game-box | dc838270c3777372c3eeaf1e09fb1962c36fc2a8 | [
"MIT"
] | null | null | null | application/games/crosswordcreator/data/board.py | Tyler-Yates/game-box | dc838270c3777372c3eeaf1e09fb1962c36fc2a8 | [
"MIT"
] | null | null | null | from typing import List, Optional, Tuple, Set, Dict
from ...common.word_manager import WordManager
class Board:
"""
Represents a board for a single player.
Boards are assumed to be square (equal number of rows and columns).
The board is represented as a two dimensional matrix.
Points on the board are represented as tuples of integers: (row, column).
The upper-left corner of the board is point (0,0).
The lower-right corner of the board is point (board_size-1, board_size-1).
"""
def __init__(self, player_id: str, board_size: int, word_manager: WordManager):
self.player_id = player_id
self.board_size = board_size
self.board: List[List[Optional[str]]] = [[None for _ in range(board_size)] for _ in range(board_size)]
self.word_manager = word_manager
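    # Illustrative note (added for clarity; not part of the original file): for
    # board_size=3 the constructor above produces
    #   [[None, None, None],
    #    [None, None, None],
    #    [None, None, None]]
    # and add_tile("A", 0, 2) would place the letter "A" in the top-right corner.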
def _set_board(self, board: List[List[Optional[str]]]):
# Helper method for tests to set the board how they like.
self.board_size = len(board)
self.board = board
def add_tile(self, tile: str, row: int, col: int) -> Optional[str]:
"""
Adds a tile to the given position on the board.
Args:
tile: The tile
row: The row
col: The column
Returns:
The tile that was previously at that location. Could be None.
"""
previous_tile = self.board[row][col]
self.board[row][col] = tile
return previous_tile
def remove_tile(self, row: int, col: int) -> Optional[str]:
"""
Removes the tile from the given position on the board.
Args:
row: The row
col: The column
Returns:
The tile that was removed. Could be None.
"""
removed_tile = self.board[row][col]
self.board[row][col] = None
return removed_tile
def _find_connected_tiles(self, row, col, non_empty_tiles_not_visited: set) -> None:
"""
Recursive function used to find all connected tiles from a given point.
NOTE: non_empty_tiles_not_visited will be modified by this function.
Args:
row: The starting row
col: The starting column
non_empty_tiles_not_visited: The complete set of non-empty tiles for this function to work with
"""
non_empty_tiles_not_visited.remove((row, col))
if (row > 0) and (self.board[row - 1][col] is not None) and ((row - 1, col) in non_empty_tiles_not_visited):
self._find_connected_tiles(row - 1, col, non_empty_tiles_not_visited)
if (
(row < self.board_size - 1)
and (self.board[row + 1][col] is not None)
and ((row + 1, col) in non_empty_tiles_not_visited)
):
self._find_connected_tiles(row + 1, col, non_empty_tiles_not_visited)
if (col > 0) and (self.board[row][col - 1] is not None) and ((row, col - 1) in non_empty_tiles_not_visited):
self._find_connected_tiles(row, col - 1, non_empty_tiles_not_visited)
if (
(col < self.board_size - 1)
and (self.board[row][col + 1] is not None)
and ((row, col + 1) in non_empty_tiles_not_visited)
):
self._find_connected_tiles(row, col + 1, non_empty_tiles_not_visited)
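    # Descriptive note (added for clarity; not part of the original file): the four checks
    # above perform a recursive depth-first flood fill over the up/down/left/right
    # neighbours, skipping empty cells and positions that have already been visited.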
def _check_connected(self) -> Set[Tuple[int, int]]:
"""
Function used to check if all tiles on the board are connected.
Returns:
            A set of points on the board that are not connected to the rest of the tiles. May be empty, which indicates all tiles are connected.
"""
first_tile = None
non_empty_tiles = set()
# Find the first tile and all tiles that are non-empty
for row in range(self.board_size):
for col in range(self.board_size):
tile = self.board[row][col]
if tile is not None:
non_empty_tiles.add((row, col))
if first_tile is None:
first_tile = (row, col)
# Traverse through all tiles reachable from the first tile.
# Whatever tiles are left in non_empty_tiles are not connected.
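        # Note (added for clarity; not part of the original file): this assumes at least
        # one tile has been placed; on a completely empty board first_tile stays None.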
self._find_connected_tiles(first_tile[0], first_tile[1], non_empty_tiles)
return non_empty_tiles
def _check_valid_words(self) -> Set[Tuple[int, int]]:
"""
Helper method to check that all words in the crossword of the board are valid.
Returns:
A set of points that are part of invalid words.
This may be empty, indicating the board is a valid crossword.
"""
invalid_points = set()
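        # Descriptive note (added for clarity; not part of the original file): the two
        # passes below slide across every row and then down every column, accumulating
        # runs of letters; when a run ends at a blank cell (for runs of two or more
        # letters) or at the board edge, it is looked up via word_manager.is_word() and,
        # if unknown, every cell of that run is recorded in invalid_points.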
# Check across each row
for row in range(self.board_size):
current_word = ""
for col in range(self.board_size):
tile = self.board[row][col]
# If the position is blank, it's time to check
if tile is None:
# If we have a current word of length more than 1, check its validity
if len(current_word) > 1:
# If the word is not valid, add the points to the list of invalid points
if not self.word_manager.is_word(current_word):
for i in range(len(current_word)):
invalid_points.add((row, col - 1 - i))
# Now that we are done with our checks, we clear the current word to continue our search
current_word = ""
else:
current_word += tile
# The current word could go to the end of the board so we need to do an additional check
if not self.word_manager.is_word(current_word):
for i in range(len(current_word)):
invalid_points.add((row, self.board_size - 1 - i))
# Check down each column
for col in range(self.board_size):
current_word = ""
for row in range(self.board_size):
tile = self.board[row][col]
# If the position is blank, it's time to check
if tile is None:
# If we have a current word of length more than 1, check its validity
if len(current_word) > 1:
# If the word is not valid, add the points to the list of invalid points
if not self.word_manager.is_word(current_word):
for i in range(len(current_word)):
invalid_points.add((row - 1 - i, col))
# Now that we are done with our checks, we clear the current word to continue our search
current_word = ""
else:
current_word += tile
# The current word could go to the end of the board so we need to do an additional check
if not self.word_manager.is_word(current_word):
for i in range(len(current_word)):
invalid_points.add((self.board_size - 1 - i, col))
return invalid_points
def board_is_valid_crossword(self) -> Set[Tuple[int, int]]:
"""
Returns whether the board represents a valid crossword.
Returns:
A set of invalid points on the board. If empty, the board is a valid crossword.
"""
# First check is to ensure that all tiles on the board are connected.
unconnected_points = self._check_connected()
if unconnected_points:
return unconnected_points
# Now, ensure all tiles make a valid crossword of recognized words.
return self._check_valid_words()
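    # Hypothetical usage sketch (added for illustration; not part of the original file):
    #   board = Board("player-1", 5, word_manager)
    #   board.add_tile("C", 0, 0); board.add_tile("A", 0, 1); board.add_tile("T", 0, 2)
    #   if not board.board_is_valid_crossword():
    #       ...  # an empty set means every tile is connected and every word is valid
    # assuming word_manager.is_word("CAT") returns True for the word list in use.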
def shift_board_down(self) -> bool:
"""
Shifts the entire board down one row if possible.
Returns:
True if the shift occurred, False otherwise
"""
for c in range(self.board_size):
if self.board[self.board_size - 1][c] is not None:
return False
for r in range(self.board_size - 1, 0, -1):
for c in range(self.board_size):
self.board[r][c] = self.board[r - 1][c]
for c in range(self.board_size):
self.board[0][c] = None
return True
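    # Descriptive note (added for clarity; not part of the original file): all four
    # shift_board_* methods share the same pattern -- refuse the shift when the
    # destination edge already holds a tile, otherwise copy every row/column over by one
    # and blank out the vacated edge.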
def shift_board_up(self) -> bool:
"""
Shifts the entire board up one row if possible.
Returns:
True if the shift occurred, False otherwise
"""
for c in range(self.board_size):
if self.board[0][c] is not None:
return False
for r in range(0, self.board_size - 1):
for c in range(self.board_size):
self.board[r][c] = self.board[r + 1][c]
for c in range(self.board_size):
self.board[self.board_size - 1][c] = None
return True
def shift_board_right(self) -> bool:
"""
        Shifts the entire board right one column if possible.
Returns:
True if the shift occurred, False otherwise
"""
for r in range(self.board_size):
if self.board[r][self.board_size - 1] is not None:
return False
for c in range(self.board_size - 1, 0, -1):
for r in range(self.board_size):
self.board[r][c] = self.board[r][c - 1]
for r in range(self.board_size):
self.board[r][0] = None
return True
def shift_board_left(self) -> bool:
"""
        Shifts the entire board left one column if possible.
Returns:
True if the shift occurred, False otherwise
"""
for r in range(self.board_size):
if self.board[r][0] is not None:
return False
for c in range(0, self.board_size - 1):
for r in range(self.board_size):
self.board[r][c] = self.board[r][c + 1]
for r in range(self.board_size):
self.board[r][self.board_size - 1] = None
return True
def get_json(self) -> Dict[str, object]:
return {"board": self.board}
| 37.902622 | 118 | 0.573221 | from typing import List, Optional, Tuple, Set, Dict
from ...common.word_manager import WordManager
class Board:
def __init__(self, player_id: str, board_size: int, word_manager: WordManager):
self.player_id = player_id
self.board_size = board_size
self.board: List[List[Optional[str]]] = [[None for _ in range(board_size)] for _ in range(board_size)]
self.word_manager = word_manager
def _set_board(self, board: List[List[Optional[str]]]):
self.board_size = len(board)
self.board = board
def add_tile(self, tile: str, row: int, col: int) -> Optional[str]:
previous_tile = self.board[row][col]
self.board[row][col] = tile
return previous_tile
def remove_tile(self, row: int, col: int) -> Optional[str]:
removed_tile = self.board[row][col]
self.board[row][col] = None
return removed_tile
def _find_connected_tiles(self, row, col, non_empty_tiles_not_visited: set) -> None:
non_empty_tiles_not_visited.remove((row, col))
if (row > 0) and (self.board[row - 1][col] is not None) and ((row - 1, col) in non_empty_tiles_not_visited):
self._find_connected_tiles(row - 1, col, non_empty_tiles_not_visited)
if (
(row < self.board_size - 1)
and (self.board[row + 1][col] is not None)
and ((row + 1, col) in non_empty_tiles_not_visited)
):
self._find_connected_tiles(row + 1, col, non_empty_tiles_not_visited)
if (col > 0) and (self.board[row][col - 1] is not None) and ((row, col - 1) in non_empty_tiles_not_visited):
self._find_connected_tiles(row, col - 1, non_empty_tiles_not_visited)
if (
(col < self.board_size - 1)
and (self.board[row][col + 1] is not None)
and ((row, col + 1) in non_empty_tiles_not_visited)
):
self._find_connected_tiles(row, col + 1, non_empty_tiles_not_visited)
def _check_connected(self) -> Set[Tuple[int, int]]:
first_tile = None
non_empty_tiles = set()
for row in range(self.board_size):
for col in range(self.board_size):
tile = self.board[row][col]
if tile is not None:
non_empty_tiles.add((row, col))
if first_tile is None:
first_tile = (row, col)
self._find_connected_tiles(first_tile[0], first_tile[1], non_empty_tiles)
return non_empty_tiles
def _check_valid_words(self) -> Set[Tuple[int, int]]:
invalid_points = set()
for row in range(self.board_size):
current_word = ""
for col in range(self.board_size):
tile = self.board[row][col]
if tile is None:
# If we have a current word of length more than 1, check its validity
if len(current_word) > 1:
# If the word is not valid, add the points to the list of invalid points
if not self.word_manager.is_word(current_word):
for i in range(len(current_word)):
invalid_points.add((row, col - 1 - i))
# Now that we are done with our checks, we clear the current word to continue our search
current_word = ""
else:
current_word += tile
# The current word could go to the end of the board so we need to do an additional check
if not self.word_manager.is_word(current_word):
for i in range(len(current_word)):
invalid_points.add((row, self.board_size - 1 - i))
# Check down each column
for col in range(self.board_size):
current_word = ""
for row in range(self.board_size):
tile = self.board[row][col]
# If the position is blank, it's time to check
if tile is None:
if len(current_word) > 1:
if not self.word_manager.is_word(current_word):
for i in range(len(current_word)):
invalid_points.add((row - 1 - i, col))
current_word = ""
else:
current_word += tile
if not self.word_manager.is_word(current_word):
for i in range(len(current_word)):
invalid_points.add((self.board_size - 1 - i, col))
return invalid_points
def board_is_valid_crossword(self) -> Set[Tuple[int, int]]:
unconnected_points = self._check_connected()
if unconnected_points:
return unconnected_points
return self._check_valid_words()
def shift_board_down(self) -> bool:
for c in range(self.board_size):
if self.board[self.board_size - 1][c] is not None:
return False
for r in range(self.board_size - 1, 0, -1):
for c in range(self.board_size):
self.board[r][c] = self.board[r - 1][c]
for c in range(self.board_size):
self.board[0][c] = None
return True
def shift_board_up(self) -> bool:
for c in range(self.board_size):
if self.board[0][c] is not None:
return False
for r in range(0, self.board_size - 1):
for c in range(self.board_size):
self.board[r][c] = self.board[r + 1][c]
for c in range(self.board_size):
self.board[self.board_size - 1][c] = None
return True
def shift_board_right(self) -> bool:
for r in range(self.board_size):
if self.board[r][self.board_size - 1] is not None:
return False
for c in range(self.board_size - 1, 0, -1):
for r in range(self.board_size):
self.board[r][c] = self.board[r][c - 1]
for r in range(self.board_size):
self.board[r][0] = None
return True
def shift_board_left(self) -> bool:
for r in range(self.board_size):
if self.board[r][0] is not None:
return False
for c in range(0, self.board_size - 1):
for r in range(self.board_size):
self.board[r][c] = self.board[r][c + 1]
for r in range(self.board_size):
self.board[r][self.board_size - 1] = None
return True
def get_json(self) -> Dict[str, object]:
return {"board": self.board}
| true | true |
1c2b236d2d55b6a39af3ed7fa660f24bc4e4454c | 18,818 | py | Python | log_complete/model_390.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete/model_390.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete/model_390.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
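# Orientation note (added for clarity; not part of the generated file): the monomers,
# parameters and rules below appear to encode an extrinsic-apoptosis style network --
# Ligand/Receptor/Fadd activate C8, C8 truncates Bid, Bid activates Bax, assembled Bax
# pores release Smac and cytochrome C, ApafA plus C9 form the apoptosome (Apop), Apop
# and C8 activate C3, Xiap inhibits Apop and C3, active C3 cleaves Parp, and C6 feeds
# back to activate more C8.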
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 97500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
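# Note (added for clarity; not part of the generated file): each Observable above tracks
# the total amount of its monomer irrespective of binding state, since the patterns are
# given with no sites specified.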
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
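# Initial conditions: starting copy numbers for every monomer (values given by the *_0 parameters above)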
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| 91.349515 | 710 | 0.806515 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 97500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| true | true |
1c2b26105539ec751d049b502c83a687b11e6631 | 1,062 | py | Python | gmm_code/plot_var_vs_clip.py | DPBayes/DP-MCMC-NeurIPS2019 | bf5349835b2044135749ea6dbedea2e310d7d2f2 | [
"MIT"
] | 1 | 2021-06-29T00:35:10.000Z | 2021-06-29T00:35:10.000Z | gmm_code/plot_var_vs_clip.py | DPBayes/DP-MCMC-NeurIPS2019 | bf5349835b2044135749ea6dbedea2e310d7d2f2 | [
"MIT"
] | null | null | null | gmm_code/plot_var_vs_clip.py | DPBayes/DP-MCMC-NeurIPS2019 | bf5349835b2044135749ea6dbedea2e310d7d2f2 | [
"MIT"
] | 1 | 2021-06-29T00:35:14.000Z | 2021-06-29T00:35:14.000Z | import numpy as np
import pickle
import matplotlib.pyplot as plt
from plot_path import path
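# plot_fig3 loads the pickled tempered DP-MCMC results from `fname` and plots the
# average fraction of clipped log-likelihood-ratio terms per iteration against the
# proposal variance sigma^2, saving the figure as prop_vs_clip.pdf.
# Example (hypothetical results file): plot_fig3('temped_results.p')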
def plot_fig3(fname):
fontsize = 25
figsize = (8.5,6)
temped_results = pickle.load(open(fname, 'rb'))
temped_dp_mcmc_params = temped_results[0]
temped_chain = temped_results[1]
temped_privacy_params = temped_results[2]
# Plot clipped proportion vs. proposal variance
prop_vars = temped_dp_mcmc_params['prop_vars']
clip_counts = temped_dp_mcmc_params['clip_counts']
T = temped_dp_mcmc_params['T']
batch_size = temped_dp_mcmc_params['B']
plt.figure(figsize=figsize)
plt.plot(prop_vars, clip_counts.sum(1)/T/batch_size)
plt.title('Average proportion of \n clipped llr vs. proposal variance', fontsize=fontsize)
plt.xlabel(r'$\sigma^2$', fontsize=fontsize)
plt.ylabel(r'$\frac{\#(clipped)}{Tb}$', fontsize=fontsize)
plt.setp(plt.gca().get_xticklabels(), fontsize=fontsize-1, rotation=45)
plt.setp(plt.gca().get_yticklabels(), fontsize=fontsize-1)
plt.tight_layout()
plt.savefig(path+'prop_vs_clip.pdf',format='pdf', bbox_inches = 'tight')
plt.close()
| 36.62069 | 91 | 0.76177 | import numpy as np
import pickle
import matplotlib.pyplot as plt
from plot_path import path
def plot_fig3(fname):
fontsize = 25
figsize = (8.5,6)
temped_results = pickle.load(open(fname, 'rb'))
temped_dp_mcmc_params = temped_results[0]
temped_chain = temped_results[1]
temped_privacy_params = temped_results[2]
prop_vars = temped_dp_mcmc_params['prop_vars']
clip_counts = temped_dp_mcmc_params['clip_counts']
T = temped_dp_mcmc_params['T']
batch_size = temped_dp_mcmc_params['B']
plt.figure(figsize=figsize)
plt.plot(prop_vars, clip_counts.sum(1)/T/batch_size)
plt.title('Average proportion of \n clipped llr vs. proposal variance', fontsize=fontsize)
plt.xlabel(r'$\sigma^2$', fontsize=fontsize)
plt.ylabel(r'$\frac{\#(clipped)}{Tb}$', fontsize=fontsize)
plt.setp(plt.gca().get_xticklabels(), fontsize=fontsize-1, rotation=45)
plt.setp(plt.gca().get_yticklabels(), fontsize=fontsize-1)
plt.tight_layout()
plt.savefig(path+'prop_vs_clip.pdf',format='pdf', bbox_inches = 'tight')
plt.close()
| true | true |
1c2b2692ef1f831acee54c7ccdf84e973f1db901 | 2,579 | py | Python | iot_control/app.py | DDizzzy79/ScienceFair | b41c96f74ab7e1db752e9985a740130ee1abeb1f | [
"MIT"
] | 1 | 2021-12-01T15:12:59.000Z | 2021-12-01T15:12:59.000Z | iot_control/app.py | DDizzzy79/ScienceFair | b41c96f74ab7e1db752e9985a740130ee1abeb1f | [
"MIT"
] | null | null | null | iot_control/app.py | DDizzzy79/ScienceFair | b41c96f74ab7e1db752e9985a740130ee1abeb1f | [
"MIT"
] | null | null | null | from flask import Flask,render_template
import RPi.GPIO as GPIO
import time
import lcd1602 as lcd
from luma.led_matrix.device import *
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
from luma.core.legacy import text
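# Hardware setup: MAX7219 8x8 LED matrix on SPI, a 1602 character LCD, and GPIO pin 20 (BCM) as an output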
serial = spi(port=0, device=0, gpio=noop())
device = max7219(serial, cascaded=1, block_orientation=0, rotate=0, blocks_arranged_in_reverse_order=False)
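# `id` is a module-level flag shared across requests: /time keeps refreshing the LCD while id != 0,
# and /clear sets it back to 0 to stop that loop (the server runs with threaded=True)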
id = 1
GPIO.setmode(GPIO.BCM)
GPIO.setup(20,GPIO.OUT)
GPIO.setwarnings(False)
lcd.init_lcd()
time.sleep(1)
lcd.turn_light(1)
app = Flask(__name__)
@app.route("/")
def main():
return render_template("main.html")
@app.route("/time")
def on():
global id
id = 1
'''time'''
lcd.clear_lcd()
while id!=0:
nowtime = time.strftime('%m-%d %H:%M:%S',time.localtime(time.time()))
hourtime = time.strftime('%H',time.localtime(time.time()))
mintime = time.strftime('%M',time.localtime(time.time()))
sectime = time.strftime('%S',time.localtime(time.time()))
lcd.print_lcd(1,1,nowtime)
if mintime == '59':
if sectime == '00':
lcd.turn_light(1)
elif sectime == '59':
lcd.turn_light(0)
time.sleep(0.2)
return render_template("main.html")
@app.route("/print")
def print():
global id
id = 1
'''printHelloWorld'''
lcd.clear_lcd()
#GPIO.output(20,GPIO.LOW)
lcd.print_lcd(0,0,"Hello World")
return render_template("main.html")
@app.route("/clear")
def clear():
global id
id = 0
lcd.clear_lcd()
return render_template("main.html")
@app.route("/left")
def left():
for x in range(10):
#print("drawing")
for x in range(5):
with canvas(device) as draw:
text(draw, (0, 0), chr(27), fill="white")
time.sleep(0.01)
return render_template("main.html")
@app.route("/right")
def right():
for x in range(10):
#print("drawing")
for x in range(4):
with canvas(device) as draw:
text(draw, (0, 0), chr(26), fill="white")
time.sleep(0.01)
return render_template("main.html")
@app.route("/line")
def line():
for x in range(10):
#print("drawing")
for x in range(4):
with canvas(device) as draw:
text(draw, (0, 0), chr(24), fill="white")
time.sleep(0.01)
return render_template("main.html")
if __name__=="__main__":
app.run(host='0.0.0.0', port=8080, debug=True, threaded=True)
| 27.43617 | 116 | 0.597131 | from flask import Flask,render_template
import RPi.GPIO as GPIO
import time
import lcd1602 as lcd
import time
from luma.led_matrix.device import *
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
from luma.core.legacy import text
serial = spi(port=0, device=0, gpio=noop())
device = max7219(serial, cascaded=1 or 1, block_orientation=0,rotate=0 or 0, blocks_arranged_in_reverse_order=False)
id = 1
GPIO.setmode(GPIO.BCM)
GPIO.setup(20,GPIO.OUT)
GPIO.setwarnings(False)
lcd.init_lcd()
time.sleep(1)
lcd.turn_light(1)
app = Flask(__name__)
@app.route("/")
def main():
return render_template("main.html")
@app.route("/time")
def on():
global id
id = 1
lcd.clear_lcd()
while id!=0:
nowtime = time.strftime('%m-%d %H:%M:%S',time.localtime(time.time()))
hourtime = time.strftime('%H',time.localtime(time.time()))
mintime = time.strftime('%M',time.localtime(time.time()))
sectime = time.strftime('%S',time.localtime(time.time()))
lcd.print_lcd(1,1,nowtime)
if mintime == '59':
if sectime == '00':
lcd.turn_light(1)
elif sectime == '59':
lcd.turn_light(0)
time.sleep(0.2)
return render_template("main.html")
@app.route("/print")
def print():
global id
id = 1
lcd.clear_lcd()
lcd.print_lcd(0,0,"Hello World")
return render_template("main.html")
@app.route("/clear")
def clear():
global id
id = 0
lcd.clear_lcd()
return render_template("main.html")
@app.route("/left")
def left():
for x in range(10):
for x in range(5):
with canvas(device) as draw:
text(draw, (0, 0), chr(27), fill="white")
time.sleep(0.01)
return render_template("main.html")
@app.route("/right")
def right():
for x in range(10):
for x in range(4):
with canvas(device) as draw:
text(draw, (0, 0), chr(26), fill="white")
time.sleep(0.01)
return render_template("main.html")
@app.route("/line")
def line():
for x in range(10):
for x in range(4):
with canvas(device) as draw:
text(draw, (0, 0), chr(24), fill="white")
time.sleep(0.01)
return render_template("main.html")
if __name__=="__main__":
app.run(host='0.0.0.0', port=8080, debug=True, threaded=True)
| true | true |
1c2b26e248bcb6fc179ce066242c7221429578e8 | 726 | py | Python | touchdown/aws/kms/__init__.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 14 | 2015-01-05T18:18:04.000Z | 2022-02-07T19:35:12.000Z | touchdown/aws/kms/__init__.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 106 | 2015-01-06T00:17:13.000Z | 2019-09-07T00:35:32.000Z | touchdown/aws/kms/__init__.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 5 | 2015-01-30T10:18:24.000Z | 2022-02-07T19:35:13.000Z | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
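# Re-export the KMS resource classes so they are importable directly from touchdown.aws.kms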
from .alias import Alias
from .grant import Grant
from .key import Key
from .wrapper import Wrapper
__all__ = ["Alias", "Grant", "Key", "Wrapper"]
| 34.571429 | 74 | 0.757576 |
from .alias import Alias
from .grant import Grant
from .key import Key
from .wrapper import Wrapper
__all__ = ["Alias", "Grant", "Key", "Wrapper"]
| true | true |
1c2b28b316b72deb738bf4f5837b485b63c49ba7 | 490 | py | Python | fairseq/__init__.py | beichao1314/fairseq | b1521f962e4ca670311c0cd0c8b1dadf310cb242 | [
"BSD-3-Clause"
] | 140 | 2019-06-10T04:02:07.000Z | 2022-03-22T11:08:27.000Z | fairseq/__init__.py | beichao1314/fairseq | b1521f962e4ca670311c0cd0c8b1dadf310cb242 | [
"BSD-3-Clause"
] | 7 | 2019-04-24T09:07:06.000Z | 2022-03-28T21:58:04.000Z | fairseq/__init__.py | beichao1314/fairseq | b1521f962e4ca670311c0cd0c8b1dadf310cb242 | [
"BSD-3-Clause"
] | 11 | 2019-06-21T05:31:17.000Z | 2022-01-04T02:20:46.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from .multiprocessing_pdb import pdb
__all__ = ['pdb']
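# Importing these subpackages runs their registration decorators so all bundled
# criterions, models, modules, optimizers, LR schedulers and tasks become available.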
import fairseq.criterions
import fairseq.models
import fairseq.modules
import fairseq.optim
import fairseq.optim.lr_scheduler
import fairseq.tasks
| 27.222222 | 78 | 0.791837 |
from .multiprocessing_pdb import pdb
__all__ = ['pdb']
import fairseq.criterions
import fairseq.models
import fairseq.modules
import fairseq.optim
import fairseq.optim.lr_scheduler
import fairseq.tasks
| true | true |
1c2b29ee506620ebaf32b44c7796a7ef747c0674 | 31 | py | Python | learn.py | ishmandoo/FlapPyBird-Neural | 2cd733db090dd972e698a5d951b90f76f091babe | [
"MIT"
] | 2 | 2019-11-13T22:14:30.000Z | 2019-11-13T22:15:24.000Z | learn.py | ishmandoo/FlapPyBird | 2cd733db090dd972e698a5d951b90f76f091babe | [
"MIT"
] | null | null | null | learn.py | ishmandoo/FlapPyBird | 2cd733db090dd972e698a5d951b90f76f091babe | [
"MIT"
] | null | null | null | from flappy import main
main() | 10.333333 | 23 | 0.774194 | from flappy import main
main() | true | true |
1c2b2a1bbaf27d3f3d13070304854ad20431ba7a | 2,517 | py | Python | convert.py | vkhurana/calibre-convert | 7ae6cf4ca5c40386131a5a32a40e0bd68ecf77ce | [
"MIT"
] | null | null | null | convert.py | vkhurana/calibre-convert | 7ae6cf4ca5c40386131a5a32a40e0bd68ecf77ce | [
"MIT"
] | null | null | null | convert.py | vkhurana/calibre-convert | 7ae6cf4ca5c40386131a5a32a40e0bd68ecf77ce | [
"MIT"
] | null | null | null | import time
import subprocess
import os
import sys
import pyinotify
from os.path import exists
from pyinotify import WatchManager, Notifier, ProcessEvent, EventsCodes
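# Watch a directory with inotify and convert every .epub that finishes writing to .mobi
# using Calibre's `ebook-convert` command line tool; output is staged in a temp/ subfolder
# and moved next to the source file once the conversion succeeds.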
def Monitor(path):
class PClose(ProcessEvent):
temp_folder = "temp"
def process_IN_CLOSE(self, event):
src_folder = event.path
dest_folder = os.path.join(src_folder, self.temp_folder)
src_file = event.name and os.path.join(event.path, event.name) or event.path
dest_file_temp = os.path.join(dest_folder, event.name + ".mobi")
print ("IN_CLOSE_WRITE event: " + src_file)
print ("src_folder: " + src_folder)
print ("dest_folder: " + dest_folder)
print ("src_file: " + src_file)
print ("dest_file_temp: " + dest_file_temp)
if not exists(dest_folder):
print("creating temp folder: " + dest_folder)
os.mkdir(dest_folder)
            # we only really care about files that have just finished writing; 'modified' is another event type we could watch but don't
file_type = ".epub";
if src_file.endswith(file_type):
# pathinfo = os.path.split(src_file)
dest_file = src_file + ".mobi"
dest_exists = exists(dest_file)
if not dest_exists:
print("Converting %s to %s" % (src_file, dest_file_temp))
cmd = "ebook-convert" + " \"" + src_file + "\" \"" + dest_file_temp + "\""
print("cmd: %s" % cmd)
ret = subprocess.call(cmd, shell=True)
print("ret: %d" % ret)
if ret == 0:
print("success converting. moving.")
os.rename(dest_file_temp, dest_file)
else:
print("error converting " + src_file)
else:
print("Skipping. File %s exists" % dest_file)
wm = WatchManager()
notifier = Notifier(wm, PClose())
wm.add_watch(path, pyinotify.IN_CLOSE_WRITE)
try:
while 1:
notifier.process_events()
if notifier.check_events():
notifier.read_events()
except KeyboardInterrupt:
notifier.stop()
return
if __name__ == '__main__':
try:
path = "/target"
except IndexError:
print ("error")
else:
print("Watching: %s" % path)
Monitor(path) | 37.014706 | 95 | 0.528804 | import time
import subprocess
import os
import sys
import pyinotify
from os.path import exists
from pyinotify import WatchManager, Notifier, ProcessEvent, EventsCodes
def Monitor(path):
class PClose(ProcessEvent):
temp_folder = "temp"
def process_IN_CLOSE(self, event):
src_folder = event.path
dest_folder = os.path.join(src_folder, self.temp_folder)
src_file = event.name and os.path.join(event.path, event.name) or event.path
dest_file_temp = os.path.join(dest_folder, event.name + ".mobi")
print ("IN_CLOSE_WRITE event: " + src_file)
print ("src_folder: " + src_folder)
print ("dest_folder: " + dest_folder)
print ("src_file: " + src_file)
print ("dest_file_temp: " + dest_file_temp)
if not exists(dest_folder):
print("creating temp folder: " + dest_folder)
os.mkdir(dest_folder)
file_type = ".epub";
if src_file.endswith(file_type):
dest_file = src_file + ".mobi"
dest_exists = exists(dest_file)
if not dest_exists:
print("Converting %s to %s" % (src_file, dest_file_temp))
cmd = "ebook-convert" + " \"" + src_file + "\" \"" + dest_file_temp + "\""
print("cmd: %s" % cmd)
ret = subprocess.call(cmd, shell=True)
print("ret: %d" % ret)
if ret == 0:
print("success converting. moving.")
os.rename(dest_file_temp, dest_file)
else:
print("error converting " + src_file)
else:
print("Skipping. File %s exists" % dest_file)
wm = WatchManager()
notifier = Notifier(wm, PClose())
wm.add_watch(path, pyinotify.IN_CLOSE_WRITE)
try:
while 1:
notifier.process_events()
if notifier.check_events():
notifier.read_events()
except KeyboardInterrupt:
notifier.stop()
return
if __name__ == '__main__':
try:
path = "/target"
except IndexError:
print ("error")
else:
print("Watching: %s" % path)
Monitor(path) | true | true |
1c2b2a7cd5acfc7949b666466fb33ee224cd6528 | 2,889 | py | Python | genlog/helper.py | ilpan/GenerateLog | 1cc9779870473e21e6b42112b17489c59792d5c1 | [
"MIT"
] | null | null | null | genlog/helper.py | ilpan/GenerateLog | 1cc9779870473e21e6b42112b17489c59792d5c1 | [
"MIT"
] | null | null | null | genlog/helper.py | ilpan/GenerateLog | 1cc9779870473e21e6b42112b17489c59792d5c1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author : ilpan
@contact : pna.dev@outlook.com
@file : helper.py
@desc :
              1) specify the number of clients that generate data
              2) specify the interval (in seconds) at which a client generates data
              3) specify the maximum number of users to collect
              4) specify the flume host:port
@time : 18-3-18 下午12:10
"""
import argparse
import sys
from genlog import __description__, __version__
from genlog.exception import *
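# Helper parses the command-line options described in the module docstring and exposes
# them as read-only properties (client_num, interval, user_num, remote_host_list, show).
# Example (assumed usage): h = Helper(); h.help(); then read h.client_num, h.interval, ...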
class Helper:
def __init__(self):
self._client_num = 100
self._interval = 0
self._user_num = 10000
self._remote_host_list = []
self._show = False
def help(self):
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument('-v', '--version', action='store_true', help='output version and exit')
parser.add_argument('-n', '--client_num', type=int, default=888, help='the num of client that generate logs')
parser.add_argument('-i', '--interval', type=int, default=60000,
                            help='the time (ms) that a client should wait before generating the next logs')
parser.add_argument('-u', '--user_num', type=int, default=10000, help='the num of user that will be collected')
parser.add_argument('-l', '--remote_host_list', default="0.0.0.0:2018,0.0.0.0:2019,0.0.0.0:2020,0.0.0.0:2021",
help='remote host list that we send logs to (format: ip:port,ip:port...)')
parser.add_argument('-s', '--show', action='store_true', help='show send logs info')
# get arguments
args = parser.parse_args()
if args.version:
print('genlog: ', __version__)
sys.exit(0)
def get_ip_port(host):
host = host.strip()
try:
ip = host.split(':')[0]
port = int(host.split(':')[1])
return (ip, port)
except IndexError:
                raise WrongFormatError("host does not match the standard ip:port format")
if args.remote_host_list is not None:
host_list = args.remote_host_list.split(',')
try:
self._remote_host_list = [get_ip_port(host) for host in host_list]
except WrongFormatError as e:
print(e)
sys.exit(1)
if args.show:
self._show = True
if args.interval <= 1000:
            print('interval must be greater than 1000 (ms)')
sys.exit(2)
self._client_num, self._interval, self._user_num = args.client_num, args.interval, args.user_num
@property
def client_num(self):
return self._client_num
@property
def interval(self):
return self._interval
@property
def user_num(self):
return self._user_num
@property
def remote_host_list(self):
return self._remote_host_list
@property
def show(self):
return self._show
| 30.09375 | 119 | 0.581862 |
import argparse
import sys
from genlog import __description__, __version__
from genlog.exception import *
class Helper:
def __init__(self):
self._client_num = 100
self._interval = 0
self._user_num = 10000
self._remote_host_list = []
self._show = False
def help(self):
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument('-v', '--version', action='store_true', help='output version and exit')
parser.add_argument('-n', '--client_num', type=int, default=888, help='the num of client that generate logs')
parser.add_argument('-i', '--interval', type=int, default=60000,
help='the time(ms) that a client show wait before next generating logs')
parser.add_argument('-u', '--user_num', type=int, default=10000, help='the num of user that will be collected')
parser.add_argument('-l', '--remote_host_list', default="0.0.0.0:2018,0.0.0.0:2019,0.0.0.0:2020,0.0.0.0:2021",
help='remote host list that we send logs to (format: ip:port,ip:port...)')
parser.add_argument('-s', '--show', action='store_true', help='show send logs info')
args = parser.parse_args()
if args.version:
print('genlog: ', __version__)
sys.exit(0)
def get_ip_port(host):
host = host.strip()
try:
ip = host.split(':')[0]
port = int(host.split(':')[1])
return (ip, port)
except IndexError:
raise WrongFormatError("与标准格式ip:port不一致")
if args.remote_host_list is not None:
host_list = args.remote_host_list.split(',')
try:
self._remote_host_list = [get_ip_port(host) for host in host_list]
except WrongFormatError as e:
print(e)
sys.exit(1)
if args.show:
self._show = True
if args.interval <= 1000:
print('interval must greater than 1000(ms)')
sys.exit(2)
self._client_num, self._interval, self._user_num = args.client_num, args.interval, args.user_num
@property
def client_num(self):
return self._client_num
@property
def interval(self):
return self._interval
@property
def user_num(self):
return self._user_num
@property
def remote_host_list(self):
return self._remote_host_list
@property
def show(self):
return self._show
| true | true |
1c2b2ac70dc685491a57826d0eb2dc253498f493 | 3,852 | py | Python | GRNetDetector/utils/metrics.py | 565353780/gr-net | 7dedb326bd5f8e12e0f8aa39e1c728fe68f26f4f | [
"MIT"
] | null | null | null | GRNetDetector/utils/metrics.py | 565353780/gr-net | 7dedb326bd5f8e12e0f8aa39e1c728fe68f26f4f | [
"MIT"
] | null | null | null | GRNetDetector/utils/metrics.py | 565353780/gr-net | 7dedb326bd5f8e12e0f8aa39e1c728fe68f26f4f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Haozhe Xie
# @Date: 2019-08-08 14:31:30
# @Last Modified by: Haozhe Xie
# @Last Modified time: 2020-05-25 09:13:32
# @Email: cshzxie@gmail.com
import logging
import open3d
from GRNetDetector.extensions.chamfer_dist import ChamferDistance
class Metrics(object):
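    # ITEMS declares the supported metrics; 'enabled' toggles each one and
    # 'is_greater_better' tells better_than() which direction counts as an improvement.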
ITEMS = [{
'name': 'F-Score',
'enabled': True,
'eval_func': 'cls._get_f_score',
'is_greater_better': True,
'init_value': 0
}, {
'name': 'ChamferDistance',
'enabled': True,
'eval_func': 'cls._get_chamfer_distance',
'eval_object': ChamferDistance(ignore_zeros=True),
'is_greater_better': False,
'init_value': 32767
}]
@classmethod
def get(cls, pred, gt):
_items = cls.items()
_values = [0] * len(_items)
for i, item in enumerate(_items):
eval_func = eval(item['eval_func'])
_values[i] = eval_func(pred, gt)
return _values
@classmethod
def items(cls):
return [i for i in cls.ITEMS if i['enabled']]
@classmethod
def names(cls):
_items = cls.items()
return [i['name'] for i in _items]
@classmethod
def _get_f_score(cls, pred, gt, th=0.01):
"""References: https://github.com/lmb-freiburg/what3d/blob/master/util.py"""
pred = cls._get_open3d_ptcloud(pred)
gt = cls._get_open3d_ptcloud(gt)
dist1 = pred.compute_point_cloud_distance(gt)
dist2 = gt.compute_point_cloud_distance(pred)
recall = float(sum(d < th for d in dist2)) / float(len(dist2))
precision = float(sum(d < th for d in dist1)) / float(len(dist1))
return 2 * recall * precision / (recall + precision) if recall + precision else 0
@classmethod
def _get_open3d_ptcloud(cls, tensor):
tensor = tensor.squeeze().cpu().numpy()
ptcloud = open3d.geometry.PointCloud()
ptcloud.points = open3d.utility.Vector3dVector(tensor)
return ptcloud
@classmethod
def _get_chamfer_distance(cls, pred, gt):
chamfer_distance = cls.ITEMS[1]['eval_object']
return chamfer_distance(pred, gt).item() * 1000
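    # Instances hold one set of metric values; `metric_name` selects which metric
    # drives better_than() comparisons, e.g. Metrics('ChamferDistance', values).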
def __init__(self, metric_name, values):
self._items = Metrics.items()
self._values = [item['init_value'] for item in self._items]
self.metric_name = metric_name
if type(values).__name__ == 'list':
self._values = values
elif type(values).__name__ == 'dict':
metric_indexes = {}
for idx, item in enumerate(self._items):
item_name = item['name']
metric_indexes[item_name] = idx
for k, v in values.items():
if k not in metric_indexes:
                    logging.warning('Ignoring metric [Name=%s] because it is disabled.' % k)
continue
self._values[metric_indexes[k]] = v
else:
raise Exception('Unsupported value type: %s' % type(values))
def state_dict(self):
_dict = dict()
for i in range(len(self._items)):
item = self._items[i]['name']
value = self._values[i]
_dict[item] = value
return _dict
def __repr__(self):
return str(self.state_dict())
def better_than(self, other):
if other is None:
return True
_index = -1
for i, _item in enumerate(self._items):
if _item['name'] == self.metric_name:
_index = i
break
if _index == -1:
raise Exception('Invalid metric name to compare.')
        _metric = self._items[_index]
_value = self._values[_index]
other_value = other._values[_index]
return _value > other_value if _metric['is_greater_better'] else _value < other_value
| 31.317073 | 93 | 0.58541 |
import logging
import open3d
from GRNetDetector.extensions.chamfer_dist import ChamferDistance
class Metrics(object):
ITEMS = [{
'name': 'F-Score',
'enabled': True,
'eval_func': 'cls._get_f_score',
'is_greater_better': True,
'init_value': 0
}, {
'name': 'ChamferDistance',
'enabled': True,
'eval_func': 'cls._get_chamfer_distance',
'eval_object': ChamferDistance(ignore_zeros=True),
'is_greater_better': False,
'init_value': 32767
}]
@classmethod
def get(cls, pred, gt):
_items = cls.items()
_values = [0] * len(_items)
for i, item in enumerate(_items):
eval_func = eval(item['eval_func'])
_values[i] = eval_func(pred, gt)
return _values
@classmethod
def items(cls):
return [i for i in cls.ITEMS if i['enabled']]
@classmethod
def names(cls):
_items = cls.items()
return [i['name'] for i in _items]
@classmethod
def _get_f_score(cls, pred, gt, th=0.01):
pred = cls._get_open3d_ptcloud(pred)
gt = cls._get_open3d_ptcloud(gt)
dist1 = pred.compute_point_cloud_distance(gt)
dist2 = gt.compute_point_cloud_distance(pred)
recall = float(sum(d < th for d in dist2)) / float(len(dist2))
precision = float(sum(d < th for d in dist1)) / float(len(dist1))
return 2 * recall * precision / (recall + precision) if recall + precision else 0
@classmethod
def _get_open3d_ptcloud(cls, tensor):
tensor = tensor.squeeze().cpu().numpy()
ptcloud = open3d.geometry.PointCloud()
ptcloud.points = open3d.utility.Vector3dVector(tensor)
return ptcloud
@classmethod
def _get_chamfer_distance(cls, pred, gt):
chamfer_distance = cls.ITEMS[1]['eval_object']
return chamfer_distance(pred, gt).item() * 1000
def __init__(self, metric_name, values):
self._items = Metrics.items()
self._values = [item['init_value'] for item in self._items]
self.metric_name = metric_name
if type(values).__name__ == 'list':
self._values = values
elif type(values).__name__ == 'dict':
metric_indexes = {}
for idx, item in enumerate(self._items):
item_name = item['name']
metric_indexes[item_name] = idx
for k, v in values.items():
if k not in metric_indexes:
logging.warn('Ignore Metric[Name=%s] due to disability.' % k)
continue
self._values[metric_indexes[k]] = v
else:
raise Exception('Unsupported value type: %s' % type(values))
def state_dict(self):
_dict = dict()
for i in range(len(self._items)):
item = self._items[i]['name']
value = self._values[i]
_dict[item] = value
return _dict
def __repr__(self):
return str(self.state_dict())
def better_than(self, other):
if other is None:
return True
_index = -1
for i, _item in enumerate(self._items):
if _item['name'] == self.metric_name:
_index = i
break
if _index == -1:
raise Exception('Invalid metric name to compare.')
_metric = self._items[i]
_value = self._values[_index]
other_value = other._values[_index]
return _value > other_value if _metric['is_greater_better'] else _value < other_value
| true | true |
1c2b2b4f373264cdeee202bfa427475f8fd2cdf8 | 12,601 | py | Python | espresso/speech_train.py | beat-buesser/espresso | bd6ba1f7745c90a2c3c8ff0a0d7332efeebcc808 | [
"MIT"
] | 1 | 2021-01-08T02:51:16.000Z | 2021-01-08T02:51:16.000Z | espresso/speech_train.py | beat-buesser/espresso | bd6ba1f7745c90a2c3c8ff0a0d7332efeebcc808 | [
"MIT"
] | null | null | null | espresso/speech_train.py | beat-buesser/espresso | bd6ba1f7745c90a2c3c8ff0a0d7332efeebcc808 | [
"MIT"
] | 1 | 2021-09-10T15:35:58.000Z | 2021-09-10T15:35:58.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) Yiming Wang
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("espresso.speech_train")
def main(args):
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max input frames per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.batch_size
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
args,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
if hasattr(trainer.criterion, "set_epoch"):
trainer.criterion.set_epoch(epoch_itr.epoch)
valid_losses = [None]
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
# update the state prior stored in the model for cross-entropy training
if hasattr(task, "update_state_prior"):
task.update_state_prior(trainer.get_model())
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
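# Decide whether to validate and/or save a checkpoint at this point (mid-epoch or end of
# epoch) and whether training should stop (max updates, early stopping, or time limit).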
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch):
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
or num_updates >= max_update
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or num_updates >= max_update
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
should_stop = (
should_stop_early(args, valid_losses[0])
or num_updates >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
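# Minimal sketch of the "best metric" bookkeeping above: the comparison direction follows
# args.maximize_best_checkpoint_metric (a loss/WER-style metric is minimized, BLEU maximized).
def _example_best_metric(previous_best, current, maximize=False):
    best_function = max if maximize else min
    return best_function(previous_best, current)
# _example_best_metric(0.41, 0.38) -> 0.38 when minimizing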
def print_options_meaning_changes(args):
"""Options that have different meanings than those in the translation task
are explained here.
"""
logger.info("--max-tokens is the maximum number of input frames in a batch")
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
print_options_meaning_changes(args)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
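# Sketch of extending the CLI through the modify_parser hook that cli_main() already
# accepts; the flag added below is hypothetical.
def _example_modify_parser(parser):
    parser.add_argument("--my-extra-flag", action="store_true",
                        help="hypothetical flag registered before argument parsing")
# One would then launch training with: cli_main(modify_parser=_example_modify_parser)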
if __name__ == "__main__":
cli_main()
| 33.96496 | 93 | 0.660821 |
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("espresso.speech_train")
def main(args):
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
logger.info(args)
task = tasks.setup_task(args)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max input frames per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.batch_size
)
)
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
args,
trainer,
disable_iterator_cache=task.has_sharded_data("train"),
)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr):
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
if hasattr(trainer.criterion, "set_epoch"):
trainer.criterion.set_epoch(epoch_itr.epoch)
valid_losses = [None]
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
# update the state prior stored in the model for cross-entropy training
if hasattr(task, "update_state_prior"):
task.update_state_prior(trainer.get_model())
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch):
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
or num_updates >= max_update
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or num_updates >= max_update
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
should_stop = (
should_stop_early(args, valid_losses[0])
or num_updates >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def print_options_meaning_changes(args):
logger.info("--max-tokens is the maximum number of input frames in a batch")
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
print_options_meaning_changes(args)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
| true | true |
1c2b2b6e112085f3589e4822c5885ce013b1bc21 | 2,518 | py | Python | object_tracking.py | UAVs-at-Berkeley/flywave | 483012ab34af5b465ecb8750ade9b9e7a2ca5c4e | [
"MIT"
] | 6 | 2018-05-02T15:34:23.000Z | 2021-04-13T19:28:13.000Z | object_tracking.py | UAVs-at-Berkeley/flywave | 483012ab34af5b465ecb8750ade9b9e7a2ca5c4e | [
"MIT"
] | 1 | 2018-04-19T16:11:33.000Z | 2018-05-02T22:53:07.000Z | object_tracking.py | UAVs-at-Berkeley/flywave | 483012ab34af5b465ecb8750ade9b9e7a2ca5c4e | [
"MIT"
] | 7 | 2018-04-19T01:59:03.000Z | 2022-01-02T13:18:26.000Z | """
Demo of the Bebop vision code (basically flies around and saves out photos as it flies)
"""
from Bebop import Bebop
from DroneVision import DroneVision
import threading
import cv2
import time
isAlive = False
class UserVision:
def __init__(self, vision):
self.index = 0
self.vision = vision
def save_pictures(self, args):
#print("saving picture")
img = self.vision.get_latest_valid_picture()
# cv2.imshow("Video", img)
filename = "/rightout/test_image_%06d.png" % self.index
# cv2.imwrite(filename, img)
        self.index += 1
def detect(self, args):
img = self.vision.get_latest_valid_picture()
# make my bebop object
bebop = Bebop()
# connect to the bebop
success = bebop.connect(5)
if (success):
# start up the video
bebopVision = DroneVision(bebop, is_bebop=True)
userVision = UserVision(bebopVision)
bebopVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)
success = bebopVision.open_video()
if (success):
print("Vision successfully started!")
#removed the user call to this function (it now happens in open_video())
#bebopVision.start_video_buffering()
# skipping actually flying for safety purposes indoors - if you want
# different pictures, move the bebop around by hand
print("Fly me around by hand!")
bebop.smart_sleep(5)
print("Moving the camera using velocity")
# bebop.pan_tilt_camera_velocity(pan_velocity=0, tilt_velocity=-2, duration=4)
#
# bebop.safe_takeoff(10)
# bebop.fly_direct(roll=0, pitch=0, yaw=0, vertical_movement=20, duration=1)
count = 0
# while True and count < 30:
# bebop.smart_sleep(1)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# count += 1
# bebop.fly_direct(roll=15, pitch=0, yaw=0, vertical_movement=0, duration=3)
# bebop.fly_direct(roll=0, pitch=15, yaw=0, vertical_movement=0, duration=3)
# bebop.fly_direct(roll=0, pitch=0, yaw=20, vertical_movement=0, duration=4)
#
# bebop.fly_direct(roll=0, pitch=0, yaw=0, vertical_movement=-10, duration=1)
# bebop.smart_sleep(50)
bebop.safe_land(10)
print("Finishing demo and stopping vision")
bebopVision.close_video()
# disconnect nicely so we don't need a reboot
bebop.disconnect()
else:
print("Error connecting to bebop. Retry")
| 31.873418 | 93 | 0.651708 | from Bebop import Bebop
from DroneVision import DroneVision
import threading
import cv2
import time
isAlive = False
class UserVision:
def __init__(self, vision):
self.index = 0
self.vision = vision
def save_pictures(self, args):
img = self.vision.get_latest_valid_picture()
filename = "/rightout/test_image_%06d.png" % self.index
self.index +=1
def detect(self, args):
img = self.vision.get_latest_valid_picture()
bebop = Bebop()
success = bebop.connect(5)
if (success):
bebopVision = DroneVision(bebop, is_bebop=True)
userVision = UserVision(bebopVision)
bebopVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)
success = bebopVision.open_video()
if (success):
print("Vision successfully started!")
print("Fly me around by hand!")
bebop.smart_sleep(5)
print("Moving the camera using velocity")
count = 0
bebop.safe_land(10)
print("Finishing demo and stopping vision")
bebopVision.close_video()
bebop.disconnect()
else:
print("Error connecting to bebop. Retry")
| true | true |
1c2b2c18b70a298c667528f485ac2535fef0d885 | 853 | py | Python | src/config/configs_parser.py | changleibox/flutter_build_script | a93a7d9ce276b68c3a2d34b5830a4fc9683e574b | [
"Apache-2.0"
] | null | null | null | src/config/configs_parser.py | changleibox/flutter_build_script | a93a7d9ce276b68c3a2d34b5830a4fc9683e574b | [
"Apache-2.0"
] | null | null | null | src/config/configs_parser.py | changleibox/flutter_build_script | a93a7d9ce276b68c3a2d34b5830a4fc9683e574b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 CHANGLEI. All rights reserved.
# Created by changlei on 2020/6/30.
import json
import os
import yaml
from src.system import Paths
def __resolve_configs_file():
with open(Paths.config_path, 'r', encoding='utf-8') as f:
return yaml.safe_load(f)
def __create_config_file():
with open(Paths.config_path, 'w', encoding='utf-8') as f, \
open(Paths.config_template_path, 'r', encoding='utf-8') as template:
yaml.safe_dump(
data=json.loads(template.read()),
stream=f,
default_style=None,
sort_keys=False,
allow_unicode=True,
indent=2,
)
def get_config():
if not os.path.exists(Paths.config_path):
__create_config_file()
return __resolve_configs_file()
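# Minimal usage sketch: get_config() creates the YAML file from the JSON template on the
# first call, then parses it; what is printed below is only illustrative.
if __name__ == '__main__':
    config = get_config()
    print(type(config))  # typically a dict produced by yaml.safe_load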
| 23.694444 | 80 | 0.62837 |
import json
import os
import yaml
from src.system import Paths
def __resolve_configs_file():
with open(Paths.config_path, 'r', encoding='utf-8') as f:
return yaml.safe_load(f)
def __create_config_file():
with open(Paths.config_path, 'w', encoding='utf-8') as f, \
open(Paths.config_template_path, 'r', encoding='utf-8') as template:
yaml.safe_dump(
data=json.loads(template.read()),
stream=f,
default_style=None,
sort_keys=False,
allow_unicode=True,
indent=2,
)
def get_config():
if not os.path.exists(Paths.config_path):
__create_config_file()
return __resolve_configs_file()
| true | true |
1c2b2cac081ab4554009e172551ff7f519612cde | 1,119 | py | Python | Code/skipthoughts/skipthoughts_dir/training/vocab.py | mattaq31/recognition-forge | c5a6e36d2e81a66ad8c7eb2f108b6821610a7ba9 | [
"BSD-2-Clause"
] | null | null | null | Code/skipthoughts/skipthoughts_dir/training/vocab.py | mattaq31/recognition-forge | c5a6e36d2e81a66ad8c7eb2f108b6821610a7ba9 | [
"BSD-2-Clause"
] | null | null | null | Code/skipthoughts/skipthoughts_dir/training/vocab.py | mattaq31/recognition-forge | c5a6e36d2e81a66ad8c7eb2f108b6821610a7ba9 | [
"BSD-2-Clause"
] | 1 | 2019-06-25T11:40:57.000Z | 2019-06-25T11:40:57.000Z | """
Constructing and loading dictionaries
"""
import _pickle as pkl
import numpy
from collections import OrderedDict
def build_dictionary(text):
"""
Build a dictionary
text: list of sentences (pre-tokenized)
"""
wordcount = OrderedDict()
for cc in text:
words = cc.split()
for w in words:
if w not in wordcount:
wordcount[w] = 0
wordcount[w] += 1
words = list(wordcount.keys())
freqs = list(wordcount.values())
sorted_idx = numpy.argsort(freqs)[::-1]
worddict = OrderedDict()
for idx, sidx in enumerate(sorted_idx):
worddict[words[sidx]] = idx+2 # 0: <eos>, 1: <unk>
return worddict, wordcount
def load_dictionary(loc='/ais/gobi3/u/rkiros/bookgen/book_dictionary_large.pkl'):
"""
Load a dictionary
"""
with open(loc, 'rb') as f:
worddict = pkl.load(f)
return worddict
def save_dictionary(worddict, wordcount, loc):
"""
Save a dictionary to the specified location
"""
with open(loc, 'wb') as f:
pkl.dump(worddict, f)
pkl.dump(wordcount, f)
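# Minimal usage sketch: build and save a vocabulary from a tiny in-memory corpus.
# The corpus and the output file name are made up for illustration.
if __name__ == '__main__':
    corpus = ["the cat sat on the mat", "the dog barked"]
    worddict, wordcount = build_dictionary(corpus)
    assert worddict["the"] == 2  # most frequent token; 0/1 are reserved for <eos>/<unk>
    save_dictionary(worddict, wordcount, "toy_dictionary.pkl")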
| 23.808511 | 81 | 0.613047 | import _pickle as pkl
import numpy
from collections import OrderedDict
def build_dictionary(text):
wordcount = OrderedDict()
for cc in text:
words = cc.split()
for w in words:
if w not in wordcount:
wordcount[w] = 0
wordcount[w] += 1
words = list(wordcount.keys())
freqs = list(wordcount.values())
sorted_idx = numpy.argsort(freqs)[::-1]
worddict = OrderedDict()
for idx, sidx in enumerate(sorted_idx):
worddict[words[sidx]] = idx+2
return worddict, wordcount
def load_dictionary(loc='/ais/gobi3/u/rkiros/bookgen/book_dictionary_large.pkl'):
with open(loc, 'rb') as f:
worddict = pkl.load(f)
return worddict
def save_dictionary(worddict, wordcount, loc):
with open(loc, 'wb') as f:
pkl.dump(worddict, f)
pkl.dump(wordcount, f)
| true | true |
1c2b2cc3484323921bb899b5611005bf15da3919 | 729 | py | Python | tree/bottom_view_of_a_binary_tree/bottom_view.py | chrisjdavie/compsci_basics | dab0d377c6cf040ddda2c9c9f8373e996b1a594c | [
"MIT"
] | null | null | null | tree/bottom_view_of_a_binary_tree/bottom_view.py | chrisjdavie/compsci_basics | dab0d377c6cf040ddda2c9c9f8373e996b1a594c | [
"MIT"
] | null | null | null | tree/bottom_view_of_a_binary_tree/bottom_view.py | chrisjdavie/compsci_basics | dab0d377c6cf040ddda2c9c9f8373e996b1a594c | [
"MIT"
] | null | null | null | class BottomViewData:
"""List functionality using a negative indexed list.
An over-complicated way of avoiding sorting, to keep the algo to O(N)"""
def __init__(self, len_data):
self._len_data = len_data
self._data = [None]*(2*len_data-1)
def _key_zeroed(self, key):
return self._len_data-1+key
def __setitem__(self, key, depth_val):
key_z = self._key_zeroed(key)
if self._data[key_z] is None or self._data[key_z][0] <= depth_val[0]:
self._data[self._len_data-1+key] = depth_val
def __getitem__(self, key):
return self._data[self._key_zeroed(key)]
def view(self):
return [ item[1] for item in self._data if item is not None ]
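# Minimal usage sketch: keys are horizontal distances from the root and values are
# (depth, node_value) pairs, so deeper nodes win at the same distance. The tree is made up.
if __name__ == '__main__':
    #      20          distance 0, depth 0
    #     /  \
    #    8    22       distances -1 and +1, depth 1
    #     \
    #      3           distance 0 again, depth 2 -> hides 20 in the bottom view
    bottom = BottomViewData(4)  # 4 nodes bound the horizontal span to -3 .. +3
    bottom[0] = (0, 20)
    bottom[-1] = (1, 8)
    bottom[1] = (1, 22)
    bottom[0] = (2, 3)
    assert bottom.view() == [8, 3, 22]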
| 31.695652 | 77 | 0.647462 | class BottomViewData:
def __init__(self, len_data):
self._len_data = len_data
self._data = [None]*(2*len_data-1)
def _key_zeroed(self, key):
return self._len_data-1+key
def __setitem__(self, key, depth_val):
key_z = self._key_zeroed(key)
if self._data[key_z] is None or self._data[key_z][0] <= depth_val[0]:
self._data[self._len_data-1+key] = depth_val
def __getitem__(self, key):
return self._data[self._key_zeroed(key)]
def view(self):
return [ item[1] for item in self._data if item is not None ]
| true | true |
1c2b2d87b02fa9fbc37371aba75d8666759ee1b5 | 786 | py | Python | discussion/urls.py | Bruskoo/PublicDiscussion | f3e9cba88fc48078ac3570a6f562dcad7612ef0a | [
"MIT"
] | null | null | null | discussion/urls.py | Bruskoo/PublicDiscussion | f3e9cba88fc48078ac3570a6f562dcad7612ef0a | [
"MIT"
] | null | null | null | discussion/urls.py | Bruskoo/PublicDiscussion | f3e9cba88fc48078ac3570a6f562dcad7612ef0a | [
"MIT"
] | null | null | null | from django.urls import path
from .views import (
ArticleCreateView,
ArticleDeleteView,
ArticleListView,
# CommentCreateView,
ArticleUpdateView,
SearchView,
ArticleDetailView
)
urlpatterns = [
path("article/add/", ArticleCreateView.as_view(), name="article-create"),
path("article/<int:pk>/", ArticleDetailView.as_view(), name="article-detail"),
path("article/<int:pk>/delete/", ArticleDeleteView.as_view(), name="article-delete"),
path("article/<int:pk>/update/", ArticleUpdateView.as_view(), name="article-update"),
# path("article/<int:pk>/comment/", CommentCreateView.as_view(), name="article-comment"),
path('search/', SearchView.as_view(), name='search-results'),
path("", ArticleListView.as_view(), name="article-list"),
] | 39.3 | 93 | 0.697201 | from django.urls import path
from .views import (
ArticleCreateView,
ArticleDeleteView,
ArticleListView,
ArticleUpdateView,
SearchView,
ArticleDetailView
)
urlpatterns = [
path("article/add/", ArticleCreateView.as_view(), name="article-create"),
path("article/<int:pk>/", ArticleDetailView.as_view(), name="article-detail"),
path("article/<int:pk>/delete/", ArticleDeleteView.as_view(), name="article-delete"),
path("article/<int:pk>/update/", ArticleUpdateView.as_view(), name="article-update"),
path('search/', SearchView.as_view(), name='search-results'),
path("", ArticleListView.as_view(), name="article-list"),
] | true | true |
1c2b2da8382be12f3d6521d7c9eb17eee7eb103f | 27,208 | py | Python | pydantic/types.py | bluetech/pydantic | b7a8ef25c667b5dd4c4cd0b109c6625d1a57139a | [
"MIT"
] | null | null | null | pydantic/types.py | bluetech/pydantic | b7a8ef25c667b5dd4c4cd0b109c6625d1a57139a | [
"MIT"
] | null | null | null | pydantic/types.py | bluetech/pydantic | b7a8ef25c667b5dd4c4cd0b109c6625d1a57139a | [
"MIT"
] | null | null | null | import math
import re
import warnings
from decimal import Decimal
from enum import Enum
from pathlib import Path
from types import new_class
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
List,
Optional,
Pattern,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from uuid import UUID
from weakref import WeakSet
from . import errors
from .utils import import_string, update_not_none
from .validators import (
bytes_validator,
constr_length_validator,
constr_lower,
constr_strip_whitespace,
decimal_validator,
float_validator,
int_validator,
list_validator,
number_multiple_validator,
number_size_validator,
path_exists_validator,
path_validator,
set_validator,
str_validator,
strict_bytes_validator,
strict_float_validator,
strict_int_validator,
strict_str_validator,
)
__all__ = [
'NoneStr',
'NoneBytes',
'StrBytes',
'NoneStrBytes',
'StrictStr',
'ConstrainedBytes',
'conbytes',
'ConstrainedList',
'conlist',
'ConstrainedSet',
'conset',
'ConstrainedStr',
'constr',
'PyObject',
'ConstrainedInt',
'conint',
'PositiveInt',
'NegativeInt',
'NonNegativeInt',
'NonPositiveInt',
'ConstrainedFloat',
'confloat',
'PositiveFloat',
'NegativeFloat',
'NonNegativeFloat',
'NonPositiveFloat',
'ConstrainedDecimal',
'condecimal',
'UUID1',
'UUID3',
'UUID4',
'UUID5',
'FilePath',
'DirectoryPath',
'Json',
'JsonWrapper',
'SecretStr',
'SecretBytes',
'StrictBool',
'StrictBytes',
'StrictInt',
'StrictFloat',
'PaymentCardNumber',
'ByteSize',
]
NoneStr = Optional[str]
NoneBytes = Optional[bytes]
StrBytes = Union[str, bytes]
NoneStrBytes = Optional[StrBytes]
OptionalInt = Optional[int]
OptionalIntFloat = Union[OptionalInt, float]
OptionalIntFloatDecimal = Union[OptionalIntFloat, Decimal]
StrIntFloat = Union[str, int, float]
if TYPE_CHECKING:
from .dataclasses import Dataclass # noqa: F401
from .main import BaseConfig, BaseModel # noqa: F401
from .typing import CallableGenerator
ModelOrDc = Type[Union['BaseModel', 'Dataclass']]
T = TypeVar('T')
_DEFINED_TYPES: 'WeakSet[type]' = WeakSet()
@overload
def _registered(typ: Type[T]) -> Type[T]:
pass
@overload
def _registered(typ: 'ConstrainedNumberMeta') -> 'ConstrainedNumberMeta':
pass
def _registered(typ: Union[Type[T], 'ConstrainedNumberMeta']) -> Union[Type[T], 'ConstrainedNumberMeta']:
# In order to generate valid examples of constrained types, Hypothesis needs
# to inspect the type object - so we keep a weakref to each contype object
# until it can be registered. When (or if) our Hypothesis plugin is loaded,
# it monkeypatches this function.
# If Hypothesis is never used, the total effect is to keep a weak reference
# which has minimal memory usage and doesn't even affect garbage collection.
_DEFINED_TYPES.add(typ)
return typ
class ConstrainedBytes(bytes):
strip_whitespace = False
to_lower = False
min_length: OptionalInt = None
max_length: OptionalInt = None
strict: bool = False
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_bytes_validator if cls.strict else bytes_validator
yield constr_strip_whitespace
yield constr_lower
yield constr_length_validator
class StrictBytes(ConstrainedBytes):
strict = True
def conbytes(
*, strip_whitespace: bool = False, to_lower: bool = False, min_length: int = None, max_length: int = None
) -> Type[bytes]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(strip_whitespace=strip_whitespace, to_lower=to_lower, min_length=min_length, max_length=max_length)
return _registered(type('ConstrainedBytesValue', (ConstrainedBytes,), namespace))
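# Minimal usage sketch: conbytes() only builds a ConstrainedBytes subclass carrying the
# limits; the validators above enforce them when the type is used as a model field.
def _example_conbytes_usage():
    Token = conbytes(min_length=4, max_length=8)
    assert issubclass(Token, ConstrainedBytes) and Token.min_length == 4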
# This types superclass should be List[T], but cython chokes on that...
class ConstrainedList(list): # type: ignore
# Needed for pydantic to detect that this is a list
__origin__ = list
__args__: Tuple[Type[T], ...] # type: ignore
min_items: Optional[int] = None
max_items: Optional[int] = None
item_type: Type[T] # type: ignore
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.list_length_validator
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
@classmethod
def list_length_validator(cls, v: 'Optional[List[T]]') -> 'Optional[List[T]]':
if v is None:
return None
v = list_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.ListMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.ListMaxLengthError(limit_value=cls.max_items)
return v
def conlist(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[List[T]]:
# __args__ is needed to conform to typing generics api
namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': (item_type,)}
# We use new_class to be able to deal with Generic types
return new_class('ConstrainedListValue', (ConstrainedList,), {}, lambda ns: ns.update(namespace))
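# Minimal usage sketch: the generated list type checks length directly through its
# classmethod, independent of any model.
def _example_conlist_usage():
    ShortList = conlist(int, min_items=1, max_items=3)
    assert ShortList.list_length_validator([1, 2]) == [1, 2]
    try:
        ShortList.list_length_validator([])
    except errors.ListMinLengthError:
        pass  # too few items is rejected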
# This types superclass should be Set[T], but cython chokes on that...
class ConstrainedSet(set): # type: ignore
# Needed for pydantic to detect that this is a set
__origin__ = set
__args__: Set[Type[T]] # type: ignore
min_items: Optional[int] = None
max_items: Optional[int] = None
item_type: Type[T] # type: ignore
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.set_length_validator
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
@classmethod
def set_length_validator(cls, v: 'Optional[Set[T]]') -> 'Optional[Set[T]]':
if v is None:
return None
v = set_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.SetMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.SetMaxLengthError(limit_value=cls.max_items)
return v
def conset(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[Set[T]]:
# __args__ is needed to conform to typing generics api
namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]}
# We use new_class to be able to deal with Generic types
return new_class('ConstrainedSetValue', (ConstrainedSet,), {}, lambda ns: ns.update(namespace))
class ConstrainedStr(str):
strip_whitespace = False
to_lower = False
min_length: OptionalInt = None
max_length: OptionalInt = None
curtail_length: OptionalInt = None
regex: Optional[Pattern[str]] = None
strict = False
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema, minLength=cls.min_length, maxLength=cls.max_length, pattern=cls.regex and cls.regex.pattern
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_str_validator if cls.strict else str_validator
yield constr_strip_whitespace
yield constr_lower
yield constr_length_validator
yield cls.validate
@classmethod
def validate(cls, value: Union[str]) -> Union[str]:
if cls.curtail_length and len(value) > cls.curtail_length:
value = value[: cls.curtail_length]
if cls.regex:
if not cls.regex.match(value):
raise errors.StrRegexError(pattern=cls.regex.pattern)
return value
def constr(
*,
strip_whitespace: bool = False,
to_lower: bool = False,
strict: bool = False,
min_length: int = None,
max_length: int = None,
curtail_length: int = None,
regex: str = None,
) -> Type[str]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
strip_whitespace=strip_whitespace,
to_lower=to_lower,
strict=strict,
min_length=min_length,
max_length=max_length,
curtail_length=curtail_length,
regex=regex and re.compile(regex),
)
return _registered(type('ConstrainedStrValue', (ConstrainedStr,), namespace))
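# Minimal usage sketch: constr() compiles the regex once and stores it on the new type;
# the pattern below is hypothetical. validate() itself applies only curtailment and regex.
def _example_constr_usage():
    Username = constr(strip_whitespace=True, min_length=3, regex=r'^[a-z0-9_]+$')
    assert Username.validate('alice_01') == 'alice_01'
    assert Username.regex.pattern == r'^[a-z0-9_]+$'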
class StrictStr(ConstrainedStr):
strict = True
if TYPE_CHECKING:
StrictBool = bool
else:
class StrictBool(int):
"""
StrictBool to allow for bools which are not type-coerced.
"""
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='boolean')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any) -> bool:
"""
Ensure that we only allow bools.
"""
if isinstance(value, bool):
return value
raise errors.StrictBoolError()
class PyObject:
validate_always = True
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any) -> Any:
if isinstance(value, Callable): # type: ignore
return value
try:
value = str_validator(value)
except errors.StrError:
            raise errors.PyObjectError(error_message='value is neither a valid import path nor a valid callable')
try:
return import_string(value)
except ImportError as e:
raise errors.PyObjectError(error_message=str(e))
if TYPE_CHECKING:
def __call__(self, *args: Any, **kwargs: Any) -> Any:
...
class ConstrainedNumberMeta(type):
def __new__(cls, name: str, bases: Any, dct: Dict[str, Any]) -> 'ConstrainedInt': # type: ignore
new_cls = cast('ConstrainedInt', type.__new__(cls, name, bases, dct))
if new_cls.gt is not None and new_cls.ge is not None:
raise errors.ConfigError('bounds gt and ge cannot be specified at the same time')
if new_cls.lt is not None and new_cls.le is not None:
raise errors.ConfigError('bounds lt and le cannot be specified at the same time')
return _registered(new_cls) # type: ignore
class ConstrainedInt(int, metaclass=ConstrainedNumberMeta):
strict: bool = False
gt: OptionalInt = None
ge: OptionalInt = None
lt: OptionalInt = None
le: OptionalInt = None
multiple_of: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_int_validator if cls.strict else int_validator
yield number_size_validator
yield number_multiple_validator
def conint(
*, strict: bool = False, gt: int = None, ge: int = None, lt: int = None, le: int = None, multiple_of: int = None
) -> Type[int]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
return type('ConstrainedIntValue', (ConstrainedInt,), namespace)
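# Minimal usage sketch: conint() attaches the bounds as class attributes; the size and
# multiple-of validators yielded above enforce them inside a model.
def _example_conint_usage():
    Port = conint(gt=0, le=65535)
    assert issubclass(Port, ConstrainedInt)
    assert Port.gt == 0 and Port.le == 65535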
class PositiveInt(ConstrainedInt):
gt = 0
class NegativeInt(ConstrainedInt):
lt = 0
class NonPositiveInt(ConstrainedInt):
le = 0
class NonNegativeInt(ConstrainedInt):
ge = 0
class StrictInt(ConstrainedInt):
strict = True
class ConstrainedFloat(float, metaclass=ConstrainedNumberMeta):
strict: bool = False
gt: OptionalIntFloat = None
ge: OptionalIntFloat = None
lt: OptionalIntFloat = None
le: OptionalIntFloat = None
multiple_of: OptionalIntFloat = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
# Modify constraints to account for differences between IEEE floats and JSON
if field_schema.get('exclusiveMinimum') == -math.inf:
del field_schema['exclusiveMinimum']
if field_schema.get('minimum') == -math.inf:
del field_schema['minimum']
if field_schema.get('exclusiveMaximum') == math.inf:
del field_schema['exclusiveMaximum']
if field_schema.get('maximum') == math.inf:
del field_schema['maximum']
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_float_validator if cls.strict else float_validator
yield number_size_validator
yield number_multiple_validator
def confloat(
*,
strict: bool = False,
gt: float = None,
ge: float = None,
lt: float = None,
le: float = None,
multiple_of: float = None,
) -> Type[float]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
return type('ConstrainedFloatValue', (ConstrainedFloat,), namespace)
class PositiveFloat(ConstrainedFloat):
gt = 0
class NegativeFloat(ConstrainedFloat):
lt = 0
class NonPositiveFloat(ConstrainedFloat):
le = 0
class NonNegativeFloat(ConstrainedFloat):
ge = 0
class StrictFloat(ConstrainedFloat):
strict = True
class ConstrainedDecimal(Decimal, metaclass=ConstrainedNumberMeta):
gt: OptionalIntFloatDecimal = None
ge: OptionalIntFloatDecimal = None
lt: OptionalIntFloatDecimal = None
le: OptionalIntFloatDecimal = None
max_digits: OptionalInt = None
decimal_places: OptionalInt = None
multiple_of: OptionalIntFloatDecimal = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield decimal_validator
yield number_size_validator
yield number_multiple_validator
yield cls.validate
@classmethod
def validate(cls, value: Decimal) -> Decimal:
digit_tuple, exponent = value.as_tuple()[1:]
if exponent in {'F', 'n', 'N'}:
raise errors.DecimalIsNotFiniteError()
if exponent >= 0:
# A positive exponent adds that many trailing zeros.
digits = len(digit_tuple) + exponent
decimals = 0
else:
# If the absolute value of the negative exponent is larger than the
# number of digits, then it's the same as the number of digits,
# because it'll consume all of the digits in digit_tuple and then
# add abs(exponent) - len(digit_tuple) leading zeros after the
# decimal point.
if abs(exponent) > len(digit_tuple):
digits = decimals = abs(exponent)
else:
digits = len(digit_tuple)
decimals = abs(exponent)
whole_digits = digits - decimals
if cls.max_digits is not None and digits > cls.max_digits:
raise errors.DecimalMaxDigitsError(max_digits=cls.max_digits)
if cls.decimal_places is not None and decimals > cls.decimal_places:
raise errors.DecimalMaxPlacesError(decimal_places=cls.decimal_places)
if cls.max_digits is not None and cls.decimal_places is not None:
expected = cls.max_digits - cls.decimal_places
if whole_digits > expected:
raise errors.DecimalWholeDigitsError(whole_digits=expected)
return value
def condecimal(
*,
gt: Decimal = None,
ge: Decimal = None,
lt: Decimal = None,
le: Decimal = None,
max_digits: int = None,
decimal_places: int = None,
multiple_of: Decimal = None,
) -> Type[Decimal]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
gt=gt, ge=ge, lt=lt, le=le, max_digits=max_digits, decimal_places=decimal_places, multiple_of=multiple_of
)
return type('ConstrainedDecimalValue', (ConstrainedDecimal,), namespace)
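# Minimal usage sketch: the digit/decimal-place accounting in ConstrainedDecimal.validate
# can be exercised directly with Decimal literals.
def _example_condecimal_usage():
    Money = condecimal(max_digits=6, decimal_places=2)
    assert Money.validate(Decimal('1234.56')) == Decimal('1234.56')
    try:
        Money.validate(Decimal('12345.678'))  # too many digits / decimal places
    except (errors.DecimalMaxDigitsError, errors.DecimalMaxPlacesError):
        pass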
class UUID1(UUID):
_required_version = 1
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format=f'uuid{cls._required_version}')
class UUID3(UUID1):
_required_version = 3
class UUID4(UUID1):
_required_version = 4
class UUID5(UUID1):
_required_version = 5
class FilePath(Path):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(format='file-path')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield path_validator
yield path_exists_validator
yield cls.validate
@classmethod
def validate(cls, value: Path) -> Path:
if not value.is_file():
raise errors.PathNotAFileError(path=value)
return value
class DirectoryPath(Path):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(format='directory-path')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield path_validator
yield path_exists_validator
yield cls.validate
@classmethod
def validate(cls, value: Path) -> Path:
if not value.is_dir():
raise errors.PathNotADirectoryError(path=value)
return value
class JsonWrapper:
pass
class JsonMeta(type):
def __getitem__(self, t: Type[Any]) -> Type[JsonWrapper]:
return _registered(type('JsonWrapperValue', (JsonWrapper,), {'inner_type': t}))
class Json(metaclass=JsonMeta):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='json-string')
class SecretStr:
min_length: OptionalInt = None
max_length: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
type='string',
writeOnly=True,
format='password',
minLength=cls.min_length,
maxLength=cls.max_length,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
yield constr_length_validator
@classmethod
def validate(cls, value: Any) -> 'SecretStr':
if isinstance(value, cls):
return value
value = str_validator(value)
return cls(value)
def __init__(self, value: str):
self._secret_value = value
def __repr__(self) -> str:
return f"SecretStr('{self}')"
def __str__(self) -> str:
return '**********' if self._secret_value else ''
def __eq__(self, other: Any) -> bool:
return isinstance(other, SecretStr) and self.get_secret_value() == other.get_secret_value()
def __len__(self) -> int:
return len(self._secret_value)
def display(self) -> str:
warnings.warn('`secret_str.display()` is deprecated, use `str(secret_str)` instead', DeprecationWarning)
return str(self)
def get_secret_value(self) -> str:
return self._secret_value
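# Minimal usage sketch: the secret is hidden from str()/repr() but recoverable on demand.
def _example_secret_str_usage():
    token = SecretStr.validate('hunter2')
    assert str(token) == '**********'
    assert token.get_secret_value() == 'hunter2'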
class SecretBytes:
min_length: OptionalInt = None
max_length: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
type='string',
writeOnly=True,
format='password',
minLength=cls.min_length,
maxLength=cls.max_length,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
yield constr_length_validator
@classmethod
def validate(cls, value: Any) -> 'SecretBytes':
if isinstance(value, cls):
return value
value = bytes_validator(value)
return cls(value)
def __init__(self, value: bytes):
self._secret_value = value
def __repr__(self) -> str:
return f"SecretBytes(b'{self}')"
def __str__(self) -> str:
return '**********' if self._secret_value else ''
def __eq__(self, other: Any) -> bool:
return isinstance(other, SecretBytes) and self.get_secret_value() == other.get_secret_value()
def __len__(self) -> int:
return len(self._secret_value)
def display(self) -> str:
warnings.warn('`secret_bytes.display()` is deprecated, use `str(secret_bytes)` instead', DeprecationWarning)
return str(self)
def get_secret_value(self) -> bytes:
return self._secret_value
class PaymentCardBrand(str, Enum):
# If you add another card type, please also add it to the
# Hypothesis strategy in `pydantic._hypothesis_plugin`.
amex = 'American Express'
mastercard = 'Mastercard'
visa = 'Visa'
other = 'other'
def __str__(self) -> str:
return self.value
class PaymentCardNumber(str):
"""
Based on: https://en.wikipedia.org/wiki/Payment_card_number
"""
strip_whitespace: ClassVar[bool] = True
min_length: ClassVar[int] = 12
max_length: ClassVar[int] = 19
bin: str
last4: str
brand: PaymentCardBrand
def __init__(self, card_number: str):
self.bin = card_number[:6]
self.last4 = card_number[-4:]
self.brand = self._get_brand(card_number)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield str_validator
yield constr_strip_whitespace
yield constr_length_validator
yield cls.validate_digits
yield cls.validate_luhn_check_digit
yield cls
yield cls.validate_length_for_brand
@property
def masked(self) -> str:
num_masked = len(self) - 10 # len(bin) + len(last4) == 10
return f'{self.bin}{"*" * num_masked}{self.last4}'
@classmethod
def validate_digits(cls, card_number: str) -> str:
if not card_number.isdigit():
raise errors.NotDigitError
return card_number
@classmethod
def validate_luhn_check_digit(cls, card_number: str) -> str:
"""
Based on: https://en.wikipedia.org/wiki/Luhn_algorithm
"""
sum_ = int(card_number[-1])
length = len(card_number)
parity = length % 2
for i in range(length - 1):
digit = int(card_number[i])
if i % 2 == parity:
digit *= 2
if digit > 9:
digit -= 9
sum_ += digit
valid = sum_ % 10 == 0
if not valid:
raise errors.LuhnValidationError
return card_number
@classmethod
def validate_length_for_brand(cls, card_number: 'PaymentCardNumber') -> 'PaymentCardNumber':
"""
Validate length based on BIN for major brands:
https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN)
"""
required_length: Optional[int] = None
if card_number.brand in {PaymentCardBrand.visa, PaymentCardBrand.mastercard}:
required_length = 16
valid = len(card_number) == required_length
elif card_number.brand == PaymentCardBrand.amex:
required_length = 15
valid = len(card_number) == required_length
else:
valid = True
if not valid:
raise errors.InvalidLengthForBrand(brand=card_number.brand, required_length=required_length)
return card_number
@staticmethod
def _get_brand(card_number: str) -> PaymentCardBrand:
if card_number[0] == '4':
brand = PaymentCardBrand.visa
elif 51 <= int(card_number[:2]) <= 55:
brand = PaymentCardBrand.mastercard
elif card_number[:2] in {'34', '37'}:
brand = PaymentCardBrand.amex
else:
brand = PaymentCardBrand.other
return brand
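# Minimal usage sketch with a well-known Visa test number: the digit, Luhn and
# brand/length checks above all pass for it.
def _example_card_number_usage():
    number = '4111111111111111'
    PaymentCardNumber.validate_digits(number)
    PaymentCardNumber.validate_luhn_check_digit(number)
    card = PaymentCardNumber.validate_length_for_brand(PaymentCardNumber(number))
    assert card.brand is PaymentCardBrand.visa
    assert card.masked == '411111******1111'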
BYTE_SIZES = {
'b': 1,
'kb': 10 ** 3,
'mb': 10 ** 6,
'gb': 10 ** 9,
'tb': 10 ** 12,
'pb': 10 ** 15,
'eb': 10 ** 18,
'kib': 2 ** 10,
'mib': 2 ** 20,
'gib': 2 ** 30,
'tib': 2 ** 40,
'pib': 2 ** 50,
'eib': 2 ** 60,
}
BYTE_SIZES.update({k.lower()[0]: v for k, v in BYTE_SIZES.items() if 'i' not in k})
byte_string_re = re.compile(r'^\s*(\d*\.?\d+)\s*(\w+)?', re.IGNORECASE)
class ByteSize(int):
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, v: StrIntFloat) -> 'ByteSize':
try:
return cls(int(v))
except ValueError:
pass
str_match = byte_string_re.match(str(v))
if str_match is None:
raise errors.InvalidByteSize()
scalar, unit = str_match.groups()
if unit is None:
unit = 'b'
try:
unit_mult = BYTE_SIZES[unit.lower()]
except KeyError:
raise errors.InvalidByteSizeUnit(unit=unit)
return cls(int(float(scalar) * unit_mult))
def human_readable(self, decimal: bool = False) -> str:
if decimal:
divisor = 1000
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
final_unit = 'EB'
else:
divisor = 1024
units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
final_unit = 'EiB'
num = float(self)
for unit in units:
if abs(num) < divisor:
return f'{num:0.1f}{unit}'
num /= divisor
return f'{num:0.1f}{final_unit}'
def to(self, unit: str) -> float:
try:
unit_div = BYTE_SIZES[unit.lower()]
except KeyError:
raise errors.InvalidByteSizeUnit(unit=unit)
return self / unit_div
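# Minimal usage sketch: parsing a human-readable size string and converting back.
def _example_byte_size_usage():
    size = ByteSize.validate('1.5 KiB')
    assert int(size) == 1536
    assert size.human_readable() == '1.5KiB'
    assert size.to('b') == 1536  # size.to('kb') would give 1.536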
| 29.006397 | 120 | 0.641392 | import math
import re
import warnings
from decimal import Decimal
from enum import Enum
from pathlib import Path
from types import new_class
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
List,
Optional,
Pattern,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from uuid import UUID
from weakref import WeakSet
from . import errors
from .utils import import_string, update_not_none
from .validators import (
bytes_validator,
constr_length_validator,
constr_lower,
constr_strip_whitespace,
decimal_validator,
float_validator,
int_validator,
list_validator,
number_multiple_validator,
number_size_validator,
path_exists_validator,
path_validator,
set_validator,
str_validator,
strict_bytes_validator,
strict_float_validator,
strict_int_validator,
strict_str_validator,
)
__all__ = [
'NoneStr',
'NoneBytes',
'StrBytes',
'NoneStrBytes',
'StrictStr',
'ConstrainedBytes',
'conbytes',
'ConstrainedList',
'conlist',
'ConstrainedSet',
'conset',
'ConstrainedStr',
'constr',
'PyObject',
'ConstrainedInt',
'conint',
'PositiveInt',
'NegativeInt',
'NonNegativeInt',
'NonPositiveInt',
'ConstrainedFloat',
'confloat',
'PositiveFloat',
'NegativeFloat',
'NonNegativeFloat',
'NonPositiveFloat',
'ConstrainedDecimal',
'condecimal',
'UUID1',
'UUID3',
'UUID4',
'UUID5',
'FilePath',
'DirectoryPath',
'Json',
'JsonWrapper',
'SecretStr',
'SecretBytes',
'StrictBool',
'StrictBytes',
'StrictInt',
'StrictFloat',
'PaymentCardNumber',
'ByteSize',
]
NoneStr = Optional[str]
NoneBytes = Optional[bytes]
StrBytes = Union[str, bytes]
NoneStrBytes = Optional[StrBytes]
OptionalInt = Optional[int]
OptionalIntFloat = Union[OptionalInt, float]
OptionalIntFloatDecimal = Union[OptionalIntFloat, Decimal]
StrIntFloat = Union[str, int, float]
if TYPE_CHECKING:
from .dataclasses import Dataclass
from .main import BaseConfig, BaseModel
from .typing import CallableGenerator
ModelOrDc = Type[Union['BaseModel', 'Dataclass']]
T = TypeVar('T')
_DEFINED_TYPES: 'WeakSet[type]' = WeakSet()
@overload
def _registered(typ: Type[T]) -> Type[T]:
pass
@overload
def _registered(typ: 'ConstrainedNumberMeta') -> 'ConstrainedNumberMeta':
pass
def _registered(typ: Union[Type[T], 'ConstrainedNumberMeta']) -> Union[Type[T], 'ConstrainedNumberMeta']:
_DEFINED_TYPES.add(typ)
return typ
class ConstrainedBytes(bytes):
strip_whitespace = False
to_lower = False
min_length: OptionalInt = None
max_length: OptionalInt = None
strict: bool = False
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_bytes_validator if cls.strict else bytes_validator
yield constr_strip_whitespace
yield constr_lower
yield constr_length_validator
class StrictBytes(ConstrainedBytes):
strict = True
def conbytes(
*, strip_whitespace: bool = False, to_lower: bool = False, min_length: int = None, max_length: int = None
) -> Type[bytes]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(strip_whitespace=strip_whitespace, to_lower=to_lower, min_length=min_length, max_length=max_length)
return _registered(type('ConstrainedBytesValue', (ConstrainedBytes,), namespace))
# This types superclass should be List[T], but cython chokes on that...
class ConstrainedList(list): # type: ignore
# Needed for pydantic to detect that this is a list
__origin__ = list
__args__: Tuple[Type[T], ...] # type: ignore
min_items: Optional[int] = None
max_items: Optional[int] = None
item_type: Type[T] # type: ignore
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.list_length_validator
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
@classmethod
def list_length_validator(cls, v: 'Optional[List[T]]') -> 'Optional[List[T]]':
if v is None:
return None
v = list_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.ListMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.ListMaxLengthError(limit_value=cls.max_items)
return v
def conlist(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[List[T]]:
# __args__ is needed to conform to typing generics api
namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': (item_type,)}
# We use new_class to be able to deal with Generic types
return new_class('ConstrainedListValue', (ConstrainedList,), {}, lambda ns: ns.update(namespace))
# This types superclass should be Set[T], but cython chokes on that...
class ConstrainedSet(set): # type: ignore
# Needed for pydantic to detect that this is a set
__origin__ = set
__args__: Set[Type[T]] # type: ignore
min_items: Optional[int] = None
max_items: Optional[int] = None
item_type: Type[T] # type: ignore
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.set_length_validator
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
@classmethod
def set_length_validator(cls, v: 'Optional[Set[T]]') -> 'Optional[Set[T]]':
if v is None:
return None
v = set_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.SetMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.SetMaxLengthError(limit_value=cls.max_items)
return v
def conset(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[Set[T]]:
# __args__ is needed to conform to typing generics api
namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]}
# We use new_class to be able to deal with Generic types
return new_class('ConstrainedSetValue', (ConstrainedSet,), {}, lambda ns: ns.update(namespace))
class ConstrainedStr(str):
strip_whitespace = False
to_lower = False
min_length: OptionalInt = None
max_length: OptionalInt = None
curtail_length: OptionalInt = None
regex: Optional[Pattern[str]] = None
strict = False
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema, minLength=cls.min_length, maxLength=cls.max_length, pattern=cls.regex and cls.regex.pattern
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_str_validator if cls.strict else str_validator
yield constr_strip_whitespace
yield constr_lower
yield constr_length_validator
yield cls.validate
@classmethod
def validate(cls, value: Union[str]) -> Union[str]:
if cls.curtail_length and len(value) > cls.curtail_length:
value = value[: cls.curtail_length]
if cls.regex:
if not cls.regex.match(value):
raise errors.StrRegexError(pattern=cls.regex.pattern)
return value
def constr(
*,
strip_whitespace: bool = False,
to_lower: bool = False,
strict: bool = False,
min_length: int = None,
max_length: int = None,
curtail_length: int = None,
regex: str = None,
) -> Type[str]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
strip_whitespace=strip_whitespace,
to_lower=to_lower,
strict=strict,
min_length=min_length,
max_length=max_length,
curtail_length=curtail_length,
regex=regex and re.compile(regex),
)
return _registered(type('ConstrainedStrValue', (ConstrainedStr,), namespace))
class StrictStr(ConstrainedStr):
strict = True
if TYPE_CHECKING:
StrictBool = bool
else:
class StrictBool(int):
"""
StrictBool to allow for bools which are not type-coerced.
"""
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='boolean')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any) -> bool:
"""
Ensure that we only allow bools.
"""
if isinstance(value, bool):
return value
raise errors.StrictBoolError()
class PyObject:
validate_always = True
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any) -> Any:
if isinstance(value, Callable): # type: ignore
return value
try:
value = str_validator(value)
except errors.StrError:
            raise errors.PyObjectError(error_message='value is neither a valid import path nor a valid callable')
try:
return import_string(value)
except ImportError as e:
raise errors.PyObjectError(error_message=str(e))
if TYPE_CHECKING:
def __call__(self, *args: Any, **kwargs: Any) -> Any:
...
class ConstrainedNumberMeta(type):
def __new__(cls, name: str, bases: Any, dct: Dict[str, Any]) -> 'ConstrainedInt': # type: ignore
new_cls = cast('ConstrainedInt', type.__new__(cls, name, bases, dct))
if new_cls.gt is not None and new_cls.ge is not None:
raise errors.ConfigError('bounds gt and ge cannot be specified at the same time')
if new_cls.lt is not None and new_cls.le is not None:
raise errors.ConfigError('bounds lt and le cannot be specified at the same time')
return _registered(new_cls) # type: ignore
class ConstrainedInt(int, metaclass=ConstrainedNumberMeta):
strict: bool = False
gt: OptionalInt = None
ge: OptionalInt = None
lt: OptionalInt = None
le: OptionalInt = None
multiple_of: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_int_validator if cls.strict else int_validator
yield number_size_validator
yield number_multiple_validator
def conint(
*, strict: bool = False, gt: int = None, ge: int = None, lt: int = None, le: int = None, multiple_of: int = None
) -> Type[int]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
return type('ConstrainedIntValue', (ConstrainedInt,), namespace)
class PositiveInt(ConstrainedInt):
gt = 0
class NegativeInt(ConstrainedInt):
lt = 0
class NonPositiveInt(ConstrainedInt):
le = 0
class NonNegativeInt(ConstrainedInt):
ge = 0
class StrictInt(ConstrainedInt):
strict = True
class ConstrainedFloat(float, metaclass=ConstrainedNumberMeta):
strict: bool = False
gt: OptionalIntFloat = None
ge: OptionalIntFloat = None
lt: OptionalIntFloat = None
le: OptionalIntFloat = None
multiple_of: OptionalIntFloat = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
# Modify constraints to account for differences between IEEE floats and JSON
if field_schema.get('exclusiveMinimum') == -math.inf:
del field_schema['exclusiveMinimum']
if field_schema.get('minimum') == -math.inf:
del field_schema['minimum']
if field_schema.get('exclusiveMaximum') == math.inf:
del field_schema['exclusiveMaximum']
if field_schema.get('maximum') == math.inf:
del field_schema['maximum']
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_float_validator if cls.strict else float_validator
yield number_size_validator
yield number_multiple_validator
def confloat(
*,
strict: bool = False,
gt: float = None,
ge: float = None,
lt: float = None,
le: float = None,
multiple_of: float = None,
) -> Type[float]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
return type('ConstrainedFloatValue', (ConstrainedFloat,), namespace)
class PositiveFloat(ConstrainedFloat):
gt = 0
class NegativeFloat(ConstrainedFloat):
lt = 0
class NonPositiveFloat(ConstrainedFloat):
le = 0
class NonNegativeFloat(ConstrainedFloat):
ge = 0
class StrictFloat(ConstrainedFloat):
strict = True
class ConstrainedDecimal(Decimal, metaclass=ConstrainedNumberMeta):
gt: OptionalIntFloatDecimal = None
ge: OptionalIntFloatDecimal = None
lt: OptionalIntFloatDecimal = None
le: OptionalIntFloatDecimal = None
max_digits: OptionalInt = None
decimal_places: OptionalInt = None
multiple_of: OptionalIntFloatDecimal = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield decimal_validator
yield number_size_validator
yield number_multiple_validator
yield cls.validate
@classmethod
def validate(cls, value: Decimal) -> Decimal:
digit_tuple, exponent = value.as_tuple()[1:]
if exponent in {'F', 'n', 'N'}:
raise errors.DecimalIsNotFiniteError()
if exponent >= 0:
# A positive exponent adds that many trailing zeros.
digits = len(digit_tuple) + exponent
decimals = 0
else:
# If the absolute value of the negative exponent is larger than the
            # number of digits, then it's the same as the number of digits,
            # because it will consume all of the digits in digit_tuple and then
            # add abs(exponent) - len(digit_tuple) leading zeros after the
# decimal point.
if abs(exponent) > len(digit_tuple):
digits = decimals = abs(exponent)
else:
digits = len(digit_tuple)
decimals = abs(exponent)
whole_digits = digits - decimals
if cls.max_digits is not None and digits > cls.max_digits:
raise errors.DecimalMaxDigitsError(max_digits=cls.max_digits)
if cls.decimal_places is not None and decimals > cls.decimal_places:
raise errors.DecimalMaxPlacesError(decimal_places=cls.decimal_places)
if cls.max_digits is not None and cls.decimal_places is not None:
expected = cls.max_digits - cls.decimal_places
if whole_digits > expected:
raise errors.DecimalWholeDigitsError(whole_digits=expected)
return value
def condecimal(
*,
gt: Decimal = None,
ge: Decimal = None,
lt: Decimal = None,
le: Decimal = None,
max_digits: int = None,
decimal_places: int = None,
multiple_of: Decimal = None,
) -> Type[Decimal]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
gt=gt, ge=ge, lt=lt, le=le, max_digits=max_digits, decimal_places=decimal_places, multiple_of=multiple_of
)
return type('ConstrainedDecimalValue', (ConstrainedDecimal,), namespace)
class UUID1(UUID):
_required_version = 1
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format=f'uuid{cls._required_version}')
class UUID3(UUID1):
_required_version = 3
class UUID4(UUID1):
_required_version = 4
class UUID5(UUID1):
_required_version = 5
class FilePath(Path):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(format='file-path')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield path_validator
yield path_exists_validator
yield cls.validate
@classmethod
def validate(cls, value: Path) -> Path:
if not value.is_file():
raise errors.PathNotAFileError(path=value)
return value
class DirectoryPath(Path):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(format='directory-path')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield path_validator
yield path_exists_validator
yield cls.validate
@classmethod
def validate(cls, value: Path) -> Path:
if not value.is_dir():
raise errors.PathNotADirectoryError(path=value)
return value
class JsonWrapper:
pass
class JsonMeta(type):
def __getitem__(self, t: Type[Any]) -> Type[JsonWrapper]:
return _registered(type('JsonWrapperValue', (JsonWrapper,), {'inner_type': t}))
class Json(metaclass=JsonMeta):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='json-string')
class SecretStr:
min_length: OptionalInt = None
max_length: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
type='string',
writeOnly=True,
format='password',
minLength=cls.min_length,
maxLength=cls.max_length,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
yield constr_length_validator
@classmethod
def validate(cls, value: Any) -> 'SecretStr':
if isinstance(value, cls):
return value
value = str_validator(value)
return cls(value)
def __init__(self, value: str):
self._secret_value = value
def __repr__(self) -> str:
return f"SecretStr('{self}')"
def __str__(self) -> str:
return '**********' if self._secret_value else ''
def __eq__(self, other: Any) -> bool:
return isinstance(other, SecretStr) and self.get_secret_value() == other.get_secret_value()
def __len__(self) -> int:
return len(self._secret_value)
def display(self) -> str:
warnings.warn('`secret_str.display()` is deprecated, use `str(secret_str)` instead', DeprecationWarning)
return str(self)
def get_secret_value(self) -> str:
return self._secret_value
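# Illustrative behaviour of SecretStr, derived directly from the methods above
# (not part of the original file):
#
#     s = SecretStr.validate('hunter2')
#     str(s)                 # '**********'  (masked)
#     repr(s)                # "SecretStr('**********')"
#     s.get_secret_value()   # 'hunter2'
#     str(SecretStr(''))     # ''  (empty secrets are not masked)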
class SecretBytes:
min_length: OptionalInt = None
max_length: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
type='string',
writeOnly=True,
format='password',
minLength=cls.min_length,
maxLength=cls.max_length,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
yield constr_length_validator
@classmethod
def validate(cls, value: Any) -> 'SecretBytes':
if isinstance(value, cls):
return value
value = bytes_validator(value)
return cls(value)
def __init__(self, value: bytes):
self._secret_value = value
def __repr__(self) -> str:
return f"SecretBytes(b'{self}')"
def __str__(self) -> str:
return '**********' if self._secret_value else ''
def __eq__(self, other: Any) -> bool:
return isinstance(other, SecretBytes) and self.get_secret_value() == other.get_secret_value()
def __len__(self) -> int:
return len(self._secret_value)
def display(self) -> str:
warnings.warn('`secret_bytes.display()` is deprecated, use `str(secret_bytes)` instead', DeprecationWarning)
return str(self)
def get_secret_value(self) -> bytes:
return self._secret_value
class PaymentCardBrand(str, Enum):
# If you add another card type, please also add it to the
# Hypothesis strategy in `pydantic._hypothesis_plugin`.
amex = 'American Express'
mastercard = 'Mastercard'
visa = 'Visa'
other = 'other'
def __str__(self) -> str:
return self.value
class PaymentCardNumber(str):
strip_whitespace: ClassVar[bool] = True
min_length: ClassVar[int] = 12
max_length: ClassVar[int] = 19
bin: str
last4: str
brand: PaymentCardBrand
def __init__(self, card_number: str):
self.bin = card_number[:6]
self.last4 = card_number[-4:]
self.brand = self._get_brand(card_number)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield str_validator
yield constr_strip_whitespace
yield constr_length_validator
yield cls.validate_digits
yield cls.validate_luhn_check_digit
yield cls
yield cls.validate_length_for_brand
@property
def masked(self) -> str:
num_masked = len(self) - 10 # len(bin) + len(last4) == 10
return f'{self.bin}{"*" * num_masked}{self.last4}'
@classmethod
def validate_digits(cls, card_number: str) -> str:
if not card_number.isdigit():
raise errors.NotDigitError
return card_number
@classmethod
def validate_luhn_check_digit(cls, card_number: str) -> str:
sum_ = int(card_number[-1])
length = len(card_number)
parity = length % 2
for i in range(length - 1):
digit = int(card_number[i])
if i % 2 == parity:
digit *= 2
if digit > 9:
digit -= 9
sum_ += digit
valid = sum_ % 10 == 0
if not valid:
raise errors.LuhnValidationError
return card_number
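    # Worked example of the Luhn check above (illustrative): for the classic
    # test number '79927398713', parity = 11 % 2 = 1, so digits at odd indices
    # are doubled (and reduced by 9 when > 9): 9->9, 2->4, 3->6, 8->7, 1->2.
    # Summing those with the untouched digits and the check digit 3 gives
    # 7+9+9+4+7+6+9+7+7+2+3 = 70, which is divisible by 10, so validation passes.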
@classmethod
def validate_length_for_brand(cls, card_number: 'PaymentCardNumber') -> 'PaymentCardNumber':
required_length: Optional[int] = None
if card_number.brand in {PaymentCardBrand.visa, PaymentCardBrand.mastercard}:
required_length = 16
valid = len(card_number) == required_length
elif card_number.brand == PaymentCardBrand.amex:
required_length = 15
valid = len(card_number) == required_length
else:
valid = True
if not valid:
raise errors.InvalidLengthForBrand(brand=card_number.brand, required_length=required_length)
return card_number
@staticmethod
def _get_brand(card_number: str) -> PaymentCardBrand:
if card_number[0] == '4':
brand = PaymentCardBrand.visa
elif 51 <= int(card_number[:2]) <= 55:
brand = PaymentCardBrand.mastercard
elif card_number[:2] in {'34', '37'}:
brand = PaymentCardBrand.amex
else:
brand = PaymentCardBrand.other
return brand
BYTE_SIZES = {
'b': 1,
'kb': 10 ** 3,
'mb': 10 ** 6,
'gb': 10 ** 9,
'tb': 10 ** 12,
'pb': 10 ** 15,
'eb': 10 ** 18,
'kib': 2 ** 10,
'mib': 2 ** 20,
'gib': 2 ** 30,
'tib': 2 ** 40,
'pib': 2 ** 50,
'eib': 2 ** 60,
}
BYTE_SIZES.update({k.lower()[0]: v for k, v in BYTE_SIZES.items() if 'i' not in k})
byte_string_re = re.compile(r'^\s*(\d*\.?\d+)\s*(\w+)?', re.IGNORECASE)
class ByteSize(int):
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, v: StrIntFloat) -> 'ByteSize':
try:
return cls(int(v))
except ValueError:
pass
str_match = byte_string_re.match(str(v))
if str_match is None:
raise errors.InvalidByteSize()
scalar, unit = str_match.groups()
if unit is None:
unit = 'b'
try:
unit_mult = BYTE_SIZES[unit.lower()]
except KeyError:
raise errors.InvalidByteSizeUnit(unit=unit)
return cls(int(float(scalar) * unit_mult))
def human_readable(self, decimal: bool = False) -> str:
if decimal:
divisor = 1000
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
final_unit = 'EB'
else:
divisor = 1024
units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
final_unit = 'EiB'
num = float(self)
for unit in units:
if abs(num) < divisor:
return f'{num:0.1f}{unit}'
num /= divisor
return f'{num:0.1f}{final_unit}'
def to(self, unit: str) -> float:
try:
unit_div = BYTE_SIZES[unit.lower()]
except KeyError:
raise errors.InvalidByteSizeUnit(unit=unit)
return self / unit_div
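# Illustrative ByteSize usage, derived from the parsing and conversion logic
# above (not part of the original file):
#
#     ByteSize.validate('1.5 KiB')         # ByteSize(1536)
#     ByteSize(1536).human_readable()      # '1.5KiB'
#     ByteSize(1536).to('kb')              # 1.536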
| true | true |
1c2b2dcba360df8497d5ba43d575b2ba81dcabef | 2,790 | py | Python | networks/unet.py | songpeng326/pytorch-semantic-segmentation | 366259cbc3220744c3a633766075f1d06b1c0b3f | [
"MIT"
] | 88 | 2018-04-04T11:02:55.000Z | 2022-01-04T16:32:54.000Z | networks/unet.py | ZhenhLi/pytorch-semantic-segmentation | 7469de95cdb0fbfe9b00b93a8b068c35d398c6cf | [
"MIT"
] | 8 | 2018-04-09T07:52:35.000Z | 2019-04-12T07:35:23.000Z | networks/unet.py | ZhenhLi/pytorch-semantic-segmentation | 7469de95cdb0fbfe9b00b93a8b068c35d398c6cf | [
"MIT"
] | 32 | 2018-05-30T04:05:05.000Z | 2021-04-22T15:45:56.000Z | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.utils import model_zoo
from torchvision import models
class UNetEnc(nn.Module):
def __init__(self, in_channels, features, out_channels):
super().__init__()
self.up = nn.Sequential(
nn.Conv2d(in_channels, features, 3),
nn.ReLU(inplace=True),
nn.Conv2d(features, features, 3),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(features, out_channels, 2, stride=2),
nn.ReLU(inplace=True),
)
def forward(self, x):
return self.up(x)
class UNetDec(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False):
super().__init__()
layers = [
nn.Conv2d(in_channels, out_channels, 3),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3),
nn.ReLU(inplace=True),
]
if dropout:
layers += [nn.Dropout(.5)]
layers += [nn.MaxPool2d(2, stride=2, ceil_mode=True)]
self.down = nn.Sequential(*layers)
def forward(self, x):
return self.down(x)
class UNet(nn.Module):
def __init__(self, num_classes):
super().__init__()
self.dec1 = UNetDec(3, 64)
self.dec2 = UNetDec(64, 128)
self.dec3 = UNetDec(128, 256)
self.dec4 = UNetDec(256, 512, dropout=True)
self.center = nn.Sequential(
nn.Conv2d(512, 1024, 3),
nn.ReLU(inplace=True),
nn.Conv2d(1024, 1024, 3),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.ConvTranspose2d(1024, 512, 2, stride=2),
nn.ReLU(inplace=True),
)
self.enc4 = UNetEnc(1024, 512, 256)
self.enc3 = UNetEnc(512, 256, 128)
self.enc2 = UNetEnc(256, 128, 64)
self.enc1 = nn.Sequential(
nn.Conv2d(128, 64, 3),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3),
nn.ReLU(inplace=True),
)
self.final = nn.Conv2d(64, num_classes, 1)
def forward(self, x):
dec1 = self.dec1(x)
dec2 = self.dec2(dec1)
dec3 = self.dec3(dec2)
dec4 = self.dec4(dec3)
center = self.center(dec4)
enc4 = self.enc4(torch.cat([
center, F.upsample_bilinear(dec4, center.size()[2:])], 1))
enc3 = self.enc3(torch.cat([
enc4, F.upsample_bilinear(dec3, enc4.size()[2:])], 1))
enc2 = self.enc2(torch.cat([
enc3, F.upsample_bilinear(dec2, enc3.size()[2:])], 1))
enc1 = self.enc1(torch.cat([
enc2, F.upsample_bilinear(dec1, enc2.size()[2:])], 1))
return F.upsample_bilinear(self.final(enc1), x.size()[2:])
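# Illustrative smoke test (not part of the original file). Note that in this
# file the "Dec" blocks form the contracting/downsampling path and the "Enc"
# blocks the expanding/upsampling path, and that F.upsample_bilinear is
# deprecated in recent PyTorch in favour of F.interpolate(..., mode='bilinear').
# The input size below is an arbitrary choice that is large enough for the
# unpadded convolutions; the output keeps the input's spatial size.
if __name__ == "__main__":
    net = UNet(num_classes=21)
    out = net(torch.randn(1, 3, 256, 256))
    print(out.shape)  # torch.Size([1, 21, 256, 256])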
| 30 | 70 | 0.558781 | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.utils import model_zoo
from torchvision import models
class UNetEnc(nn.Module):
def __init__(self, in_channels, features, out_channels):
super().__init__()
self.up = nn.Sequential(
nn.Conv2d(in_channels, features, 3),
nn.ReLU(inplace=True),
nn.Conv2d(features, features, 3),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(features, out_channels, 2, stride=2),
nn.ReLU(inplace=True),
)
def forward(self, x):
return self.up(x)
class UNetDec(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False):
super().__init__()
layers = [
nn.Conv2d(in_channels, out_channels, 3),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3),
nn.ReLU(inplace=True),
]
if dropout:
layers += [nn.Dropout(.5)]
layers += [nn.MaxPool2d(2, stride=2, ceil_mode=True)]
self.down = nn.Sequential(*layers)
def forward(self, x):
return self.down(x)
class UNet(nn.Module):
def __init__(self, num_classes):
super().__init__()
self.dec1 = UNetDec(3, 64)
self.dec2 = UNetDec(64, 128)
self.dec3 = UNetDec(128, 256)
self.dec4 = UNetDec(256, 512, dropout=True)
self.center = nn.Sequential(
nn.Conv2d(512, 1024, 3),
nn.ReLU(inplace=True),
nn.Conv2d(1024, 1024, 3),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.ConvTranspose2d(1024, 512, 2, stride=2),
nn.ReLU(inplace=True),
)
self.enc4 = UNetEnc(1024, 512, 256)
self.enc3 = UNetEnc(512, 256, 128)
self.enc2 = UNetEnc(256, 128, 64)
self.enc1 = nn.Sequential(
nn.Conv2d(128, 64, 3),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3),
nn.ReLU(inplace=True),
)
self.final = nn.Conv2d(64, num_classes, 1)
def forward(self, x):
dec1 = self.dec1(x)
dec2 = self.dec2(dec1)
dec3 = self.dec3(dec2)
dec4 = self.dec4(dec3)
center = self.center(dec4)
enc4 = self.enc4(torch.cat([
center, F.upsample_bilinear(dec4, center.size()[2:])], 1))
enc3 = self.enc3(torch.cat([
enc4, F.upsample_bilinear(dec3, enc4.size()[2:])], 1))
enc2 = self.enc2(torch.cat([
enc3, F.upsample_bilinear(dec2, enc3.size()[2:])], 1))
enc1 = self.enc1(torch.cat([
enc2, F.upsample_bilinear(dec1, enc2.size()[2:])], 1))
return F.upsample_bilinear(self.final(enc1), x.size()[2:])
| true | true |
1c2b2e18eb5e04cc32cc54e6818b65ee64684e89 | 909 | py | Python | regulation/debug.py | pierrehebert/photovoltaic_optimizer | 5c20d2fccabc2e3e8a7c471a2e83a6061a8fd235 | [
"Apache-2.0"
] | 2 | 2020-04-15T12:02:16.000Z | 2020-05-18T02:13:46.000Z | regulation/debug.py | pierrehebert/photovoltaic_optimizer | 5c20d2fccabc2e3e8a7c471a2e83a6061a8fd235 | [
"Apache-2.0"
] | null | null | null | regulation/debug.py | pierrehebert/photovoltaic_optimizer | 5c20d2fccabc2e3e8a7c471a2e83a6061a8fd235 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2018-2019 Pierre Hébert
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logger = logging.getLogger('power_regulation')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def debug(indent, msg):
logger.info((' '*indent)+str(msg)) | 32.464286 | 74 | 0.754675 |
import logging
logger = logging.getLogger('power_regulation')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def debug(indent, msg):
logger.info((' '*indent)+str(msg)) | true | true |
1c2b2e96e15eac01204fe0230ea6ab7b6887d64f | 8,028 | py | Python | main.py | comp5331-Xtimeseries/mWDN | 3805f90230b93d04f86201079358ec1f6dd6bb2d | [
"MIT"
] | null | null | null | main.py | comp5331-Xtimeseries/mWDN | 3805f90230b93d04f86201079358ec1f6dd6bb2d | [
"MIT"
] | null | null | null | main.py | comp5331-Xtimeseries/mWDN | 3805f90230b93d04f86201079358ec1f6dd6bb2d | [
"MIT"
] | null | null | null | import argparse
import math
import time
import torch
import torch.nn as nn
from models import LSTNet
from models.mWDN import mWDN
# from tsai.models import mWDN
import numpy as np;
import importlib
import Datasets
from utils import *;
import Optim
def evaluate(data, X, Y, model, evaluateL2, evaluateL1, batch_size):
model.eval();
total_loss = 0;
total_loss_l1 = 0;
n_samples = 0;
predict = None;
test = None;
for X, Y in data.get_batches(X, Y, batch_size, False):
if args.cuda:
X = X.cuda();
Y = Y.cuda();
output = model(X);
if predict is None:
predict = output;
test = Y;
else:
predict = torch.cat((predict,output));
test = torch.cat((test, Y));
scale = data.scale.expand(output.size(0), data.m)
total_loss += evaluateL2(output * scale, Y * scale).item()
total_loss_l1 += evaluateL1(output * scale, Y * scale).item()
n_samples += (output.size(0) * data.m);
rse = math.sqrt(total_loss / n_samples)/data.rse
rae = (total_loss_l1/n_samples)/data.rae
predict = predict.data.cpu().numpy();
Ytest = test.data.cpu().numpy();
sigma_p = (predict).std(axis = 0);
sigma_g = (Ytest).std(axis = 0);
mean_p = predict.mean(axis = 0)
mean_g = Ytest.mean(axis = 0)
index = (sigma_g!=0);
correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis = 0)/(sigma_p * sigma_g);
correlation = (correlation[index]).mean();
return rse, rae, correlation;
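# For reference (matches the computation above; Data.rse and Data.rae are
# normalisation constants produced by Data_utility in utils):
#   rse  = sqrt(sum of squared errors / n_samples) / Data.rse   (relative RMSE)
#   rae  = (sum of absolute errors / n_samples) / Data.rae      (relative MAE)
#   corr = per-variable correlation between predictions and targets,
#          averaged over the output variables with non-zero variance.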
def train(data, X, Y, model, criterion, optim, batch_size):
model.train();
total_loss = 0;
n_samples = 0;
for X, Y in data.get_batches(X, Y, batch_size, True):
if args.cuda:
X = X.cuda();
Y = Y.cuda();
model.zero_grad();
output = model(X);
scale = data.scale.expand(output.size(0), data.m)
loss = criterion(output * scale, Y * scale);
loss.backward();
grad_norm = optim.step();
total_loss += loss.item();
n_samples += (output.size(0) * data.m);
return total_loss / n_samples
parser = argparse.ArgumentParser(description='PyTorch Time series forecasting')
parser.add_argument('--data', type=str, required=True,
help='location of the data file')
parser.add_argument('--model', type=str, default='LSTNet',
help='')
parser.add_argument('--hidCNN', type=int, default=100,
help='number of CNN hidden units')
parser.add_argument('--hidRNN', type=int, default=100,
help='number of RNN hidden units')
parser.add_argument('--window', type=int, default=24 * 7,
help='window size')
parser.add_argument('--CNN_kernel', type=int, default=6,
help='the kernel size of the CNN layers')
parser.add_argument('--highway_window', type=int, default=24,
help='The window size of the highway component')
parser.add_argument('--clip', type=float, default=10.,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=100,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='batch size')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--seed', type=int, default=54321,
help='random seed')
parser.add_argument('--gpu', type=int, default=None)
parser.add_argument('--log_interval', type=int, default=2000, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model/model.pt',
help='path to save the final model')
parser.add_argument('--cuda', type=str, default=True)
parser.add_argument('--optim', type=str, default='adam')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--horizon', type=int, default=12)
parser.add_argument('--skip', type=float, default=24)
parser.add_argument('--hidSkip', type=int, default=5)
parser.add_argument('--L1Loss', type=bool, default=True)
parser.add_argument('--normalize', type=int, default=2)
parser.add_argument('--output_fun', type=str, default='sigmoid')
parser.add_argument('--c_in', type=int, default=3)
parser.add_argument('--seq_len', type=int, default=12)
parser.add_argument('--c_out', type=int, default=2)
parser.add_argument('--levels', type=int, default=3)
args = parser.parse_args()
args.cuda = args.gpu is not None
if args.cuda:
torch.cuda.set_device(args.gpu)
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
if args.data=="solar":
dSet=Datasets.Solar().data
elif args.data=="exchange_rate":
dSet=Datasets.ExchangeRate().data
elif args.data=="electricity":
dSet=Datasets.Electricity().data
elif args.data=="traffic":
dSet=Datasets.Traffic().data
Data = Data_utility(dSet, 0.6, 0.2, args.cuda, args.horizon, args.window, args.normalize);
print(Data.rse);
Data.train[0]=Data.train[0].permute(0,2,1)
Data.valid[0]=Data.valid[0].permute(0,2,1)
Data.test[0]=Data.test[0].permute(0,2,1)
if args.model == "LSTNet":
model = eval(args.model).Model(args, Data);
elif args.model == "mWDN":
model = mWDN(args)
model.float()
if args.cuda:
model.cuda()
else:
model.cpu()
nParams = sum([p.nelement() for p in model.parameters()])
print('* number of parameters: %d' % nParams)
# for name, param in model.named_parameters():
# print(name)
# param_in_param = [p for p in model.parameters()]
# param_in_named_param = [p for name, p in model.named_parameters()]
# for param1, param2 in zip(param_in_param, param_in_named_param):
# assert param1.shape == param2.shape
#
if args.L1Loss:
criterion = nn.L1Loss(size_average=False);
else:
criterion = nn.MSELoss(size_average=False);
evaluateL2 = nn.MSELoss(size_average=False);
evaluateL1 = nn.L1Loss(size_average=False)
if args.cuda:
criterion = criterion.cuda()
evaluateL1 = evaluateL1.cuda();
evaluateL2 = evaluateL2.cuda();
best_val = 10000000;
optim = Optim.Optim(
model.parameters(), args.optim, args.lr, args.clip,
)
# At any point you can hit Ctrl + C to break out of training early.
try:
print('begin training');
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train_loss = train(Data, Data.train[0], Data.train[1], model, criterion, optim, args.batch_size)
val_loss, val_rae, val_corr = evaluate(Data, Data.valid[0], Data.valid[1], model, evaluateL2, evaluateL1, args.batch_size);
print('| end of epoch {:3d} | time: {:5.2f}s | train_loss {:5.4f} | valid rse {:5.4f} | valid rae {:5.4f} | valid corr {:5.4f}'.format(epoch, (time.time() - epoch_start_time), train_loss, val_loss, val_rae, val_corr))
# Save the model if the validation loss is the best we've seen so far.
if val_loss < best_val:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val = val_loss
if epoch % 5 == 0:
test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1, args.batch_size);
print ("test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}".format(test_acc, test_rae, test_corr))
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1, args.batch_size);
print ("test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}".format(test_acc, test_rae, test_corr))
| 38.228571 | 226 | 0.643 | import argparse
import math
import time
import torch
import torch.nn as nn
from models import LSTNet
from models.mWDN import mWDN
import numpy as np;
import importlib
import Datasets
from utils import *;
import Optim
def evaluate(data, X, Y, model, evaluateL2, evaluateL1, batch_size):
model.eval();
total_loss = 0;
total_loss_l1 = 0;
n_samples = 0;
predict = None;
test = None;
for X, Y in data.get_batches(X, Y, batch_size, False):
if args.cuda:
X = X.cuda();
Y = Y.cuda();
output = model(X);
if predict is None:
predict = output;
test = Y;
else:
predict = torch.cat((predict,output));
test = torch.cat((test, Y));
scale = data.scale.expand(output.size(0), data.m)
total_loss += evaluateL2(output * scale, Y * scale).item()
total_loss_l1 += evaluateL1(output * scale, Y * scale).item()
n_samples += (output.size(0) * data.m);
rse = math.sqrt(total_loss / n_samples)/data.rse
rae = (total_loss_l1/n_samples)/data.rae
predict = predict.data.cpu().numpy();
Ytest = test.data.cpu().numpy();
sigma_p = (predict).std(axis = 0);
sigma_g = (Ytest).std(axis = 0);
mean_p = predict.mean(axis = 0)
mean_g = Ytest.mean(axis = 0)
index = (sigma_g!=0);
correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis = 0)/(sigma_p * sigma_g);
correlation = (correlation[index]).mean();
return rse, rae, correlation;
def train(data, X, Y, model, criterion, optim, batch_size):
model.train();
total_loss = 0;
n_samples = 0;
for X, Y in data.get_batches(X, Y, batch_size, True):
if args.cuda:
X = X.cuda();
Y = Y.cuda();
model.zero_grad();
output = model(X);
scale = data.scale.expand(output.size(0), data.m)
loss = criterion(output * scale, Y * scale);
loss.backward();
grad_norm = optim.step();
total_loss += loss.item();
n_samples += (output.size(0) * data.m);
return total_loss / n_samples
parser = argparse.ArgumentParser(description='PyTorch Time series forecasting')
parser.add_argument('--data', type=str, required=True,
help='location of the data file')
parser.add_argument('--model', type=str, default='LSTNet',
help='')
parser.add_argument('--hidCNN', type=int, default=100,
help='number of CNN hidden units')
parser.add_argument('--hidRNN', type=int, default=100,
help='number of RNN hidden units')
parser.add_argument('--window', type=int, default=24 * 7,
help='window size')
parser.add_argument('--CNN_kernel', type=int, default=6,
help='the kernel size of the CNN layers')
parser.add_argument('--highway_window', type=int, default=24,
help='The window size of the highway component')
parser.add_argument('--clip', type=float, default=10.,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=100,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='batch size')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--seed', type=int, default=54321,
help='random seed')
parser.add_argument('--gpu', type=int, default=None)
parser.add_argument('--log_interval', type=int, default=2000, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model/model.pt',
help='path to save the final model')
parser.add_argument('--cuda', type=str, default=True)
parser.add_argument('--optim', type=str, default='adam')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--horizon', type=int, default=12)
parser.add_argument('--skip', type=float, default=24)
parser.add_argument('--hidSkip', type=int, default=5)
parser.add_argument('--L1Loss', type=bool, default=True)
parser.add_argument('--normalize', type=int, default=2)
parser.add_argument('--output_fun', type=str, default='sigmoid')
parser.add_argument('--c_in', type=int, default=3)
parser.add_argument('--seq_len', type=int, default=12)
parser.add_argument('--c_out', type=int, default=2)
parser.add_argument('--levels', type=int, default=3)
args = parser.parse_args()
args.cuda = args.gpu is not None
if args.cuda:
torch.cuda.set_device(args.gpu)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
if args.data=="solar":
dSet=Datasets.Solar().data
elif args.data=="exchange_rate":
dSet=Datasets.ExchangeRate().data
elif args.data=="electricity":
dSet=Datasets.Electricity().data
elif args.data=="traffic":
dSet=Datasets.Traffic().data
Data = Data_utility(dSet, 0.6, 0.2, args.cuda, args.horizon, args.window, args.normalize);
print(Data.rse);
Data.train[0]=Data.train[0].permute(0,2,1)
Data.valid[0]=Data.valid[0].permute(0,2,1)
Data.test[0]=Data.test[0].permute(0,2,1)
if args.model == "LSTNet":
model = eval(args.model).Model(args, Data);
elif args.model == "mWDN":
model = mWDN(args)
model.float()
if args.cuda:
model.cuda()
else:
model.cpu()
nParams = sum([p.nelement() for p in model.parameters()])
print('* number of parameters: %d' % nParams)
if args.L1Loss:
criterion = nn.L1Loss(size_average=False);
else:
criterion = nn.MSELoss(size_average=False);
evaluateL2 = nn.MSELoss(size_average=False);
evaluateL1 = nn.L1Loss(size_average=False)
if args.cuda:
criterion = criterion.cuda()
evaluateL1 = evaluateL1.cuda();
evaluateL2 = evaluateL2.cuda();
best_val = 10000000;
optim = Optim.Optim(
model.parameters(), args.optim, args.lr, args.clip,
)
try:
print('begin training');
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train_loss = train(Data, Data.train[0], Data.train[1], model, criterion, optim, args.batch_size)
val_loss, val_rae, val_corr = evaluate(Data, Data.valid[0], Data.valid[1], model, evaluateL2, evaluateL1, args.batch_size);
print('| end of epoch {:3d} | time: {:5.2f}s | train_loss {:5.4f} | valid rse {:5.4f} | valid rae {:5.4f} | valid corr {:5.4f}'.format(epoch, (time.time() - epoch_start_time), train_loss, val_loss, val_rae, val_corr))
if val_loss < best_val:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val = val_loss
if epoch % 5 == 0:
test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1, args.batch_size);
print ("test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}".format(test_acc, test_rae, test_corr))
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1, args.batch_size);
print ("test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}".format(test_acc, test_rae, test_corr))
| true | true |
1c2b2eacd034df49e2f4737731a48a56bfa4e8af | 1,571 | py | Python | tools/Polygraphy/polygraphy/config.py | hwkyai/TensorRT | d04182cd0086c70db4a8ad30e0d7675c4eb33782 | [
"Apache-2.0"
] | null | null | null | tools/Polygraphy/polygraphy/config.py | hwkyai/TensorRT | d04182cd0086c70db4a8ad30e0d7675c4eb33782 | [
"Apache-2.0"
] | null | null | null | tools/Polygraphy/polygraphy/config.py | hwkyai/TensorRT | d04182cd0086c70db4a8ad30e0d7675c4eb33782 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
INTERNAL_CORRECTNESS_CHECKS = bool(os.environ.get("POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS", "0") != "0")
"""
Whether internal correctness checks are enabled.
This can be configured by setting the 'POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS' environment variable.
"""
AUTOINSTALL_DEPS = bool(os.environ.get("POLYGRAPHY_AUTOINSTALL_DEPS", "0") != "0")
"""
Whether Polygraphy will automatically install required Python packages at runtime.
This can be configured by setting the 'POLYGRAPHY_AUTOINSTALL_DEPS' environment variable.
"""
ARRAY_SWAP_THRESHOLD_MB = int(os.environ.get("POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB", "-1"))
"""
The threshold, in megabytes, above which Polygraphy will evict a NumPy array from memory and swap it to disk.
A negative value disables swapping and a value of 0 causes all arrays to be saved to disk.
Disabled by default.
This can be configured by setting the 'POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB' environment variable.
"""
| 41.342105 | 109 | 0.782304 |
import os
INTERNAL_CORRECTNESS_CHECKS = bool(os.environ.get("POLYGRAPHY_INTERNAL_CORRECTNESS_CHECKS", "0") != "0")
AUTOINSTALL_DEPS = bool(os.environ.get("POLYGRAPHY_AUTOINSTALL_DEPS", "0") != "0")
ARRAY_SWAP_THRESHOLD_MB = int(os.environ.get("POLYGRAPHY_ARRAY_SWAP_THRESHOLD_MB", "-1"))
| true | true |
1c2b2f0ce5b70b75b13a6cbbbbe690ee0279af9c | 708 | py | Python | defining_classes/6_flower.py | Minkov/python-oop-2020-06 | 63b830a42b7abfac5bee576a81ee7626c47a80bc | [
"MIT"
] | 3 | 2020-07-04T11:32:42.000Z | 2020-08-14T08:43:25.000Z | defining_classes/6_flower.py | Minkov/python-oop-2020-06 | 63b830a42b7abfac5bee576a81ee7626c47a80bc | [
"MIT"
] | null | null | null | defining_classes/6_flower.py | Minkov/python-oop-2020-06 | 63b830a42b7abfac5bee576a81ee7626c47a80bc | [
"MIT"
] | 2 | 2020-07-09T07:17:37.000Z | 2021-02-22T22:55:52.000Z | class Flower:
def __init__(self, name, water_requirements):
self.name = name
self.water_requirements = water_requirements
self.current_water = 0
self.is_happy = False
def water(self, quantity):
self.current_water += quantity
self.is_happy = self.get_happy_status()
def get_happy_status(self):
return self.water_requirements <= self.current_water
def status(self):
if self.is_happy:
return f'{self.name} is happy'
else:
return f'{self.name} is not happy'
flower = Flower("Lilly", 100)
flower.water(50)
print(flower.status())
flower.water(100)
print(flower.status())
| 26.222222 | 61 | 0.620056 | class Flower:
def __init__(self, name, water_requirements):
self.name = name
self.water_requirements = water_requirements
self.current_water = 0
self.is_happy = False
def water(self, quantity):
self.current_water += quantity
self.is_happy = self.get_happy_status()
def get_happy_status(self):
return self.water_requirements <= self.current_water
def status(self):
if self.is_happy:
return f'{self.name} is happy'
else:
return f'{self.name} is not happy'
flower = Flower("Lilly", 100)
flower.water(50)
print(flower.status())
flower.water(100)
print(flower.status())
| true | true |
1c2b2f67485747ddf6766f04c0cc97347b89b0ef | 10,985 | py | Python | hyperlib/manifold/poincare.py | sourface94/hyperlib | 2353475a843070588a9faf62f075cb6c75082e48 | [
"MIT"
] | null | null | null | hyperlib/manifold/poincare.py | sourface94/hyperlib | 2353475a843070588a9faf62f075cb6c75082e48 | [
"MIT"
] | null | null | null | hyperlib/manifold/poincare.py | sourface94/hyperlib | 2353475a843070588a9faf62f075cb6c75082e48 | [
"MIT"
] | null | null | null | import tensorflow as tf
from .base import Manifold
from ..utils.math import tanh, atanh_
class Poincare(Manifold):
"""
    Implementation of the Poincare ball manifold. This class can be used for mathematical operations on the Poincare ball.
"""
def __init__(self,):
super(Poincare, self).__init__()
self.name = "Poincare"
self.min_norm = 1e-15
self.eps = {tf.float32: 4e-3, tf.float64: 1e-5}
self.k = 1.0 # scale of the hyperbolic space, k > 0.
def mobius_matvec(self, m, x, c):
"""
Generalization for matrix-vector multiplication to hyperbolic space defined as
math::
M \otimes_c x = (1/\sqrt{c}) \tanh\left(
\frac{\|Mx\|_2}{\|x\|_2}\tanh^{-1}(\sqrt{c}\|x\|_2)
\right)\frac{Mx}{\|Mx\|_2}
Args:
m : Tensor for multiplication
x : Tensor point on poincare ball
c : Tensor of size 1 representing the hyperbolic curvature.
Returns
Mobius matvec result
"""
sqrt_c = c ** 0.5
x_norm = tf.norm(x, axis=-1, keepdims=True, ord=2)
max_num = tf.math.reduce_max(x_norm)
x_norm = tf.clip_by_value(
x_norm, clip_value_min=self.min_norm, clip_value_max=max_num
)
mx = x @ m
mx_norm = tf.norm(mx, axis=-1, keepdims=True, ord=2)
max_num = tf.math.reduce_max(mx_norm)
mx_norm = tf.clip_by_value(
mx_norm, clip_value_min=self.min_norm, clip_value_max=max_num
)
res_c = (
tanh(mx_norm / x_norm * atanh_(sqrt_c * x_norm)) * mx / (mx_norm * sqrt_c)
)
cond = tf.reduce_prod(
tf.cast((mx == 0), tf.uint8, name=None), axis=-1, keepdims=True
)
res_0 = tf.zeros(1, dtype=res_c.dtype)
res = tf.where(tf.cast(cond, tf.bool), res_0, res_c)
return res
def expmap(self, u, p, c):
sqrt_c = c ** 0.5
#u_norm = u.norm(dim=-1, p=2, keepdim=True).clamp_min(self.min_norm)
u_norm = tf.norm(u, axis=-1, ord=2, keepdims=True)
u_norm = tf.clip_by_value(
u_norm, clip_value_min=self.min_norm, clip_value_max=tf.math.reduce_max(u_norm)
)
second_term = (
            tanh(sqrt_c / 2 * self._lambda(p, c, keepdims=True) * u_norm) * u / (sqrt_c * u_norm)
)
gamma_1 = self.mobius_add(p, second_term, c)
return gamma_1
def hyp_act(self, act, x, c_in, c_out):
"""Apply an activation function to a tensor in the hyperbolic space"""
xt = act(self.logmap0(x, c=c_in))
return self.proj(self.expmap0(xt, c=c_out), c=c_out)
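    # Illustrative call (tf.nn.relu is just one possible activation; values are
    # assumptions): hyp_act maps x to the tangent space at the origin, applies
    # the Euclidean activation there, then maps back and re-projects onto the
    # ball of curvature c_out.
    #
    #   y = manifold.hyp_act(tf.nn.relu, x, c_in=1.0, c_out=1.0)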
# meijke implementation
def expmap_m(self, u, x, c=1.0):
""" Exponential map of u at p in the Poincare ball """
#u += 1e-15 #avoid u=0
u = tf.cast(u, tf.float64)
x = tf.cast(x, tf.float64)
c = tf.cast(c, tf.float64)
sqrt_c = tf.math.sqrt(c)
u_norm = self.clipped_norm(u)
second_term = (
tanh(sqrt_c / 2 * self.lambda_x(x, c) * u_norm) * u / (sqrt_c * u_norm)
)
return self.mobius_add(x, second_term, c)
def expmap0(self, u, c):
"""
Hyperbolic exponential map at zero in the Poincare ball model.
Args:
u: tensor of size B x dimension representing tangent vectors.
c: tensor of size 1 representing the hyperbolic curvature.
Returns:
Tensor of shape B x dimension.
"""
sqrt_c = c ** 0.5
max_num = tf.math.reduce_max(u)
u_norm = tf.clip_by_value(
tf.norm(u, axis=-1, ord=2, keepdims=True),
clip_value_min=self.min_norm,
clip_value_max=max_num,
)
gamma_1 = tf.math.tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)
return gamma_1
def logmap0(self, p, c):
"""
Hyperbolic logarithmic map at zero in the Poincare ball model.
Args:
p: tensor of size B x dimension representing hyperbolic points.
c: tensor of size 1 representing the hyperbolic curvature.
Returns:
Tensor of shape B x dimension.
"""
sqrt_c = c ** 0.5
p_norm = tf.norm(p, axis=-1, ord=2, keepdims=True)
max_num = tf.math.reduce_max(p_norm)
p_norm = tf.clip_by_value(
p_norm, clip_value_min=self.min_norm, clip_value_max=max_num
)
scale = 1.0 / sqrt_c * atanh_(sqrt_c * p_norm) / p_norm
return scale * p
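    # Note (illustrative): logmap0 is the inverse of expmap0 at the origin, so
    # for a tangent vector u and curvature c,
    #   logmap0(expmap0(u, c), c) ≈ u
    # up to the numerical clipping applied above.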
def proj(self, x, c):
"""
Safe projection on the manifold for numerical stability. This was mentioned in [1]
Args:
x : Tensor point on the Poincare ball
c : Tensor of size 1 representing the hyperbolic curvature.
Returns:
Projected vector on the manifold
References:
[1] Hyperbolic Neural Networks, NIPS2018
https://arxiv.org/abs/1805.09112
"""
x_for_norm = tf.norm(x, axis=-1, keepdims=True, ord=2)
max_num = tf.math.reduce_max(x_for_norm)
norm = tf.clip_by_value(
x_for_norm, clip_value_min=self.min_norm, clip_value_max=max_num
)
maxnorm = (1 - self.eps[x.dtype]) / (c ** 0.5) # tf.math.reduce_max(x)
cond = norm > maxnorm
projected = x / norm * maxnorm
return tf.where(cond, projected, x)
def mobius_add(self, x, y, c):
"""Element-wise Mobius addition.
Args:
x: Tensor of size B x dimension representing hyperbolic points.
y: Tensor of size B x dimension representing hyperbolic points.
c: Tensor of size 1 representing the absolute hyperbolic curvature.
Returns:
Tensor of shape B x dimension representing the element-wise Mobius addition
of x and y.
"""
cx2 = c * tf.reduce_sum(x * x, axis=-1, keepdims=True)
cy2 = c * tf.reduce_sum(y * y, axis=-1, keepdims=True)
cxy = c * tf.reduce_sum(x * y, axis=-1, keepdims=True)
num = (1 + 2 * cxy + cy2) * x + (1 - cx2) * y
denom = 1 + 2 * cxy + cx2 * cy2
return self.proj(num / tf.maximum(denom, self.min_norm), c)
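    # Sanity identities that follow directly from the formula above
    # (illustrative, not part of the original file): for any x, y in the ball,
    #   mobius_add(0, y, c) == proj(y, c)   and   mobius_add(x, 0, c) == proj(x, c),
    # while Mobius addition is in general neither commutative nor associative.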
# additions
def _lambda(self, x, c=1.0, keepdims=False):
"""Compute the conformal factor :math:`lambda_x^k`"""
#k = tf.cast(self.k, x.dtype)
norm_x_2 = tf.reduce_sum(x * x, axis=-1, keepdims=keepdims)
res = 2.0 / (1.0 - c * norm_x_2)
max_num = tf.math.reduce_max(res)
return tf.clip_by_value(
res, clip_value_min=self.min_norm, clip_value_max=max_num
)
def inner(self, x, u, v, keepdims=False):
lambda_x = self._lambda(x, keepdims=keepdims)
return tf.reduce_sum(u * v, axis=-1, keepdims=keepdims) * lambda_x ** 2
def proju(self, x, u):
lambda_x = self._lambda(x, keepdims=True)
return u / lambda_x ** 2
def projx(self, x):
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
norm = tf.linalg.norm(x, axis=-1, keepdims=True)
def get_eps(val):
return np.finfo(val.dtype.name).eps
return tf.where(
sqrt_k * norm < tf.ones_like(norm),
x,
x / (sqrt_k * norm + 10 * get_eps(x)),
)
def egrad2rgrad(self, x, u):
lambda_x = self._lambda(x, keepdims=True)
return u / lambda_x ** 2
def _mobius_add(self, x, y):
"""Compute the Möbius addition of :math:`x` and :math:`y` in
:math:`\mathcal{D}^{n}_{k}`
:math:`x \oplus y = \frac{(1 + 2k\langle x, y\rangle + k||y||^2)x + (1
- k||x||^2)y}{1 + 2k\langle x,y\rangle + k^2||x||^2||y||^2}`
"""
x_2 = tf.reduce_sum(tf.math.square(x), axis=-1, keepdims=True)
y_2 = tf.reduce_sum(tf.math.square(y), axis=-1, keepdims=True)
x_y = tf.reduce_sum(x * y, axis=-1, keepdims=True)
k = tf.cast(self.k, x.dtype)
return ((1 + 2 * k * x_y + k * y_2) * x + (1 - k * x_2) * y) / (
1 + 2 * k * x_y + k ** 2 * x_2 * y_2
)
def _gyration(self, u, v, w):
"""Compute the gyration of :math:`u`, :math:`v`, :math:`w`:
:math:`\operatorname{gyr}[u, v]w =
\ominus (u \oplus_\kappa v) \oplus (u \oplus_\kappa (v \oplus_\kappa w))`
"""
min_u_v = -self._mobius_add(u, v)
v_w = self._mobius_add(v, w)
u_v_w = self._mobius_add(u, v_w)
return self._mobius_add(min_u_v, u_v_w)
def ptransp(self, x, y, v):
lambda_x = self._lambda(x, keepdims=True)
lambda_y = self._lambda(y, keepdims=True)
return self._gyration(y, -x, v) * lambda_x / lambda_y
transp = ptransp
def exp(self, x, u):
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
norm_u = tf.linalg.norm(u, axis=-1, keepdims=True)
lambda_x = self._lambda(x, keepdims=True)
y = (
tf.math.tanh(sqrt_k * norm_u * lambda_x / 2.0)
* u
/ (sqrt_k * norm_u)
)
return self._mobius_add(x, y)
retr = exp
# hmath meijke
def clipped_norm(self, x, max_norm = None):
""" Clipped Euclidean norm of x """
x_norm = tf.norm(x, axis=-1, ord=2, keepdims=True)
if max_norm is None:
max_norm= tf.math.reduce_max(x_norm)
return tf.clip_by_value(
x_norm,
clip_value_min=self.min_norm,
clip_value_max=max_norm,
)
def gyr(self, x, y, z, c=1.0):
"""
        Ungar's gyration operation defined in [1].
math::
gyr[x,y]z = \ominus (x \oplus y)\oplus(x \oplus (y \oplus z))
where \oplus is Mobius addition and \ominus is the left inverse.
Args:
x, y, z: Tensors of size B x dim in the Poincare ball of curvature c
Returns:
Tensor of size B x dim
Reference:
            [1] A. Ungar, A Gyrovector Space Approach to Hyperbolic Geometry
"""
xy = tf.reduce_sum( x*y, axis=-1, keepdims=True)
yz = tf.reduce_sum( y*z, axis=-1, keepdims=True)
xz = tf.reduce_sum( x*z, axis=-1, keepdims=True)
x2 = tf.reduce_sum( x*x, axis=-1, keepdims=True)
y2 = tf.reduce_sum( y*y, axis=-1, keepdims=True)
z2 = tf.reduce_sum( z*z, axis=-1, keepdims=True)
A = c*yz - c**2 * xz * y2 + 2 * c**2 * xy * yz
B = c**2 * yz * x2 + c * xz
C = 1 + 2 * c* xy + c**2 * x2 * y2
return tf.add(2*tf.divide(A * x - B * y, C), z)
def lambda_x(self, x, c=1.0):
""" Poincare conformal factor at point x """
cx2 = c * tf.reduce_sum(x * x, axis=-1, keepdims=True)
return 2.0 / (1.0 - cx2)
def parallel_transport(self, x, y, v, c=1.0):
"""
The parallel transport of the tangent vector v from the tangent space at x
to the tangent space at y
"""
return tf.divide(self.lambda_x(x,c), self.lambda_x(y,c)) * self.gyr(y,-x,v)
| 36.374172 | 121 | 0.558398 | import tensorflow as tf
from .base import Manifold
from ..utils.math import tanh, atanh_
class Poincare(Manifold):
def __init__(self,):
super(Poincare, self).__init__()
self.name = "Poincare"
self.min_norm = 1e-15
self.eps = {tf.float32: 4e-3, tf.float64: 1e-5}
self.k = 1.0
def mobius_matvec(self, m, x, c):
sqrt_c = c ** 0.5
x_norm = tf.norm(x, axis=-1, keepdims=True, ord=2)
max_num = tf.math.reduce_max(x_norm)
x_norm = tf.clip_by_value(
x_norm, clip_value_min=self.min_norm, clip_value_max=max_num
)
mx = x @ m
mx_norm = tf.norm(mx, axis=-1, keepdims=True, ord=2)
max_num = tf.math.reduce_max(mx_norm)
mx_norm = tf.clip_by_value(
mx_norm, clip_value_min=self.min_norm, clip_value_max=max_num
)
res_c = (
tanh(mx_norm / x_norm * atanh_(sqrt_c * x_norm)) * mx / (mx_norm * sqrt_c)
)
cond = tf.reduce_prod(
tf.cast((mx == 0), tf.uint8, name=None), axis=-1, keepdims=True
)
res_0 = tf.zeros(1, dtype=res_c.dtype)
res = tf.where(tf.cast(cond, tf.bool), res_0, res_c)
return res
def expmap(self, u, p, c):
sqrt_c = c ** 0.5
u_norm = tf.norm(u, axis=-1, ord=2, keepdims=True)
u_norm = tf.clip_by_value(
u_norm, clip_value_min=self.min_norm, clip_value_max=tf.math.reduce_max(u_norm)
)
second_term = (
            tanh(sqrt_c / 2 * self._lambda(p, c, keepdims=True) * u_norm) * u / (sqrt_c * u_norm)
)
gamma_1 = self.mobius_add(p, second_term, c)
return gamma_1
def hyp_act(self, act, x, c_in, c_out):
xt = act(self.logmap0(x, c=c_in))
return self.proj(self.expmap0(xt, c=c_out), c=c_out)
def expmap_m(self, u, x, c=1.0):
        u = tf.cast(u, tf.float64)
x = tf.cast(x, tf.float64)
c = tf.cast(c, tf.float64)
sqrt_c = tf.math.sqrt(c)
u_norm = self.clipped_norm(u)
second_term = (
tanh(sqrt_c / 2 * self.lambda_x(x, c) * u_norm) * u / (sqrt_c * u_norm)
)
return self.mobius_add(x, second_term, c)
def expmap0(self, u, c):
sqrt_c = c ** 0.5
max_num = tf.math.reduce_max(u)
u_norm = tf.clip_by_value(
tf.norm(u, axis=-1, ord=2, keepdims=True),
clip_value_min=self.min_norm,
clip_value_max=max_num,
)
gamma_1 = tf.math.tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)
return gamma_1
def logmap0(self, p, c):
sqrt_c = c ** 0.5
p_norm = tf.norm(p, axis=-1, ord=2, keepdims=True)
max_num = tf.math.reduce_max(p_norm)
p_norm = tf.clip_by_value(
p_norm, clip_value_min=self.min_norm, clip_value_max=max_num
)
scale = 1.0 / sqrt_c * atanh_(sqrt_c * p_norm) / p_norm
return scale * p
def proj(self, x, c):
x_for_norm = tf.norm(x, axis=-1, keepdims=True, ord=2)
max_num = tf.math.reduce_max(x_for_norm)
norm = tf.clip_by_value(
x_for_norm, clip_value_min=self.min_norm, clip_value_max=max_num
)
maxnorm = (1 - self.eps[x.dtype]) / (c ** 0.5)
cond = norm > maxnorm
projected = x / norm * maxnorm
return tf.where(cond, projected, x)
def mobius_add(self, x, y, c):
cx2 = c * tf.reduce_sum(x * x, axis=-1, keepdims=True)
cy2 = c * tf.reduce_sum(y * y, axis=-1, keepdims=True)
cxy = c * tf.reduce_sum(x * y, axis=-1, keepdims=True)
num = (1 + 2 * cxy + cy2) * x + (1 - cx2) * y
denom = 1 + 2 * cxy + cx2 * cy2
return self.proj(num / tf.maximum(denom, self.min_norm), c)
def _lambda(self, x, c=1.0, keepdims=False):
norm_x_2 = tf.reduce_sum(x * x, axis=-1, keepdims=keepdims)
res = 2.0 / (1.0 - c * norm_x_2)
max_num = tf.math.reduce_max(res)
return tf.clip_by_value(
res, clip_value_min=self.min_norm, clip_value_max=max_num
)
def inner(self, x, u, v, keepdims=False):
lambda_x = self._lambda(x, keepdims=keepdims)
return tf.reduce_sum(u * v, axis=-1, keepdims=keepdims) * lambda_x ** 2
def proju(self, x, u):
lambda_x = self._lambda(x, keepdims=True)
return u / lambda_x ** 2
def projx(self, x):
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
norm = tf.linalg.norm(x, axis=-1, keepdims=True)
def get_eps(val):
return np.finfo(val.dtype.name).eps
return tf.where(
sqrt_k * norm < tf.ones_like(norm),
x,
x / (sqrt_k * norm + 10 * get_eps(x)),
)
def egrad2rgrad(self, x, u):
lambda_x = self._lambda(x, keepdims=True)
return u / lambda_x ** 2
def _mobius_add(self, x, y):
x_2 = tf.reduce_sum(tf.math.square(x), axis=-1, keepdims=True)
y_2 = tf.reduce_sum(tf.math.square(y), axis=-1, keepdims=True)
x_y = tf.reduce_sum(x * y, axis=-1, keepdims=True)
k = tf.cast(self.k, x.dtype)
return ((1 + 2 * k * x_y + k * y_2) * x + (1 - k * x_2) * y) / (
1 + 2 * k * x_y + k ** 2 * x_2 * y_2
)
def _gyration(self, u, v, w):
min_u_v = -self._mobius_add(u, v)
v_w = self._mobius_add(v, w)
u_v_w = self._mobius_add(u, v_w)
return self._mobius_add(min_u_v, u_v_w)
def ptransp(self, x, y, v):
lambda_x = self._lambda(x, keepdims=True)
lambda_y = self._lambda(y, keepdims=True)
return self._gyration(y, -x, v) * lambda_x / lambda_y
transp = ptransp
def exp(self, x, u):
sqrt_k = tf.math.sqrt(tf.cast(self.k, x.dtype))
norm_u = tf.linalg.norm(u, axis=-1, keepdims=True)
lambda_x = self._lambda(x, keepdims=True)
y = (
tf.math.tanh(sqrt_k * norm_u * lambda_x / 2.0)
* u
/ (sqrt_k * norm_u)
)
return self._mobius_add(x, y)
retr = exp
def clipped_norm(self, x, max_norm = None):
x_norm = tf.norm(x, axis=-1, ord=2, keepdims=True)
if max_norm is None:
max_norm= tf.math.reduce_max(x_norm)
return tf.clip_by_value(
x_norm,
clip_value_min=self.min_norm,
clip_value_max=max_norm,
)
def gyr(self, x, y, z, c=1.0):
xy = tf.reduce_sum( x*y, axis=-1, keepdims=True)
yz = tf.reduce_sum( y*z, axis=-1, keepdims=True)
xz = tf.reduce_sum( x*z, axis=-1, keepdims=True)
x2 = tf.reduce_sum( x*x, axis=-1, keepdims=True)
y2 = tf.reduce_sum( y*y, axis=-1, keepdims=True)
z2 = tf.reduce_sum( z*z, axis=-1, keepdims=True)
A = c*yz - c**2 * xz * y2 + 2 * c**2 * xy * yz
B = c**2 * yz * x2 + c * xz
C = 1 + 2 * c* xy + c**2 * x2 * y2
return tf.add(2*tf.divide(A * x - B * y, C), z)
def lambda_x(self, x, c=1.0):
cx2 = c * tf.reduce_sum(x * x, axis=-1, keepdims=True)
return 2.0 / (1.0 - cx2)
def parallel_transport(self, x, y, v, c=1.0):
return tf.divide(self.lambda_x(x,c), self.lambda_x(y,c)) * self.gyr(y,-x,v)
| true | true |
1c2b2f742468695c7088ead48076b290d5274a1a | 8,268 | py | Python | conoha/api/network.py | ttk1/conoha-cli | d1c68ee63e9c61a0a727a24206a1fd8aa4abcf13 | [
"MIT"
] | null | null | null | conoha/api/network.py | ttk1/conoha-cli | d1c68ee63e9c61a0a727a24206a1fd8aa4abcf13 | [
"MIT"
] | null | null | null | conoha/api/network.py | ttk1/conoha-cli | d1c68ee63e9c61a0a727a24206a1fd8aa4abcf13 | [
"MIT"
] | null | null | null | '''
Wrappers for calling the ConoHa Network API
'''
from conoha import config
from conoha.util import http
endpoint = config.get_config()['endpoint']['network']
def list_networks():
'''
https://www.conoha.jp/docs/neutron-get_networks_list.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/networks', headers)
def create_network():
'''
https://www.conoha.jp/docs/neutron-add_network.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.post(f'{endpoint}/networks', None, headers)
def delete_network(network_id):
'''
https://www.conoha.jp/docs/neutron-remove_network.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.delete(f'{endpoint}/networks/{network_id}', headers)
def describe_network(network_id):
'''
https://www.conoha.jp/docs/neutron-get_networks_detail_specified.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/networks/{network_id}', headers)
###########################################################################
def create_port(network_id, ip_address,
subnet_id, security_group_ids=None):
'''
https://www.conoha.jp/docs/neutron-add_port.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
    # Required fields
data = {
'port': {
'network_id': network_id,
'fixed_ips': [{
'ip_address': ip_address,
'subnet_id': subnet_id
}]
}
}
    # Optional fields
if security_group_ids is not None:
data['port']['security_groups'] = security_group_ids
return http.post(f'{endpoint}/ports', data, headers)
def update_port(port_id, security_group_ids):
'''
https://www.conoha.jp/docs/neutron-update_port.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
data = {
'port': {
'security_groups': security_group_ids
}
}
return http.put(f'{endpoint}/ports/{port_id}', data, headers)
def delete_port(port_id):
'''
https://www.conoha.jp/docs/neutron-remove_port.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.delete(f'{endpoint}/ports/{port_id}', headers)
def list_ports():
'''
https://www.conoha.jp/docs/neutron-get_ports_list.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/ports', headers)
def describe_port(port_id):
'''
https://www.conoha.jp/docs/neutron-get_ports_detail_specified.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/ports/{port_id}', headers)
###########################################################################
def create_subnet(network_id, cidr):
'''
https://www.conoha.jp/docs/neutron-add_subnet.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
data = {
'subnet': {
'network_id': network_id,
'cidr': cidr
}
}
return http.post(f'{endpoint}/subnets', data, headers)
def delete_subnet(subnet_id):
'''
https://www.conoha.jp/docs/neutron-remove_subnet.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.delete(f'{endpoint}/subnets/{subnet_id}', headers)
def list_subnets():
'''
https://www.conoha.jp/docs/neutron-get_subnets_list.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/subnets', headers)
def describe_subnet(subnet_id):
'''
https://www.conoha.jp/docs/neutron-get_subnets_detail_specified.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/subnets/{subnet_id}', headers)
###########################################################################
def create_security_group(name, description=None):
'''
https://www.conoha.jp/docs/neutron-create_secgroup.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
    # Required fields
data = {
'security_group': {
'name': name
}
}
    # Optional fields
if description is not None:
data['security_group']['description'] = description
return http.post(f'{endpoint}/security-groups', data, headers)
def delete_security_group(security_group_id):
'''
https://www.conoha.jp/docs/neutron-delete_secgroup.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.delete(f'{endpoint}/security-groups/{security_group_id}', headers)
def list_security_groups():
'''
https://www.conoha.jp/docs/neutron-get_secgroups_list.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/security-groups', headers)
def describe_security_group(security_group_id):
'''
https://www.conoha.jp/docs/neutron-get_secgroups_detail_specified.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/security-groups/{security_group_id}', headers)
###########################################################################
def create_security_group_rule(direction, ether_type, security_group_id,
port_range_min=None, port_range_max=None, protocol=None,
remote_group_id=None, remote_ip_prefix=None):
'''
https://www.conoha.jp/docs/neutron-create_rule_on_secgroup.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
    # Required fields
data = {
'security_group_rule': {
'direction': direction,
'ethertype': ether_type,
'security_group_id': security_group_id
}
}
    # Optional fields
if port_range_min is not None:
data['security_group_rule']['port_range_min'] = port_range_min
if port_range_max is not None:
data['security_group_rule']['port_range_max'] = port_range_max
if protocol is not None and protocol != 'null':
data['security_group_rule']['protocol'] = protocol
if remote_group_id is not None:
data['security_group_rule']['remote_group_id'] = remote_group_id
if remote_ip_prefix is not None:
data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
return http.post(f'{endpoint}/security-group-rules', data, headers)
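# Illustrative sketch, not part of the original module: a typical rule allowing inbound
# SSH over IPv4, using the standard Neutron security-group-rule field values. The helper
# name is made up for illustration; it simply forwards to the wrapper above.
def _example_allow_ssh(security_group_id):
    return create_security_group_rule(
        direction='ingress',
        ether_type='IPv4',
        security_group_id=security_group_id,
        protocol='tcp',
        port_range_min=22,
        port_range_max=22,
        remote_ip_prefix='0.0.0.0/0',
    )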
def delete_security_group_rule(rule_id):
'''
https://www.conoha.jp/docs/neutron-delete_rule_on_secgroup.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.delete(f'{endpoint}/security-group-rules/{rule_id}', headers)
def list_security_group_rules():
'''
https://www.conoha.jp/docs/neutron-get_rules_on_secgroup.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/security-group-rules', headers)
def describe_security_group_rule(rule_id):
'''
https://www.conoha.jp/docs/neutron-get_rules_detail_specified.php
'''
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/security-group-rules/{rule_id}', headers)
| 26.5 | 87 | 0.583454 |
from conoha import config
from conoha.util import http
endpoint = config.get_config()['endpoint']['network']
def list_networks():
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/networks', headers)
def create_network():
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.post(f'{endpoint}/networks', None, headers)
def delete_network(network_id):
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.delete(f'{endpoint}/networks/{network_id}', headers)
def describe_network(network_id):
headers = {
'Accept': 'application/json',
'X-Auth-Token': config.get_token()['id']
}
return http.get(f'{endpoint}/networks/{network_id}', headers)
| true | true |
1c2b3275f73874280dbbf256213b60596356c59d | 183 | py | Python | gym_multigrid/envs/__init__.py | n0whereRuoxi/gym-multigrid | 98809bd40b3d4a0bfa1ab909b1a748fe82d71b60 | [
"Apache-2.0"
] | null | null | null | gym_multigrid/envs/__init__.py | n0whereRuoxi/gym-multigrid | 98809bd40b3d4a0bfa1ab909b1a748fe82d71b60 | [
"Apache-2.0"
] | null | null | null | gym_multigrid/envs/__init__.py | n0whereRuoxi/gym-multigrid | 98809bd40b3d4a0bfa1ab909b1a748fe82d71b60 | [
"Apache-2.0"
] | null | null | null | from gym_multigrid.envs.collect_game import CollectGame4HEnv10x10N2
from gym_multigrid.envs.soccer_game import SoccerGame4HEnv10x15N2
from gym_multigrid.envs.doorkey import DoorKeyEnv | 61 | 67 | 0.907104 | from gym_multigrid.envs.collect_game import CollectGame4HEnv10x10N2
from gym_multigrid.envs.soccer_game import SoccerGame4HEnv10x15N2
from gym_multigrid.envs.doorkey import DoorKeyEnv | true | true |
1c2b32fec97d43e768fb54eff2e7f5f1493499f4 | 71 | py | Python | src/fdk_organization_bff/config/__init__.py | Informasjonsforvaltning/organization-page-bffe | 473dc9606649f864618f4f8bfc4a6a2a035f06d7 | [
"Apache-2.0"
] | null | null | null | src/fdk_organization_bff/config/__init__.py | Informasjonsforvaltning/organization-page-bffe | 473dc9606649f864618f4f8bfc4a6a2a035f06d7 | [
"Apache-2.0"
] | 47 | 2020-05-14T07:54:48.000Z | 2022-03-29T22:17:08.000Z | src/fdk_organization_bff/config/__init__.py | Informasjonsforvaltning/organization-page-bffe | 473dc9606649f864618f4f8bfc4a6a2a035f06d7 | [
"Apache-2.0"
] | null | null | null | """Config package.
Modules:
config
"""
from .config import Config
| 10.142857 | 26 | 0.676056 | from .config import Config
| true | true |
1c2b3475de2a1e1297fba8b659a529d7453411fa | 42,050 | py | Python | .mywaflib/waflib/Build.py | tobiasraabe/crypto | 5b40049169cfbf02f4979a55e8abdb77b834b820 | [
"BSD-3-Clause"
] | null | null | null | .mywaflib/waflib/Build.py | tobiasraabe/crypto | 5b40049169cfbf02f4979a55e8abdb77b834b820 | [
"BSD-3-Clause"
] | 1 | 2017-08-31T15:55:24.000Z | 2017-08-31T15:55:24.000Z | .mywaflib/waflib/Build.py | tobiasraabe/crypto | 5b40049169cfbf02f4979a55e8abdb77b834b820 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2016 (ita)
"""
Classes related to the build phase (build, clean, install, step, etc)
The inheritance tree is the following:
"""
import os, sys, errno, re, shutil, stat
try:
import cPickle
except ImportError:
import pickle as cPickle
from waflib import Node, Runner, TaskGen, Utils, ConfigSet, Task, Logs, Options, Context, Errors
CACHE_DIR = 'c4che'
"""Name of the cache directory"""
CACHE_SUFFIX = '_cache.py'
"""ConfigSet cache files for variants are written under :py:attr:´waflib.Build.CACHE_DIR´ in the form ´variant_name´_cache.py"""
INSTALL = 1337
"""Positive value '->' install, see :py:attr:`waflib.Build.BuildContext.is_install`"""
UNINSTALL = -1337
"""Negative value '<-' uninstall, see :py:attr:`waflib.Build.BuildContext.is_install`"""
SAVED_ATTRS = 'root node_sigs task_sigs imp_sigs raw_deps node_deps'.split()
"""Build class members to save between the runs; these should be all dicts
except for `root` which represents a :py:class:`waflib.Node.Node` instance
"""
CFG_FILES = 'cfg_files'
"""Files from the build directory to hash before starting the build (``config.h`` written during the configuration)"""
POST_AT_ONCE = 0
"""Post mode: all task generators are posted before any task executed"""
POST_LAZY = 1
"""Post mode: post the task generators group after group, the tasks in the next group are created when the tasks in the previous groups are done"""
PROTOCOL = -1
if sys.platform == 'cli':
PROTOCOL = 0
class BuildContext(Context.Context):
'''executes the build'''
cmd = 'build'
variant = ''
def __init__(self, **kw):
super(BuildContext, self).__init__(**kw)
self.is_install = 0
"""Non-zero value when installing or uninstalling file"""
self.top_dir = kw.get('top_dir', Context.top_dir)
"""See :py:attr:`waflib.Context.top_dir`; prefer :py:attr:`waflib.Build.BuildContext.srcnode`"""
self.out_dir = kw.get('out_dir', Context.out_dir)
"""See :py:attr:`waflib.Context.out_dir`; prefer :py:attr:`waflib.Build.BuildContext.bldnode`"""
self.run_dir = kw.get('run_dir', Context.run_dir)
"""See :py:attr:`waflib.Context.run_dir`"""
self.launch_dir = Context.launch_dir
"""See :py:attr:`waflib.Context.out_dir`; prefer :py:meth:`waflib.Build.BuildContext.launch_node`"""
self.post_mode = POST_LAZY
"""Whether to post the task generators at once or group-by-group (default is group-by-group)"""
self.cache_dir = kw.get('cache_dir')
if not self.cache_dir:
self.cache_dir = os.path.join(self.out_dir, CACHE_DIR)
self.all_envs = {}
"""Map names to :py:class:`waflib.ConfigSet.ConfigSet`, the empty string must map to the default environment"""
# ======================================= #
# cache variables
self.node_sigs = {}
"""Dict mapping build nodes to task identifier (uid), it indicates whether a task created a particular file (persists across builds)"""
self.task_sigs = {}
"""Dict mapping task identifiers (uid) to task signatures (persists across builds)"""
self.imp_sigs = {}
"""Dict mapping task identifiers (uid) to implicit task dependencies used for scanning targets (persists across builds)"""
self.node_deps = {}
"""Dict mapping task identifiers (uid) to node dependencies found by :py:meth:`waflib.Task.Task.scan` (persists across builds)"""
self.raw_deps = {}
"""Dict mapping task identifiers (uid) to custom data returned by :py:meth:`waflib.Task.Task.scan` (persists across builds)"""
self.task_gen_cache_names = {}
self.jobs = Options.options.jobs
"""Amount of jobs to run in parallel"""
self.targets = Options.options.targets
"""List of targets to build (default: \*)"""
self.keep = Options.options.keep
"""Whether the build should continue past errors"""
self.progress_bar = Options.options.progress_bar
"""
Level of progress status:
0. normal output
1. progress bar
2. IDE output
3. No output at all
"""
# Manual dependencies.
self.deps_man = Utils.defaultdict(list)
"""Manual dependencies set by :py:meth:`waflib.Build.BuildContext.add_manual_dependency`"""
# just the structure here
self.current_group = 0
"""
Current build group
"""
self.groups = []
"""
List containing lists of task generators
"""
self.group_names = {}
"""
Map group names to the group lists. See :py:meth:`waflib.Build.BuildContext.add_group`
"""
for v in SAVED_ATTRS:
if not hasattr(self, v):
setattr(self, v, {})
def get_variant_dir(self):
"""Getter for the variant_dir attribute"""
if not self.variant:
return self.out_dir
return os.path.join(self.out_dir, self.variant)
variant_dir = property(get_variant_dir, None)
def __call__(self, *k, **kw):
"""
Create a task generator and add it to the current build group. The following forms are equivalent::
def build(bld):
tg = bld(a=1, b=2)
def build(bld):
tg = bld()
tg.a = 1
tg.b = 2
def build(bld):
tg = TaskGen.task_gen(a=1, b=2)
bld.add_to_group(tg, None)
:param group: group name to add the task generator to
:type group: string
"""
kw['bld'] = self
ret = TaskGen.task_gen(*k, **kw)
self.task_gen_cache_names = {} # reset the cache, each time
self.add_to_group(ret, group=kw.get('group'))
return ret
def rule(self, *k, **kw):
"""
Wrapper for creating a task generator using the decorator notation. The following code::
@bld.rule(target="foo")
def _(tsk):
print("bar")
is equivalent to::
def bar(tsk):
print("bar")
bld(
target = "foo",
rule = bar,
)
"""
def f(rule):
ret = self(*k, **kw)
ret.rule = rule
return ret
return f
def __copy__(self):
"""
Build contexts cannot be copied
:raises: :py:class:`waflib.Errors.WafError`
"""
raise Errors.WafError('build contexts cannot be copied')
def load_envs(self):
"""
The configuration command creates files of the form ``build/c4che/NAME_cache.py``. This method
creates a :py:class:`waflib.ConfigSet.ConfigSet` instance for each ``NAME`` by reading those
files and stores them in :py:attr:`waflib.Build.BuildContext.all_envs`.
"""
node = self.root.find_node(self.cache_dir)
if not node:
raise Errors.WafError('The project was not configured: run "waf configure" first!')
lst = node.ant_glob('**/*%s' % CACHE_SUFFIX, quiet=True)
if not lst:
raise Errors.WafError('The cache directory is empty: reconfigure the project')
for x in lst:
name = x.path_from(node).replace(CACHE_SUFFIX, '').replace('\\', '/')
env = ConfigSet.ConfigSet(x.abspath())
self.all_envs[name] = env
for f in env[CFG_FILES]:
newnode = self.root.find_resource(f)
if not newnode or not newnode.exists():
raise Errors.WafError('Missing configuration file %r, reconfigure the project!' % f)
def init_dirs(self):
"""
Initialize the project directory and the build directory by creating the nodes
:py:attr:`waflib.Build.BuildContext.srcnode` and :py:attr:`waflib.Build.BuildContext.bldnode`
corresponding to ``top_dir`` and ``variant_dir`` respectively. The ``bldnode`` directory is
created if necessary.
"""
if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)):
raise Errors.WafError('The project was not configured: run "waf configure" first!')
self.path = self.srcnode = self.root.find_dir(self.top_dir)
self.bldnode = self.root.make_node(self.variant_dir)
self.bldnode.mkdir()
def execute(self):
"""
Restore data from previous builds and call :py:meth:`waflib.Build.BuildContext.execute_build`.
Overrides from :py:func:`waflib.Context.Context.execute`
"""
self.restore()
if not self.all_envs:
self.load_envs()
self.execute_build()
def execute_build(self):
"""
Execute the build by:
* reading the scripts (see :py:meth:`waflib.Context.Context.recurse`)
* calling :py:meth:`waflib.Build.BuildContext.pre_build` to call user build functions
* calling :py:meth:`waflib.Build.BuildContext.compile` to process the tasks
* calling :py:meth:`waflib.Build.BuildContext.post_build` to call user build functions
"""
Logs.info("Waf: Entering directory `%s'", self.variant_dir)
self.recurse([self.run_dir])
self.pre_build()
# display the time elapsed in the progress bar
self.timer = Utils.Timer()
try:
self.compile()
finally:
if self.progress_bar == 1 and sys.stderr.isatty():
c = self.producer.processed or 1
m = self.progress_line(c, c, Logs.colors.BLUE, Logs.colors.NORMAL)
Logs.info(m, extra={'stream': sys.stderr, 'c1': Logs.colors.cursor_off, 'c2' : Logs.colors.cursor_on})
Logs.info("Waf: Leaving directory `%s'", self.variant_dir)
try:
self.producer.bld = None
del self.producer
except AttributeError:
pass
self.post_build()
def restore(self):
"""
Load data from a previous run, sets the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS`
"""
try:
env = ConfigSet.ConfigSet(os.path.join(self.cache_dir, 'build.config.py'))
except EnvironmentError:
pass
else:
if env.version < Context.HEXVERSION:
raise Errors.WafError('Version mismatch! reconfigure the project')
for t in env.tools:
self.setup(**t)
dbfn = os.path.join(self.variant_dir, Context.DBFILE)
try:
data = Utils.readf(dbfn, 'rb')
except (EnvironmentError, EOFError):
# handle missing file/empty file
Logs.debug('build: Could not load the build cache %s (missing)', dbfn)
else:
try:
Node.pickle_lock.acquire()
Node.Nod3 = self.node_class
try:
data = cPickle.loads(data)
except Exception as e:
Logs.debug('build: Could not pickle the build cache %s: %r', dbfn, e)
else:
for x in SAVED_ATTRS:
setattr(self, x, data.get(x, {}))
finally:
Node.pickle_lock.release()
self.init_dirs()
def store(self):
"""
Store data for next runs, set the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS`. Uses a temporary
file to avoid problems on ctrl+c.
"""
data = {}
for x in SAVED_ATTRS:
data[x] = getattr(self, x)
db = os.path.join(self.variant_dir, Context.DBFILE)
try:
Node.pickle_lock.acquire()
Node.Nod3 = self.node_class
x = cPickle.dumps(data, PROTOCOL)
finally:
Node.pickle_lock.release()
Utils.writef(db + '.tmp', x, m='wb')
try:
st = os.stat(db)
os.remove(db)
if not Utils.is_win32: # win32 has no chown but we're paranoid
os.chown(db + '.tmp', st.st_uid, st.st_gid)
except (AttributeError, OSError):
pass
# do not use shutil.move (copy is not thread-safe)
os.rename(db + '.tmp', db)
def compile(self):
"""
Run the build by creating an instance of :py:class:`waflib.Runner.Parallel`
The cache file is written when at least one task has been executed.
:raises: :py:class:`waflib.Errors.BuildError` in case the build fails
"""
Logs.debug('build: compile()')
# delegate the producer-consumer logic to another object to reduce the complexity
self.producer = Runner.Parallel(self, self.jobs)
self.producer.biter = self.get_build_iterator()
try:
self.producer.start()
except KeyboardInterrupt:
self.store()
raise
else:
if self.producer.dirty:
self.store()
if self.producer.error:
raise Errors.BuildError(self.producer.error)
def setup(self, tool, tooldir=None, funs=None):
"""
Import waf tools defined during the configuration::
def configure(conf):
conf.load('glib2')
def build(bld):
pass # glib2 is imported implicitly
:param tool: tool list
:type tool: list
:param tooldir: optional tool directory (sys.path)
:type tooldir: list of string
:param funs: unused variable
"""
if isinstance(tool, list):
for i in tool: self.setup(i, tooldir)
return
module = Context.load_tool(tool, tooldir)
if hasattr(module, "setup"): module.setup(self)
def get_env(self):
"""Getter for the env property"""
try:
return self.all_envs[self.variant]
except KeyError:
return self.all_envs['']
def set_env(self, val):
"""Setter for the env property"""
self.all_envs[self.variant] = val
env = property(get_env, set_env)
def add_manual_dependency(self, path, value):
"""
Adds a dependency from a node object to a value::
def build(bld):
bld.add_manual_dependency(
bld.path.find_resource('wscript'),
bld.root.find_resource('/etc/fstab'))
:param path: file path
:type path: string or :py:class:`waflib.Node.Node`
:param value: value to depend on
:type value: :py:class:`waflib.Node.Node`, byte object, or function returning a byte object
"""
if not path:
raise ValueError('Invalid input path %r' % path)
if isinstance(path, Node.Node):
node = path
elif os.path.isabs(path):
node = self.root.find_resource(path)
else:
node = self.path.find_resource(path)
if not node:
raise ValueError('Could not find the path %r' % path)
if isinstance(value, list):
self.deps_man[node].extend(value)
else:
self.deps_man[node].append(value)
def launch_node(self):
"""Returns the launch directory as a :py:class:`waflib.Node.Node` object (cached)"""
try:
# private cache
return self.p_ln
except AttributeError:
self.p_ln = self.root.find_dir(self.launch_dir)
return self.p_ln
def hash_env_vars(self, env, vars_lst):
"""
Hashes configuration set variables::
def build(bld):
bld.hash_env_vars(bld.env, ['CXX', 'CC'])
This method uses an internal cache.
:param env: Configuration Set
:type env: :py:class:`waflib.ConfigSet.ConfigSet`
:param vars_lst: list of variables
:type vars_list: list of string
"""
if not env.table:
env = env.parent
if not env:
return Utils.SIG_NIL
idx = str(id(env)) + str(vars_lst)
try:
cache = self.cache_env
except AttributeError:
cache = self.cache_env = {}
else:
try:
return self.cache_env[idx]
except KeyError:
pass
lst = [env[a] for a in vars_lst]
cache[idx] = ret = Utils.h_list(lst)
Logs.debug('envhash: %s %r', Utils.to_hex(ret), lst)
return ret
def get_tgen_by_name(self, name):
"""
Fetches a task generator by its name or its target attribute;
the name must be unique in a build::
def build(bld):
tg = bld(name='foo')
tg == bld.get_tgen_by_name('foo')
This method use a private internal cache.
:param name: Task generator name
:raises: :py:class:`waflib.Errors.WafError` in case there is no task genenerator by that name
"""
cache = self.task_gen_cache_names
if not cache:
# create the index lazily
for g in self.groups:
for tg in g:
try:
cache[tg.name] = tg
except AttributeError:
# raised if not a task generator, which should be uncommon
pass
try:
return cache[name]
except KeyError:
raise Errors.WafError('Could not find a task generator for the name %r' % name)
def progress_line(self, idx, total, col1, col2):
"""
Computes a progress bar line displayed when running ``waf -p``
:returns: progress bar line
:rtype: string
"""
if not sys.stderr.isatty():
return ''
n = len(str(total))
Utils.rot_idx += 1
ind = Utils.rot_chr[Utils.rot_idx % 4]
pc = (100. * idx)/total
fs = "[%%%dd/%%d][%%s%%2d%%%%%%s][%s][" % (n, ind)
left = fs % (idx, total, col1, pc, col2)
right = '][%s%s%s]' % (col1, self.timer, col2)
cols = Logs.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2)
if cols < 7: cols = 7
ratio = ((cols * idx)//total) - 1
bar = ('='*ratio+'>').ljust(cols)
msg = Logs.indicator % (left, bar, right)
return msg
def declare_chain(self, *k, **kw):
"""
Wraps :py:func:`waflib.TaskGen.declare_chain` for convenience
"""
return TaskGen.declare_chain(*k, **kw)
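	# Illustrative sketch, not part of the original file: in a wscript, a suffix-based
	# chain could be declared and then driven by ordinary source lists, e.g.
	#   def build(bld):
	#       bld.declare_chain(name='copy', rule='cp ${SRC} ${TGT}',
	#                         ext_in='.txt', ext_out='.out')
	#       bld(source='notes.txt')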
def pre_build(self):
"""Executes user-defined methods before the build starts, see :py:meth:`waflib.Build.BuildContext.add_pre_fun`"""
for m in getattr(self, 'pre_funs', []):
m(self)
def post_build(self):
"""Executes user-defined methods after the build is successful, see :py:meth:`waflib.Build.BuildContext.add_post_fun`"""
for m in getattr(self, 'post_funs', []):
m(self)
def add_pre_fun(self, meth):
"""
Binds a callback method to execute after the scripts are read and before the build starts::
def mycallback(bld):
print("Hello, world!")
def build(bld):
bld.add_pre_fun(mycallback)
"""
try:
self.pre_funs.append(meth)
except AttributeError:
self.pre_funs = [meth]
def add_post_fun(self, meth):
"""
Binds a callback method to execute immediately after the build is successful::
def call_ldconfig(bld):
bld.exec_command('/sbin/ldconfig')
def build(bld):
if bld.cmd == 'install':
bld.add_pre_fun(call_ldconfig)
"""
try:
self.post_funs.append(meth)
except AttributeError:
self.post_funs = [meth]
def get_group(self, x):
"""
Returns the build group named `x`, or the current group if `x` is None
:param x: name or number or None
:type x: string, int or None
"""
if not self.groups:
self.add_group()
if x is None:
return self.groups[self.current_group]
if x in self.group_names:
return self.group_names[x]
return self.groups[x]
def add_to_group(self, tgen, group=None):
"""Adds a task or a task generator to the build; there is no attempt to remove it if it was already added."""
assert(isinstance(tgen, TaskGen.task_gen) or isinstance(tgen, Task.TaskBase))
tgen.bld = self
self.get_group(group).append(tgen)
def get_group_name(self, g):
"""
Returns the name of the input build group
:param g: build group object or build group index
:type g: integer or list
:return: name
:rtype: string
"""
if not isinstance(g, list):
g = self.groups[g]
for x in self.group_names:
if id(self.group_names[x]) == id(g):
return x
return ''
def get_group_idx(self, tg):
"""
Returns the index of the group containing the task generator given as argument::
def build(bld):
tg = bld(name='nada')
0 == bld.get_group_idx(tg)
:param tg: Task generator object
:type tg: :py:class:`waflib.TaskGen.task_gen`
:rtype: int
"""
se = id(tg)
for i, tmp in enumerate(self.groups):
for t in tmp:
if id(t) == se:
return i
return None
def add_group(self, name=None, move=True):
"""
Adds a new group of tasks/task generators. By default the new group becomes
the default group for new task generators (make sure to create build groups in order).
:param name: name for this group
:type name: string
:param move: set this new group as default group (True by default)
:type move: bool
:raises: :py:class:`waflib.Errors.WafError` if a group by the name given already exists
"""
if name and name in self.group_names:
raise Errors.WafError('add_group: name %s already present' % name)
g = []
self.group_names[name] = g
self.groups.append(g)
if move:
self.current_group = len(self.groups) - 1
def set_group(self, idx):
"""
Sets the build group at position idx as current so that newly added
task generators are added to this one by default::
def build(bld):
bld(rule='touch ${TGT}', target='foo.txt')
bld.add_group() # now the current group is 1
bld(rule='touch ${TGT}', target='bar.txt')
bld.set_group(0) # now the current group is 0
bld(rule='touch ${TGT}', target='truc.txt') # build truc.txt before bar.txt
:param idx: group name or group index
:type idx: string or int
"""
if isinstance(idx, str):
g = self.group_names[idx]
for i, tmp in enumerate(self.groups):
if id(g) == id(tmp):
self.current_group = i
break
else:
self.current_group = idx
def total(self):
"""
Approximate task count: this value may be inaccurate if task generators
are posted lazily (see :py:attr:`waflib.Build.BuildContext.post_mode`).
The value :py:attr:`waflib.Runner.Parallel.total` is updated during the task execution.
:rtype: int
"""
total = 0
for group in self.groups:
for tg in group:
try:
total += len(tg.tasks)
except AttributeError:
total += 1
return total
def get_targets(self):
"""
Returns the task generator corresponding to the 'targets' list; used internally
by :py:meth:`waflib.Build.BuildContext.get_build_iterator` to perform partial builds::
$ waf --targets=myprogram,myshlib
"""
to_post = []
min_grp = 0
for name in self.targets.split(','):
tg = self.get_tgen_by_name(name)
m = self.get_group_idx(tg)
if m > min_grp:
min_grp = m
to_post = [tg]
elif m == min_grp:
to_post.append(tg)
return (min_grp, to_post)
def get_all_task_gen(self):
"""
Returns a list of all task generators for troubleshooting purposes.
"""
lst = []
for g in self.groups:
lst.extend(g)
return lst
def post_group(self):
"""
Post task generators from the group indexed by self.cur; used internally
by :py:meth:`waflib.Build.BuildContext.get_build_iterator`
"""
if self.targets == '*':
for tg in self.groups[self.cur]:
try:
f = tg.post
except AttributeError:
pass
else:
f()
elif self.targets:
if self.cur < self._min_grp:
for tg in self.groups[self.cur]:
try:
f = tg.post
except AttributeError:
pass
else:
f()
else:
for tg in self._exact_tg:
tg.post()
else:
ln = self.launch_node()
if ln.is_child_of(self.bldnode):
Logs.warn('Building from the build directory, forcing --targets=*')
ln = self.srcnode
elif not ln.is_child_of(self.srcnode):
Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)', ln.abspath(), self.srcnode.abspath())
ln = self.srcnode
for tg in self.groups[self.cur]:
try:
f = tg.post
except AttributeError:
pass
else:
if tg.path.is_child_of(ln):
f()
def get_tasks_group(self, idx):
"""
Returns all task instances for the build group at position idx,
used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator`
:rtype: list of :py:class:`waflib.Task.TaskBase`
"""
tasks = []
for tg in self.groups[idx]:
try:
tasks.extend(tg.tasks)
except AttributeError: # not a task generator
tasks.append(tg)
return tasks
def get_build_iterator(self):
"""
Creates a Python generator object that returns lists of tasks that may be processed in parallel.
:return: tasks which can be executed immediately
:rtype: generator returning lists of :py:class:`waflib.Task.TaskBase`
"""
self.cur = 0
if self.targets and self.targets != '*':
(self._min_grp, self._exact_tg) = self.get_targets()
global lazy_post
if self.post_mode != POST_LAZY:
while self.cur < len(self.groups):
self.post_group()
self.cur += 1
self.cur = 0
while self.cur < len(self.groups):
# first post the task generators for the group
if self.post_mode != POST_AT_ONCE:
self.post_group()
# then extract the tasks
tasks = self.get_tasks_group(self.cur)
# if the constraints are set properly (ext_in/ext_out, before/after)
# the call to set_file_constraints may be removed (can be a 15% penalty on no-op rebuilds)
# (but leave set_file_constraints for the installation step)
#
# if the tasks have only files, set_file_constraints is required but set_precedence_constraints is not necessary
#
Task.set_file_constraints(tasks)
Task.set_precedence_constraints(tasks)
self.cur_tasks = tasks
self.cur += 1
if not tasks: # skip empty groups; yielding an empty list here would stop the build
continue
yield tasks
while 1:
yield []
def install_files(self, dest, files, **kw):
"""
Creates a task generator to install files on the system::
def build(bld):
bld.install_files('${DATADIR}', self.path.find_resource('wscript'))
:param dest: path representing the destination directory
:type dest: :py:class:`waflib.Node.Node` or string (absolute path)
:param files: input files
:type files: list of strings or list of :py:class:`waflib.Node.Node`
:param env: configuration set to expand *dest*
:type env: :py:class:`waflib.ConfigSet.ConfigSet`
:param relative_trick: preserve the folder hierarchy when installing whole folders
:type relative_trick: bool
:param cwd: parent node for searching srcfile, when srcfile is not an instance of :py:class:`waflib.Node.Node`
:type cwd: :py:class:`waflib.Node.Node`
:param postpone: execute the task immediately to perform the installation (False by default)
:type postpone: bool
"""
assert(dest)
tg = self(features='install_task', install_to=dest, install_from=files, **kw)
tg.dest = tg.install_to
tg.type = 'install_files'
if not kw.get('postpone', True):
tg.post()
return tg
def install_as(self, dest, srcfile, **kw):
"""
Creates a task generator to install a file on the system with a different name::
def build(bld):
bld.install_as('${PREFIX}/bin', 'myapp', chmod=Utils.O755)
:param dest: destination file
:type dest: :py:class:`waflib.Node.Node` or string (absolute path)
:param srcfile: input file
:type srcfile: string or :py:class:`waflib.Node.Node`
:param cwd: parent node for searching srcfile, when srcfile is not an instance of :py:class:`waflib.Node.Node`
:type cwd: :py:class:`waflib.Node.Node`
:param env: configuration set for performing substitutions in dest
:type env: :py:class:`waflib.ConfigSet.ConfigSet`
:param postpone: execute the task immediately to perform the installation (False by default)
:type postpone: bool
"""
assert(dest)
tg = self(features='install_task', install_to=dest, install_from=srcfile, **kw)
tg.dest = tg.install_to
tg.type = 'install_as'
if not kw.get('postpone', True):
tg.post()
return tg
def symlink_as(self, dest, src, **kw):
"""
Creates a task generator to install a symlink::
def build(bld):
bld.symlink_as('${PREFIX}/lib/libfoo.so', 'libfoo.so.1.2.3')
:param dest: absolute path of the symlink
:type dest: :py:class:`waflib.Node.Node` or string (absolute path)
:param src: link contents, which is a relative or absolute path that may or may not exist
:type src: string
:param env: configuration set for performing substitutions in dest
:type env: :py:class:`waflib.ConfigSet.ConfigSet`
:param add: add the task created to a build group - set ``False`` only if the installation task is created after the build has started
:type add: bool
:param postpone: execute the task immediately to perform the installation
:type postpone: bool
:param relative_trick: make the symlink relative (default: ``False``)
:type relative_trick: bool
"""
assert(dest)
tg = self(features='install_task', install_to=dest, install_from=src, **kw)
tg.dest = tg.install_to
tg.type = 'symlink_as'
tg.link = src
# TODO if add: self.add_to_group(tsk)
if not kw.get('postpone', True):
tg.post()
return tg
@TaskGen.feature('install_task')
@TaskGen.before_method('process_rule', 'process_source')
def process_install_task(self):
"""Creates the installation task for the current task generator; uses :py:func:`waflib.Build.add_install_task` internally."""
self.add_install_task(**self.__dict__)
@TaskGen.taskgen_method
def add_install_task(self, **kw):
"""
Creates the installation task for the current task generator, and executes it immediately if necessary
:returns: An installation task
:rtype: :py:class:`waflib.Build.inst`
"""
if not self.bld.is_install:
return
if not kw['install_to']:
return
if kw['type'] == 'symlink_as' and Utils.is_win32:
if kw.get('win32_install'):
kw['type'] = 'install_as'
else:
# just exit
return
tsk = self.install_task = self.create_task('inst')
tsk.chmod = kw.get('chmod', Utils.O644)
tsk.link = kw.get('link', '') or kw.get('install_from', '')
tsk.relative_trick = kw.get('relative_trick', False)
tsk.type = kw['type']
tsk.install_to = tsk.dest = kw['install_to']
tsk.install_from = kw['install_from']
tsk.relative_base = kw.get('cwd') or kw.get('relative_base', self.path)
tsk.install_user = kw.get('install_user')
tsk.install_group = kw.get('install_group')
tsk.init_files()
if not kw.get('postpone', True):
tsk.run_now()
return tsk
@TaskGen.taskgen_method
def add_install_files(self, **kw):
"""
Creates an installation task for files
:returns: An installation task
:rtype: :py:class:`waflib.Build.inst`
"""
kw['type'] = 'install_files'
return self.add_install_task(**kw)
@TaskGen.taskgen_method
def add_install_as(self, **kw):
"""
Creates an installation task for a single file
:returns: An installation task
:rtype: :py:class:`waflib.Build.inst`
"""
kw['type'] = 'install_as'
return self.add_install_task(**kw)
@TaskGen.taskgen_method
def add_symlink_as(self, **kw):
"""
Creates an installation task for a symbolic link
:returns: An installation task
:rtype: :py:class:`waflib.Build.inst`
"""
kw['type'] = 'symlink_as'
return self.add_install_task(**kw)
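# Illustrative sketch, not part of the original file: inside a task generator method,
# the helpers above can be called directly; '${DOCDIR}' is an assumed variable, e.g.
#   self.add_install_files(install_to='${DOCDIR}',
#                          install_from=self.path.ant_glob('*.md'))
# which routes through add_install_task with type='install_files'.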
class inst(Task.Task):
"""Task that installs files or symlinks; it is typically executed by :py:class:`waflib.Build.InstallContext` and :py:class:`waflib.Build.UnInstallContext`"""
def __str__(self):
"""Returns an empty string to disable the standard task display"""
return ''
def uid(self):
"""Returns a unique identifier for the task"""
lst = self.inputs + self.outputs + [self.link, self.generator.path.abspath()]
return Utils.h_list(lst)
def init_files(self):
"""
Initializes the task input and output nodes
"""
if self.type == 'symlink_as':
inputs = []
else:
inputs = self.generator.to_nodes(self.install_from)
if self.type == 'install_as':
assert len(inputs) == 1
self.set_inputs(inputs)
dest = self.get_install_path()
outputs = []
if self.type == 'symlink_as':
if self.relative_trick:
self.link = os.path.relpath(self.link, os.path.dirname(dest))
outputs.append(self.generator.bld.root.make_node(dest))
elif self.type == 'install_as':
outputs.append(self.generator.bld.root.make_node(dest))
else:
for y in inputs:
if self.relative_trick:
destfile = os.path.join(dest, y.path_from(self.relative_base))
else:
destfile = os.path.join(dest, y.name)
outputs.append(self.generator.bld.root.make_node(destfile))
self.set_outputs(outputs)
def runnable_status(self):
"""
Installation tasks are always executed, so this method returns either :py:const:`waflib.Task.ASK_LATER` or :py:const:`waflib.Task.RUN_ME`.
"""
ret = super(inst, self).runnable_status()
if ret == Task.SKIP_ME and self.generator.bld.is_install:
return Task.RUN_ME
return ret
def post_run(self):
"""
Disables any post-run operations
"""
pass
def get_install_path(self, destdir=True):
"""
Returns the destination path where files will be installed, prepending `destdir`.
:rtype: string
"""
if isinstance(self.install_to, Node.Node):
dest = self.install_to.abspath()
else:
dest = Utils.subst_vars(self.install_to, self.env)
if destdir and Options.options.destdir:
dest = os.path.join(Options.options.destdir, os.path.splitdrive(dest)[1].lstrip(os.sep))
return dest
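	# Illustrative example, not part of the original file: with install_to='${PREFIX}/bin',
	# PREFIX='/usr/local' and '--destdir=/tmp/stage' on the command line, this method
	# returns '/tmp/stage/usr/local/bin'.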
def copy_fun(self, src, tgt):
"""
Copies a file from src to tgt, preserving permissions and trying to work
around path limitations on Windows platforms. On Unix-like platforms,
the owner/group of the target file may be set through install_user/install_group
:param src: absolute path
:type src: string
:param tgt: absolute path
:type tgt: string
"""
# override this if you want to strip executables
# kw['tsk'].source is the task that created the files in the build
if Utils.is_win32 and len(tgt) > 259 and not tgt.startswith('\\\\?\\'):
tgt = '\\\\?\\' + tgt
shutil.copy2(src, tgt)
self.fix_perms(tgt)
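	# Illustrative sketch (an assumption, not from the original file): following the hint
	# above, a wscript could post-process installed files by wrapping this hook, e.g.
	#   from waflib.Build import inst
	#   _orig_copy = inst.copy_fun
	#   def copy_and_strip(self, src, tgt):
	#       _orig_copy(self, src, tgt)
	#       self.generator.bld.exec_command(['strip', tgt])
	#   inst.copy_fun = copy_and_strip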
def rm_empty_dirs(self, tgt):
"""
Removes empty folders recursively when uninstalling.
:param tgt: absolute path
:type tgt: string
"""
while tgt:
tgt = os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def run(self):
"""
Performs file or symlink installation
"""
is_install = self.generator.bld.is_install
if not is_install: # unnecessary?
return
for x in self.outputs:
if is_install == INSTALL:
x.parent.mkdir()
if self.type == 'symlink_as':
fun = is_install == INSTALL and self.do_link or self.do_unlink
fun(self.link, self.outputs[0].abspath())
else:
fun = is_install == INSTALL and self.do_install or self.do_uninstall
launch_node = self.generator.bld.launch_node()
for x, y in zip(self.inputs, self.outputs):
fun(x.abspath(), y.abspath(), x.path_from(launch_node))
def run_now(self):
"""
Try executing the installation task right now
:raises: :py:class:`waflib.Errors.TaskNotReady`
"""
status = self.runnable_status()
if status not in (Task.RUN_ME, Task.SKIP_ME):
raise Errors.TaskNotReady('Could not process %r: status %r' % (self, status))
self.run()
self.hasrun = Task.SUCCESS
def do_install(self, src, tgt, lbl, **kw):
"""
Copies a file from src to tgt with given file permissions. The actual copy is only performed
if the source and target file sizes or timestamps differ. When the copy occurs,
the file is always first removed and then copied so as to prevent stale inodes.
:param src: file name as absolute path
:type src: string
:param tgt: file destination, as absolute path
:type tgt: string
:param lbl: file source description
:type lbl: string
:param chmod: installation mode
:type chmod: int
:raises: :py:class:`waflib.Errors.WafError` if the file cannot be written
"""
if not Options.options.force:
# check if the file is already there to avoid a copy
try:
st1 = os.stat(tgt)
st2 = os.stat(src)
except OSError:
pass
else:
# same size and identical timestamps -> make no copy
if st1.st_mtime + 2 >= st2.st_mtime and st1.st_size == st2.st_size:
if not self.generator.bld.progress_bar:
Logs.info('- install %s (from %s)', tgt, lbl)
return False
if not self.generator.bld.progress_bar:
Logs.info('+ install %s (from %s)', tgt, lbl)
# Give best attempt at making destination overwritable,
# like the 'install' utility used by 'make install' does.
try:
os.chmod(tgt, Utils.O644 | stat.S_IMODE(os.stat(tgt).st_mode))
except EnvironmentError:
pass
# following is for shared libs and stale inodes (-_-)
try:
os.remove(tgt)
except OSError:
pass
try:
self.copy_fun(src, tgt)
except EnvironmentError as e:
if not os.path.exists(src):
Logs.error('File %r does not exist', src)
elif not os.path.isfile(src):
Logs.error('Input %r is not a file', src)
raise Errors.WafError('Could not install the file %r' % tgt, e)
def fix_perms(self, tgt):
"""
Change the ownership of the file/folder/link pointed by the given path
This looks up for `install_user` or `install_group` attributes
on the task or on the task generator::
def build(bld):
bld.install_as('${PREFIX}/wscript',
'wscript',
install_user='nobody', install_group='nogroup')
bld.symlink_as('${PREFIX}/wscript_link',
Utils.subst_vars('${PREFIX}/wscript', bld.env),
install_user='nobody', install_group='nogroup')
"""
if not Utils.is_win32:
user = getattr(self, 'install_user', None) or getattr(self.generator, 'install_user', None)
group = getattr(self, 'install_group', None) or getattr(self.generator, 'install_group', None)
if user or group:
Utils.lchown(tgt, user or -1, group or -1)
if not os.path.islink(tgt):
os.chmod(tgt, self.chmod)
def do_link(self, src, tgt, **kw):
"""
Creates a symlink from tgt to src.
:param src: file name as absolute path
:type src: string
:param tgt: file destination, as absolute path
:type tgt: string
"""
if os.path.islink(tgt) and os.readlink(tgt) == src:
if not self.generator.bld.progress_bar:
Logs.info('- symlink %s (to %s)', tgt, src)
else:
try:
os.remove(tgt)
except OSError:
pass
if not self.generator.bld.progress_bar:
Logs.info('+ symlink %s (to %s)', tgt, src)
os.symlink(src, tgt)
self.fix_perms(tgt)
def do_uninstall(self, src, tgt, lbl, **kw):
"""
See :py:meth:`waflib.Build.inst.do_install`
"""
if not self.generator.bld.progress_bar:
Logs.info('- remove %s', tgt)
#self.uninstall.append(tgt)
try:
os.remove(tgt)
except OSError as e:
if e.errno != errno.ENOENT:
if not getattr(self, 'uninstall_error', None):
self.uninstall_error = True
Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
if Logs.verbose > 1:
Logs.warn('Could not remove %s (error code %r)', e.filename, e.errno)
self.rm_empty_dirs(tgt)
def do_unlink(self, src, tgt, **kw):
"""
See :py:meth:`waflib.Build.inst.do_link`
"""
try:
if not self.generator.bld.progress_bar:
Logs.info('- remove %s', tgt)
os.remove(tgt)
except OSError:
pass
self.rm_empty_dirs(tgt)
class InstallContext(BuildContext):
'''installs the targets on the system'''
cmd = 'install'
def __init__(self, **kw):
super(InstallContext, self).__init__(**kw)
self.is_install = INSTALL
class UninstallContext(InstallContext):
'''removes the targets installed'''
cmd = 'uninstall'
def __init__(self, **kw):
super(UninstallContext, self).__init__(**kw)
self.is_install = UNINSTALL
def execute(self):
"""
See :py:func:`waflib.Build.BuildContext.execute`.
"""
# TODO just mark the tasks are already run with hasrun=Task.SKIPPED?
try:
# do not execute any tasks
def runnable_status(self):
return Task.SKIP_ME
setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status)
setattr(Task.Task, 'runnable_status', runnable_status)
super(UninstallContext, self).execute()
finally:
setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back)
class CleanContext(BuildContext):
'''cleans the project'''
cmd = 'clean'
def execute(self):
"""
See :py:func:`waflib.Build.BuildContext.execute`.
"""
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
try:
self.clean()
finally:
self.store()
def clean(self):
"""Remove files from the build directory if possible, and reset the caches"""
Logs.debug('build: clean called')
if self.bldnode != self.srcnode:
# would lead to a disaster if top == out
lst = []
for env in self.all_envs.values():
lst.extend(self.root.find_or_declare(f) for f in env[CFG_FILES])
for n in self.bldnode.ant_glob('**/*', excl='.lock* *conf_check_*/** config.log c4che/*', quiet=True):
if n in lst:
continue
n.delete()
self.root.children = {}
for v in SAVED_ATTRS:
if v == 'root':
continue
setattr(self, v, {})
class ListContext(BuildContext):
'''lists the targets to execute'''
cmd = 'list'
def execute(self):
"""
See :py:func:`waflib.Build.BuildContext.execute`.
"""
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
self.pre_build()
# display the time elapsed in the progress bar
self.timer = Utils.Timer()
for g in self.groups:
for tg in g:
try:
f = tg.post
except AttributeError:
pass
else:
f()
try:
# force the cache initialization
self.get_tgen_by_name('')
except Errors.WafError:
pass
for k in sorted(self.task_gen_cache_names.keys()):
Logs.pprint('GREEN', k)
class StepContext(BuildContext):
'''executes tasks in a step-by-step fashion, for debugging'''
cmd = 'step'
def __init__(self, **kw):
super(StepContext, self).__init__(**kw)
self.files = Options.options.files
def compile(self):
"""
Overrides :py:meth:`waflib.Build.BuildContext.compile` to perform a partial build
on tasks matching the input/output pattern given (regular expression matching)::
$ waf step --files=foo.c,bar.c,in:truc.c,out:bar.o
$ waf step --files=in:foo.cpp.1.o # link task only
"""
if not self.files:
Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"')
BuildContext.compile(self)
return
targets = []
if self.targets and self.targets != '*':
targets = self.targets.split(',')
for g in self.groups:
for tg in g:
if targets and tg.name not in targets:
continue
try:
f = tg.post
except AttributeError:
pass
else:
f()
for pat in self.files.split(','):
matcher = self.get_matcher(pat)
for tg in g:
if isinstance(tg, Task.TaskBase):
lst = [tg]
else:
lst = tg.tasks
for tsk in lst:
do_exec = False
for node in getattr(tsk, 'inputs', []):
if matcher(node, output=False):
do_exec = True
break
for node in getattr(tsk, 'outputs', []):
if matcher(node, output=True):
do_exec = True
break
if do_exec:
ret = tsk.run()
Logs.info('%s -> exit %r', tsk, ret)
def get_matcher(self, pat):
"""
Converts a step pattern into a function
:param pat: pattern of the form in:truc.c,out:bar.o
:returns: Python function that uses Node objects as inputs and returns matches
:rtype: function
"""
# this returns a function
inn = True
out = True
if pat.startswith('in:'):
out = False
pat = pat.replace('in:', '')
elif pat.startswith('out:'):
inn = False
pat = pat.replace('out:', '')
anode = self.root.find_node(pat)
pattern = None
if not anode:
if not pat.startswith('^'):
pat = '^.+?%s' % pat
if not pat.endswith('$'):
pat = '%s$' % pat
pattern = re.compile(pat)
def match(node, output):
if output == True and not out:
return False
if output == False and not inn:
return False
if anode:
return anode == node
else:
return pattern.match(node.abspath())
return match
class EnvContext(BuildContext):
"""Subclass EnvContext to create commands that require configuration data in 'env'"""
fun = cmd = None
def execute(self):
"""
See :py:func:`waflib.Build.BuildContext.execute`.
"""
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
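	# Illustrative sketch, not part of the original file: a project command that needs the
	# configured environment can subclass EnvContext in its wscript, e.g.
	#   from waflib.Build import EnvContext
	#   class show_prefix_cls(EnvContext):
	#       cmd = fun = 'show_prefix'
	#   def show_prefix(ctx):
	#       print(ctx.env.PREFIX)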
| 28.469871 | 158 | 0.682711 |
import os, sys, errno, re, shutil, stat
try:
import cPickle
except ImportError:
import pickle as cPickle
from waflib import Node, Runner, TaskGen, Utils, ConfigSet, Task, Logs, Options, Context, Errors
CACHE_DIR = 'c4che'
CACHE_SUFFIX = '_cache.py'
INSTALL = 1337
UNINSTALL = -1337
SAVED_ATTRS = 'root node_sigs task_sigs imp_sigs raw_deps node_deps'.split()
CFG_FILES = 'cfg_files'
POST_AT_ONCE = 0
POST_LAZY = 1
PROTOCOL = -1
if sys.platform == 'cli':
PROTOCOL = 0
class BuildContext(Context.Context):
cmd = 'build'
variant = ''
def __init__(self, **kw):
super(BuildContext, self).__init__(**kw)
self.is_install = 0
self.top_dir = kw.get('top_dir', Context.top_dir)
self.out_dir = kw.get('out_dir', Context.out_dir)
self.run_dir = kw.get('run_dir', Context.run_dir)
self.launch_dir = Context.launch_dir
self.post_mode = POST_LAZY
self.cache_dir = kw.get('cache_dir')
if not self.cache_dir:
self.cache_dir = os.path.join(self.out_dir, CACHE_DIR)
self.all_envs = {}
self.node_sigs = {}
self.task_sigs = {}
self.imp_sigs = {}
self.node_deps = {}
self.raw_deps = {}
self.task_gen_cache_names = {}
self.jobs = Options.options.jobs
self.targets = Options.options.targets
self.keep = Options.options.keep
self.progress_bar = Options.options.progress_bar
self.deps_man = Utils.defaultdict(list)
self.current_group = 0
self.groups = []
self.group_names = {}
for v in SAVED_ATTRS:
if not hasattr(self, v):
setattr(self, v, {})
def get_variant_dir(self):
if not self.variant:
return self.out_dir
return os.path.join(self.out_dir, self.variant)
variant_dir = property(get_variant_dir, None)
def __call__(self, *k, **kw):
kw['bld'] = self
ret = TaskGen.task_gen(*k, **kw)
self.task_gen_cache_names = {}
self.add_to_group(ret, group=kw.get('group'))
return ret
def rule(self, *k, **kw):
def f(rule):
ret = self(*k, **kw)
ret.rule = rule
return ret
return f
def __copy__(self):
raise Errors.WafError('build contexts cannot be copied')
def load_envs(self):
node = self.root.find_node(self.cache_dir)
if not node:
raise Errors.WafError('The project was not configured: run "waf configure" first!')
lst = node.ant_glob('**/*%s' % CACHE_SUFFIX, quiet=True)
if not lst:
raise Errors.WafError('The cache directory is empty: reconfigure the project')
for x in lst:
name = x.path_from(node).replace(CACHE_SUFFIX, '').replace('\\', '/')
env = ConfigSet.ConfigSet(x.abspath())
self.all_envs[name] = env
for f in env[CFG_FILES]:
newnode = self.root.find_resource(f)
if not newnode or not newnode.exists():
raise Errors.WafError('Missing configuration file %r, reconfigure the project!' % f)
def init_dirs(self):
if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)):
raise Errors.WafError('The project was not configured: run "waf configure" first!')
self.path = self.srcnode = self.root.find_dir(self.top_dir)
self.bldnode = self.root.make_node(self.variant_dir)
self.bldnode.mkdir()
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.execute_build()
def execute_build(self):
Logs.info("Waf: Entering directory `%s'", self.variant_dir)
self.recurse([self.run_dir])
self.pre_build()
# display the time elapsed in the progress bar
self.timer = Utils.Timer()
try:
self.compile()
finally:
if self.progress_bar == 1 and sys.stderr.isatty():
c = self.producer.processed or 1
m = self.progress_line(c, c, Logs.colors.BLUE, Logs.colors.NORMAL)
Logs.info(m, extra={'stream': sys.stderr, 'c1': Logs.colors.cursor_off, 'c2' : Logs.colors.cursor_on})
Logs.info("Waf: Leaving directory `%s'", self.variant_dir)
try:
self.producer.bld = None
del self.producer
except AttributeError:
pass
self.post_build()
def restore(self):
try:
env = ConfigSet.ConfigSet(os.path.join(self.cache_dir, 'build.config.py'))
except EnvironmentError:
pass
else:
if env.version < Context.HEXVERSION:
raise Errors.WafError('Version mismatch! reconfigure the project')
for t in env.tools:
self.setup(**t)
dbfn = os.path.join(self.variant_dir, Context.DBFILE)
try:
data = Utils.readf(dbfn, 'rb')
except (EnvironmentError, EOFError):
Logs.debug('build: Could not load the build cache %s (missing)', dbfn)
else:
try:
Node.pickle_lock.acquire()
Node.Nod3 = self.node_class
try:
data = cPickle.loads(data)
except Exception as e:
Logs.debug('build: Could not pickle the build cache %s: %r', dbfn, e)
else:
for x in SAVED_ATTRS:
setattr(self, x, data.get(x, {}))
finally:
Node.pickle_lock.release()
self.init_dirs()
def store(self):
data = {}
for x in SAVED_ATTRS:
data[x] = getattr(self, x)
db = os.path.join(self.variant_dir, Context.DBFILE)
try:
Node.pickle_lock.acquire()
Node.Nod3 = self.node_class
x = cPickle.dumps(data, PROTOCOL)
finally:
Node.pickle_lock.release()
Utils.writef(db + '.tmp', x, m='wb')
try:
st = os.stat(db)
os.remove(db)
if not Utils.is_win32:
os.chown(db + '.tmp', st.st_uid, st.st_gid)
except (AttributeError, OSError):
pass
# do not use shutil.move (copy is not thread-safe)
os.rename(db + '.tmp', db)
def compile(self):
Logs.debug('build: compile()')
# delegate the producer-consumer logic to another object to reduce the complexity
self.producer = Runner.Parallel(self, self.jobs)
self.producer.biter = self.get_build_iterator()
try:
self.producer.start()
except KeyboardInterrupt:
self.store()
raise
else:
if self.producer.dirty:
self.store()
if self.producer.error:
raise Errors.BuildError(self.producer.error)
def setup(self, tool, tooldir=None, funs=None):
if isinstance(tool, list):
for i in tool: self.setup(i, tooldir)
return
module = Context.load_tool(tool, tooldir)
if hasattr(module, "setup"): module.setup(self)
def get_env(self):
try:
return self.all_envs[self.variant]
except KeyError:
return self.all_envs['']
def set_env(self, val):
self.all_envs[self.variant] = val
env = property(get_env, set_env)
def add_manual_dependency(self, path, value):
if not path:
raise ValueError('Invalid input path %r' % path)
if isinstance(path, Node.Node):
node = path
elif os.path.isabs(path):
node = self.root.find_resource(path)
else:
node = self.path.find_resource(path)
if not node:
raise ValueError('Could not find the path %r' % path)
if isinstance(value, list):
self.deps_man[node].extend(value)
else:
self.deps_man[node].append(value)
def launch_node(self):
try:
# private cache
return self.p_ln
except AttributeError:
self.p_ln = self.root.find_dir(self.launch_dir)
return self.p_ln
def hash_env_vars(self, env, vars_lst):
if not env.table:
env = env.parent
if not env:
return Utils.SIG_NIL
idx = str(id(env)) + str(vars_lst)
try:
cache = self.cache_env
except AttributeError:
cache = self.cache_env = {}
else:
try:
return self.cache_env[idx]
except KeyError:
pass
lst = [env[a] for a in vars_lst]
cache[idx] = ret = Utils.h_list(lst)
Logs.debug('envhash: %s %r', Utils.to_hex(ret), lst)
return ret
def get_tgen_by_name(self, name):
cache = self.task_gen_cache_names
if not cache:
# create the index lazily
for g in self.groups:
for tg in g:
try:
cache[tg.name] = tg
except AttributeError:
# raised if not a task generator, which should be uncommon
pass
try:
return cache[name]
except KeyError:
raise Errors.WafError('Could not find a task generator for the name %r' % name)
def progress_line(self, idx, total, col1, col2):
if not sys.stderr.isatty():
return ''
n = len(str(total))
Utils.rot_idx += 1
ind = Utils.rot_chr[Utils.rot_idx % 4]
pc = (100. * idx)/total
fs = "[%%%dd/%%d][%%s%%2d%%%%%%s][%s][" % (n, ind)
left = fs % (idx, total, col1, pc, col2)
right = '][%s%s%s]' % (col1, self.timer, col2)
cols = Logs.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2)
if cols < 7: cols = 7
ratio = ((cols * idx)//total) - 1
bar = ('='*ratio+'>').ljust(cols)
msg = Logs.indicator % (left, bar, right)
return msg
def declare_chain(self, *k, **kw):
return TaskGen.declare_chain(*k, **kw)
def pre_build(self):
for m in getattr(self, 'pre_funs', []):
m(self)
def post_build(self):
for m in getattr(self, 'post_funs', []):
m(self)
def add_pre_fun(self, meth):
try:
self.pre_funs.append(meth)
except AttributeError:
self.pre_funs = [meth]
def add_post_fun(self, meth):
try:
self.post_funs.append(meth)
except AttributeError:
self.post_funs = [meth]
def get_group(self, x):
if not self.groups:
self.add_group()
if x is None:
return self.groups[self.current_group]
if x in self.group_names:
return self.group_names[x]
return self.groups[x]
def add_to_group(self, tgen, group=None):
assert(isinstance(tgen, TaskGen.task_gen) or isinstance(tgen, Task.TaskBase))
tgen.bld = self
self.get_group(group).append(tgen)
def get_group_name(self, g):
if not isinstance(g, list):
g = self.groups[g]
for x in self.group_names:
if id(self.group_names[x]) == id(g):
return x
return ''
def get_group_idx(self, tg):
se = id(tg)
for i, tmp in enumerate(self.groups):
for t in tmp:
if id(t) == se:
return i
return None
def add_group(self, name=None, move=True):
if name and name in self.group_names:
raise Errors.WafError('add_group: name %s already present' % name)
g = []
self.group_names[name] = g
self.groups.append(g)
if move:
self.current_group = len(self.groups) - 1
def set_group(self, idx):
if isinstance(idx, str):
g = self.group_names[idx]
for i, tmp in enumerate(self.groups):
if id(g) == id(tmp):
self.current_group = i
break
else:
self.current_group = idx
def total(self):
total = 0
for group in self.groups:
for tg in group:
try:
total += len(tg.tasks)
except AttributeError:
total += 1
return total
def get_targets(self):
to_post = []
min_grp = 0
for name in self.targets.split(','):
tg = self.get_tgen_by_name(name)
m = self.get_group_idx(tg)
if m > min_grp:
min_grp = m
to_post = [tg]
elif m == min_grp:
to_post.append(tg)
return (min_grp, to_post)
def get_all_task_gen(self):
lst = []
for g in self.groups:
lst.extend(g)
return lst
def post_group(self):
if self.targets == '*':
for tg in self.groups[self.cur]:
try:
f = tg.post
except AttributeError:
pass
else:
f()
elif self.targets:
if self.cur < self._min_grp:
for tg in self.groups[self.cur]:
try:
f = tg.post
except AttributeError:
pass
else:
f()
else:
for tg in self._exact_tg:
tg.post()
else:
ln = self.launch_node()
if ln.is_child_of(self.bldnode):
Logs.warn('Building from the build directory, forcing --targets=*')
ln = self.srcnode
elif not ln.is_child_of(self.srcnode):
Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)', ln.abspath(), self.srcnode.abspath())
ln = self.srcnode
for tg in self.groups[self.cur]:
try:
f = tg.post
except AttributeError:
pass
else:
if tg.path.is_child_of(ln):
f()
def get_tasks_group(self, idx):
tasks = []
for tg in self.groups[idx]:
try:
tasks.extend(tg.tasks)
except AttributeError: # not a task generator
tasks.append(tg)
return tasks
def get_build_iterator(self):
self.cur = 0
if self.targets and self.targets != '*':
(self._min_grp, self._exact_tg) = self.get_targets()
global lazy_post
if self.post_mode != POST_LAZY:
while self.cur < len(self.groups):
self.post_group()
self.cur += 1
self.cur = 0
while self.cur < len(self.groups):
# first post the task generators for the group
if self.post_mode != POST_AT_ONCE:
self.post_group()
# then extract the tasks
tasks = self.get_tasks_group(self.cur)
# if the constraints are set properly (ext_in/ext_out, before/after)
# the call to set_file_constraints may be removed (can be a 15% penalty on no-op rebuilds)
# (but leave set_file_constraints for the installation step)
#
# if the tasks have only files, set_file_constraints is required but set_precedence_constraints is not necessary
#
Task.set_file_constraints(tasks)
Task.set_precedence_constraints(tasks)
self.cur_tasks = tasks
self.cur += 1
if not tasks: # skip empty groups; yielding an empty list here would stop the build
continue
yield tasks
while 1:
yield []
def install_files(self, dest, files, **kw):
assert(dest)
tg = self(features='install_task', install_to=dest, install_from=files, **kw)
tg.dest = tg.install_to
tg.type = 'install_files'
if not kw.get('postpone', True):
tg.post()
return tg
def install_as(self, dest, srcfile, **kw):
assert(dest)
tg = self(features='install_task', install_to=dest, install_from=srcfile, **kw)
tg.dest = tg.install_to
tg.type = 'install_as'
if not kw.get('postpone', True):
tg.post()
return tg
def symlink_as(self, dest, src, **kw):
assert(dest)
tg = self(features='install_task', install_to=dest, install_from=src, **kw)
tg.dest = tg.install_to
tg.type = 'symlink_as'
tg.link = src
# TODO if add: self.add_to_group(tsk)
if not kw.get('postpone', True):
tg.post()
return tg
@TaskGen.feature('install_task')
@TaskGen.before_method('process_rule', 'process_source')
def process_install_task(self):
self.add_install_task(**self.__dict__)
@TaskGen.taskgen_method
def add_install_task(self, **kw):
if not self.bld.is_install:
return
if not kw['install_to']:
return
if kw['type'] == 'symlink_as' and Utils.is_win32:
if kw.get('win32_install'):
kw['type'] = 'install_as'
else:
# just exit
return
tsk = self.install_task = self.create_task('inst')
tsk.chmod = kw.get('chmod', Utils.O644)
tsk.link = kw.get('link', '') or kw.get('install_from', '')
tsk.relative_trick = kw.get('relative_trick', False)
tsk.type = kw['type']
tsk.install_to = tsk.dest = kw['install_to']
tsk.install_from = kw['install_from']
tsk.relative_base = kw.get('cwd') or kw.get('relative_base', self.path)
tsk.install_user = kw.get('install_user')
tsk.install_group = kw.get('install_group')
tsk.init_files()
if not kw.get('postpone', True):
tsk.run_now()
return tsk
@TaskGen.taskgen_method
def add_install_files(self, **kw):
kw['type'] = 'install_files'
return self.add_install_task(**kw)
@TaskGen.taskgen_method
def add_install_as(self, **kw):
kw['type'] = 'install_as'
return self.add_install_task(**kw)
@TaskGen.taskgen_method
def add_symlink_as(self, **kw):
kw['type'] = 'symlink_as'
return self.add_install_task(**kw)
class inst(Task.Task):
def __str__(self):
return ''
def uid(self):
lst = self.inputs + self.outputs + [self.link, self.generator.path.abspath()]
return Utils.h_list(lst)
def init_files(self):
if self.type == 'symlink_as':
inputs = []
else:
inputs = self.generator.to_nodes(self.install_from)
if self.type == 'install_as':
assert len(inputs) == 1
self.set_inputs(inputs)
dest = self.get_install_path()
outputs = []
if self.type == 'symlink_as':
if self.relative_trick:
self.link = os.path.relpath(self.link, os.path.dirname(dest))
outputs.append(self.generator.bld.root.make_node(dest))
elif self.type == 'install_as':
outputs.append(self.generator.bld.root.make_node(dest))
else:
for y in inputs:
if self.relative_trick:
destfile = os.path.join(dest, y.path_from(self.relative_base))
else:
destfile = os.path.join(dest, y.name)
outputs.append(self.generator.bld.root.make_node(destfile))
self.set_outputs(outputs)
def runnable_status(self):
ret = super(inst, self).runnable_status()
if ret == Task.SKIP_ME and self.generator.bld.is_install:
return Task.RUN_ME
return ret
def post_run(self):
pass
def get_install_path(self, destdir=True):
if isinstance(self.install_to, Node.Node):
dest = self.install_to.abspath()
else:
dest = Utils.subst_vars(self.install_to, self.env)
if destdir and Options.options.destdir:
dest = os.path.join(Options.options.destdir, os.path.splitdrive(dest)[1].lstrip(os.sep))
return dest
def copy_fun(self, src, tgt):
# override this if you want to strip executables
# kw['tsk'].source is the task that created the files in the build
if Utils.is_win32 and len(tgt) > 259 and not tgt.startswith('\\\\?\\'):
tgt = '\\\\?\\' + tgt
shutil.copy2(src, tgt)
self.fix_perms(tgt)
def rm_empty_dirs(self, tgt):
while tgt:
tgt = os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def run(self):
is_install = self.generator.bld.is_install
if not is_install: # unnecessary?
return
for x in self.outputs:
if is_install == INSTALL:
x.parent.mkdir()
if self.type == 'symlink_as':
fun = is_install == INSTALL and self.do_link or self.do_unlink
fun(self.link, self.outputs[0].abspath())
else:
fun = is_install == INSTALL and self.do_install or self.do_uninstall
launch_node = self.generator.bld.launch_node()
for x, y in zip(self.inputs, self.outputs):
fun(x.abspath(), y.abspath(), x.path_from(launch_node))
def run_now(self):
status = self.runnable_status()
if status not in (Task.RUN_ME, Task.SKIP_ME):
raise Errors.TaskNotReady('Could not process %r: status %r' % (self, status))
self.run()
self.hasrun = Task.SUCCESS
def do_install(self, src, tgt, lbl, **kw):
if not Options.options.force:
# check if the file is already there to avoid a copy
try:
st1 = os.stat(tgt)
st2 = os.stat(src)
except OSError:
pass
else:
# same size and identical timestamps -> make no copy
if st1.st_mtime + 2 >= st2.st_mtime and st1.st_size == st2.st_size:
if not self.generator.bld.progress_bar:
Logs.info('- install %s (from %s)', tgt, lbl)
return False
if not self.generator.bld.progress_bar:
Logs.info('+ install %s (from %s)', tgt, lbl)
# Give best attempt at making destination overwritable,
# like the 'install' utility used by 'make install' does.
try:
os.chmod(tgt, Utils.O644 | stat.S_IMODE(os.stat(tgt).st_mode))
except EnvironmentError:
pass
# following is for shared libs and stale inodes (-_-)
try:
os.remove(tgt)
except OSError:
pass
try:
self.copy_fun(src, tgt)
except EnvironmentError as e:
if not os.path.exists(src):
Logs.error('File %r does not exist', src)
elif not os.path.isfile(src):
Logs.error('Input %r is not a file', src)
raise Errors.WafError('Could not install the file %r' % tgt, e)
def fix_perms(self, tgt):
if not Utils.is_win32:
user = getattr(self, 'install_user', None) or getattr(self.generator, 'install_user', None)
group = getattr(self, 'install_group', None) or getattr(self.generator, 'install_group', None)
if user or group:
Utils.lchown(tgt, user or -1, group or -1)
if not os.path.islink(tgt):
os.chmod(tgt, self.chmod)
def do_link(self, src, tgt, **kw):
if os.path.islink(tgt) and os.readlink(tgt) == src:
if not self.generator.bld.progress_bar:
Logs.info('- symlink %s (to %s)', tgt, src)
else:
try:
os.remove(tgt)
except OSError:
pass
if not self.generator.bld.progress_bar:
Logs.info('+ symlink %s (to %s)', tgt, src)
os.symlink(src, tgt)
self.fix_perms(tgt)
def do_uninstall(self, src, tgt, lbl, **kw):
if not self.generator.bld.progress_bar:
Logs.info('- remove %s', tgt)
#self.uninstall.append(tgt)
try:
os.remove(tgt)
except OSError as e:
if e.errno != errno.ENOENT:
if not getattr(self, 'uninstall_error', None):
self.uninstall_error = True
Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
if Logs.verbose > 1:
Logs.warn('Could not remove %s (error code %r)', e.filename, e.errno)
self.rm_empty_dirs(tgt)
def do_unlink(self, src, tgt, **kw):
try:
if not self.generator.bld.progress_bar:
Logs.info('- remove %s', tgt)
os.remove(tgt)
except OSError:
pass
self.rm_empty_dirs(tgt)
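# 'waf install' and 'waf uninstall' run a regular build with is_install set,
# so that the 'inst' tasks above actually copy or remove the files.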
class InstallContext(BuildContext):
cmd = 'install'
def __init__(self, **kw):
super(InstallContext, self).__init__(**kw)
self.is_install = INSTALL
class UninstallContext(InstallContext):
cmd = 'uninstall'
def __init__(self, **kw):
super(UninstallContext, self).__init__(**kw)
self.is_install = UNINSTALL
def execute(self):
# TODO just mark the tasks are already run with hasrun=Task.SKIPPED?
try:
# do not execute any tasks
def runnable_status(self):
return Task.SKIP_ME
setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status)
setattr(Task.Task, 'runnable_status', runnable_status)
super(UninstallContext, self).execute()
finally:
setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back)
class CleanContext(BuildContext):
cmd = 'clean'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
try:
self.clean()
finally:
self.store()
def clean(self):
Logs.debug('build: clean called')
if self.bldnode != self.srcnode:
# would lead to a disaster if top == out
lst = []
for env in self.all_envs.values():
lst.extend(self.root.find_or_declare(f) for f in env[CFG_FILES])
for n in self.bldnode.ant_glob('**/*', excl='.lock* *conf_check_*/** config.log c4che/*', quiet=True):
if n in lst:
continue
n.delete()
self.root.children = {}
for v in SAVED_ATTRS:
if v == 'root':
continue
setattr(self, v, {})
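# 'waf list' posts all task generators and prints their names, which is useful
# for finding valid values for the --targets option.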
class ListContext(BuildContext):
cmd = 'list'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
self.pre_build()
# display the time elapsed in the progress bar
self.timer = Utils.Timer()
for g in self.groups:
for tg in g:
try:
f = tg.post
except AttributeError:
pass
else:
f()
try:
# force the cache initialization
self.get_tgen_by_name('')
except Errors.WafError:
pass
for k in sorted(self.task_gen_cache_names.keys()):
Logs.pprint('GREEN', k)
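# 'waf step --files=...' executes only the tasks whose input or output files
# match the given patterns, which helps when debugging a single command.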
class StepContext(BuildContext):
cmd = 'step'
def __init__(self, **kw):
super(StepContext, self).__init__(**kw)
self.files = Options.options.files
def compile(self):
if not self.files:
Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"')
BuildContext.compile(self)
return
targets = []
if self.targets and self.targets != '*':
targets = self.targets.split(',')
for g in self.groups:
for tg in g:
if targets and tg.name not in targets:
continue
try:
f = tg.post
except AttributeError:
pass
else:
f()
for pat in self.files.split(','):
matcher = self.get_matcher(pat)
for tg in g:
if isinstance(tg, Task.TaskBase):
lst = [tg]
else:
lst = tg.tasks
for tsk in lst:
do_exec = False
for node in getattr(tsk, 'inputs', []):
if matcher(node, output=False):
do_exec = True
break
for node in getattr(tsk, 'outputs', []):
if matcher(node, output=True):
do_exec = True
break
if do_exec:
ret = tsk.run()
Logs.info('%s -> exit %r', tsk, ret)
def get_matcher(self, pat):
# this returns a function
inn = True
out = True
if pat.startswith('in:'):
out = False
pat = pat.replace('in:', '')
elif pat.startswith('out:'):
inn = False
pat = pat.replace('out:', '')
anode = self.root.find_node(pat)
pattern = None
if not anode:
if not pat.startswith('^'):
pat = '^.+?%s' % pat
if not pat.endswith('$'):
pat = '%s$' % pat
pattern = re.compile(pat)
def match(node, output):
if output == True and not out:
return False
if output == False and not inn:
return False
if anode:
return anode == node
else:
return pattern.match(node.abspath())
return match
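# Base class for commands that only need the configuration environments loaded,
# without executing any tasks.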
class EnvContext(BuildContext):
fun = cmd = None
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
| true | true |
1c2b3491c1eb3dd28653acaba916c1f7d1bdac0a | 1,841 | py | Python | var/spack/repos/builtin/packages/py-cmake/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/py-cmake/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/py-cmake/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyCmake(PythonPackage):
"""CMake is an open-source, cross-platform family of tools designed to
build, test and package software
"""
homepage = "https://cmake.org"
git = "https://github.com/scikit-build/cmake-python-distributions.git"
pypi = "cmake/cmake-3.22.2.tar.gz"
version('3.22.2', sha256='b5bd5eeb488b13cf64ec963800f3d979eaeb90b4382861b86909df503379e219')
version('3.21.4', sha256='30fa5ed8a5ad66dcd263adb87f3ce3dc2d0ec0ac3958f5becff577e4b62cd065')
version('3.18.0', sha256='52b98c5ee70b5fa30a8623e96482227e065292f78794eb085fdf0fecb204b79b')
depends_on('ninja', type='build')
depends_on('py-scikit-build@0.12:', type='build')
depends_on('py-setuptools@42:', type='build')
depends_on('git', type='build')
depends_on('cmake@3.22.2', type=('build', 'link', 'run'), when='@3.22.2')
depends_on('cmake@3.21.4', type=('build', 'link', 'run'), when='@3.21.4')
depends_on('cmake@3.18.0', type=('build', 'link', 'run'), when='@3.18.0')
# see:
# https://github.com/scikit-build/cmake-python-distributions/issues/227
# https://github.com/spack/spack/pull/28760#issuecomment-1029362288
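    # Vendor the matching CMake sources for each supported version so that the
    # wheel can be built from source (see BUILD_CMAKE_FROM_SOURCE below).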
for v in ['3.22.2', '3.21.4', '3.18.0']:
resource(name='cmake-src',
git='https://gitlab.kitware.com/cmake/cmake.git',
commit='v{0}'.format(v), when='@{0}'.format(v),
destination='cmake-src', placement='cmake-src')
def install_options(self, spec, prefix):
return [
'-DBUILD_CMAKE_FROM_SOURCE=ON',
'-DCMakeProject_SOURCE_DIR=cmake-src'
]
| 41.840909 | 96 | 0.661597 |
from spack.package import *
class PyCmake(PythonPackage):
homepage = "https://cmake.org"
git = "https://github.com/scikit-build/cmake-python-distributions.git"
pypi = "cmake/cmake-3.22.2.tar.gz"
version('3.22.2', sha256='b5bd5eeb488b13cf64ec963800f3d979eaeb90b4382861b86909df503379e219')
version('3.21.4', sha256='30fa5ed8a5ad66dcd263adb87f3ce3dc2d0ec0ac3958f5becff577e4b62cd065')
version('3.18.0', sha256='52b98c5ee70b5fa30a8623e96482227e065292f78794eb085fdf0fecb204b79b')
depends_on('ninja', type='build')
depends_on('py-scikit-build@0.12:', type='build')
depends_on('py-setuptools@42:', type='build')
depends_on('git', type='build')
depends_on('cmake@3.22.2', type=('build', 'link', 'run'), when='@3.22.2')
depends_on('cmake@3.21.4', type=('build', 'link', 'run'), when='@3.21.4')
depends_on('cmake@3.18.0', type=('build', 'link', 'run'), when='@3.18.0')
'3.21.4', '3.18.0']:
resource(name='cmake-src',
git='https://gitlab.kitware.com/cmake/cmake.git',
commit='v{0}'.format(v), when='@{0}'.format(v),
destination='cmake-src', placement='cmake-src')
def install_options(self, spec, prefix):
return [
'-DBUILD_CMAKE_FROM_SOURCE=ON',
'-DCMakeProject_SOURCE_DIR=cmake-src'
]
| true | true |
1c2b34a62d85606c90a9cd041550a4133f0739cd | 1,849 | py | Python | setup.py | thomwiggers/httpserver | 88a3a35619ce5185347c6764f211878e898e6aad | ["BSD-3-Clause"] | 3 | 2017-03-04T12:47:39.000Z | 2018-05-04T13:44:47.000Z | setup.py | thomwiggers/httpserver | 88a3a35619ce5185347c6764f211878e898e6aad | ["BSD-3-Clause"] | null | null | null | setup.py | thomwiggers/httpserver | 88a3a35619ce5185347c6764f211878e898e6aad | ["BSD-3-Clause"] | 7 | 2015-03-22T15:05:54.000Z | 2022-02-07T07:02:20.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
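# Test command that defers to pytest, so `python setup.py test` runs the suite.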
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
requirements = [
'docopt'
]
test_requirements = [
'pytest',
'selenium>=3.8',
'freezegun',
]
setup(
name='httpserver',
version='1.1.0',
description="Asyncio implementation of an HTTP server",
long_description=readme + '\n\n' + history,
author="Thom Wiggers and Luuk Scholten",
author_email='thom@thomwiggers.nl, info@luukscholten.com',
maintainer="Thom Wiggers",
maintainer_email='thom@thomwiggers.nl',
url='https://github.com/thomwiggers/httpserver',
packages=[
'httpserver',
],
package_dir={'httpserver':
'httpserver'},
entry_points={
'console_scripts': [
'httpserver = httpserver:run'
]
},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='httpserver',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements,
cmdclass={'test': PyTest}
)
| 24.653333 | 63 | 0.629529 |
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
requirements = [
'docopt'
]
test_requirements = [
'pytest',
'selenium>=3.8',
'freezegun',
]
setup(
name='httpserver',
version='1.1.0',
description="Asyncio implementation of an HTTP server",
long_description=readme + '\n\n' + history,
author="Thom Wiggers and Luuk Scholten",
author_email='thom@thomwiggers.nl, info@luukscholten.com',
maintainer="Thom Wiggers",
maintainer_email='thom@thomwiggers.nl',
url='https://github.com/thomwiggers/httpserver',
packages=[
'httpserver',
],
package_dir={'httpserver':
'httpserver'},
entry_points={
'console_scripts': [
'httpserver = httpserver:run'
]
},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='httpserver',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements,
cmdclass={'test': PyTest}
)
| true | true |
1c2b34d3bca81235599234099d80fbe425c8eaa7 | 1,319 | py | Python | modular_program.py | Sanjin84/CompetitionInterface | 50ba1c58b874897d1991b6a28816f2424803a0b2 | ["CC0-1.0"] | null | null | null | modular_program.py | Sanjin84/CompetitionInterface | 50ba1c58b874897d1991b6a28816f2424803a0b2 | ["CC0-1.0"] | null | null | null | modular_program.py | Sanjin84/CompetitionInterface | 50ba1c58b874897d1991b6a28816f2424803a0b2 | ["CC0-1.0"] | null | null | null |
from tkinter import *
root = Tk()
root.geometry("1000x600")
root.resizable(True,True)
root.title("DASHBOARD")
#CREATE A START PAGE
start = Frame(root,bg='#539bf9', height=600, width=1000)
start.pack()
start_heading = Label(start,text="GLOBAL WATCHTOWER \n SV 21 INTERFACE",font = "Verdana 30 bold",bg="#539bf9")
start_heading.place(rely=0.1,relx=0.1, relwidth= 0.8)
team_name_label = Label(start,text="ENTER YOUR TEAM NAME",font = "Verdana 30 bold",bg="#539bf9")
team_name_label.place(rely=0.4, relx = 0.1, relwidth = 0.8)
team_name_entry = Entry(start, bg="#f0f0f0")  # tkinter Entry has no 'text' option; the label above provides the prompt
team_name_entry.place(relx=0.1, rely=0.5, relwidth=0.8, relheight=0.15)
button = Button(start,text="GO TO FINISH" ,bg="gray",font = "Verdana 15 bold", command = lambda: switch_frame(start,finish))
button.place(rely=0.65,relx=0.1, relwidth=0.4, relheight=0.15)
#CREATE A FINISH PAGE
finish = Frame(root,bg='#FF0000', height=600, width=1000)
fr = Label(finish,text="VIRUS HAS BEEN STOPPED",font = "Verdana 30 bold",bg="#4472C4")
fr.place(rely=0.1,relx=0.1, relwidth= 0.8)
button2 = Button(finish,text="GO TO START" ,bg="gray",font = "Verdana 15 bold", command = lambda: switch_frame(finish,start))
button2.place(rely=0.65,relx=0.5, relwidth=0.4, relheight=0.15)
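# Swap the visible page: hide the currently packed frame and show the other one.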
def switch_frame (old,new):
old.pack_forget()
new.pack()
mainloop() | 39.969697 | 125 | 0.714936 | from tkinter import *
root = Tk()
root.geometry("1000x600")
root.resizable(True,True)
root.title("DASHBOARD")
start = Frame(root,bg='#539bf9', height=600, width=1000)
start.pack()
start_heading = Label(start,text="GLOBAL WATCHTOWER \n SV 21 INTERFACE",font = "Verdana 30 bold",bg="#539bf9")
start_heading.place(rely=0.1,relx=0.1, relwidth= 0.8)
team_name_label = Label(start,text="ENTER YOUR TEAM NAME",font = "Verdana 30 bold",bg="#539bf9")
team_name_label.place(rely=0.4, relx = 0.1, relwidth = 0.8)
team_name_entry = Entry(start, bg="#f0f0f0")
team_name_entry.place(relx=0.1, rely=0.5, relwidth=0.8, relheight=0.15)
button = Button(start,text="GO TO FINISH" ,bg="gray",font = "Verdana 15 bold", command = lambda: switch_frame(start,finish))
button.place(rely=0.65,relx=0.1, relwidth=0.4, relheight=0.15)
finish = Frame(root,bg='#FF0000', height=600, width=1000)
fr = Label(finish,text="VIRUS HAS BEEN STOPPED",font = "Verdana 30 bold",bg="#4472C4")
fr.place(rely=0.1,relx=0.1, relwidth= 0.8)
button2 = Button(finish,text="GO TO START" ,bg="gray",font = "Verdana 15 bold", command = lambda: switch_frame(finish,start))
button2.place(rely=0.65,relx=0.5, relwidth=0.4, relheight=0.15)
def switch_frame (old,new):
old.pack_forget()
new.pack()
mainloop() | true | true |
1c2b35533c323ec4950a6b63a97c10d9551b6a66 | 501 | py | Python | orders/serializers/orderserializer.py | mrearsbig/store | f311c48f8e79f6d6fb7bf2c8c9a0b65d1b271ff0 | ["MIT"] | 1 | 2021-11-26T21:39:52.000Z | 2021-11-26T21:39:52.000Z | orders/serializers/orderserializer.py | mrearsbig/backend | f311c48f8e79f6d6fb7bf2c8c9a0b65d1b271ff0 | ["MIT"] | null | null | null | orders/serializers/orderserializer.py | mrearsbig/backend | f311c48f8e79f6d6fb7bf2c8c9a0b65d1b271ff0 | ["MIT"] | null | null | null |
from rest_framework.serializers import ModelSerializer
from orders.models import Order
class OrderSerializer(ModelSerializer):
class Meta:
model = Order
fields = '__all__'
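    # Shape the serialized order: expose the scalar fields directly and nest
    # only the client's username.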
def to_representation(self, instance):
return {
'id': instance.id,
'date': instance.date,
'shipping': instance.shipping,
'total': instance.total,
'client': {
'username': instance.client.username
}
} | 26.368421 | 54 | 0.57485 | from rest_framework.serializers import ModelSerializer
from orders.models import Order
class OrderSerializer(ModelSerializer):
class Meta:
model = Order
fields = '__all__'
def to_representation(self, instance):
return {
'id': instance.id,
'date': instance.date,
'shipping': instance.shipping,
'total': instance.total,
'client': {
'username': instance.client.username
}
} | true | true |
1c2b3604c195de56b7956ab80df51689484ff57b | 815 | py | Python | lib/mplcairo/tk.py | TomJohnZ/mplcairo | b5d119cacd39eeeb6f5e166e11f3ede52b5a28fd | ["MIT"] | 55 | 2019-08-03T00:01:19.000Z | 2022-03-02T21:46:51.000Z | lib/mplcairo/tk.py | TomJohnZ/mplcairo | b5d119cacd39eeeb6f5e166e11f3ede52b5a28fd | ["MIT"] | 23 | 2019-09-07T14:52:43.000Z | 2022-03-05T19:46:52.000Z | lib/mplcairo/tk.py | TomJohnZ/mplcairo | b5d119cacd39eeeb6f5e166e11f3ede52b5a28fd | ["MIT"] | 16 | 2018-03-15T11:57:47.000Z | 2019-03-23T06:03:06.000Z |
from functools import partial
from matplotlib.backends._backend_tk import _BackendTk, FigureCanvasTk
from . import _util
from .base import FigureCanvasCairo
try:
from matplotlib.backends._backend_tk import blit as _mpl3_blit
_tk_blit = partial(_mpl3_blit, offsets=(0, 1, 2, 3))
except ImportError:
from matplotlib.backends.tkagg import blit as _mpl2_blit
_tk_blit = partial(_mpl2_blit, colormode=2)
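# Tk canvas backed by a cairo renderer: draw() lets cairo paint, then blit()
# converts the buffer to straight RGBA and pushes it into the Tk photo image.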
class FigureCanvasTkCairo(FigureCanvasCairo, FigureCanvasTk):
def draw(self):
super().draw()
self.blit()
def blit(self, bbox=None):
buf = _util.cairo_to_straight_rgba8888(
self.get_renderer()._get_buffer())
_tk_blit(self._tkphoto, buf, bbox=bbox)
@_BackendTk.export
class _BackendTkCairo(_BackendTk):
FigureCanvas = FigureCanvasTkCairo
| 27.166667 | 70 | 0.737423 | from functools import partial
from matplotlib.backends._backend_tk import _BackendTk, FigureCanvasTk
from . import _util
from .base import FigureCanvasCairo
try:
from matplotlib.backends._backend_tk import blit as _mpl3_blit
_tk_blit = partial(_mpl3_blit, offsets=(0, 1, 2, 3))
except ImportError:
from matplotlib.backends.tkagg import blit as _mpl2_blit
_tk_blit = partial(_mpl2_blit, colormode=2)
class FigureCanvasTkCairo(FigureCanvasCairo, FigureCanvasTk):
def draw(self):
super().draw()
self.blit()
def blit(self, bbox=None):
buf = _util.cairo_to_straight_rgba8888(
self.get_renderer()._get_buffer())
_tk_blit(self._tkphoto, buf, bbox=bbox)
@_BackendTk.export
class _BackendTkCairo(_BackendTk):
FigureCanvas = FigureCanvasTkCairo
| true | true |
1c2b36405b1140476ad624fc00313de58fe5b45e | 9,495 | py | Python | tests/unit/test_dynamodb.py | ongzhixian/dana_trading_bot | 746d080a42f6c43ab9a96df7b272062a88f47f56 | ["MIT"] | null | null | null | tests/unit/test_dynamodb.py | ongzhixian/dana_trading_bot | 746d080a42f6c43ab9a96df7b272062a88f47f56 | ["MIT"] | null | null | null | tests/unit/test_dynamodb.py | ongzhixian/dana_trading_bot | 746d080a42f6c43ab9a96df7b272062a88f47f56 | ["MIT"] | 1 | 2021-07-21T17:59:33.000Z | 2021-07-21T17:59:33.000Z |
import unittest
import boto3
from boto3.dynamodb.types import Binary, Decimal
import requests
import warnings
from unittest.mock import patch, Mock
from mods.ddb import store, retrieve, remove
class TestDdb(unittest.TestCase):
# Dictionary of mock test data
# 'RequestId': '75N4RS5BDFE4NGOK864RHBFBH7VV4KQNSO5AEMVJF66Q9ASUAAJG',
mock_data = {
'RESPONSE_METADATA' : {
'ResponseMetadata': {
'RequestId': 'AAA4RS5BDFE4NGOK864RHBFBH7VV4KQNSO5AEMVJF66Q9ASUAAJG',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'server': 'Server',
'date': 'Sat, 21 Aug 2021 13:07:47 GMT',
'content-type': 'application/x-amz-json-1.0',
'content-length': '2',
'connection': 'keep-alive',
'x-amzn-requestid': '75N4RS5BDFE4NGOK864RHBFBH7VV4KQNSO5AEMVJF66Q9ASUAAJG',
'x-amz-crc32': '2745614147'
},
'RetryAttempts': 0
}
},
'PLAIN_TEXT_ITEM' : {
'id' : 'sample7',
"Author": "William Shakespeare",
"Title": "Romeo",
"Category": "Drama"
},
'RETRIEVE_ITEM' : {
'Item': {
'info': {
'rating': Decimal('3'),
'plot': 'awful'
},
'app': {
'name': 'some generic app',
'version': Decimal('10')
},
'and some binary': Binary(b'\x00\x01\x02'),
'year': Decimal('2021'),
'comment': 'alone',
'some numbers': Decimal('99'),
'id': 'enc2',
'title': 'my horrible movie',
'example': 'data'
},
'ResponseMetadata': {
'RequestId': 'OBKNV1BON1MCPUDFVS0A1LC513VV4KQNSO5AEMVJF66Q9ASUAAJG',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'server': 'Server',
'date': 'Sat, 21 Aug 2021 14:19:31 GMT',
'content-type': 'application/x-amz-json-1.0',
'content-length': '308', 'connection': 'keep-alive',
'x-amzn-requestid': 'OBKNV1BON1MCPUDFVS0A1LC513VV4KQNSO5AEMVJF66Q9ASUAAJG',
'x-amz-crc32': '1593809059'
},
'RetryAttempts': 0
}
}
}
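    # The tests below use Mock tables and patch EncryptedTable, so the
    # put/get/delete calls never reach AWS.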
def test_store_data(self):
warnings.simplefilter("ignore", ResourceWarning)
# Arrange
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
# Act
mock_table = Mock()
mock_table.put_item.return_value = self.mock_data['RESPONSE_METADATA']
response = store(mock_table, plaintext_item)
response_metadata = None
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
# Assert(s)
self.assertIsNotNone(response_metadata)
self.assertEqual(200, http_status_code)
def test_store_encrypted_data(self):
warnings.simplefilter("ignore", ResourceWarning)
# Arrange
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
# Act
# Create an instance of a mock(EncryptedTable)
mock_table = Mock()
mock_table.put_item.return_value = self.mock_data['RESPONSE_METADATA']
#with patch('dynamodb_encryption_sdk.encrypted.table.EncryptedTable') as mock_encrypted_table:
with patch('mods.ddb.EncryptedTable') as mock_encrypted_table:
mock_encrypted_table.return_value = mock_table
response = store(Mock(), plaintext_item, encrypt=True)
# with patch(f'{__name__}.store') as mock_module_method:
# mock_module_method.return_value = self.mock_data['RESPONSE_METADATA']
# response = store(dana_table, plaintext_item, encrypt=True)
# mock_table = Mock()
# mock_table.put_item.return_value = self.mock_data['RESPONSE_METADATA']
# response = store(dana_table, plaintext_item, encrypt=True)
# print(response)
response_metadata = None
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
# Assert(s)
self.assertIsNotNone(response_metadata)
self.assertEqual(200, http_status_code)
#@unittest.skip("Reduce noise while checking other tests")
def test_retrieve_data(self):
warnings.simplefilter("ignore", ResourceWarning)
# Arrange
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
#plaintext_item['id'] = 'SAMPLE1
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
# Act
mock_table = Mock()
mock_table.get_item.return_value = self.mock_data['RETRIEVE_ITEM']
response = retrieve(mock_table, {'id': 'SAMPLE1'})
# with patch(f'{__name__}.retrieve') as mock_module_method:
# mock_module_method.return_value = self.mock_data['RETRIEVE_ITEM']
# response = retrieve(dana_table, {'id': 'SAMPLE1'})
response_metadata = None
response_item = None
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'Item' in response:
response_item = response['Item']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
# Assert(s)
self.assertIsNotNone(response_metadata)
self.assertIsNotNone(response_item)
self.assertEqual(200, http_status_code)
#@unittest.skip # no reason needed
def test_retrieve_encrypted_data(self):
warnings.simplefilter("ignore", ResourceWarning)
# Arrange
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
#plaintext_item['id'] = 'SAMPLE1
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
# Act
mock_table = Mock()
mock_table.get_item.return_value = self.mock_data['RETRIEVE_ITEM']
#with patch('dynamodb_encryption_sdk.encrypted.table.EncryptedTable') as mock_encrypted_table:
with patch('mods.ddb.EncryptedTable') as mock_encrypted_table:
mock_encrypted_table.return_value = mock_table
response = retrieve(dana_table, {'id': 'SAMPLE1'}, encrypt=True)
# with patch(f'{__name__}.retrieve') as mock_module_method:
# mock_module_method.return_value = self.mock_data['RETRIEVE_ITEM']
# response = retrieve(dana_table, {'id': 'SAMPLE1'}, encrypt=True)
response_metadata = None
response_item = None
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'Item' in response:
response_item = response['Item']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
# Assert(s)
self.assertIsNotNone(response_metadata)
self.assertIsNotNone(response_item)
self.assertEqual(200, http_status_code)
def test_remove_item(self):
warnings.simplefilter("ignore", ResourceWarning)
# Arrange
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
#plaintext_item['id'] = 'SAMPLE1
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
# Act
# The following line works when we run test directly (aka: python -m unittest tests\unit\test_dynamodb.py)
# with patch('tests.unit.test_dynamodb.remove') as mock_module_method:
# But patching will fail when we "discover" tests (aka: python -m unittest discover -s tests\unit -v)
# Because of the way patching works in Python, name patch method f"{__name__}.remove"
#with patch(f"{__name__}.remove") as mock_module_method:
# with patch("mods.ddb.remove") as mock_module_method:
# mock_module_method.return_value = self.mock_data['RESPONSE_METADATA']
mock_table = Mock()
mock_table.delete_item.return_value = self.mock_data['RESPONSE_METADATA']
response = remove(mock_table, {'id': 'sample8'})
#print(response)
response_metadata = None
http_status_code = 0
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
# Assert(s)
self.assertIsNotNone(response_metadata)
self.assertEqual(200, http_status_code)
# if __name__ == '__main__':
# unittest.main()
| 34.154676 | 114 | 0.596419 | import unittest
import boto3
from boto3.dynamodb.types import Binary, Decimal
import requests
import warnings
from unittest.mock import patch, Mock
from mods.ddb import store, retrieve, remove
class TestDdb(unittest.TestCase):
mock_data = {
'RESPONSE_METADATA' : {
'ResponseMetadata': {
'RequestId': 'AAA4RS5BDFE4NGOK864RHBFBH7VV4KQNSO5AEMVJF66Q9ASUAAJG',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'server': 'Server',
'date': 'Sat, 21 Aug 2021 13:07:47 GMT',
'content-type': 'application/x-amz-json-1.0',
'content-length': '2',
'connection': 'keep-alive',
'x-amzn-requestid': '75N4RS5BDFE4NGOK864RHBFBH7VV4KQNSO5AEMVJF66Q9ASUAAJG',
'x-amz-crc32': '2745614147'
},
'RetryAttempts': 0
}
},
'PLAIN_TEXT_ITEM' : {
'id' : 'sample7',
"Author": "William Shakespeare",
"Title": "Romeo",
"Category": "Drama"
},
'RETRIEVE_ITEM' : {
'Item': {
'info': {
'rating': Decimal('3'),
'plot': 'awful'
},
'app': {
'name': 'some generic app',
'version': Decimal('10')
},
'and some binary': Binary(b'\x00\x01\x02'),
'year': Decimal('2021'),
'comment': 'alone',
'some numbers': Decimal('99'),
'id': 'enc2',
'title': 'my horrible movie',
'example': 'data'
},
'ResponseMetadata': {
'RequestId': 'OBKNV1BON1MCPUDFVS0A1LC513VV4KQNSO5AEMVJF66Q9ASUAAJG',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'server': 'Server',
'date': 'Sat, 21 Aug 2021 14:19:31 GMT',
'content-type': 'application/x-amz-json-1.0',
'content-length': '308', 'connection': 'keep-alive',
'x-amzn-requestid': 'OBKNV1BON1MCPUDFVS0A1LC513VV4KQNSO5AEMVJF66Q9ASUAAJG',
'x-amz-crc32': '1593809059'
},
'RetryAttempts': 0
}
}
}
def test_store_data(self):
warnings.simplefilter("ignore", ResourceWarning)
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
mock_table = Mock()
mock_table.put_item.return_value = self.mock_data['RESPONSE_METADATA']
response = store(mock_table, plaintext_item)
response_metadata = None
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
self.assertIsNotNone(response_metadata)
self.assertEqual(200, http_status_code)
def test_store_encrypted_data(self):
warnings.simplefilter("ignore", ResourceWarning)
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
mock_table = Mock()
mock_table.put_item.return_value = self.mock_data['RESPONSE_METADATA']
with patch('mods.ddb.EncryptedTable') as mock_encrypted_table:
mock_encrypted_table.return_value = mock_table
response = store(Mock(), plaintext_item, encrypt=True)
response_metadata = None
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
self.assertIsNotNone(response_metadata)
self.assertEqual(200, http_status_code)
def test_retrieve_data(self):
warnings.simplefilter("ignore", ResourceWarning)
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
# Act
mock_table = Mock()
mock_table.get_item.return_value = self.mock_data['RETRIEVE_ITEM']
response = retrieve(mock_table, {'id': 'SAMPLE1'})
# with patch(f'{__name__}.retrieve') as mock_module_method:
# mock_module_method.return_value = self.mock_data['RETRIEVE_ITEM']
# response = retrieve(dana_table, {'id': 'SAMPLE1'})
response_metadata = None
response_item = None
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'Item' in response:
response_item = response['Item']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
# Assert(s)
self.assertIsNotNone(response_metadata)
self.assertIsNotNone(response_item)
self.assertEqual(200, http_status_code)
#@unittest.skip # no reason needed
def test_retrieve_encrypted_data(self):
warnings.simplefilter("ignore", ResourceWarning)
# Arrange
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
#plaintext_item['id'] = 'SAMPLE1
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
mock_table = Mock()
mock_table.get_item.return_value = self.mock_data['RETRIEVE_ITEM']
with patch('mods.ddb.EncryptedTable') as mock_encrypted_table:
mock_encrypted_table.return_value = mock_table
response = retrieve(dana_table, {'id': 'SAMPLE1'}, encrypt=True)
response_metadata = None
response_item = None
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'Item' in response:
response_item = response['Item']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
self.assertIsNotNone(response_metadata)
self.assertIsNotNone(response_item)
self.assertEqual(200, http_status_code)
def test_remove_item(self):
warnings.simplefilter("ignore", ResourceWarning)
plaintext_item = self.mock_data['PLAIN_TEXT_ITEM']
dynamodb = boto3.resource('dynamodb')
dana_table = dynamodb.Table('dana_table')
# Act
# The following line works when we run test directly (aka: python -m unittest tests\unit\test_dynamodb.py)
# with patch('tests.unit.test_dynamodb.remove') as mock_module_method:
# But patching will fail when we "discover" tests (aka: python -m unittest discover -s tests\unit -v)
# Because of the way patching works in Python, name patch method f"{__name__}.remove"
#with patch(f"{__name__}.remove") as mock_module_method:
# with patch("mods.ddb.remove") as mock_module_method:
# mock_module_method.return_value = self.mock_data['RESPONSE_METADATA']
mock_table = Mock()
mock_table.delete_item.return_value = self.mock_data['RESPONSE_METADATA']
response = remove(mock_table, {'id': 'sample8'})
#print(response)
response_metadata = None
http_status_code = 0
if 'ResponseMetadata' in response:
response_metadata = response['ResponseMetadata']
if 'HTTPStatusCode' in response_metadata:
http_status_code = response_metadata['HTTPStatusCode']
# Assert(s)
self.assertIsNotNone(response_metadata)
self.assertEqual(200, http_status_code)
# if __name__ == '__main__':
# unittest.main()
| true | true |
1c2b3794e77dd8e127e577dd7e5b38673d933e02 | 16,440 | py | Python | src/poetry/mixology/incompatibility.py | mmacchia/poetry | 7c53db9680d021bac99cc366a3bbc88ebbffdf0f | ["MIT"] | null | null | null | src/poetry/mixology/incompatibility.py | mmacchia/poetry | 7c53db9680d021bac99cc366a3bbc88ebbffdf0f | ["MIT"] | null | null | null | src/poetry/mixology/incompatibility.py | mmacchia/poetry | 7c53db9680d021bac99cc366a3bbc88ebbffdf0f | ["MIT"] | null | null | null |
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Union
from poetry.mixology.incompatibility_cause import ConflictCause
from poetry.mixology.incompatibility_cause import DependencyCause
from poetry.mixology.incompatibility_cause import IncompatibilityCause
from poetry.mixology.incompatibility_cause import NoVersionsCause
from poetry.mixology.incompatibility_cause import PackageNotFoundCause
from poetry.mixology.incompatibility_cause import PlatformCause
from poetry.mixology.incompatibility_cause import PythonCause
from poetry.mixology.incompatibility_cause import RootCause
from poetry.mixology.term import Term
class Incompatibility:
def __init__(self, terms: List[Term], cause: IncompatibilityCause) -> None:
# Remove the root package from generated incompatibilities, since it will
# always be satisfied. This makes error reporting clearer, and may also
# make solving more efficient.
if (
len(terms) != 1
and isinstance(cause, ConflictCause)
and any(term.is_positive() and term.dependency.is_root for term in terms)
):
terms = [
term
for term in terms
if not term.is_positive() or not term.dependency.is_root
]
if (
len(terms) == 1
# Short-circuit in the common case of a two-term incompatibility with
# two different packages (for example, a dependency).
or len(terms) == 2
and terms[0].dependency.complete_name != terms[-1].dependency.complete_name
):
pass
else:
# Coalesce multiple terms about the same package if possible.
by_name: Dict[str, Dict[str, Term]] = {}
for term in terms:
if term.dependency.complete_name not in by_name:
by_name[term.dependency.complete_name] = {}
by_ref = by_name[term.dependency.complete_name]
ref = term.dependency.complete_name
if ref in by_ref:
by_ref[ref] = by_ref[ref].intersect(term)
# If we have two terms that refer to the same package but have a null
# intersection, they're mutually exclusive, making this incompatibility
# irrelevant, since we already know that mutually exclusive version
# ranges are incompatible. We should never derive an irrelevant
# incompatibility.
assert by_ref[ref] is not None
else:
by_ref[ref] = term
new_terms = []
for by_ref in by_name.values():
positive_terms = [
term for term in by_ref.values() if term.is_positive()
]
if positive_terms:
new_terms += positive_terms
continue
new_terms += list(by_ref.values())
terms = new_terms
self._terms = terms
self._cause = cause
@property
def terms(self) -> List[Term]:
return self._terms
@property
def cause(
self,
) -> Union[
RootCause,
NoVersionsCause,
DependencyCause,
ConflictCause,
PythonCause,
PlatformCause,
PackageNotFoundCause,
]:
return self._cause
@property
def external_incompatibilities(
self,
) -> Iterator[Union[ConflictCause, "Incompatibility"]]:
"""
Returns all external incompatibilities in this incompatibility's
derivation graph.
"""
if isinstance(self._cause, ConflictCause):
cause: ConflictCause = self._cause
yield from cause.conflict.external_incompatibilities
yield from cause.other.external_incompatibilities
else:
yield self
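    # Version solving has failed when an incompatibility with no terms (or only
    # the root package) has been derived.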
def is_failure(self) -> bool:
return len(self._terms) == 0 or (
len(self._terms) == 1 and self._terms[0].dependency.is_root
)
def __str__(self) -> str:
if isinstance(self._cause, DependencyCause):
assert len(self._terms) == 2
depender = self._terms[0]
dependee = self._terms[1]
assert depender.is_positive()
assert not dependee.is_positive()
return "{} depends on {}".format(
self._terse(depender, allow_every=True), self._terse(dependee)
)
elif isinstance(self._cause, PythonCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
cause: PythonCause = self._cause
text = "{} requires ".format(self._terse(self._terms[0], allow_every=True))
text += f"Python {cause.python_version}"
return text
elif isinstance(self._cause, PlatformCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
cause: PlatformCause = self._cause
text = "{} requires ".format(self._terse(self._terms[0], allow_every=True))
text += f"platform {cause.platform}"
return text
elif isinstance(self._cause, NoVersionsCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
return "no versions of {} match {}".format(
self._terms[0].dependency.name, self._terms[0].constraint
)
elif isinstance(self._cause, PackageNotFoundCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
return "{} doesn't exist".format(self._terms[0].dependency.name)
elif isinstance(self._cause, RootCause):
assert len(self._terms) == 1
assert not self._terms[0].is_positive()
assert self._terms[0].dependency.is_root
return "{} is {}".format(
self._terms[0].dependency.name, self._terms[0].dependency.constraint
)
elif self.is_failure():
return "version solving failed"
if len(self._terms) == 1:
term = self._terms[0]
if term.constraint.is_any():
return "{} is {}".format(
term.dependency.name,
"forbidden" if term.is_positive() else "required",
)
else:
return "{} is {}".format(
term.dependency.name,
"forbidden" if term.is_positive() else "required",
)
if len(self._terms) == 2:
term1 = self._terms[0]
term2 = self._terms[1]
if term1.is_positive() == term2.is_positive():
if term1.is_positive():
package1 = (
term1.dependency.name
if term1.constraint.is_any()
else self._terse(term1)
)
package2 = (
term2.dependency.name
if term2.constraint.is_any()
else self._terse(term2)
)
return f"{package1} is incompatible with {package2}"
else:
return "either {} or {}".format(
self._terse(term1), self._terse(term2)
)
positive = []
negative = []
for term in self._terms:
if term.is_positive():
positive.append(self._terse(term))
else:
negative.append(self._terse(term))
if positive and negative:
if len(positive) == 1:
positive_term = [term for term in self._terms if term.is_positive()][0]
return "{} requires {}".format(
self._terse(positive_term, allow_every=True), " or ".join(negative)
)
else:
return "if {} then {}".format(
" and ".join(positive), " or ".join(negative)
)
elif positive:
return "one of {} must be false".format(" or ".join(positive))
else:
return "one of {} must be true".format(" or ".join(negative))
def and_to_string(
self,
other: "Incompatibility",
details: dict,
this_line: Optional[int],
other_line: Optional[int],
) -> str:
requires_both = self._try_requires_both(other, details, this_line, other_line)
if requires_both is not None:
return requires_both
requires_through = self._try_requires_through(
other, details, this_line, other_line
)
if requires_through is not None:
return requires_through
requires_forbidden = self._try_requires_forbidden(
other, details, this_line, other_line
)
if requires_forbidden is not None:
return requires_forbidden
buffer = [str(self)]
if this_line is not None:
buffer.append(" " + str(this_line))
buffer.append(" and {}".format(str(other)))
if other_line is not None:
buffer.append(" " + str(other_line))
return "\n".join(buffer)
def _try_requires_both(
self,
other: "Incompatibility",
details: dict,
this_line: Optional[int],
other_line: Optional[int],
) -> Optional[str]:
if len(self._terms) == 1 or len(other.terms) == 1:
return None
this_positive = self._single_term_where(lambda term: term.is_positive())
if this_positive is None:
return None
other_positive = other._single_term_where(lambda term: term.is_positive())
if other_positive is None:
return None
if this_positive.dependency != other_positive.dependency:
return None
this_negatives = " or ".join(
[self._terse(term) for term in self._terms if not term.is_positive()]
)
other_negatives = " or ".join(
[self._terse(term) for term in other.terms if not term.is_positive()]
)
buffer = [self._terse(this_positive, allow_every=True) + " "]
is_dependency = isinstance(self.cause, DependencyCause) and isinstance(
other.cause, DependencyCause
)
if is_dependency:
buffer.append("depends on")
else:
buffer.append("requires")
buffer.append(f" both {this_negatives}")
if this_line is not None:
buffer.append(f" ({this_line})")
buffer.append(f" and {other_negatives}")
if other_line is not None:
buffer.append(f" ({other_line})")
return "".join(buffer)
def _try_requires_through(
self, other: "Incompatibility", details: dict, this_line: int, other_line: int
) -> Optional[str]:
if len(self._terms) == 1 or len(other.terms) == 1:
return None
this_negative = self._single_term_where(lambda term: not term.is_positive())
other_negative = other._single_term_where(lambda term: not term.is_positive())
if this_negative is None and other_negative is None:
return None
this_positive = self._single_term_where(lambda term: term.is_positive())
        other_positive = other._single_term_where(lambda term: term.is_positive())
if (
this_negative is not None
and other_positive is not None
and this_negative.dependency.name == other_positive.dependency.name
and this_negative.inverse.satisfies(other_positive)
):
prior = self
prior_negative = this_negative
prior_line = this_line
latter = other
latter_line = other_line
elif (
other_negative is not None
and this_positive is not None
and other_negative.dependency.name == this_positive.dependency.name
and other_negative.inverse.satisfies(this_positive)
):
prior = other
prior_negative = other_negative
prior_line = other_line
latter = self
latter_line = this_line
else:
return None
prior_positives = [term for term in prior.terms if term.is_positive()]
buffer = []
if len(prior_positives) > 1:
prior_string = " or ".join([self._terse(term) for term in prior_positives])
buffer.append(f"if {prior_string} then ")
else:
if isinstance(prior.cause, DependencyCause):
verb = "depends on"
else:
verb = "requires"
buffer.append(
"{} {} ".format(self._terse(prior_positives[0], allow_every=True), verb)
)
buffer.append(self._terse(prior_negative))
if prior_line is not None:
buffer.append(f" ({prior_line})")
buffer.append(" which ")
if isinstance(latter.cause, DependencyCause):
buffer.append("depends on ")
else:
buffer.append("requires ")
buffer.append(
" or ".join(
[self._terse(term) for term in latter.terms if not term.is_positive()]
)
)
if latter_line is not None:
buffer.append(f" ({latter_line})")
return "".join(buffer)
def _try_requires_forbidden(
self, other: "Incompatibility", details: dict, this_line: int, other_line: int
) -> Optional[str]:
if len(self._terms) != 1 and len(other.terms) != 1:
return None
if len(self.terms) == 1:
prior = other
latter = self
prior_line = other_line
latter_line = this_line
else:
prior = self
latter = other
prior_line = this_line
latter_line = other_line
negative = prior._single_term_where(lambda term: not term.is_positive())
if negative is None:
return None
if not negative.inverse.satisfies(latter.terms[0]):
return None
positives = [t for t in prior.terms if t.is_positive()]
buffer = []
if len(positives) > 1:
prior_string = " or ".join([self._terse(term) for term in positives])
buffer.append(f"if {prior_string} then ")
else:
buffer.append(self._terse(positives[0], allow_every=True))
if isinstance(prior.cause, DependencyCause):
buffer.append(" depends on ")
else:
buffer.append(" requires ")
buffer.append(self._terse(latter.terms[0]) + " ")
if prior_line is not None:
buffer.append(f"({prior_line}) ")
if isinstance(latter.cause, PythonCause):
cause: PythonCause = latter.cause
buffer.append(f"which requires Python {cause.python_version}")
elif isinstance(latter.cause, NoVersionsCause):
buffer.append("which doesn't match any versions")
elif isinstance(latter.cause, PackageNotFoundCause):
buffer.append("which doesn't exist")
else:
buffer.append("which is forbidden")
if latter_line is not None:
buffer.append(f" ({latter_line})")
return "".join(buffer)
def _terse(self, term: Term, allow_every: bool = False) -> str:
if allow_every and term.constraint.is_any():
return f"every version of {term.dependency.complete_name}"
if term.dependency.is_root:
return term.dependency.pretty_name
return "{} ({})".format(
term.dependency.pretty_name, term.dependency.pretty_constraint
)
def _single_term_where(self, callable: Callable[[Term], bool]) -> Optional[Term]:
found = None
for term in self._terms:
if not callable(term):
continue
if found is not None:
return None
found = term
return found
def __repr__(self) -> str:
return "<Incompatibility {}>".format(str(self))
| 34.393305 | 91 | 0.568127 | from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Union
from poetry.mixology.incompatibility_cause import ConflictCause
from poetry.mixology.incompatibility_cause import DependencyCause
from poetry.mixology.incompatibility_cause import IncompatibilityCause
from poetry.mixology.incompatibility_cause import NoVersionsCause
from poetry.mixology.incompatibility_cause import PackageNotFoundCause
from poetry.mixology.incompatibility_cause import PlatformCause
from poetry.mixology.incompatibility_cause import PythonCause
from poetry.mixology.incompatibility_cause import RootCause
from poetry.mixology.term import Term
class Incompatibility:
def __init__(self, terms: List[Term], cause: IncompatibilityCause) -> None:
if (
len(terms) != 1
and isinstance(cause, ConflictCause)
and any(term.is_positive() and term.dependency.is_root for term in terms)
):
terms = [
term
for term in terms
if not term.is_positive() or not term.dependency.is_root
]
if (
len(terms) == 1
or len(terms) == 2
and terms[0].dependency.complete_name != terms[-1].dependency.complete_name
):
pass
else:
by_name: Dict[str, Dict[str, Term]] = {}
for term in terms:
if term.dependency.complete_name not in by_name:
by_name[term.dependency.complete_name] = {}
by_ref = by_name[term.dependency.complete_name]
ref = term.dependency.complete_name
if ref in by_ref:
by_ref[ref] = by_ref[ref].intersect(term)
# irrelevant, since we already know that mutually exclusive version
# ranges are incompatible. We should never derive an irrelevant
# incompatibility.
assert by_ref[ref] is not None
else:
by_ref[ref] = term
new_terms = []
for by_ref in by_name.values():
positive_terms = [
term for term in by_ref.values() if term.is_positive()
]
if positive_terms:
new_terms += positive_terms
continue
new_terms += list(by_ref.values())
terms = new_terms
self._terms = terms
self._cause = cause
@property
def terms(self) -> List[Term]:
return self._terms
@property
def cause(
self,
) -> Union[
RootCause,
NoVersionsCause,
DependencyCause,
ConflictCause,
PythonCause,
PlatformCause,
PackageNotFoundCause,
]:
return self._cause
@property
def external_incompatibilities(
self,
) -> Iterator[Union[ConflictCause, "Incompatibility"]]:
if isinstance(self._cause, ConflictCause):
cause: ConflictCause = self._cause
yield from cause.conflict.external_incompatibilities
yield from cause.other.external_incompatibilities
else:
yield self
def is_failure(self) -> bool:
return len(self._terms) == 0 or (
len(self._terms) == 1 and self._terms[0].dependency.is_root
)
def __str__(self) -> str:
if isinstance(self._cause, DependencyCause):
assert len(self._terms) == 2
depender = self._terms[0]
dependee = self._terms[1]
assert depender.is_positive()
assert not dependee.is_positive()
return "{} depends on {}".format(
self._terse(depender, allow_every=True), self._terse(dependee)
)
elif isinstance(self._cause, PythonCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
cause: PythonCause = self._cause
text = "{} requires ".format(self._terse(self._terms[0], allow_every=True))
text += f"Python {cause.python_version}"
return text
elif isinstance(self._cause, PlatformCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
cause: PlatformCause = self._cause
text = "{} requires ".format(self._terse(self._terms[0], allow_every=True))
text += f"platform {cause.platform}"
return text
elif isinstance(self._cause, NoVersionsCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
return "no versions of {} match {}".format(
self._terms[0].dependency.name, self._terms[0].constraint
)
elif isinstance(self._cause, PackageNotFoundCause):
assert len(self._terms) == 1
assert self._terms[0].is_positive()
return "{} doesn't exist".format(self._terms[0].dependency.name)
elif isinstance(self._cause, RootCause):
assert len(self._terms) == 1
assert not self._terms[0].is_positive()
assert self._terms[0].dependency.is_root
return "{} is {}".format(
self._terms[0].dependency.name, self._terms[0].dependency.constraint
)
elif self.is_failure():
return "version solving failed"
if len(self._terms) == 1:
term = self._terms[0]
if term.constraint.is_any():
return "{} is {}".format(
term.dependency.name,
"forbidden" if term.is_positive() else "required",
)
else:
return "{} is {}".format(
term.dependency.name,
"forbidden" if term.is_positive() else "required",
)
if len(self._terms) == 2:
term1 = self._terms[0]
term2 = self._terms[1]
if term1.is_positive() == term2.is_positive():
if term1.is_positive():
package1 = (
term1.dependency.name
if term1.constraint.is_any()
else self._terse(term1)
)
package2 = (
term2.dependency.name
if term2.constraint.is_any()
else self._terse(term2)
)
return f"{package1} is incompatible with {package2}"
else:
return "either {} or {}".format(
self._terse(term1), self._terse(term2)
)
positive = []
negative = []
for term in self._terms:
if term.is_positive():
positive.append(self._terse(term))
else:
negative.append(self._terse(term))
if positive and negative:
if len(positive) == 1:
positive_term = [term for term in self._terms if term.is_positive()][0]
return "{} requires {}".format(
self._terse(positive_term, allow_every=True), " or ".join(negative)
)
else:
return "if {} then {}".format(
" and ".join(positive), " or ".join(negative)
)
elif positive:
return "one of {} must be false".format(" or ".join(positive))
else:
return "one of {} must be true".format(" or ".join(negative))
def and_to_string(
self,
other: "Incompatibility",
details: dict,
this_line: Optional[int],
other_line: Optional[int],
) -> str:
requires_both = self._try_requires_both(other, details, this_line, other_line)
if requires_both is not None:
return requires_both
requires_through = self._try_requires_through(
other, details, this_line, other_line
)
if requires_through is not None:
return requires_through
requires_forbidden = self._try_requires_forbidden(
other, details, this_line, other_line
)
if requires_forbidden is not None:
return requires_forbidden
buffer = [str(self)]
if this_line is not None:
buffer.append(" " + str(this_line))
buffer.append(" and {}".format(str(other)))
if other_line is not None:
buffer.append(" " + str(other_line))
return "\n".join(buffer)
def _try_requires_both(
self,
other: "Incompatibility",
details: dict,
this_line: Optional[int],
other_line: Optional[int],
) -> Optional[str]:
if len(self._terms) == 1 or len(other.terms) == 1:
return None
this_positive = self._single_term_where(lambda term: term.is_positive())
if this_positive is None:
return None
other_positive = other._single_term_where(lambda term: term.is_positive())
if other_positive is None:
return None
if this_positive.dependency != other_positive.dependency:
return None
this_negatives = " or ".join(
[self._terse(term) for term in self._terms if not term.is_positive()]
)
other_negatives = " or ".join(
[self._terse(term) for term in other.terms if not term.is_positive()]
)
buffer = [self._terse(this_positive, allow_every=True) + " "]
is_dependency = isinstance(self.cause, DependencyCause) and isinstance(
other.cause, DependencyCause
)
if is_dependency:
buffer.append("depends on")
else:
buffer.append("requires")
buffer.append(f" both {this_negatives}")
if this_line is not None:
buffer.append(f" ({this_line})")
buffer.append(f" and {other_negatives}")
if other_line is not None:
buffer.append(f" ({other_line})")
return "".join(buffer)
def _try_requires_through(
self, other: "Incompatibility", details: dict, this_line: int, other_line: int
) -> Optional[str]:
if len(self._terms) == 1 or len(other.terms) == 1:
return None
this_negative = self._single_term_where(lambda term: not term.is_positive())
other_negative = other._single_term_where(lambda term: not term.is_positive())
if this_negative is None and other_negative is None:
return None
this_positive = self._single_term_where(lambda term: term.is_positive())
        other_positive = other._single_term_where(lambda term: term.is_positive())
if (
this_negative is not None
and other_positive is not None
and this_negative.dependency.name == other_positive.dependency.name
and this_negative.inverse.satisfies(other_positive)
):
prior = self
prior_negative = this_negative
prior_line = this_line
latter = other
latter_line = other_line
elif (
other_negative is not None
and this_positive is not None
and other_negative.dependency.name == this_positive.dependency.name
and other_negative.inverse.satisfies(this_positive)
):
prior = other
prior_negative = other_negative
prior_line = other_line
latter = self
latter_line = this_line
else:
return None
prior_positives = [term for term in prior.terms if term.is_positive()]
buffer = []
if len(prior_positives) > 1:
prior_string = " or ".join([self._terse(term) for term in prior_positives])
buffer.append(f"if {prior_string} then ")
else:
if isinstance(prior.cause, DependencyCause):
verb = "depends on"
else:
verb = "requires"
buffer.append(
"{} {} ".format(self._terse(prior_positives[0], allow_every=True), verb)
)
buffer.append(self._terse(prior_negative))
if prior_line is not None:
buffer.append(f" ({prior_line})")
buffer.append(" which ")
if isinstance(latter.cause, DependencyCause):
buffer.append("depends on ")
else:
buffer.append("requires ")
buffer.append(
" or ".join(
[self._terse(term) for term in latter.terms if not term.is_positive()]
)
)
if latter_line is not None:
buffer.append(f" ({latter_line})")
return "".join(buffer)
def _try_requires_forbidden(
self, other: "Incompatibility", details: dict, this_line: int, other_line: int
) -> Optional[str]:
if len(self._terms) != 1 and len(other.terms) != 1:
return None
if len(self.terms) == 1:
prior = other
latter = self
prior_line = other_line
latter_line = this_line
else:
prior = self
latter = other
prior_line = this_line
latter_line = other_line
negative = prior._single_term_where(lambda term: not term.is_positive())
if negative is None:
return None
if not negative.inverse.satisfies(latter.terms[0]):
return None
positives = [t for t in prior.terms if t.is_positive()]
buffer = []
if len(positives) > 1:
prior_string = " or ".join([self._terse(term) for term in positives])
buffer.append(f"if {prior_string} then ")
else:
buffer.append(self._terse(positives[0], allow_every=True))
if isinstance(prior.cause, DependencyCause):
buffer.append(" depends on ")
else:
buffer.append(" requires ")
buffer.append(self._terse(latter.terms[0]) + " ")
if prior_line is not None:
buffer.append(f"({prior_line}) ")
if isinstance(latter.cause, PythonCause):
cause: PythonCause = latter.cause
buffer.append(f"which requires Python {cause.python_version}")
elif isinstance(latter.cause, NoVersionsCause):
buffer.append("which doesn't match any versions")
elif isinstance(latter.cause, PackageNotFoundCause):
buffer.append("which doesn't exist")
else:
buffer.append("which is forbidden")
if latter_line is not None:
buffer.append(f" ({latter_line})")
return "".join(buffer)
def _terse(self, term: Term, allow_every: bool = False) -> str:
if allow_every and term.constraint.is_any():
return f"every version of {term.dependency.complete_name}"
if term.dependency.is_root:
return term.dependency.pretty_name
return "{} ({})".format(
term.dependency.pretty_name, term.dependency.pretty_constraint
)
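    # Return the unique term matching `callable`, or None if no term or more
    # than one term matches.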
def _single_term_where(self, callable: Callable[[Term], bool]) -> Optional[Term]:
found = None
for term in self._terms:
if not callable(term):
continue
if found is not None:
return None
found = term
return found
def __repr__(self) -> str:
return "<Incompatibility {}>".format(str(self))
| true | true |
1c2b37b0c65c4c2670dc787d819a3d20aeae9092 | 93 | py | Python | skeleton/pixelwars/apps.py | GenchoBG/HackTues3 | 1457d44d6f6aeef158e49f91ce4a40246afe9c62 | [
"MIT"
] | null | null | null | skeleton/pixelwars/apps.py | GenchoBG/HackTues3 | 1457d44d6f6aeef158e49f91ce4a40246afe9c62 | [
"MIT"
] | null | null | null | skeleton/pixelwars/apps.py | GenchoBG/HackTues3 | 1457d44d6f6aeef158e49f91ce4a40246afe9c62 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class PixelwarsConfig(AppConfig):
name = 'pixelwars'
| 15.5 | 33 | 0.763441 | from django.apps import AppConfig
class PixelwarsConfig(AppConfig):
name = 'pixelwars'
| true | true |
1c2b380a48509c5afcad0bf4a34adea41e89cc5e | 530 | py | Python | scripts/data_pop.py | ifryed/LinearNet | f4fbdcdc98c275a6c21c9efbbc357aa9e88aed6c | [
"MIT"
] | 3 | 2021-10-05T20:43:13.000Z | 2021-10-09T20:59:47.000Z | scripts/data_pop.py | ifryed/LinearNet | f4fbdcdc98c275a6c21c9efbbc357aa9e88aed6c | [
"MIT"
] | null | null | null | scripts/data_pop.py | ifryed/LinearNet | f4fbdcdc98c275a6c21c9efbbc357aa9e88aed6c | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
from skimage import io
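# Populate the image folder passed as argv[1] with random 256x256 crops of the
# first image it contains; the number of crops can be given as argv[2] (default 200).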
def main():
images = os.listdir(sys.argv[1])
    pop_n = int(sys.argv[2]) if len(sys.argv) > 2 else 200
img = io.imread(os.path.join(sys.argv[1], images[0]))
h, w = img.shape[:2]
crop_size = 256
for i in range(pop_n):
x, y = np.random.randint(0, w - crop_size), np.random.randint(0, h - crop_size)
io.imsave(sys.argv[1] + '/img_{}.png'.format(i), img[y:y + crop_size, x: x + crop_size])
if __name__ == '__main__':
main()
| 24.090909 | 96 | 0.609434 | import os
import sys
import numpy as np
from skimage import io
def main():
images = os.listdir(sys.argv[1])
    pop_n = int(sys.argv[2]) if len(sys.argv) > 2 else 200
img = io.imread(os.path.join(sys.argv[1], images[0]))
h, w = img.shape[:2]
crop_size = 256
for i in range(pop_n):
x, y = np.random.randint(0, w - crop_size), np.random.randint(0, h - crop_size)
io.imsave(sys.argv[1] + '/img_{}.png'.format(i), img[y:y + crop_size, x: x + crop_size])
if __name__ == '__main__':
main()
| true | true |
1c2b393f8b5be892bd281a729a6d6f4b16059d4d | 31,890 | py | Python | tensorflow_federated/python/core/api/intrinsics_test.py | abhinavsp0730/federated | 7c5821f85cb2d0379f33bf2b5e02f97d51a16427 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/core/api/intrinsics_test.py | abhinavsp0730/federated | 7c5821f85cb2d0379f33bf2b5e02f97d51a16427 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/core/api/intrinsics_test.py | abhinavsp0730/federated | 7c5821f85cb2d0379f33bf2b5e02f97d51a16427 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
import warnings
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import test as common_test
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.api import placements
from tensorflow_federated.python.core.api import value_base
from tensorflow_federated.python.core.impl.executors import default_executor
from tensorflow_federated.python.core.impl.executors import executor_stacks
from tensorflow_federated.python.core.impl.executors import executor_test_utils
tf.compat.v1.enable_v2_behavior()
class IntrinsicsTest(parameterized.TestCase):
def assert_type(self, value, type_string):
self.assertEqual(value.type_signature.compact_representation(), type_string)
def test_federated_broadcast_with_server_all_equal_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def foo(x):
val = intrinsics.federated_broadcast(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(int32@SERVER -> int32@CLIENTS)')
def test_federated_broadcast_with_server_non_all_equal_int(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(
tf.int32, placements.SERVER, all_equal=False))
def _(x):
return intrinsics.federated_broadcast(x)
def test_federated_broadcast_with_client_int(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS, True))
def _(x):
return intrinsics.federated_broadcast(x)
def test_federated_broadcast_with_non_federated_val(self):
with self.assertRaises(TypeError):
@computations.federated_computation(tf.int32)
def _(x):
return intrinsics.federated_broadcast(x)
def test_federated_eval_rand_on_clients(self):
@computations.federated_computation
def rand_on_clients():
@computations.tf_computation
def rand():
return tf.random.normal([])
val = intrinsics.federated_eval(rand, placements.CLIENTS)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(rand_on_clients, '( -> {float32}@CLIENTS)')
def test_federated_eval_rand_on_server(self):
@computations.federated_computation
def rand_on_server():
@computations.tf_computation
def rand():
return tf.random.normal([])
val = intrinsics.federated_eval(rand, placements.SERVER)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(rand_on_server, '( -> float32@SERVER)')
def test_federated_map_with_client_all_equal_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS, True))
def foo(x):
val = intrinsics.federated_map(
computations.tf_computation(lambda x: x > 10, tf.int32), x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(int32@CLIENTS -> {bool}@CLIENTS)')
def test_federated_map_with_client_non_all_equal_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_map(
computations.tf_computation(lambda x: x > 10, tf.int32), x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> {bool}@CLIENTS)')
def test_federated_map_with_server_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def foo(x):
val = intrinsics.federated_map(
computations.tf_computation(lambda x: x > 10, tf.int32), x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(int32@SERVER -> bool@SERVER)')
def test_federated_map_injected_zip_with_server_int(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.SERVER),
computation_types.FederatedType(tf.int32, placements.SERVER)
])
def foo(x, y):
val = intrinsics.federated_map(
computations.tf_computation(lambda x, y: x > 10,
[tf.int32, tf.int32]), [x, y])
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<int32@SERVER,int32@SERVER> -> bool@SERVER)')
def test_federated_map_injected_zip_fails_different_placements(self):
def foo(x, y):
val = intrinsics.federated_map(
computations.tf_computation(lambda x, y: x > 10,
[tf.int32, tf.int32]), [x, y])
self.assertIsInstance(val, value_base.Value)
return val
with self.assertRaisesRegex(
TypeError,
'The value to be mapped must be a FederatedType or implicitly '
'convertible to a FederatedType.'):
computations.federated_computation(foo, [
computation_types.FederatedType(tf.int32, placements.SERVER),
computation_types.FederatedType(tf.int32, placements.CLIENTS)
])
def test_federated_map_with_non_federated_val(self):
with self.assertRaises(TypeError):
@computations.federated_computation(tf.int32)
def _(x):
return intrinsics.federated_map(
computations.tf_computation(lambda x: x > 10, tf.int32), x)
def test_federated_sum_with_client_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_sum(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> int32@SERVER)')
def test_federated_sum_with_client_string(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.string, placements.CLIENTS))
def _(x):
return intrinsics.federated_sum(x)
def test_federated_sum_with_server_int(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def _(x):
return intrinsics.federated_sum(x)
def test_federated_zip_with_client_non_all_equal_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS),
computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
])
def foo(x, y):
val = intrinsics.federated_zip([x, y])
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<{int32}@CLIENTS,bool@CLIENTS> -> {<int32,bool>}@CLIENTS)')
def test_federated_zip_with_single_unnamed_int_client(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS),
])
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<{int32}@CLIENTS> -> {<int32>}@CLIENTS)')
def test_federated_zip_with_single_unnamed_int_server(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.SERVER),
])
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<int32@SERVER> -> <int32>@SERVER)')
def test_federated_zip_with_single_named_bool_clients(self):
@computations.federated_computation([
('a', computation_types.FederatedType(tf.bool, placements.CLIENTS)),
])
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<a={bool}@CLIENTS> -> {<a=bool>}@CLIENTS)')
def test_federated_zip_with_single_named_bool_server(self):
@computations.federated_computation([
('a', computation_types.FederatedType(tf.bool, placements.SERVER)),
])
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<a=bool@SERVER> -> <a=bool>@SERVER)')
def test_federated_zip_with_names_client_non_all_equal_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS),
computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
])
def foo(x, y):
a = {'x': x, 'y': y}
val = intrinsics.federated_zip(a)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<{int32}@CLIENTS,bool@CLIENTS> -> {<x=int32,y=bool>}@CLIENTS)')
def test_federated_zip_with_client_all_equal_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS, True),
computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
])
def foo(x, y):
val = intrinsics.federated_zip([x, y])
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<int32@CLIENTS,bool@CLIENTS> -> {<int32,bool>}@CLIENTS)')
def test_federated_zip_with_names_client_all_equal_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS, True),
computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
])
def foo(arg):
a = {'x': arg[0], 'y': arg[1]}
val = intrinsics.federated_zip(a)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<int32@CLIENTS,bool@CLIENTS> -> {<x=int32,y=bool>}@CLIENTS)')
def test_federated_zip_with_server_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.SERVER),
computation_types.FederatedType(tf.bool, placements.SERVER)
])
def foo(x, y):
val = intrinsics.federated_zip([x, y])
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<int32@SERVER,bool@SERVER> -> <int32,bool>@SERVER)')
def test_federated_zip_with_names_server_int_and_bool(self):
@computations.federated_computation([
('a', computation_types.FederatedType(tf.int32, placements.SERVER)),
('b', computation_types.FederatedType(tf.bool, placements.SERVER)),
])
def foo(arg):
val = intrinsics.federated_zip(arg)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<a=int32@SERVER,b=bool@SERVER> -> <a=int32,b=bool>@SERVER)')
def test_federated_zip_error_different_placements(self):
with self.assertRaises(TypeError):
@computations.federated_computation([
('a', computation_types.FederatedType(tf.int32, placements.SERVER)),
('b', computation_types.FederatedType(tf.bool, placements.CLIENTS)),
])
def _(arg):
return intrinsics.federated_zip(arg)
def test_federated_collect_with_client_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_collect(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> int32*@SERVER)')
def test_federated_collect_with_server_int_fails(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def _(x):
return intrinsics.federated_collect(x)
def test_federated_mean_with_client_float32_without_weight(self):
@computations.federated_computation(
computation_types.FederatedType(tf.float32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_mean(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({float32}@CLIENTS -> float32@SERVER)')
def test_federated_mean_with_all_equal_client_float32_without_weight(self):
federated_all_equal_float = computation_types.FederatedType(
tf.float32, placements.CLIENTS, all_equal=True)
@computations.federated_computation(federated_all_equal_float)
def foo(x):
val = intrinsics.federated_mean(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(float32@CLIENTS -> float32@SERVER)')
def test_federated_mean_with_all_equal_client_float32_with_weight(self):
federated_all_equal_float = computation_types.FederatedType(
tf.float32, placements.CLIENTS, all_equal=True)
@computations.federated_computation(federated_all_equal_float)
def foo(x):
val = intrinsics.federated_mean(x, x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(float32@CLIENTS -> float32@SERVER)')
def test_federated_mean_with_client_tuple_with_int32_weight(self):
@computations.federated_computation([
computation_types.FederatedType([('x', tf.float64), ('y', tf.float64)],
placements.CLIENTS),
computation_types.FederatedType(tf.int32, placements.CLIENTS)
])
def foo(x, y):
val = intrinsics.federated_mean(x, y)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<{<x=float64,y=float64>}@CLIENTS,{int32}@CLIENTS> '
'-> <x=float64,y=float64>@SERVER)')
def test_federated_mean_with_client_int32_fails(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def _(x):
return intrinsics.federated_mean(x)
def test_federated_mean_with_string_weight_fails(self):
with self.assertRaises(TypeError):
@computations.federated_computation([
computation_types.FederatedType(tf.float32, placements.CLIENTS),
computation_types.FederatedType(tf.string, placements.CLIENTS)
])
def _(x, y):
return intrinsics.federated_mean(x, y)
def test_federated_aggregate_with_client_int(self):
# The representation used during the aggregation process will be a named
# tuple with 2 elements - the integer 'total' that represents the sum of
# elements encountered, and the integer element 'count'.
# pylint: disable=invalid-name
Accumulator = collections.namedtuple('Accumulator', 'total count')
# pylint: enable=invalid-name
accumulator_type = computation_types.NamedTupleType(
Accumulator(tf.int32, tf.int32))
# The operator to use during the first stage simply adds an element to the
# total and updates the count.
@computations.tf_computation([accumulator_type, tf.int32])
def accumulate(accu, elem):
return Accumulator(accu.total + elem, accu.count + 1)
# The operator to use during the second stage simply adds total and count.
@computations.tf_computation([accumulator_type, accumulator_type])
def merge(x, y):
return Accumulator(x.total + y.total, x.count + y.count)
# The operator to use during the final stage simply computes the ratio.
@computations.tf_computation(accumulator_type)
def report(accu):
return tf.cast(accu.total, tf.float32) / tf.cast(accu.count, tf.float32)
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_aggregate(x, Accumulator(0, 0), accumulate,
merge, report)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> float32@SERVER)')
def test_federated_aggregate_with_federated_zero_fails(self):
@computations.federated_computation()
def build_federated_zero():
val = intrinsics.federated_value(0, placements.SERVER)
self.assertIsInstance(val, value_base.Value)
return val
@computations.tf_computation([tf.int32, tf.int32])
def accumulate(accu, elem):
return accu + elem
# The operator to use during the second stage simply adds total and count.
@computations.tf_computation([tf.int32, tf.int32])
def merge(x, y):
return x + y
# The operator to use during the final stage simply computes the ratio.
@computations.tf_computation(tf.int32)
def report(accu):
return accu
def foo(x):
return intrinsics.federated_aggregate(x, build_federated_zero(),
accumulate, merge, report)
with self.assertRaisesRegex(
TypeError, 'Expected `zero` to be assignable to type int32, '
'but was of incompatible type int32@SERVER'):
computations.federated_computation(
foo, computation_types.FederatedType(tf.int32, placements.CLIENTS))
def test_federated_aggregate_with_unknown_dimension(self):
Accumulator = collections.namedtuple('Accumulator', ['samples']) # pylint: disable=invalid-name
accumulator_type = computation_types.NamedTupleType(
Accumulator(
samples=computation_types.TensorType(dtype=tf.int32, shape=[None])))
@computations.tf_computation()
def build_empty_accumulator():
return Accumulator(samples=tf.zeros(shape=[0], dtype=tf.int32))
# The operator to use during the first stage simply adds an element to the
# tensor, increasing its size.
@computations.tf_computation([accumulator_type, tf.int32])
def accumulate(accu, elem):
return Accumulator(
samples=tf.concat(
[accu.samples, tf.expand_dims(elem, axis=0)], axis=0))
# The operator to use during the second stage simply adds total and count.
@computations.tf_computation([accumulator_type, accumulator_type])
def merge(x, y):
return Accumulator(samples=tf.concat([x.samples, y.samples], axis=0))
# The operator to use during the final stage simply computes the ratio.
@computations.tf_computation(accumulator_type)
def report(accu):
return accu
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_aggregate(x, build_empty_accumulator(),
accumulate, merge, report)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> <samples=int32[?]>@SERVER)')
def test_federated_reduce_with_tf_add_raw_constant(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
plus = computations.tf_computation(tf.add, [tf.int32, tf.int32])
val = intrinsics.federated_reduce(x, 0, plus)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> int32@SERVER)')
def test_num_over_temperature_threshold_example(self):
@computations.federated_computation([
computation_types.FederatedType(tf.float32, placements.CLIENTS),
computation_types.FederatedType(tf.float32, placements.SERVER)
])
def foo(temperatures, threshold):
val = intrinsics.federated_sum(
intrinsics.federated_map(
computations.tf_computation(
lambda x, y: tf.cast(tf.greater(x, y), tf.int32),
[tf.float32, tf.float32]),
[temperatures,
intrinsics.federated_broadcast(threshold)]))
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo,
'(<{float32}@CLIENTS,float32@SERVER> -> int32@SERVER)')
@parameterized.named_parameters(('test_n_2', 2), ('test_n_3', 3),
('test_n_5', 5))
def test_n_tuple_federated_zip_tensor_args(self, n):
fed_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)
initial_tuple_type = computation_types.NamedTupleType([fed_type] * n)
final_fed_type = computation_types.FederatedType([tf.int32] * n,
placements.CLIENTS)
function_type = computation_types.FunctionType(initial_tuple_type,
final_fed_type)
@computations.federated_computation(
[computation_types.FederatedType(tf.int32, placements.CLIENTS)] * n)
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, function_type.compact_representation())
@parameterized.named_parameters(
('test_n_2_int', 2,
computation_types.FederatedType(tf.int32, placements.CLIENTS)),
('test_n_3_int', 3,
computation_types.FederatedType(tf.int32, placements.CLIENTS)),
('test_n_5_int', 5,
computation_types.FederatedType(tf.int32, placements.CLIENTS)),
('test_n_2_tuple', 2,
computation_types.FederatedType([tf.int32, tf.int32],
placements.CLIENTS)),
('test_n_3_tuple', 3,
computation_types.FederatedType([tf.int32, tf.int32],
placements.CLIENTS)),
('test_n_5_tuple', 5,
computation_types.FederatedType([tf.int32, tf.int32],
placements.CLIENTS)))
def test_named_n_tuple_federated_zip(self, n, fed_type):
initial_tuple_type = computation_types.NamedTupleType([fed_type] * n)
named_fed_type = computation_types.FederatedType(
[(str(k), fed_type.member) for k in range(n)], placements.CLIENTS)
mixed_fed_type = computation_types.FederatedType(
[(str(k), fed_type.member) if k % 2 == 0 else fed_type.member
for k in range(n)], placements.CLIENTS)
named_function_type = computation_types.FunctionType(
initial_tuple_type, named_fed_type)
mixed_function_type = computation_types.FunctionType(
initial_tuple_type, mixed_fed_type)
@computations.federated_computation([fed_type] * n)
def foo(x):
arg = {str(k): x[k] for k in range(n)}
val = intrinsics.federated_zip(arg)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, named_function_type.compact_representation())
def _make_test_tuple(x, k):
"""Make a test tuple with a name if k is even, otherwise unnamed."""
if k % 2 == 0:
return str(k), x[k]
else:
return None, x[k]
@computations.federated_computation([fed_type] * n)
def bar(x):
arg = anonymous_tuple.AnonymousTuple(
_make_test_tuple(x, k) for k in range(n))
val = intrinsics.federated_zip(arg)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(bar, mixed_function_type.compact_representation())
@parameterized.named_parameters([
('test_n_' + str(n) + '_m_' + str(m), n, m)
for n, m in itertools.product([1, 2, 3], [1, 2, 3])
])
def test_n_tuple_federated_zip_mixed_args(self, n, m):
tuple_fed_type = computation_types.FederatedType([tf.int32, tf.int32],
placements.CLIENTS)
single_fed_type = computation_types.FederatedType(tf.int32,
placements.CLIENTS)
initial_tuple_type = computation_types.NamedTupleType([tuple_fed_type] * n +
[single_fed_type] * m)
final_fed_type = computation_types.FederatedType(
[[tf.int32, tf.int32]] * n + [tf.int32] * m, placements.CLIENTS)
function_type = computation_types.FunctionType(initial_tuple_type,
final_fed_type)
@computations.federated_computation([
computation_types.FederatedType(
computation_types.NamedTupleType([tf.int32, tf.int32]),
placements.CLIENTS)
] * n + [computation_types.FederatedType(tf.int32, placements.CLIENTS)] * m)
def baz(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(baz, function_type.compact_representation())
def test_federated_apply_raises_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def foo(x):
val = intrinsics.federated_apply(
computations.tf_computation(lambda x: x * x, tf.int32), x)
self.assertIsInstance(val, value_base.Value)
return val
self.assertLen(w, 1)
self.assertIsInstance(w[0].category(), DeprecationWarning)
self.assertIn('tff.federated_apply() is deprecated', str(w[0].message))
self.assert_type(foo, '(int32@SERVER -> int32@SERVER)')
def test_federated_value_with_bool_on_clients(self):
@computations.federated_computation(tf.bool)
def foo(x):
val = intrinsics.federated_value(x, placements.CLIENTS)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(bool -> bool@CLIENTS)')
def test_federated_value_raw_np_scalar(self):
@computations.federated_computation
def test_np_values():
floatv = np.float64(0)
tff_float = intrinsics.federated_value(floatv, placements.SERVER)
self.assertIsInstance(tff_float, value_base.Value)
self.assert_type(tff_float, 'float64@SERVER')
intv = np.int64(0)
tff_int = intrinsics.federated_value(intv, placements.SERVER)
self.assertIsInstance(tff_int, value_base.Value)
self.assert_type(tff_int, 'int64@SERVER')
return (tff_float, tff_int)
floatv, intv = test_np_values()
self.assertEqual(floatv, 0.0)
self.assertEqual(intv, 0)
def test_federated_value_raw_tf_scalar_variable(self):
v = tf.Variable(initial_value=0., name='test_var')
with self.assertRaisesRegex(
TypeError, 'TensorFlow construct (.*) has been '
'encountered in a federated context.'):
_ = intrinsics.federated_value(v, placements.SERVER)
def test_federated_value_with_bool_on_server(self):
@computations.federated_computation(tf.bool)
def foo(x):
val = intrinsics.federated_value(x, placements.SERVER)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(bool -> bool@SERVER)')
def test_sequence_sum(self):
@computations.federated_computation(
computation_types.SequenceType(tf.int32))
def foo1(x):
val = intrinsics.sequence_sum(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo1, '(int32* -> int32)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.SERVER))
def foo2(x):
val = intrinsics.sequence_sum(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo2, '(int32*@SERVER -> int32@SERVER)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.CLIENTS))
def foo3(x):
val = intrinsics.sequence_sum(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo3, '({int32*}@CLIENTS -> {int32}@CLIENTS)')
def test_sequence_map(self):
@computations.tf_computation(tf.int32)
def over_threshold(x):
return x > 10
@computations.federated_computation(
computation_types.SequenceType(tf.int32))
def foo1(x):
val = intrinsics.sequence_map(over_threshold, x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo1, '(int32* -> bool*)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.SERVER))
def foo2(x):
val = intrinsics.sequence_map(over_threshold, x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo2, '(int32*@SERVER -> bool*@SERVER)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.CLIENTS))
def foo3(x):
val = intrinsics.sequence_map(over_threshold, x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo3, '({int32*}@CLIENTS -> {bool*}@CLIENTS)')
def test_sequence_reduce(self):
add_numbers = computations.tf_computation(tf.add, [tf.int32, tf.int32])
@computations.federated_computation(
computation_types.SequenceType(tf.int32))
def foo1(x):
val = intrinsics.sequence_reduce(x, 0, add_numbers)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo1, '(int32* -> int32)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.SERVER))
def foo2(x):
val = intrinsics.sequence_reduce(x, 0, add_numbers)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo2, '(int32*@SERVER -> int32@SERVER)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.CLIENTS))
def foo3(x):
val = intrinsics.sequence_reduce(x, 0, add_numbers)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo3, '({int32*}@CLIENTS -> {int32}@CLIENTS)')
@executor_test_utils.executors(
('local', executor_stacks.local_executor_factory()),)
def test_federated_zip_with_twenty_elements_local_executor(self):
n = 20
n_clients = 2
@computations.federated_computation(
[computation_types.FederatedType(tf.int32, placements.CLIENTS)] * n)
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
data = [list(range(n_clients)) for _ in range(n)]
# This would not have ever returned when local executor was scaling
# factorially with number of elements zipped
foo(data)
if __name__ == '__main__':
default_executor.initialize_default_executor()
common_test.main()
| 36.82448 | 100 | 0.6963 |
import collections
import itertools
import warnings
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import test as common_test
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.api import placements
from tensorflow_federated.python.core.api import value_base
from tensorflow_federated.python.core.impl.executors import default_executor
from tensorflow_federated.python.core.impl.executors import executor_stacks
from tensorflow_federated.python.core.impl.executors import executor_test_utils
tf.compat.v1.enable_v2_behavior()
class IntrinsicsTest(parameterized.TestCase):
def assert_type(self, value, type_string):
self.assertEqual(value.type_signature.compact_representation(), type_string)
def test_federated_broadcast_with_server_all_equal_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def foo(x):
val = intrinsics.federated_broadcast(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(int32@SERVER -> int32@CLIENTS)')
def test_federated_broadcast_with_server_non_all_equal_int(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(
tf.int32, placements.SERVER, all_equal=False))
def _(x):
return intrinsics.federated_broadcast(x)
def test_federated_broadcast_with_client_int(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS, True))
def _(x):
return intrinsics.federated_broadcast(x)
def test_federated_broadcast_with_non_federated_val(self):
with self.assertRaises(TypeError):
@computations.federated_computation(tf.int32)
def _(x):
return intrinsics.federated_broadcast(x)
def test_federated_eval_rand_on_clients(self):
@computations.federated_computation
def rand_on_clients():
@computations.tf_computation
def rand():
return tf.random.normal([])
val = intrinsics.federated_eval(rand, placements.CLIENTS)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(rand_on_clients, '( -> {float32}@CLIENTS)')
def test_federated_eval_rand_on_server(self):
@computations.federated_computation
def rand_on_server():
@computations.tf_computation
def rand():
return tf.random.normal([])
val = intrinsics.federated_eval(rand, placements.SERVER)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(rand_on_server, '( -> float32@SERVER)')
def test_federated_map_with_client_all_equal_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS, True))
def foo(x):
val = intrinsics.federated_map(
computations.tf_computation(lambda x: x > 10, tf.int32), x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(int32@CLIENTS -> {bool}@CLIENTS)')
def test_federated_map_with_client_non_all_equal_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_map(
computations.tf_computation(lambda x: x > 10, tf.int32), x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> {bool}@CLIENTS)')
def test_federated_map_with_server_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def foo(x):
val = intrinsics.federated_map(
computations.tf_computation(lambda x: x > 10, tf.int32), x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(int32@SERVER -> bool@SERVER)')
def test_federated_map_injected_zip_with_server_int(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.SERVER),
computation_types.FederatedType(tf.int32, placements.SERVER)
])
def foo(x, y):
val = intrinsics.federated_map(
computations.tf_computation(lambda x, y: x > 10,
[tf.int32, tf.int32]), [x, y])
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<int32@SERVER,int32@SERVER> -> bool@SERVER)')
def test_federated_map_injected_zip_fails_different_placements(self):
def foo(x, y):
val = intrinsics.federated_map(
computations.tf_computation(lambda x, y: x > 10,
[tf.int32, tf.int32]), [x, y])
self.assertIsInstance(val, value_base.Value)
return val
with self.assertRaisesRegex(
TypeError,
'The value to be mapped must be a FederatedType or implicitly '
'convertible to a FederatedType.'):
computations.federated_computation(foo, [
computation_types.FederatedType(tf.int32, placements.SERVER),
computation_types.FederatedType(tf.int32, placements.CLIENTS)
])
def test_federated_map_with_non_federated_val(self):
with self.assertRaises(TypeError):
@computations.federated_computation(tf.int32)
def _(x):
return intrinsics.federated_map(
computations.tf_computation(lambda x: x > 10, tf.int32), x)
def test_federated_sum_with_client_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_sum(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> int32@SERVER)')
def test_federated_sum_with_client_string(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.string, placements.CLIENTS))
def _(x):
return intrinsics.federated_sum(x)
def test_federated_sum_with_server_int(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def _(x):
return intrinsics.federated_sum(x)
def test_federated_zip_with_client_non_all_equal_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS),
computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
])
def foo(x, y):
val = intrinsics.federated_zip([x, y])
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<{int32}@CLIENTS,bool@CLIENTS> -> {<int32,bool>}@CLIENTS)')
def test_federated_zip_with_single_unnamed_int_client(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS),
])
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<{int32}@CLIENTS> -> {<int32>}@CLIENTS)')
def test_federated_zip_with_single_unnamed_int_server(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.SERVER),
])
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<int32@SERVER> -> <int32>@SERVER)')
def test_federated_zip_with_single_named_bool_clients(self):
@computations.federated_computation([
('a', computation_types.FederatedType(tf.bool, placements.CLIENTS)),
])
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<a={bool}@CLIENTS> -> {<a=bool>}@CLIENTS)')
def test_federated_zip_with_single_named_bool_server(self):
@computations.federated_computation([
('a', computation_types.FederatedType(tf.bool, placements.SERVER)),
])
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<a=bool@SERVER> -> <a=bool>@SERVER)')
def test_federated_zip_with_names_client_non_all_equal_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS),
computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
])
def foo(x, y):
a = {'x': x, 'y': y}
val = intrinsics.federated_zip(a)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<{int32}@CLIENTS,bool@CLIENTS> -> {<x=int32,y=bool>}@CLIENTS)')
def test_federated_zip_with_client_all_equal_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS, True),
computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
])
def foo(x, y):
val = intrinsics.federated_zip([x, y])
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<int32@CLIENTS,bool@CLIENTS> -> {<int32,bool>}@CLIENTS)')
def test_federated_zip_with_names_client_all_equal_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.CLIENTS, True),
computation_types.FederatedType(tf.bool, placements.CLIENTS, True)
])
def foo(arg):
a = {'x': arg[0], 'y': arg[1]}
val = intrinsics.federated_zip(a)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<int32@CLIENTS,bool@CLIENTS> -> {<x=int32,y=bool>}@CLIENTS)')
def test_federated_zip_with_server_int_and_bool(self):
@computations.federated_computation([
computation_types.FederatedType(tf.int32, placements.SERVER),
computation_types.FederatedType(tf.bool, placements.SERVER)
])
def foo(x, y):
val = intrinsics.federated_zip([x, y])
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(<int32@SERVER,bool@SERVER> -> <int32,bool>@SERVER)')
def test_federated_zip_with_names_server_int_and_bool(self):
@computations.federated_computation([
('a', computation_types.FederatedType(tf.int32, placements.SERVER)),
('b', computation_types.FederatedType(tf.bool, placements.SERVER)),
])
def foo(arg):
val = intrinsics.federated_zip(arg)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<a=int32@SERVER,b=bool@SERVER> -> <a=int32,b=bool>@SERVER)')
def test_federated_zip_error_different_placements(self):
with self.assertRaises(TypeError):
@computations.federated_computation([
('a', computation_types.FederatedType(tf.int32, placements.SERVER)),
('b', computation_types.FederatedType(tf.bool, placements.CLIENTS)),
])
def _(arg):
return intrinsics.federated_zip(arg)
def test_federated_collect_with_client_int(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_collect(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> int32*@SERVER)')
def test_federated_collect_with_server_int_fails(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def _(x):
return intrinsics.federated_collect(x)
def test_federated_mean_with_client_float32_without_weight(self):
@computations.federated_computation(
computation_types.FederatedType(tf.float32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_mean(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({float32}@CLIENTS -> float32@SERVER)')
def test_federated_mean_with_all_equal_client_float32_without_weight(self):
federated_all_equal_float = computation_types.FederatedType(
tf.float32, placements.CLIENTS, all_equal=True)
@computations.federated_computation(federated_all_equal_float)
def foo(x):
val = intrinsics.federated_mean(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(float32@CLIENTS -> float32@SERVER)')
def test_federated_mean_with_all_equal_client_float32_with_weight(self):
federated_all_equal_float = computation_types.FederatedType(
tf.float32, placements.CLIENTS, all_equal=True)
@computations.federated_computation(federated_all_equal_float)
def foo(x):
val = intrinsics.federated_mean(x, x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(float32@CLIENTS -> float32@SERVER)')
def test_federated_mean_with_client_tuple_with_int32_weight(self):
@computations.federated_computation([
computation_types.FederatedType([('x', tf.float64), ('y', tf.float64)],
placements.CLIENTS),
computation_types.FederatedType(tf.int32, placements.CLIENTS)
])
def foo(x, y):
val = intrinsics.federated_mean(x, y)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(
foo, '(<{<x=float64,y=float64>}@CLIENTS,{int32}@CLIENTS> '
'-> <x=float64,y=float64>@SERVER)')
def test_federated_mean_with_client_int32_fails(self):
with self.assertRaises(TypeError):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def _(x):
return intrinsics.federated_mean(x)
def test_federated_mean_with_string_weight_fails(self):
with self.assertRaises(TypeError):
@computations.federated_computation([
computation_types.FederatedType(tf.float32, placements.CLIENTS),
computation_types.FederatedType(tf.string, placements.CLIENTS)
])
def _(x, y):
return intrinsics.federated_mean(x, y)
def test_federated_aggregate_with_client_int(self):
Accumulator = collections.namedtuple('Accumulator', 'total count')
accumulator_type = computation_types.NamedTupleType(
Accumulator(tf.int32, tf.int32))
@computations.tf_computation([accumulator_type, tf.int32])
def accumulate(accu, elem):
return Accumulator(accu.total + elem, accu.count + 1)
@computations.tf_computation([accumulator_type, accumulator_type])
def merge(x, y):
return Accumulator(x.total + y.total, x.count + y.count)
@computations.tf_computation(accumulator_type)
def report(accu):
return tf.cast(accu.total, tf.float32) / tf.cast(accu.count, tf.float32)
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_aggregate(x, Accumulator(0, 0), accumulate,
merge, report)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> float32@SERVER)')
def test_federated_aggregate_with_federated_zero_fails(self):
@computations.federated_computation()
def build_federated_zero():
val = intrinsics.federated_value(0, placements.SERVER)
self.assertIsInstance(val, value_base.Value)
return val
@computations.tf_computation([tf.int32, tf.int32])
def accumulate(accu, elem):
return accu + elem
@computations.tf_computation([tf.int32, tf.int32])
def merge(x, y):
return x + y
@computations.tf_computation(tf.int32)
def report(accu):
return accu
def foo(x):
return intrinsics.federated_aggregate(x, build_federated_zero(),
accumulate, merge, report)
with self.assertRaisesRegex(
TypeError, 'Expected `zero` to be assignable to type int32, '
'but was of incompatible type int32@SERVER'):
computations.federated_computation(
foo, computation_types.FederatedType(tf.int32, placements.CLIENTS))
def test_federated_aggregate_with_unknown_dimension(self):
Accumulator = collections.namedtuple('Accumulator', ['samples'])
accumulator_type = computation_types.NamedTupleType(
Accumulator(
samples=computation_types.TensorType(dtype=tf.int32, shape=[None])))
@computations.tf_computation()
def build_empty_accumulator():
return Accumulator(samples=tf.zeros(shape=[0], dtype=tf.int32))
@computations.tf_computation([accumulator_type, tf.int32])
def accumulate(accu, elem):
return Accumulator(
samples=tf.concat(
[accu.samples, tf.expand_dims(elem, axis=0)], axis=0))
@computations.tf_computation([accumulator_type, accumulator_type])
def merge(x, y):
return Accumulator(samples=tf.concat([x.samples, y.samples], axis=0))
@computations.tf_computation(accumulator_type)
def report(accu):
return accu
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
val = intrinsics.federated_aggregate(x, build_empty_accumulator(),
accumulate, merge, report)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> <samples=int32[?]>@SERVER)')
def test_federated_reduce_with_tf_add_raw_constant(self):
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.CLIENTS))
def foo(x):
plus = computations.tf_computation(tf.add, [tf.int32, tf.int32])
val = intrinsics.federated_reduce(x, 0, plus)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '({int32}@CLIENTS -> int32@SERVER)')
def test_num_over_temperature_threshold_example(self):
@computations.federated_computation([
computation_types.FederatedType(tf.float32, placements.CLIENTS),
computation_types.FederatedType(tf.float32, placements.SERVER)
])
def foo(temperatures, threshold):
val = intrinsics.federated_sum(
intrinsics.federated_map(
computations.tf_computation(
lambda x, y: tf.cast(tf.greater(x, y), tf.int32),
[tf.float32, tf.float32]),
[temperatures,
intrinsics.federated_broadcast(threshold)]))
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo,
'(<{float32}@CLIENTS,float32@SERVER> -> int32@SERVER)')
@parameterized.named_parameters(('test_n_2', 2), ('test_n_3', 3),
('test_n_5', 5))
def test_n_tuple_federated_zip_tensor_args(self, n):
fed_type = computation_types.FederatedType(tf.int32, placements.CLIENTS)
initial_tuple_type = computation_types.NamedTupleType([fed_type] * n)
final_fed_type = computation_types.FederatedType([tf.int32] * n,
placements.CLIENTS)
function_type = computation_types.FunctionType(initial_tuple_type,
final_fed_type)
@computations.federated_computation(
[computation_types.FederatedType(tf.int32, placements.CLIENTS)] * n)
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, function_type.compact_representation())
@parameterized.named_parameters(
('test_n_2_int', 2,
computation_types.FederatedType(tf.int32, placements.CLIENTS)),
('test_n_3_int', 3,
computation_types.FederatedType(tf.int32, placements.CLIENTS)),
('test_n_5_int', 5,
computation_types.FederatedType(tf.int32, placements.CLIENTS)),
('test_n_2_tuple', 2,
computation_types.FederatedType([tf.int32, tf.int32],
placements.CLIENTS)),
('test_n_3_tuple', 3,
computation_types.FederatedType([tf.int32, tf.int32],
placements.CLIENTS)),
('test_n_5_tuple', 5,
computation_types.FederatedType([tf.int32, tf.int32],
placements.CLIENTS)))
def test_named_n_tuple_federated_zip(self, n, fed_type):
initial_tuple_type = computation_types.NamedTupleType([fed_type] * n)
named_fed_type = computation_types.FederatedType(
[(str(k), fed_type.member) for k in range(n)], placements.CLIENTS)
mixed_fed_type = computation_types.FederatedType(
[(str(k), fed_type.member) if k % 2 == 0 else fed_type.member
for k in range(n)], placements.CLIENTS)
named_function_type = computation_types.FunctionType(
initial_tuple_type, named_fed_type)
mixed_function_type = computation_types.FunctionType(
initial_tuple_type, mixed_fed_type)
@computations.federated_computation([fed_type] * n)
def foo(x):
arg = {str(k): x[k] for k in range(n)}
val = intrinsics.federated_zip(arg)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, named_function_type.compact_representation())
def _make_test_tuple(x, k):
if k % 2 == 0:
return str(k), x[k]
else:
return None, x[k]
@computations.federated_computation([fed_type] * n)
def bar(x):
arg = anonymous_tuple.AnonymousTuple(
_make_test_tuple(x, k) for k in range(n))
val = intrinsics.federated_zip(arg)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(bar, mixed_function_type.compact_representation())
@parameterized.named_parameters([
('test_n_' + str(n) + '_m_' + str(m), n, m)
for n, m in itertools.product([1, 2, 3], [1, 2, 3])
])
def test_n_tuple_federated_zip_mixed_args(self, n, m):
tuple_fed_type = computation_types.FederatedType([tf.int32, tf.int32],
placements.CLIENTS)
single_fed_type = computation_types.FederatedType(tf.int32,
placements.CLIENTS)
initial_tuple_type = computation_types.NamedTupleType([tuple_fed_type] * n +
[single_fed_type] * m)
final_fed_type = computation_types.FederatedType(
[[tf.int32, tf.int32]] * n + [tf.int32] * m, placements.CLIENTS)
function_type = computation_types.FunctionType(initial_tuple_type,
final_fed_type)
@computations.federated_computation([
computation_types.FederatedType(
computation_types.NamedTupleType([tf.int32, tf.int32]),
placements.CLIENTS)
] * n + [computation_types.FederatedType(tf.int32, placements.CLIENTS)] * m)
def baz(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(baz, function_type.compact_representation())
def test_federated_apply_raises_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
@computations.federated_computation(
computation_types.FederatedType(tf.int32, placements.SERVER))
def foo(x):
val = intrinsics.federated_apply(
computations.tf_computation(lambda x: x * x, tf.int32), x)
self.assertIsInstance(val, value_base.Value)
return val
self.assertLen(w, 1)
self.assertIsInstance(w[0].category(), DeprecationWarning)
self.assertIn('tff.federated_apply() is deprecated', str(w[0].message))
self.assert_type(foo, '(int32@SERVER -> int32@SERVER)')
def test_federated_value_with_bool_on_clients(self):
@computations.federated_computation(tf.bool)
def foo(x):
val = intrinsics.federated_value(x, placements.CLIENTS)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(bool -> bool@CLIENTS)')
def test_federated_value_raw_np_scalar(self):
@computations.federated_computation
def test_np_values():
floatv = np.float64(0)
tff_float = intrinsics.federated_value(floatv, placements.SERVER)
self.assertIsInstance(tff_float, value_base.Value)
self.assert_type(tff_float, 'float64@SERVER')
intv = np.int64(0)
tff_int = intrinsics.federated_value(intv, placements.SERVER)
self.assertIsInstance(tff_int, value_base.Value)
self.assert_type(tff_int, 'int64@SERVER')
return (tff_float, tff_int)
floatv, intv = test_np_values()
self.assertEqual(floatv, 0.0)
self.assertEqual(intv, 0)
def test_federated_value_raw_tf_scalar_variable(self):
v = tf.Variable(initial_value=0., name='test_var')
with self.assertRaisesRegex(
TypeError, 'TensorFlow construct (.*) has been '
'encountered in a federated context.'):
_ = intrinsics.federated_value(v, placements.SERVER)
def test_federated_value_with_bool_on_server(self):
@computations.federated_computation(tf.bool)
def foo(x):
val = intrinsics.federated_value(x, placements.SERVER)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo, '(bool -> bool@SERVER)')
def test_sequence_sum(self):
@computations.federated_computation(
computation_types.SequenceType(tf.int32))
def foo1(x):
val = intrinsics.sequence_sum(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo1, '(int32* -> int32)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.SERVER))
def foo2(x):
val = intrinsics.sequence_sum(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo2, '(int32*@SERVER -> int32@SERVER)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.CLIENTS))
def foo3(x):
val = intrinsics.sequence_sum(x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo3, '({int32*}@CLIENTS -> {int32}@CLIENTS)')
def test_sequence_map(self):
@computations.tf_computation(tf.int32)
def over_threshold(x):
return x > 10
@computations.federated_computation(
computation_types.SequenceType(tf.int32))
def foo1(x):
val = intrinsics.sequence_map(over_threshold, x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo1, '(int32* -> bool*)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.SERVER))
def foo2(x):
val = intrinsics.sequence_map(over_threshold, x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo2, '(int32*@SERVER -> bool*@SERVER)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.CLIENTS))
def foo3(x):
val = intrinsics.sequence_map(over_threshold, x)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo3, '({int32*}@CLIENTS -> {bool*}@CLIENTS)')
def test_sequence_reduce(self):
add_numbers = computations.tf_computation(tf.add, [tf.int32, tf.int32])
@computations.federated_computation(
computation_types.SequenceType(tf.int32))
def foo1(x):
val = intrinsics.sequence_reduce(x, 0, add_numbers)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo1, '(int32* -> int32)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.SERVER))
def foo2(x):
val = intrinsics.sequence_reduce(x, 0, add_numbers)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo2, '(int32*@SERVER -> int32@SERVER)')
@computations.federated_computation(
computation_types.FederatedType(
computation_types.SequenceType(tf.int32), placements.CLIENTS))
def foo3(x):
val = intrinsics.sequence_reduce(x, 0, add_numbers)
self.assertIsInstance(val, value_base.Value)
return val
self.assert_type(foo3, '({int32*}@CLIENTS -> {int32}@CLIENTS)')
@executor_test_utils.executors(
('local', executor_stacks.local_executor_factory()),)
def test_federated_zip_with_twenty_elements_local_executor(self):
n = 20
n_clients = 2
@computations.federated_computation(
[computation_types.FederatedType(tf.int32, placements.CLIENTS)] * n)
def foo(x):
val = intrinsics.federated_zip(x)
self.assertIsInstance(val, value_base.Value)
return val
data = [list(range(n_clients)) for _ in range(n)]
foo(data)
if __name__ == '__main__':
default_executor.initialize_default_executor()
common_test.main()
| true | true |
1c2b394e3a4820d61b1052b437d5ca661ba3b5b2 | 4,911 | py | Python | src/CurlCallee.py | 8ldesign/DataFeedsTester | 187af657cde369baef9f1e00222db5c42320307b | [
"MIT"
] | null | null | null | src/CurlCallee.py | 8ldesign/DataFeedsTester | 187af657cde369baef9f1e00222db5c42320307b | [
"MIT"
] | null | null | null | src/CurlCallee.py | 8ldesign/DataFeedsTester | 187af657cde369baef9f1e00222db5c42320307b | [
"MIT"
] | null | null | null | import logging, re, os, fnmatch, requests
from requests import exceptions
from FileConstants import FileConstants
from HTTPConstants import HTTPConstants
def main():
try:
calleeObj = CurlCallee()
calleeObj.execute()
except KeyboardInterrupt:
logging.info("Program exited by user")
class CurlCallee:
fileConstants = FileConstants()
httpConstants = HTTPConstants()
logging.basicConfig(format='%(asctime)s %(message)s')
logging.getLogger().setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
urls = dict()
def findFile(self, pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
def gatherUrls(self, urls):
try:
for key in self.fileConstants.feedDictionary:
urls[key] = []
for file in self.fileConstants.feedDictionary.get(key):
result = self.findFile(file, os.path.pardir)
if result:
file = result[0]
else:
logging.error("Didn't find feed file for " + key)
print("Gathering URLs from " + file + " of type " + key)
with open(file,'r') as fileName:
lines = fileName.read().splitlines()
for line in lines:
lineUrls = re.findall(r'(?:http|https|www)(?:[^\\\]"<>]*)', line)
#lineUrls = re.findall(r'(https?://\S+)', line)
urls[key].append(lineUrls)
except IOError:
logging.error("Check the file names in FileConstants file")
except UnicodeDecodeError:
logging.error("Make sure that the file is in UTF-8 format " + file)
def printUrls(self, urls):
counter = 0
for key in urls:
for value in urls.get(key):
if isinstance(value, list):
for innerValue in value:
counter += 1
logging.info (str(counter) + ". " + innerValue + " \n")
else:
counter += 1
logging.info (str(counter) + ". " + value + " \n")
def testUrls(self, urls):
counter = 0
for key in urls:
for value in urls.get(key):
if isinstance(value, list):
for innerValue in value:
counter += 1
curlResponse = self.sendCurlRequest(innerValue)
logging.info(str(counter) + ". " + innerValue + " : " + str(curlResponse) )
else:
counter += 1
responseCode = self.sendCurlRequest(value)
if(responseCode == 'OK'):
logging.info(str(counter) + ". " + value + " : " + responseCode )
else:
logging.warning(str(counter) + ". " + value + " : " + responseCode)
def sendCurlRequest(self,site_url):
try:
curlResponse = requests.get(site_url, timeout=10)
return self.processedCurlResponse(curlResponse)
except (requests.exceptions.InvalidSchema):
return 'INVALID URL'
except (requests.exceptions.ConnectionError, ConnectionError):
return 'CONNECTION ERROR'
except (requests.exceptions.ReadTimeout):
return 'READTIMEOUT ERROR'
except (AttributeError):
return 'ATTRIBUTE ERROR'
except (requests.exceptions.TooManyRedirects):
return 'TOO MANY REDIRECTS'
def processedCurlResponse(self, curlResponse):
status_code = str(curlResponse.status_code)
curl_history = curlResponse.history
respCodeDict = self.httpConstants.responseCodeDictionary
if not curl_history:
# If the curl response doesn't have a history, return the status code
if (str(status_code) in respCodeDict.keys()):
return respCodeDict.get(str(status_code))
else:
return 'UNKNOWN'
else:
# curl response has a history - return the first of the response codes
for curlHistResponse in curl_history:
if(str(curlHistResponse.status_code) in respCodeDict.keys()):
return respCodeDict.get(str(curlHistResponse.status_code))
else:
                    # if none of the response codes match, fall back to the mapping for the original status_code
return respCodeDict.get(status_code)
def execute(self):
self.gatherUrls(self.urls)
self.testUrls(self.urls)
| 40.925 | 104 | 0.547343 | import logging, re, os, fnmatch, requests
from requests import exceptions
from FileConstants import FileConstants
from HTTPConstants import HTTPConstants
def main():
try:
calleeObj = CurlCallee()
calleeObj.execute()
except KeyboardInterrupt:
logging.info("Program exited by user")
class CurlCallee:
fileConstants = FileConstants()
httpConstants = HTTPConstants()
logging.basicConfig(format='%(asctime)s %(message)s')
logging.getLogger().setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
urls = dict()
def findFile(self, pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
def gatherUrls(self, urls):
try:
for key in self.fileConstants.feedDictionary:
urls[key] = []
for file in self.fileConstants.feedDictionary.get(key):
result = self.findFile(file, os.path.pardir)
if result:
file = result[0]
else:
logging.error("Didn't find feed file for " + key)
print("Gathering URLs from " + file + " of type " + key)
with open(file,'r') as fileName:
lines = fileName.read().splitlines()
for line in lines:
lineUrls = re.findall(r'(?:http|https|www)(?:[^\\\]"<>]*)', line)
#lineUrls = re.findall(r'(https?://\S+)', line)
urls[key].append(lineUrls)
except IOError:
logging.error("Check the file names in FileConstants file")
except UnicodeDecodeError:
logging.error("Make sure that the file is in UTF-8 format " + file)
def printUrls(self, urls):
counter = 0
for key in urls:
for value in urls.get(key):
if isinstance(value, list):
for innerValue in value:
counter += 1
logging.info (str(counter) + ". " + innerValue + " \n")
else:
counter += 1
logging.info (str(counter) + ". " + value + " \n")
def testUrls(self, urls):
counter = 0
for key in urls:
for value in urls.get(key):
if isinstance(value, list):
for innerValue in value:
counter += 1
curlResponse = self.sendCurlRequest(innerValue)
logging.info(str(counter) + ". " + innerValue + " : " + str(curlResponse) )
else:
counter += 1
responseCode = self.sendCurlRequest(value)
if(responseCode == 'OK'):
logging.info(str(counter) + ". " + value + " : " + responseCode )
else:
logging.warning(str(counter) + ". " + value + " : " + responseCode)
def sendCurlRequest(self,site_url):
try:
curlResponse = requests.get(site_url, timeout=10)
return self.processedCurlResponse(curlResponse)
except (requests.exceptions.InvalidSchema):
return 'INVALID URL'
except (requests.exceptions.ConnectionError, ConnectionError):
return 'CONNECTION ERROR'
except (requests.exceptions.ReadTimeout):
return 'READTIMEOUT ERROR'
except (AttributeError):
return 'ATTRIBUTE ERROR'
except (requests.exceptions.TooManyRedirects):
return 'TOO MANY REDIRECTS'
def processedCurlResponse(self, curlResponse):
status_code = str(curlResponse.status_code)
curl_history = curlResponse.history
respCodeDict = self.httpConstants.responseCodeDictionary
if not curl_history:
# If the curl response doesn't have a history, return the status code
if (str(status_code) in respCodeDict.keys()):
return respCodeDict.get(str(status_code))
else:
return 'UNKNOWN'
else:
# curl response has a history - return the first of the response codes
for curlHistResponse in curl_history:
if(str(curlHistResponse.status_code) in respCodeDict.keys()):
return respCodeDict.get(str(curlHistResponse.status_code))
else:
                    # if none of the response codes match, fall back to the mapping for the original status_code
return respCodeDict.get(status_code)
def execute(self):
self.gatherUrls(self.urls)
self.testUrls(self.urls)
| true | true |
1c2b3ae4f9373b4a2647979ff3fbb85f9a020e94 | 601 | py | Python | test/programytest/parser/template/graph_tests/test_think.py | cdoebler1/AIML2 | ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a | [
"MIT"
] | 345 | 2016-11-23T22:37:04.000Z | 2022-03-30T20:44:44.000Z | test/programytest/parser/template/graph_tests/test_think.py | MikeyBeez/program-y | 00d7a0c7d50062f18f0ab6f4a041068e119ef7f0 | [
"MIT"
] | 275 | 2016-12-07T10:30:28.000Z | 2022-02-08T21:28:33.000Z | test/programytest/parser/template/graph_tests/test_think.py | VProgramMist/modified-program-y | f32efcafafd773683b3fe30054d5485fe9002b7d | [
"MIT"
] | 159 | 2016-11-28T18:59:30.000Z | 2022-03-20T18:02:44.000Z | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.think import TemplateThinkNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphThinkTests(TemplateGraphTestClient):
def test_think(self):
template = ET.fromstring("""
<template>
<think>XYZ</think>
</template>
""")
root = self._graph.parse_template_expression(template)
self.assertIsNotNone(root)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateThinkNode)
| 28.619048 | 94 | 0.737105 | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.think import TemplateThinkNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphThinkTests(TemplateGraphTestClient):
def test_think(self):
template = ET.fromstring("""
<template>
<think>XYZ</think>
</template>
""")
root = self._graph.parse_template_expression(template)
self.assertIsNotNone(root)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateThinkNode)
| true | true |
1c2b3b3dbb6a36d103d655777cd6f8d51f19e477 | 4,145 | py | Python | tests/utils/test_vault.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | tests/utils/test_vault.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | tests/utils/test_vault.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | import pytest
import mlrun
from mlrun import code_to_function, get_run_db, mlconf, new_project, new_task
from mlrun.utils.vault import VaultStore
from tests.conftest import examples_path, out_path, verify_state
# Set a proper token value for Vault test
user_token = ""
# Set test secrets and configurations - you may need to modify these.
def _set_vault_mlrun_configuration(api_server_port=None):
if api_server_port:
mlconf.dbpath = f"http://localhost:{api_server_port}"
mlconf.secret_stores.vault.url = "http://localhost:8200"
mlconf.secret_stores.vault.user_token = user_token
# Verify that local activation of Vault functionality is successful. This does not
# test the API-server implementation, which is verified in other tests
@pytest.mark.skipif(user_token == "", reason="no vault configuration")
def test_direct_vault_usage():
_set_vault_mlrun_configuration()
project_name = "the-blair-witch-project"
vault = VaultStore()
vault.delete_vault_secrets(project=project_name)
secrets = vault.get_secrets(None, project=project_name)
assert len(secrets) == 0, "Secrets were not deleted"
expected_secrets = {"secret1": "123456", "secret2": "654321"}
vault.add_vault_secrets(expected_secrets, project=project_name)
secrets = vault.get_secrets(None, project=project_name)
assert (
secrets == expected_secrets
), "Vault contains different set of secrets than expected"
secrets = vault.get_secrets(["secret1"], project=project_name)
assert len(secrets) == 1 and secrets["secret1"] == expected_secrets["secret1"]
# Test the same thing for user
user_name = "pikachu"
vault.delete_vault_secrets(user=user_name)
secrets = vault.get_secrets(None, user=user_name)
assert len(secrets) == 0, "Secrets were not deleted"
vault.add_vault_secrets(expected_secrets, user=user_name)
secrets = vault.get_secrets(None, user=user_name)
assert (
secrets == expected_secrets
), "Vault contains different set of secrets than expected"
# Cleanup
vault.delete_vault_secrets(project=project_name)
vault.delete_vault_secrets(user=user_name)
@pytest.mark.skipif(user_token == "", reason="no vault configuration")
def test_vault_end_to_end():
# This requires an MLRun API server to run and work with Vault. This port should
# be configured to allow access to the server.
api_server_port = 57764
_set_vault_mlrun_configuration(api_server_port)
project_name = "abc"
func_name = "vault-function"
aws_key_value = "1234567890"
github_key_value = "proj1Key!!!"
project = new_project(project_name)
# This call will initialize Vault infrastructure and add the given secrets
# It executes on the API server
project.set_secrets(
{"aws_key": aws_key_value, "github_key": github_key_value},
provider=mlrun.api.schemas.SecretProviderName.vault,
)
# This API executes on the client side
project_secrets = project.get_vault_secret_keys()
assert project_secrets == ["aws_key", "github_key"], "secrets not created"
# Create function and set container configuration
function = code_to_function(
name=func_name,
filename=f"{examples_path}/vault_function.py",
handler="vault_func",
project=project_name,
kind="job",
)
function.spec.image = "saarcoiguazio/mlrun:unstable"
# Create context for the execution
spec = new_task(
project=project_name,
name="vault_test_run",
handler="vault_func",
out_path=out_path,
params={"secrets": ["password", "path", "github_key", "aws_key"]},
)
spec.with_secrets("vault", [])
result = function.run(spec)
verify_state(result)
db = get_run_db().connect()
state, log = db.get_log(result.metadata.uid, project=project_name)
log = str(log)
print(state)
assert (
log.find(f"value: {aws_key_value}") != -1
), "secret value not detected in function output"
assert (
log.find(f"value: {github_key_value}") != -1
), "secret value not detected in function output"
| 34.541667 | 84 | 0.710977 | import pytest
import mlrun
from mlrun import code_to_function, get_run_db, mlconf, new_project, new_task
from mlrun.utils.vault import VaultStore
from tests.conftest import examples_path, out_path, verify_state
user_token = ""
def _set_vault_mlrun_configuration(api_server_port=None):
if api_server_port:
mlconf.dbpath = f"http://localhost:{api_server_port}"
mlconf.secret_stores.vault.url = "http://localhost:8200"
mlconf.secret_stores.vault.user_token = user_token
@pytest.mark.skipif(user_token == "", reason="no vault configuration")
def test_direct_vault_usage():
_set_vault_mlrun_configuration()
project_name = "the-blair-witch-project"
vault = VaultStore()
vault.delete_vault_secrets(project=project_name)
secrets = vault.get_secrets(None, project=project_name)
assert len(secrets) == 0, "Secrets were not deleted"
expected_secrets = {"secret1": "123456", "secret2": "654321"}
vault.add_vault_secrets(expected_secrets, project=project_name)
secrets = vault.get_secrets(None, project=project_name)
assert (
secrets == expected_secrets
), "Vault contains different set of secrets than expected"
secrets = vault.get_secrets(["secret1"], project=project_name)
assert len(secrets) == 1 and secrets["secret1"] == expected_secrets["secret1"]
user_name = "pikachu"
vault.delete_vault_secrets(user=user_name)
secrets = vault.get_secrets(None, user=user_name)
assert len(secrets) == 0, "Secrets were not deleted"
vault.add_vault_secrets(expected_secrets, user=user_name)
secrets = vault.get_secrets(None, user=user_name)
assert (
secrets == expected_secrets
), "Vault contains different set of secrets than expected"
vault.delete_vault_secrets(project=project_name)
vault.delete_vault_secrets(user=user_name)
@pytest.mark.skipif(user_token == "", reason="no vault configuration")
def test_vault_end_to_end():
api_server_port = 57764
_set_vault_mlrun_configuration(api_server_port)
project_name = "abc"
func_name = "vault-function"
aws_key_value = "1234567890"
github_key_value = "proj1Key!!!"
project = new_project(project_name)
project.set_secrets(
{"aws_key": aws_key_value, "github_key": github_key_value},
provider=mlrun.api.schemas.SecretProviderName.vault,
)
project_secrets = project.get_vault_secret_keys()
assert project_secrets == ["aws_key", "github_key"], "secrets not created"
function = code_to_function(
name=func_name,
filename=f"{examples_path}/vault_function.py",
handler="vault_func",
project=project_name,
kind="job",
)
function.spec.image = "saarcoiguazio/mlrun:unstable"
spec = new_task(
project=project_name,
name="vault_test_run",
handler="vault_func",
out_path=out_path,
params={"secrets": ["password", "path", "github_key", "aws_key"]},
)
spec.with_secrets("vault", [])
result = function.run(spec)
verify_state(result)
db = get_run_db().connect()
state, log = db.get_log(result.metadata.uid, project=project_name)
log = str(log)
print(state)
assert (
log.find(f"value: {aws_key_value}") != -1
), "secret value not detected in function output"
assert (
log.find(f"value: {github_key_value}") != -1
), "secret value not detected in function output"
| true | true |
1c2b3b9071125c065a9e9fb7879789b2fcfd2848 | 5,455 | py | Python | keras/utils/test_utils.py | entraned/keras | 9400be98783135a1d42dd238f4e6c3aa048eceea | [
"MIT"
] | 1 | 2019-03-31T00:51:26.000Z | 2019-03-31T00:51:26.000Z | keras/utils/test_utils.py | entraned/keras | 9400be98783135a1d42dd238f4e6c3aa048eceea | [
"MIT"
] | null | null | null | keras/utils/test_utils.py | entraned/keras | 9400be98783135a1d42dd238f4e6c3aa048eceea | [
"MIT"
] | 1 | 2021-03-08T02:28:07.000Z | 2021-03-08T02:28:07.000Z | """Utilities related to Keras unit tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy.testing import assert_allclose
import six
from .generic_utils import has_arg
from ..engine import Model, Input
from ..models import Sequential
from ..models import model_from_json
from .. import backend as K
def get_test_data(num_train=1000, num_test=500, input_shape=(10,),
output_shape=(2,),
classification=True, num_classes=2):
"""Generates test data to train a model on.
classification=True overrides output_shape
    (the returned targets are 1-D) and the output
    consists of integers in [0, num_classes-1].
Otherwise: float output with shape output_shape.
"""
samples = num_train + num_test
if classification:
y = np.random.randint(0, num_classes, size=(samples,))
X = np.zeros((samples,) + input_shape, dtype=np.float32)
for i in range(samples):
X[i] = np.random.normal(loc=y[i], scale=0.7, size=input_shape)
else:
y_loc = np.random.random((samples,))
X = np.zeros((samples,) + input_shape, dtype=np.float32)
y = np.zeros((samples,) + output_shape, dtype=np.float32)
for i in range(samples):
X[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=input_shape)
y[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=output_shape)
return (X[:num_train], y[:num_train]), (X[num_train:], y[num_train:])
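def _demo_get_test_data():
    # Illustrative sketch only (not part of the original utilities): shows the
    # shapes produced by get_test_data with made-up sizes. With classification
    # enabled the targets are 1-D integer labels in [0, num_classes-1].
    (x_train, y_train), (x_test, y_test) = get_test_data(
        num_train=200, num_test=100, input_shape=(8,), num_classes=3)
    assert x_train.shape == (200, 8) and y_train.shape == (200,)
    assert x_test.shape == (100, 8) and y_test.shape == (100,)
    return (x_train, y_train), (x_test, y_test)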
def layer_test(layer_cls, kwargs={}, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None, fixed_batch_size=False):
"""Test routine for a layer with a single input tensor
and single output tensor.
"""
# generate input data
if input_data is None:
assert input_shape
if not input_dtype:
input_dtype = K.floatx()
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = (10 * np.random.random(input_data_shape))
input_data = input_data.astype(input_dtype)
else:
if input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
layer = layer_cls(**kwargs)
    # test get_weights, set_weights at layer level
    weights = layer.get_weights()
    layer.set_weights(weights)
    # test instantiation from weights
# Checking for empty weights array to avoid a problem where some
# legacy layers return bad values from get_weights()
if has_arg(layer_cls.__init__, 'weights') and len(weights):
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
expected_output_shape = layer.compute_output_shape(input_shape)
def _layer_in_model_test(model):
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape,
actual_output_shape):
if expected_dim is not None:
assert expected_dim == actual_dim
if expected_output is not None:
assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = model.__class__.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
_output = recovered_model.predict(input_data)
assert_allclose(_output, actual_output, rtol=1e-3)
# test training mode (e.g. useful when the layer has a
# different behavior at training and testing time).
if has_arg(layer.call, 'training'):
model.compile('rmsprop', 'mse')
model.train_on_batch(input_data, actual_output)
return actual_output
# test in functional API
if fixed_batch_size:
x = Input(batch_shape=input_shape, dtype=input_dtype)
else:
x = Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
assert K.dtype(y) == expected_output_dtype
# check with the functional API
model = Model(x, y)
_layer_in_model_test(model)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
# check with the sequential API
model = Sequential()
model.add(layer)
actual_output = _layer_in_model_test(model)
# for further checks in the caller function
return actual_output
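def _demo_layer_test():
    # Illustrative sketch only (not part of the original utilities): a typical
    # call from a unit test. Dense is assumed to be available in keras.layers;
    # the kwargs and shapes are made up for the example.
    from keras.layers import Dense
    return layer_test(Dense,
                      kwargs={'units': 3, 'activation': 'relu'},
                      input_shape=(2, 4))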
def keras_test(func):
"""Function wrapper to clean up after TensorFlow tests.
# Arguments
func: test function to clean up after.
# Returns
A function wrapping the input function.
"""
@six.wraps(func)
def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
if K.backend() == 'tensorflow' or K.backend() == 'cntk':
K.clear_session()
return output
return wrapper
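# Illustrative usage (not in the original module): keras_test is applied as a
# decorator so that the TF/CNTK session is cleared after each test, e.g.
#
#     @keras_test
#     def test_dense():
#         layer_test(Dense, kwargs={'units': 2}, input_shape=(3, 4))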
| 35.422078 | 79 | 0.659028 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy.testing import assert_allclose
import six
from .generic_utils import has_arg
from ..engine import Model, Input
from ..models import Sequential
from ..models import model_from_json
from .. import backend as K
def get_test_data(num_train=1000, num_test=500, input_shape=(10,),
output_shape=(2,),
classification=True, num_classes=2):
samples = num_train + num_test
if classification:
y = np.random.randint(0, num_classes, size=(samples,))
X = np.zeros((samples,) + input_shape, dtype=np.float32)
for i in range(samples):
X[i] = np.random.normal(loc=y[i], scale=0.7, size=input_shape)
else:
y_loc = np.random.random((samples,))
X = np.zeros((samples,) + input_shape, dtype=np.float32)
y = np.zeros((samples,) + output_shape, dtype=np.float32)
for i in range(samples):
X[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=input_shape)
y[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=output_shape)
return (X[:num_train], y[:num_train]), (X[num_train:], y[num_train:])
def layer_test(layer_cls, kwargs={}, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None, fixed_batch_size=False):
if input_data is None:
assert input_shape
if not input_dtype:
input_dtype = K.floatx()
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = (10 * np.random.random(input_data_shape))
input_data = input_data.astype(input_dtype)
else:
if input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
layer = layer_cls(**kwargs)
weights = layer.get_weights()
layer.set_weights(weights)
if has_arg(layer_cls.__init__, 'weights') and len(weights):
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
expected_output_shape = layer.compute_output_shape(input_shape)
def _layer_in_model_test(model):
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape,
actual_output_shape):
if expected_dim is not None:
assert expected_dim == actual_dim
if expected_output is not None:
assert_allclose(actual_output, expected_output, rtol=1e-3)
model_config = model.get_config()
recovered_model = model.__class__.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
_output = recovered_model.predict(input_data)
assert_allclose(_output, actual_output, rtol=1e-3)
if has_arg(layer.call, 'training'):
model.compile('rmsprop', 'mse')
model.train_on_batch(input_data, actual_output)
return actual_output
if fixed_batch_size:
x = Input(batch_shape=input_shape, dtype=input_dtype)
else:
x = Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
assert K.dtype(y) == expected_output_dtype
model = Model(x, y)
_layer_in_model_test(model)
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
model = Sequential()
model.add(layer)
actual_output = _layer_in_model_test(model)
return actual_output
def keras_test(func):
@six.wraps(func)
def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
if K.backend() == 'tensorflow' or K.backend() == 'cntk':
K.clear_session()
return output
return wrapper
| true | true |
1c2b3c1e5a3c81bd0768611b83ddf8ff8f4054ec | 21,136 | py | Python | statsmodels/base/elastic_net.py | xiaowei1234/statsmodels | a8faaf72b7881620552acace6ca352b8bc628dcd | [
"BSD-3-Clause"
] | null | null | null | statsmodels/base/elastic_net.py | xiaowei1234/statsmodels | a8faaf72b7881620552acace6ca352b8bc628dcd | [
"BSD-3-Clause"
] | null | null | null | statsmodels/base/elastic_net.py | xiaowei1234/statsmodels | a8faaf72b7881620552acace6ca352b8bc628dcd | [
"BSD-3-Clause"
] | 4 | 2020-04-07T00:06:17.000Z | 2021-06-17T15:11:36.000Z | import numpy as np
from statsmodels.base.model import Results
import statsmodels.base.wrapper as wrap
from statsmodels.tools.decorators import cache_readonly
from statsmodels.base.constraint import ConstraintProjector
"""
Elastic net regularization.
Routines for fitting regression models using elastic net
regularization. The elastic net minimizes the objective function
-llf / nobs + alpha((1 - L1_wt) * sum(params**2) / 2 +
L1_wt * sum(abs(params)))
The algorithm implemented here closely follows the implementation in
the R glmnet package, documented here:
http://cran.r-project.org/web/packages/glmnet/index.html
and here:
http://www.jstatsoft.org/v33/i01/paper
This routine should work for any regression model that implements
loglike, score, and hess.
"""
def _gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds):
"""
Negative penalized log-likelihood functions.
Returns the negative penalized log-likelihood, its derivative, and
its Hessian. The penalty only includes the smooth (L2) term.
All three functions have argument signature (x, model), where
``x`` is a point in the parameter space and ``model`` is an
arbitrary statsmodels regression model.
"""
def nploglike(params, model):
nobs = model.nobs
pen_llf = alpha[k] * (1 - L1_wt) * np.sum(params**2) / 2
llf = model.loglike(np.r_[params], **loglike_kwds)
return - llf / nobs + pen_llf
def npscore(params, model):
nobs = model.nobs
pen_grad = alpha[k] * (1 - L1_wt) * params
gr = -model.score(np.r_[params], **score_kwds)[0] / nobs
return gr + pen_grad
def nphess(params, model):
nobs = model.nobs
pen_hess = alpha[k] * (1 - L1_wt)
h = -model.hessian(np.r_[params], **hess_kwds)[0, 0] / nobs + pen_hess
return h
return nploglike, npscore, nphess
def fit_elasticnet(model, method="coord_descent", maxiter=100,
alpha=0., L1_wt=1., start_params=None, cnvrg_tol=1e-7,
zero_tol=1e-8, refit=False, check_step=True,
loglike_kwds=None, score_kwds=None, hess_kwds=None):
"""
Return an elastic net regularized fit to a regression model.
Parameters
----------
model : model object
A statsmodels object implementing ``loglike``, ``score``, and
``hessian``.
method : {'coord_descent'}
Only the coordinate descent algorithm is implemented.
maxiter : int
The maximum number of iteration cycles (an iteration cycle
involves running coordinate descent on all variables).
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
start_params : array_like
Starting values for `params`.
cnvrg_tol : scalar
If `params` changes by less than this amount (in sup-norm)
in one iteration cycle, the algorithm terminates with
convergence.
zero_tol : scalar
Any estimated coefficient smaller than this value is
replaced with zero.
refit : bool
If True, the model is refit using only the variables that have
non-zero coefficients in the regularized fit. The refitted
model is not regularized.
check_step : bool
If True, confirm that the first step is an improvement and search
further if it is not.
loglike_kwds : dict-like or None
Keyword arguments for the log-likelihood function.
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
Results
A results object.
Notes
-----
The ``elastic net`` penalty is a combination of L1 and L2
penalties.
The function that is minimized is:
-loglike/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where |*|_1 and |*|_2 are the L1 and L2 norms.
The computational approach used here is to obtain a quadratic
approximation to the smooth part of the target function:
-loglike/n + alpha*(1-L1_wt)*|params|_2^2/2
then repeatedly optimize the L1 penalized version of this function
along coordinate axes.
"""
k_exog = model.exog.shape[1]
loglike_kwds = {} if loglike_kwds is None else loglike_kwds
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if np.isscalar(alpha):
alpha = alpha * np.ones(k_exog)
# Define starting params
if start_params is None:
params = np.zeros(k_exog)
else:
params = start_params.copy()
btol = 1e-4
params_zero = np.zeros(len(params), dtype=bool)
init_args = model._get_init_kwds()
# we do not need a copy of init_args b/c get_init_kwds provides new dict
init_args['hasconst'] = False
model_offset = init_args.pop('offset', None)
if 'exposure' in init_args and init_args['exposure'] is not None:
if model_offset is None:
model_offset = np.log(init_args.pop('exposure'))
else:
model_offset += np.log(init_args.pop('exposure'))
fgh_list = [
_gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds)
for k in range(k_exog)]
for itr in range(maxiter):
# Sweep through the parameters
params_save = params.copy()
for k in range(k_exog):
# Under the active set method, if a parameter becomes
# zero we do not try to change it again.
# TODO : give the user the option to switch this off
if params_zero[k]:
continue
# Set the offset to account for the variables that are
# being held fixed in the current coordinate
# optimization.
params0 = params.copy()
params0[k] = 0
offset = np.dot(model.exog, params0)
if model_offset is not None:
offset += model_offset
# Create a one-variable model for optimization.
model_1var = model.__class__(
model.endog, model.exog[:, k], offset=offset, **init_args)
# Do the one-dimensional optimization.
func, grad, hess = fgh_list[k]
params[k] = _opt_1d(
func, grad, hess, model_1var, params[k], alpha[k]*L1_wt,
tol=btol, check_step=check_step)
# Update the active set
if itr > 0 and np.abs(params[k]) < zero_tol:
params_zero[k] = True
params[k] = 0.
# Check for convergence
pchange = np.max(np.abs(params - params_save))
if pchange < cnvrg_tol:
break
# Set approximate zero coefficients to be exactly zero
params[np.abs(params) < zero_tol] = 0
if not refit:
results = RegularizedResults(model, params)
return RegularizedResultsWrapper(results)
# Fit the reduced model to get standard errors and other
# post-estimation results.
ii = np.flatnonzero(params)
cov = np.zeros((k_exog, k_exog))
init_args = dict([(k, getattr(model, k, None)) for k in model._init_keys])
if len(ii) > 0:
model1 = model.__class__(
model.endog, model.exog[:, ii], **init_args)
rslt = model1.fit()
params[ii] = rslt.params
cov[np.ix_(ii, ii)] = rslt.normalized_cov_params
else:
# Hack: no variables were selected but we need to run fit in
# order to get the correct results class. So just fit a model
# with one variable.
model1 = model.__class__(model.endog, model.exog[:, 0], **init_args)
rslt = model1.fit(maxiter=0)
# fit may return a results or a results wrapper
if issubclass(rslt.__class__, wrap.ResultsWrapper):
klass = rslt._results.__class__
else:
klass = rslt.__class__
# Not all models have a scale
if hasattr(rslt, 'scale'):
scale = rslt.scale
else:
scale = 1.
# The degrees of freedom should reflect the number of parameters
# in the refit model, not including the zeros that are displayed
# to indicate which variables were dropped. See issue #1723 for
# discussion about setting df parameters in model and results
# classes.
p, q = model.df_model, model.df_resid
model.df_model = len(ii)
model.df_resid = model.nobs - model.df_model
# Assuming a standard signature for creating results classes.
refit = klass(model, params, cov, scale=scale)
refit.regularized = True
refit.method = method
refit.fit_history = {'iteration': itr + 1}
# Restore df in model class, see issue #1723 for discussion.
model.df_model, model.df_resid = p, q
return refit
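def _demo_fit_elasticnet():
    # Illustrative sketch only (not part of the original module). The usual
    # public entry point is a model's fit_regularized method (e.g. GLM), which
    # dispatches to fit_elasticnet for the elastic_net method; the data below
    # are made up.
    import statsmodels.api as sm
    exog = np.random.standard_normal((100, 5))
    endog = exog[:, 0] + 0.1 * np.random.standard_normal(100)
    res = sm.GLM(endog, exog).fit_regularized(
        method='elastic_net', alpha=0.1, L1_wt=0.5)
    return res.params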
def fit_elasticnet_constrained(model, method="coord_descent", maxiter=100,
alpha=0., L1_wt=1., start_params=None, cnvrg_tol=1e-7,
zero_tol=1e-8, refit=False, check_step=True,
loglike_kwds=None, score_kwds=None, hess_kwds=None,
param_limits = None, A_constr=None, b_constr=None,
verbose=False):
"""
Return an elastic net regularized fit to a regression model.
Parameters
----------
model : model object
A statsmodels object implementing ``loglike``, ``score``, and
``hessian``.
method : {'coord_descent'}
Only the coordinate descent algorithm is implemented.
maxiter : int
The maximum number of iteration cycles (an iteration cycle
involves running coordinate descent on all variables).
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
start_params : array_like
Starting values for `params`.
cnvrg_tol : scalar
If `params` changes by less than this amount (in sup-norm)
in one iteration cycle, the algorithm terminates with
convergence.
zero_tol : scalar
Any estimated coefficient smaller than this value is
replaced with zero.
refit : bool
If True, the model is refit using only the variables that have
non-zero coefficients in the regularized fit. The refitted
model is not regularized.
check_step : bool
If True, confirm that the first step is an improvement and search
further if it is not.
loglike_kwds : dict-like or None
Keyword arguments for the log-likelihood function.
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
    param_limits : list of (min, max) tuples or None
        Optional box constraints; after each coordinate update the
        coefficient is clamped to its (min, max) interval.
    A_constr : array_like
        The matrix for the linear constraint `A_constr @ params <= b_constr`.
    b_constr : array_like
        The right-hand-side vector for the linear constraint
        `A_constr @ params <= b_constr`.
    verbose : bool
        If True, print a short convergence summary after the coordinate
        descent loop finishes.
Returns
-------
Results
A results object.
Notes
-----
The ``elastic net`` penalty is a combination of L1 and L2
penalties.
The function that is minimized is:
-loglike/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where |*|_1 and |*|_2 are the L1 and L2 norms.
The computational approach used here is to obtain a quadratic
approximation to the smooth part of the target function:
-loglike/n + alpha*(1-L1_wt)*|params|_2^2/2
then repeatedly optimize the L1 penalized version of this function
along coordinate axes.
"""
k_exog = model.exog.shape[1]
loglike_kwds = {} if loglike_kwds is None else loglike_kwds
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if np.isscalar(alpha):
alpha = alpha * np.ones(k_exog)
# Define starting params
if start_params is None:
params = np.zeros(k_exog)
else:
params = start_params.copy()
btol = 1e-4
params_zero = np.zeros(len(params), dtype=bool)
init_args = model._get_init_kwds()
# we do not need a copy of init_args b/c get_init_kwds provides new dict
init_args['hasconst'] = False
model_offset = init_args.pop('offset', None)
if 'exposure' in init_args and init_args['exposure'] is not None:
if model_offset is None:
model_offset = np.log(init_args.pop('exposure'))
else:
model_offset += np.log(init_args.pop('exposure'))
fgh_list = [
_gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds)
for k in range(k_exog)]
# set up constraint enforcement if constraint is provided
if A_constr is not None:
x_min = [l[0] for l in param_limits]
x_max = [l[1] for l in param_limits]
proj = ConstraintProjector(x_min, x_max, A_constr, b_constr)
for itr in range(maxiter):
# Sweep through the parameters
params_save = params.copy()
for k in range(k_exog):
# Under the active set method, if a parameter becomes
# zero we do not try to change it again.
# TODO : give the user the option to switch this off
if params_zero[k]:
continue
# Set the offset to account for the variables that are
# being held fixed in the current coordinate
# optimization.
params0 = params.copy()
params0[k] = 0
offset = np.dot(model.exog, params0)
if model_offset is not None:
offset += model_offset
# Create a one-variable model for optimization.
model_1var = model.__class__(
model.endog, model.exog[:, k], offset=offset, **init_args)
# Do the one-dimensional optimization.
func, grad, hess = fgh_list[k]
params[k] = _opt_1d(
func, grad, hess, model_1var, params[k], alpha[k]*L1_wt,
tol=btol, check_step=check_step)
# set the parameter to be within the limits
if not param_limits is None:
params[k] = max(param_limits[k][0], min(param_limits[k][1], params[k]))
# Update the active set
if itr > 0 and np.abs(params[k]) < zero_tol:
params_zero[k] = True
params[k] = 0.
if A_constr is not None:
# enforce the constraint
# TODO: can this interfere with the way active set is defined?
params = proj.project(params)
# Check for convergence
pchange = np.max(np.abs(params - params_save))
if pchange < cnvrg_tol:
break
if verbose:
print(f'Elastic Net done after {itr}/{maxiter} iterations. pchange={pchange:0.2e} (cnvrg_tol={cnvrg_tol:0.2e})')
# Set approximate zero coefficients to be exactly zero
params[np.abs(params) < zero_tol] = 0
if not refit:
results = RegularizedResults(model, params)
return RegularizedResultsWrapper(results)
# Fit the reduced model to get standard errors and other
# post-estimation results.
ii = np.flatnonzero(params)
cov = np.zeros((k_exog, k_exog))
init_args = dict([(k, getattr(model, k, None)) for k in model._init_keys])
if len(ii) > 0:
model1 = model.__class__(
model.endog, model.exog[:, ii], **init_args)
rslt = model1.fit()
params[ii] = rslt.params
cov[np.ix_(ii, ii)] = rslt.normalized_cov_params
else:
# Hack: no variables were selected but we need to run fit in
# order to get the correct results class. So just fit a model
# with one variable.
model1 = model.__class__(model.endog, model.exog[:, 0], **init_args)
rslt = model1.fit(maxiter=0)
# fit may return a results or a results wrapper
if issubclass(rslt.__class__, wrap.ResultsWrapper):
klass = rslt._results.__class__
else:
klass = rslt.__class__
# Not all models have a scale
if hasattr(rslt, 'scale'):
scale = rslt.scale
else:
scale = 1.
# The degrees of freedom should reflect the number of parameters
# in the refit model, not including the zeros that are displayed
# to indicate which variables were dropped. See issue #1723 for
# discussion about setting df parameters in model and results
# classes.
p, q = model.df_model, model.df_resid
model.df_model = len(ii)
model.df_resid = model.nobs - model.df_model
# Assuming a standard signature for creating results classes.
refit = klass(model, params, cov, scale=scale)
refit.regularized = True
refit.method = method
refit.fit_history = {'iteration': itr + 1}
# Restore df in model class, see issue #1723 for discussion.
model.df_model, model.df_resid = p, q
return refit
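# Illustrative note (not part of the original module): the linear constraint for
# fit_elasticnet_constrained is A_constr @ params <= b_constr. For example, for a
# three-coefficient model, requiring params[0] <= params[1] and params[1] >= 0
# could be encoded (hypothetical values) as
#     A_constr = np.array([[1.0, -1.0, 0.0],
#                          [0.0, -1.0, 0.0]])
#     b_constr = np.array([0.0, 0.0])
#     param_limits = [(-np.inf, np.inf)] * 3
# since row 0 states params[0] - params[1] <= 0 and row 1 states -params[1] <= 0.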
def _opt_1d(func, grad, hess, model, start, L1_wt, tol,
check_step=True):
"""
One-dimensional helper for elastic net.
Parameters
----------
func : function
A smooth function of a single variable to be optimized
        with L1 penalty.
grad : function
The gradient of `func`.
hess : function
The Hessian of `func`.
model : statsmodels model
The model being fit.
start : real
A starting value for the function argument
L1_wt : non-negative real
The weight for the L1 penalty function.
tol : non-negative real
A convergence threshold.
check_step : bool
If True, check that the first step is an improvement and
use bisection if it is not. If False, return after the
first step regardless.
Notes
-----
``func``, ``grad``, and ``hess`` have argument signature (x,
model), where ``x`` is a point in the parameter space and
``model`` is the model being fit.
If the log-likelihood for the model is exactly quadratic, the
global minimum is returned in one step. Otherwise numerical
bisection is used.
Returns
-------
The argmin of the objective function.
"""
# Overview:
# We want to minimize L(x) + L1_wt*abs(x), where L() is a smooth
# loss function that includes the log-likelihood and L2 penalty.
# This is a 1-dimensional optimization. If L(x) is exactly
# quadratic we can solve for the argmin exactly. Otherwise we
# approximate L(x) with a quadratic function Q(x) and try to use
# the minimizer of Q(x) + L1_wt*abs(x). But if this yields an
# uphill step for the actual target function L(x) + L1_wt*abs(x),
    # then we fall back to an expensive line search. The line search
# is never needed for OLS.
x = start
f = func(x, model)
b = grad(x, model)
c = hess(x, model)
d = b - c*x
# The optimum is achieved by hard thresholding to zero
if L1_wt > np.abs(d):
return 0.
# x + h is the minimizer of the Q(x) + L1_wt*abs(x)
if d >= 0:
h = (L1_wt - b) / c
elif d < 0:
h = -(L1_wt + b) / c
else:
return np.nan
# If the new point is not uphill for the target function, take it
    # and return. This check is a bit expensive and unnecessary for
    # OLS.
if not check_step:
return x + h
f1 = func(x + h, model) + L1_wt*np.abs(x + h)
if f1 <= f + L1_wt*np.abs(x) + 1e-10:
return x + h
# Fallback for models where the loss is not quadratic
from scipy.optimize import brent
x_opt = brent(func, args=(model,), brack=(x-1, x+1), tol=tol)
return x_opt
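def _demo_opt_1d_quadratic():
    # Illustrative sketch only (not part of the original module). For an exactly
    # quadratic loss L(x) = 0.5 * c * (x - m)**2 the update above reduces to
    # soft-thresholding the unpenalized minimizer m by L1_wt / c.
    c, m, L1_wt = 2.0, 1.5, 1.0
    d = -c * m  # equals b - c*x for any starting x when L is exactly quadratic
    if L1_wt > np.abs(d):
        return 0.0  # hard-thresholded to zero
    return np.sign(m) * (np.abs(m) - L1_wt / c)  # 1.0 for these numbers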
class RegularizedResults(Results):
"""
Results for models estimated using regularization
Parameters
----------
model : Model
The model instance used to estimate the parameters.
params : ndarray
The estimated (regularized) parameters.
"""
def __init__(self, model, params):
super(RegularizedResults, self).__init__(model, params)
@cache_readonly
def fittedvalues(self):
"""
The predicted values from the model at the estimated parameters.
"""
return self.model.predict(self.params)
class RegularizedResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'params': 'columns',
'resid': 'rows',
'fittedvalues': 'rows',
}
_wrap_attrs = _attrs
wrap.populate_wrapper(RegularizedResultsWrapper, # noqa:E305
RegularizedResults)
| 34.535948 | 120 | 0.633232 | import numpy as np
from statsmodels.base.model import Results
import statsmodels.base.wrapper as wrap
from statsmodels.tools.decorators import cache_readonly
from statsmodels.base.constraint import ConstraintProjector
def _gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds):
def nploglike(params, model):
nobs = model.nobs
pen_llf = alpha[k] * (1 - L1_wt) * np.sum(params**2) / 2
llf = model.loglike(np.r_[params], **loglike_kwds)
return - llf / nobs + pen_llf
def npscore(params, model):
nobs = model.nobs
pen_grad = alpha[k] * (1 - L1_wt) * params
gr = -model.score(np.r_[params], **score_kwds)[0] / nobs
return gr + pen_grad
def nphess(params, model):
nobs = model.nobs
pen_hess = alpha[k] * (1 - L1_wt)
h = -model.hessian(np.r_[params], **hess_kwds)[0, 0] / nobs + pen_hess
return h
return nploglike, npscore, nphess
def fit_elasticnet(model, method="coord_descent", maxiter=100,
alpha=0., L1_wt=1., start_params=None, cnvrg_tol=1e-7,
zero_tol=1e-8, refit=False, check_step=True,
loglike_kwds=None, score_kwds=None, hess_kwds=None):
k_exog = model.exog.shape[1]
loglike_kwds = {} if loglike_kwds is None else loglike_kwds
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if np.isscalar(alpha):
alpha = alpha * np.ones(k_exog)
if start_params is None:
params = np.zeros(k_exog)
else:
params = start_params.copy()
btol = 1e-4
params_zero = np.zeros(len(params), dtype=bool)
init_args = model._get_init_kwds()
init_args['hasconst'] = False
model_offset = init_args.pop('offset', None)
if 'exposure' in init_args and init_args['exposure'] is not None:
if model_offset is None:
model_offset = np.log(init_args.pop('exposure'))
else:
model_offset += np.log(init_args.pop('exposure'))
fgh_list = [
_gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds)
for k in range(k_exog)]
for itr in range(maxiter):
params_save = params.copy()
for k in range(k_exog):
if params_zero[k]:
continue
params0 = params.copy()
params0[k] = 0
offset = np.dot(model.exog, params0)
if model_offset is not None:
offset += model_offset
model_1var = model.__class__(
model.endog, model.exog[:, k], offset=offset, **init_args)
func, grad, hess = fgh_list[k]
params[k] = _opt_1d(
func, grad, hess, model_1var, params[k], alpha[k]*L1_wt,
tol=btol, check_step=check_step)
if itr > 0 and np.abs(params[k]) < zero_tol:
params_zero[k] = True
params[k] = 0.
pchange = np.max(np.abs(params - params_save))
if pchange < cnvrg_tol:
break
params[np.abs(params) < zero_tol] = 0
if not refit:
results = RegularizedResults(model, params)
return RegularizedResultsWrapper(results)
ii = np.flatnonzero(params)
cov = np.zeros((k_exog, k_exog))
init_args = dict([(k, getattr(model, k, None)) for k in model._init_keys])
if len(ii) > 0:
model1 = model.__class__(
model.endog, model.exog[:, ii], **init_args)
rslt = model1.fit()
params[ii] = rslt.params
cov[np.ix_(ii, ii)] = rslt.normalized_cov_params
else:
model1 = model.__class__(model.endog, model.exog[:, 0], **init_args)
rslt = model1.fit(maxiter=0)
if issubclass(rslt.__class__, wrap.ResultsWrapper):
klass = rslt._results.__class__
else:
klass = rslt.__class__
if hasattr(rslt, 'scale'):
scale = rslt.scale
else:
scale = 1.
p, q = model.df_model, model.df_resid
model.df_model = len(ii)
model.df_resid = model.nobs - model.df_model
refit = klass(model, params, cov, scale=scale)
refit.regularized = True
refit.method = method
refit.fit_history = {'iteration': itr + 1}
    model.df_model, model.df_resid = p, q
return refit
def fit_elasticnet_constrained(model, method="coord_descent", maxiter=100,
alpha=0., L1_wt=1., start_params=None, cnvrg_tol=1e-7,
zero_tol=1e-8, refit=False, check_step=True,
loglike_kwds=None, score_kwds=None, hess_kwds=None,
param_limits = None, A_constr=None, b_constr=None,
verbose=False):
k_exog = model.exog.shape[1]
loglike_kwds = {} if loglike_kwds is None else loglike_kwds
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if np.isscalar(alpha):
alpha = alpha * np.ones(k_exog)
if start_params is None:
params = np.zeros(k_exog)
else:
params = start_params.copy()
btol = 1e-4
params_zero = np.zeros(len(params), dtype=bool)
init_args = model._get_init_kwds()
init_args['hasconst'] = False
model_offset = init_args.pop('offset', None)
if 'exposure' in init_args and init_args['exposure'] is not None:
if model_offset is None:
model_offset = np.log(init_args.pop('exposure'))
else:
model_offset += np.log(init_args.pop('exposure'))
fgh_list = [
_gen_npfuncs(k, L1_wt, alpha, loglike_kwds, score_kwds, hess_kwds)
for k in range(k_exog)]
if A_constr is not None:
x_min = [l[0] for l in param_limits]
x_max = [l[1] for l in param_limits]
proj = ConstraintProjector(x_min, x_max, A_constr, b_constr)
for itr in range(maxiter):
params_save = params.copy()
for k in range(k_exog):
if params_zero[k]:
continue
params0 = params.copy()
params0[k] = 0
offset = np.dot(model.exog, params0)
if model_offset is not None:
offset += model_offset
model_1var = model.__class__(
model.endog, model.exog[:, k], offset=offset, **init_args)
func, grad, hess = fgh_list[k]
params[k] = _opt_1d(
func, grad, hess, model_1var, params[k], alpha[k]*L1_wt,
tol=btol, check_step=check_step)
if not param_limits is None:
params[k] = max(param_limits[k][0], min(param_limits[k][1], params[k]))
if itr > 0 and np.abs(params[k]) < zero_tol:
params_zero[k] = True
params[k] = 0.
if A_constr is not None:
params = proj.project(params)
pchange = np.max(np.abs(params - params_save))
if pchange < cnvrg_tol:
break
if verbose:
print(f'Elastic Net done after {itr}/{maxiter} iterations. pchange={pchange:0.2e} (cnvrg_tol={cnvrg_tol:0.2e})')
params[np.abs(params) < zero_tol] = 0
if not refit:
results = RegularizedResults(model, params)
return RegularizedResultsWrapper(results)
ii = np.flatnonzero(params)
cov = np.zeros((k_exog, k_exog))
init_args = dict([(k, getattr(model, k, None)) for k in model._init_keys])
if len(ii) > 0:
model1 = model.__class__(
model.endog, model.exog[:, ii], **init_args)
rslt = model1.fit()
params[ii] = rslt.params
cov[np.ix_(ii, ii)] = rslt.normalized_cov_params
else:
model1 = model.__class__(model.endog, model.exog[:, 0], **init_args)
rslt = model1.fit(maxiter=0)
if issubclass(rslt.__class__, wrap.ResultsWrapper):
klass = rslt._results.__class__
else:
klass = rslt.__class__
if hasattr(rslt, 'scale'):
scale = rslt.scale
else:
scale = 1.
p, q = model.df_model, model.df_resid
model.df_model = len(ii)
model.df_resid = model.nobs - model.df_model
refit = klass(model, params, cov, scale=scale)
refit.regularized = True
refit.method = method
refit.fit_history = {'iteration': itr + 1}
    model.df_model, model.df_resid = p, q
return refit
def _opt_1d(func, grad, hess, model, start, L1_wt, tol,
check_step=True):
x = start
f = func(x, model)
b = grad(x, model)
c = hess(x, model)
d = b - c*x
if L1_wt > np.abs(d):
return 0.
if d >= 0:
h = (L1_wt - b) / c
elif d < 0:
h = -(L1_wt + b) / c
else:
return np.nan
if not check_step:
return x + h
f1 = func(x + h, model) + L1_wt*np.abs(x + h)
if f1 <= f + L1_wt*np.abs(x) + 1e-10:
return x + h
from scipy.optimize import brent
x_opt = brent(func, args=(model,), brack=(x-1, x+1), tol=tol)
return x_opt
class RegularizedResults(Results):
def __init__(self, model, params):
super(RegularizedResults, self).__init__(model, params)
@cache_readonly
def fittedvalues(self):
return self.model.predict(self.params)
class RegularizedResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'params': 'columns',
'resid': 'rows',
'fittedvalues': 'rows',
}
_wrap_attrs = _attrs
wrap.populate_wrapper(RegularizedResultsWrapper,
RegularizedResults)
| true | true |
1c2b3cdb92e9d64974b419e6e1547380d9683c17 | 2,948 | py | Python | platforms/osx/build_framework.py | thisisgopalmandal/opencv | 4e2ef8c8f57644ccb8e762a37f70a61007c6be1c | [
"BSD-3-Clause"
] | 56 | 2020-03-24T15:17:56.000Z | 2022-03-21T13:44:08.000Z | platforms/osx/build_framework.py | thisisgopalmandal/opencv | 4e2ef8c8f57644ccb8e762a37f70a61007c6be1c | [
"BSD-3-Clause"
] | 6 | 2021-03-08T13:41:24.000Z | 2022-02-19T08:10:24.000Z | platforms/osx/build_framework.py | thisisgopalmandal/opencv | 4e2ef8c8f57644ccb8e762a37f70a61007c6be1c | [
"BSD-3-Clause"
] | 15 | 2020-05-06T13:41:20.000Z | 2022-03-31T19:15:47.000Z | #!/usr/bin/env python
"""
The script builds OpenCV.framework for OSX.
"""
from __future__ import print_function
import os, os.path, sys, argparse, traceback, multiprocessing
# import common code
sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../ios'))
from build_framework import Builder
MACOSX_DEPLOYMENT_TARGET='10.12' # default, can be changed via command line options or environment variable
class OSXBuilder(Builder):
def getToolchain(self, arch, target):
return None
def getBuildCommand(self, archs, target):
buildcmd = [
"xcodebuild",
"MACOSX_DEPLOYMENT_TARGET=" + os.environ['MACOSX_DEPLOYMENT_TARGET'],
"ARCHS=%s" % archs[0],
"-sdk", target.lower(),
"-configuration", "Debug" if self.debug else "Release",
"-parallelizeTargets",
"-jobs", str(multiprocessing.cpu_count())
]
return buildcmd
def getInfoPlist(self, builddirs):
return os.path.join(builddirs[0], "osx", "Info.plist")
if __name__ == "__main__":
folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for OSX.')
parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework')
parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)')
parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)')
parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework')
parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF)')
parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)')
parser.add_argument('--macosx_deployment_target', default=os.environ.get('MACOSX_DEPLOYMENT_TARGET', MACOSX_DEPLOYMENT_TARGET), help='specify MACOSX_DEPLOYMENT_TARGET')
parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)')
parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)')
args = parser.parse_args()
os.environ['MACOSX_DEPLOYMENT_TARGET'] = args.macosx_deployment_target
print('Using MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET'])
b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.disable, args.enablenonfree,
[
(["x86_64"], "MacOSX")
], args.debug, args.debug_info)
b.build(args.out)
| 49.966102 | 172 | 0.700814 |
from __future__ import print_function
import os, os.path, sys, argparse, traceback, multiprocessing
sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../ios'))
from build_framework import Builder
MACOSX_DEPLOYMENT_TARGET='10.12'
class OSXBuilder(Builder):
def getToolchain(self, arch, target):
return None
def getBuildCommand(self, archs, target):
buildcmd = [
"xcodebuild",
"MACOSX_DEPLOYMENT_TARGET=" + os.environ['MACOSX_DEPLOYMENT_TARGET'],
"ARCHS=%s" % archs[0],
"-sdk", target.lower(),
"-configuration", "Debug" if self.debug else "Release",
"-parallelizeTargets",
"-jobs", str(multiprocessing.cpu_count())
]
return buildcmd
def getInfoPlist(self, builddirs):
return os.path.join(builddirs[0], "osx", "Info.plist")
if __name__ == "__main__":
folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for OSX.')
parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework')
parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)')
parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)')
parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework')
parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF)')
parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)')
parser.add_argument('--macosx_deployment_target', default=os.environ.get('MACOSX_DEPLOYMENT_TARGET', MACOSX_DEPLOYMENT_TARGET), help='specify MACOSX_DEPLOYMENT_TARGET')
parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)')
parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)')
args = parser.parse_args()
os.environ['MACOSX_DEPLOYMENT_TARGET'] = args.macosx_deployment_target
print('Using MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET'])
b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.disable, args.enablenonfree,
[
(["x86_64"], "MacOSX")
], args.debug, args.debug_info)
b.build(args.out)
| true | true |
1c2b3cff1088f0e7ddef17027fc2cacfb3cb8c7c | 275 | py | Python | apps/public/schemas.py | aeasringnar/tornado-RESTfulAPI | 911b8d52fdcc8f5a5b96343e74d0ac987f661bd4 | [
"MIT"
] | 5 | 2020-07-31T10:14:09.000Z | 2022-03-03T06:04:21.000Z | apps/public/schemas.py | aeasringnar/tornado-RESTfulAPI | 911b8d52fdcc8f5a5b96343e74d0ac987f661bd4 | [
"MIT"
] | 2 | 2021-06-08T22:12:15.000Z | 2022-01-13T03:09:14.000Z | apps/public/schemas.py | aeasringnar/tornado-RESTfulAPI | 911b8d52fdcc8f5a5b96343e74d0ac987f661bd4 | [
"MIT"
] | 4 | 2020-08-20T15:35:20.000Z | 2022-03-29T11:10:06.000Z | from marshmallow import Schema, fields, ValidationError, validate, validates, pre_load, validates_schema
from base.schema import BaseSchema
class GetMobielCoseSchema(BaseSchema):
mobile = fields.String(label='手机号', required=True, error_messages={"required": "请输入手机号。"}) | 45.833333 | 104 | 0.796364 | from marshmallow import Schema, fields, ValidationError, validate, validates, pre_load, validates_schema
from base.schema import BaseSchema
class GetMobielCoseSchema(BaseSchema):
mobile = fields.String(label='手机号', required=True, error_messages={"required": "请输入手机号。"}) | true | true |
1c2b3ee2cd92e219a0e9956a2ecceaeadd5559a8 | 543 | py | Python | misc/code reference.py | flyingpizza/kaggel-workouts | 744a27736fa7878b24f2fc4dc43e956c49b21fef | [
"MIT"
] | null | null | null | misc/code reference.py | flyingpizza/kaggel-workouts | 744a27736fa7878b24f2fc4dc43e956c49b21fef | [
"MIT"
] | null | null | null | misc/code reference.py | flyingpizza/kaggel-workouts | 744a27736fa7878b24f2fc4dc43e956c49b21fef | [
"MIT"
] | null | null | null | # code to create subplot
import seaborn as sns
import matplotlib.pyplot as plt
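# assumes `train` is a pandas DataFrame and `cat_features` is a list of its categorical column names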
fig = plt.figure(figsize = (18, 20))
for index in range(len(cat_features)):
plt.subplot(8, 5, index + 1)
sns.countplot(data = train.dropna(), x = train.loc[:, cat_features[index]])
plt.xticks(rotation = 90)
plt.tight_layout()
# code to create heatmap
import seaborn as sns
import matplotlib.pyplot as plt
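# assumes `train_data` is a pandas DataFrame of numeric columns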
plt.figure(figsize=(10,8))
sns.heatmap(train_data.corr(), center = 0)
plt.title("Correlations Between Columns")
plt.show()
| 21.72 | 79 | 0.694291 |
import seaborn as sns
import matplotlib.pyplot as plt
fig = plt.figure(figsize = (18, 20))
for index in range(len(cat_features)):
plt.subplot(8, 5, index + 1)
sns.countplot(data = train.dropna(), x = train.loc[:, cat_features[index]])
plt.xticks(rotation = 90)
plt.tight_layout()
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(10,8))
sns.heatmap(train_data.corr(), center = 0)
plt.title("Correlations Between Columns")
plt.show()
| true | true |
1c2b3f19b287c8900a950d123b13e171d11d9b47 | 16,788 | py | Python | manimlib/mobject/svg/svg_mobject.py | pu17/manim_project | dfea9f6b40c6f78f918970ca5e4574b92839bf0d | [
"MIT"
] | 1 | 2021-02-01T00:40:34.000Z | 2021-02-01T00:40:34.000Z | manimlib/mobject/svg/svg_mobject.py | mohamedballa/manim | fe85d4e02f6935c49fb0b88eebbd492dfff2d324 | [
"MIT"
] | 1 | 2021-02-02T03:43:05.000Z | 2021-02-02T03:43:05.000Z | manimlib/mobject/svg/svg_mobject.py | mohamedballa/manim | fe85d4e02f6935c49fb0b88eebbd492dfff2d324 | [
"MIT"
] | null | null | null | import itertools as it
import re
import string
import warnings
import os
import hashlib
from xml.dom import minidom
from manimlib.constants import DEFAULT_STROKE_WIDTH
from manimlib.constants import ORIGIN, UP, DOWN, LEFT, RIGHT
from manimlib.constants import BLACK
from manimlib.constants import WHITE
from manimlib.mobject.geometry import Circle
from manimlib.mobject.geometry import Rectangle
from manimlib.mobject.geometry import RoundedRectangle
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.mobject.types.vectorized_mobject import VMobject
from manimlib.utils.color import *
from manimlib.utils.config_ops import digest_config
from manimlib.utils.directories import get_mobject_data_dir
from manimlib.utils.images import get_full_vector_image_path
def check_and_fix_percent_bug(sym):
# This is an ugly patch addressing something which should be
# addressed at a deeper level.
# The svg path for percent symbols have a known bug, so this
# checks if the symbol is (probably) a percentage sign, and
# splits it so that it's displayed properly.
if len(sym.get_points()) not in [315, 324, 372, 468, 483] or len(sym.get_subpaths()) != 4:
return
sym = sym.family_members_with_points()[0]
new_sym = VMobject()
path_lengths = [len(path) for path in sym.get_subpaths()]
sym_points = sym.get_points()
if len(sym_points) in [315, 324, 372]:
n = sum(path_lengths[:2])
p1 = sym_points[:n]
p2 = sym_points[n:]
elif len(sym_points) in [468, 483]:
p1 = np.vstack([
sym_points[:path_lengths[0]],
sym_points[-path_lengths[3]:]
])
p2 = sym_points[path_lengths[0]:sum(path_lengths[:3])]
sym.set_points(p1)
new_sym.set_points(p2)
sym.add(new_sym)
sym.refresh_triangulation()
def string_to_numbers(num_string):
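    # Split an SVG coordinate string on spaces/commas; swapping "-" for ",-" lets
    # unseparated negative values such as "10-5" split correctly, and the second
    # replace undoes the swap inside exponents like "1e-5".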
num_string = num_string.replace("-", ",-")
num_string = num_string.replace("e,-", "e-")
return [
float(s)
for s in re.split("[ ,]", num_string)
if s != ""
]
class SVGMobject(VMobject):
CONFIG = {
"should_center": True,
"height": 2,
"width": None,
# Must be filled in in a subclass, or when called
"file_name": None,
"unpack_groups": True, # if False, creates a hierarchy of VGroups
# TODO, style components should be read in, not defaulted
"stroke_width": DEFAULT_STROKE_WIDTH,
"fill_opacity": 1.0,
"path_string_config": {}
}
def __init__(self, file_name=None, **kwargs):
digest_config(self, kwargs)
self.file_name = file_name or self.file_name
if file_name is None:
raise Exception("Must specify file for SVGMobject")
self.file_path = get_full_vector_image_path(file_name)
super().__init__(**kwargs)
self.move_into_position()
def move_into_position(self):
if self.should_center:
self.center()
if self.height is not None:
self.set_height(self.height)
if self.width is not None:
self.set_width(self.width)
def init_points(self):
doc = minidom.parse(self.file_path)
self.ref_to_element = {}
for svg in doc.getElementsByTagName("svg"):
mobjects = self.get_mobjects_from(svg)
if self.unpack_groups:
self.add(*mobjects)
else:
self.add(*mobjects[0].submobjects)
doc.unlink()
def get_mobjects_from(self, element):
result = []
if not isinstance(element, minidom.Element):
return result
if element.tagName == 'defs':
self.update_ref_to_element(element)
elif element.tagName == 'style':
pass # TODO, handle style
elif element.tagName in ['g', 'svg', 'symbol']:
result += it.chain(*[
self.get_mobjects_from(child)
for child in element.childNodes
])
elif element.tagName == 'path':
result.append(self.path_string_to_mobject(
element.getAttribute('d')
))
elif element.tagName == 'use':
result += self.use_to_mobjects(element)
elif element.tagName == 'rect':
result.append(self.rect_to_mobject(element))
elif element.tagName == 'circle':
result.append(self.circle_to_mobject(element))
elif element.tagName == 'ellipse':
result.append(self.ellipse_to_mobject(element))
elif element.tagName in ['polygon', 'polyline']:
result.append(self.polygon_to_mobject(element))
else:
pass # TODO
# warnings.warn("Unknown element type: " + element.tagName)
result = [m for m in result if m is not None]
self.handle_transforms(element, VGroup(*result))
if len(result) > 1 and not self.unpack_groups:
result = [VGroup(*result)]
return result
def g_to_mobjects(self, g_element):
mob = VGroup(*self.get_mobjects_from(g_element))
self.handle_transforms(g_element, mob)
return mob.submobjects
def path_string_to_mobject(self, path_string):
return VMobjectFromSVGPathstring(
path_string,
**self.path_string_config,
)
def use_to_mobjects(self, use_element):
# Remove initial "#" character
ref = use_element.getAttribute("xlink:href")[1:]
if ref not in self.ref_to_element:
warnings.warn(f"{ref} not recognized")
return VGroup()
return self.get_mobjects_from(
self.ref_to_element[ref]
)
def attribute_to_float(self, attr):
stripped_attr = "".join([
char for char in attr
if char in string.digits + "." + "-"
])
return float(stripped_attr)
def polygon_to_mobject(self, polygon_element):
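        # Rewrite the polygon's space-separated "points" attribute as an SVG path
        # string so the existing path parser can handle it.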
path_string = polygon_element.getAttribute("points")
for digit in string.digits:
path_string = path_string.replace(f" {digit}", f"L {digit}")
path_string = path_string.replace("L", "M", 1)
return self.path_string_to_mobject(path_string)
def circle_to_mobject(self, circle_element):
x, y, r = [
self.attribute_to_float(
circle_element.getAttribute(key)
)
if circle_element.hasAttribute(key)
else 0.0
for key in ("cx", "cy", "r")
]
return Circle(radius=r).shift(x * RIGHT + y * DOWN)
def ellipse_to_mobject(self, circle_element):
x, y, rx, ry = [
self.attribute_to_float(
circle_element.getAttribute(key)
)
if circle_element.hasAttribute(key)
else 0.0
for key in ("cx", "cy", "rx", "ry")
]
return Circle().scale(rx * RIGHT + ry * UP).shift(x * RIGHT + y * DOWN)
def rect_to_mobject(self, rect_element):
fill_color = rect_element.getAttribute("fill")
stroke_color = rect_element.getAttribute("stroke")
stroke_width = rect_element.getAttribute("stroke-width")
corner_radius = rect_element.getAttribute("rx")
# input preprocessing
if fill_color in ["", "none", "#FFF", "#FFFFFF"] or Color(fill_color) == Color(WHITE):
opacity = 0
fill_color = BLACK # shdn't be necessary but avoids error msgs
if fill_color in ["#000", "#000000"]:
fill_color = WHITE
if stroke_color in ["", "none", "#FFF", "#FFFFFF"] or Color(stroke_color) == Color(WHITE):
stroke_width = 0
stroke_color = BLACK
if stroke_color in ["#000", "#000000"]:
stroke_color = WHITE
if stroke_width in ["", "none", "0"]:
stroke_width = 0
if corner_radius in ["", "0", "none"]:
corner_radius = 0
corner_radius = float(corner_radius)
if corner_radius == 0:
mob = Rectangle(
width=self.attribute_to_float(
rect_element.getAttribute("width")
),
height=self.attribute_to_float(
rect_element.getAttribute("height")
),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity
)
else:
mob = RoundedRectangle(
width=self.attribute_to_float(
rect_element.getAttribute("width")
),
height=self.attribute_to_float(
rect_element.getAttribute("height")
),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity,
corner_radius=corner_radius
)
mob.shift(mob.get_center() - mob.get_corner(UP + LEFT))
return mob
def handle_transforms(self, element, mobject):
# TODO, this could use some cleaning...
x, y = 0, 0
try:
x = self.attribute_to_float(element.getAttribute('x'))
# Flip y
y = -self.attribute_to_float(element.getAttribute('y'))
mobject.shift([x, y, 0])
except Exception:
pass
transform = element.getAttribute('transform')
try: # transform matrix
prefix = "matrix("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
transform = string_to_numbers(transform)
transform = np.array(transform).reshape([3, 2])
x = transform[2][0]
y = -transform[2][1]
matrix = np.identity(self.dim)
matrix[:2, :2] = transform[:2, :]
matrix[1] *= -1
matrix[:, 1] *= -1
for mob in mobject.family_members_with_points():
mob.apply_matrix(matrix.T)
mobject.shift(x * RIGHT + y * UP)
except:
pass
try: # transform scale
prefix = "scale("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
scale_values = string_to_numbers(transform)
if len(scale_values) == 2:
scale_x, scale_y = scale_values
mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN)
elif len(scale_values) == 1:
scale = scale_values[0]
mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)
except:
pass
try: # transform translate
prefix = "translate("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
x, y = string_to_numbers(transform)
mobject.shift(x * RIGHT + y * DOWN)
except:
pass
# TODO, ...
def flatten(self, input_list):
output_list = []
for i in input_list:
if isinstance(i, list):
output_list.extend(self.flatten(i))
else:
output_list.append(i)
return output_list
def get_all_childNodes_have_id(self, element):
all_childNodes_have_id = []
if not isinstance(element, minidom.Element):
return
if element.hasAttribute('id'):
return [element]
for e in element.childNodes:
all_childNodes_have_id.append(self.get_all_childNodes_have_id(e))
return self.flatten([e for e in all_childNodes_have_id if e])
def update_ref_to_element(self, defs):
new_refs = dict([(e.getAttribute('id'), e) for e in self.get_all_childNodes_have_id(defs)])
self.ref_to_element.update(new_refs)
class VMobjectFromSVGPathstring(VMobject):
CONFIG = {
"long_lines": True,
"should_subdivide_sharp_curves": False,
"should_remove_null_curves": False,
}
def __init__(self, path_string, **kwargs):
self.path_string = path_string
super().__init__(**kwargs)
def init_points(self):
# After a given svg_path has been converted into points, the result
# will be saved to a file so that future calls for the same path
# don't need to retrace the same computation.
hasher = hashlib.sha256(self.path_string.encode())
path_hash = hasher.hexdigest()[:16]
points_filepath = os.path.join(get_mobject_data_dir(), f"{path_hash}_points.npy")
tris_filepath = os.path.join(get_mobject_data_dir(), f"{path_hash}_tris.npy")
if os.path.exists(points_filepath) and os.path.exists(tris_filepath):
self.set_points(np.load(points_filepath))
else:
self.relative_point = np.array(ORIGIN)
for command, coord_string in self.get_commands_and_coord_strings():
new_points = self.string_to_points(command, coord_string)
self.handle_command(command, new_points)
if self.should_subdivide_sharp_curves:
# For a healthy triangulation later
self.subdivide_sharp_curves()
if self.should_remove_null_curves:
# Get rid of any null curves
self.set_points(self.get_points_without_null_curves())
# SVG treats y-coordinate differently
self.stretch(-1, 1, about_point=ORIGIN)
# Save to a file for future use
np.save(points_filepath, self.get_points())
check_and_fix_percent_bug(self)
def get_commands_and_coord_strings(self):
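        # Pair each SVG path command letter with the coordinate string that follows it.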
all_commands = list(self.get_command_to_function_map().keys())
all_commands += [c.lower() for c in all_commands]
pattern = "[{}]".format("".join(all_commands))
return zip(
re.findall(pattern, self.path_string),
re.split(pattern, self.path_string)[1:]
)
def handle_command(self, command, new_points):
if command.islower():
# Treat it as a relative command
new_points += self.relative_point
func, n_points = self.command_to_function(command)
func(*new_points[:n_points])
leftover_points = new_points[n_points:]
# Recursively handle the rest of the points
if len(leftover_points) > 0:
if command.upper() == "M":
# Treat following points as relative line coordinates
command = "l"
if command.islower():
leftover_points -= self.relative_point
self.relative_point = self.get_last_point()
self.handle_command(command, leftover_points)
else:
# Command is over, reset for future relative commands
self.relative_point = self.get_last_point()
def string_to_points(self, command, coord_string):
numbers = string_to_numbers(coord_string)
if command.upper() in ["H", "V"]:
i = {"H": 0, "V": 1}[command.upper()]
xy = np.zeros((len(numbers), 2))
xy[:, i] = numbers
if command.isupper():
xy[:, 1 - i] = self.relative_point[1 - i]
elif command.upper() == "A":
raise Exception("Not implemented")
else:
xy = np.array(numbers).reshape((len(numbers) // 2, 2))
result = np.zeros((xy.shape[0], self.dim))
result[:, :2] = xy
return result
def command_to_function(self, command):
return self.get_command_to_function_map()[command.upper()]
def get_command_to_function_map(self):
"""
Associates svg command to VMobject function, and
the number of arguments it takes in
"""
return {
"M": (self.start_new_path, 1),
"L": (self.add_line_to, 1),
"H": (self.add_line_to, 1),
"V": (self.add_line_to, 1),
"C": (self.add_cubic_bezier_curve_to, 3),
"S": (self.add_smooth_cubic_curve_to, 2),
"Q": (self.add_quadratic_bezier_curve_to, 2),
"T": (self.add_smooth_curve_to, 1),
"A": (self.add_quadratic_bezier_curve_to, 2), # TODO
"Z": (self.close_path, 0),
}
def get_original_path_string(self):
return self.path_string
| 36.977974 | 99 | 0.591077 | import itertools as it
import re
import string
import warnings
import os
import hashlib
from xml.dom import minidom
from manimlib.constants import DEFAULT_STROKE_WIDTH
from manimlib.constants import ORIGIN, UP, DOWN, LEFT, RIGHT
from manimlib.constants import BLACK
from manimlib.constants import WHITE
from manimlib.mobject.geometry import Circle
from manimlib.mobject.geometry import Rectangle
from manimlib.mobject.geometry import RoundedRectangle
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.mobject.types.vectorized_mobject import VMobject
from manimlib.utils.color import *
from manimlib.utils.config_ops import digest_config
from manimlib.utils.directories import get_mobject_data_dir
from manimlib.utils.images import get_full_vector_image_path
def check_and_fix_percent_bug(sym):
if len(sym.get_points()) not in [315, 324, 372, 468, 483] or len(sym.get_subpaths()) != 4:
return
sym = sym.family_members_with_points()[0]
new_sym = VMobject()
path_lengths = [len(path) for path in sym.get_subpaths()]
sym_points = sym.get_points()
if len(sym_points) in [315, 324, 372]:
n = sum(path_lengths[:2])
p1 = sym_points[:n]
p2 = sym_points[n:]
elif len(sym_points) in [468, 483]:
p1 = np.vstack([
sym_points[:path_lengths[0]],
sym_points[-path_lengths[3]:]
])
p2 = sym_points[path_lengths[0]:sum(path_lengths[:3])]
sym.set_points(p1)
new_sym.set_points(p2)
sym.add(new_sym)
sym.refresh_triangulation()
def string_to_numbers(num_string):
num_string = num_string.replace("-", ",-")
num_string = num_string.replace("e,-", "e-")
return [
float(s)
for s in re.split("[ ,]", num_string)
if s != ""
]
class SVGMobject(VMobject):
CONFIG = {
"should_center": True,
"height": 2,
"width": None,
# Must be filled in in a subclass, or when called
"file_name": None,
"unpack_groups": True, # if False, creates a hierarchy of VGroups
# TODO, style components should be read in, not defaulted
"stroke_width": DEFAULT_STROKE_WIDTH,
"fill_opacity": 1.0,
"path_string_config": {}
}
def __init__(self, file_name=None, **kwargs):
digest_config(self, kwargs)
self.file_name = file_name or self.file_name
if file_name is None:
raise Exception("Must specify file for SVGMobject")
self.file_path = get_full_vector_image_path(file_name)
super().__init__(**kwargs)
self.move_into_position()
def move_into_position(self):
if self.should_center:
self.center()
if self.height is not None:
self.set_height(self.height)
if self.width is not None:
self.set_width(self.width)
def init_points(self):
doc = minidom.parse(self.file_path)
self.ref_to_element = {}
for svg in doc.getElementsByTagName("svg"):
mobjects = self.get_mobjects_from(svg)
if self.unpack_groups:
self.add(*mobjects)
else:
self.add(*mobjects[0].submobjects)
doc.unlink()
def get_mobjects_from(self, element):
result = []
if not isinstance(element, minidom.Element):
return result
if element.tagName == 'defs':
self.update_ref_to_element(element)
elif element.tagName == 'style':
pass # TODO, handle style
elif element.tagName in ['g', 'svg', 'symbol']:
result += it.chain(*[
self.get_mobjects_from(child)
for child in element.childNodes
])
elif element.tagName == 'path':
result.append(self.path_string_to_mobject(
element.getAttribute('d')
))
elif element.tagName == 'use':
result += self.use_to_mobjects(element)
elif element.tagName == 'rect':
result.append(self.rect_to_mobject(element))
elif element.tagName == 'circle':
result.append(self.circle_to_mobject(element))
elif element.tagName == 'ellipse':
result.append(self.ellipse_to_mobject(element))
elif element.tagName in ['polygon', 'polyline']:
result.append(self.polygon_to_mobject(element))
else:
pass # TODO
# warnings.warn("Unknown element type: " + element.tagName)
result = [m for m in result if m is not None]
self.handle_transforms(element, VGroup(*result))
if len(result) > 1 and not self.unpack_groups:
result = [VGroup(*result)]
return result
def g_to_mobjects(self, g_element):
mob = VGroup(*self.get_mobjects_from(g_element))
self.handle_transforms(g_element, mob)
return mob.submobjects
def path_string_to_mobject(self, path_string):
return VMobjectFromSVGPathstring(
path_string,
**self.path_string_config,
)
def use_to_mobjects(self, use_element):
# Remove initial "#" character
ref = use_element.getAttribute("xlink:href")[1:]
if ref not in self.ref_to_element:
warnings.warn(f"{ref} not recognized")
return VGroup()
return self.get_mobjects_from(
self.ref_to_element[ref]
)
def attribute_to_float(self, attr):
stripped_attr = "".join([
char for char in attr
if char in string.digits + "." + "-"
])
return float(stripped_attr)
def polygon_to_mobject(self, polygon_element):
path_string = polygon_element.getAttribute("points")
for digit in string.digits:
path_string = path_string.replace(f" {digit}", f"L {digit}")
path_string = path_string.replace("L", "M", 1)
return self.path_string_to_mobject(path_string)
def circle_to_mobject(self, circle_element):
x, y, r = [
self.attribute_to_float(
circle_element.getAttribute(key)
)
if circle_element.hasAttribute(key)
else 0.0
for key in ("cx", "cy", "r")
]
return Circle(radius=r).shift(x * RIGHT + y * DOWN)
def ellipse_to_mobject(self, circle_element):
x, y, rx, ry = [
self.attribute_to_float(
circle_element.getAttribute(key)
)
if circle_element.hasAttribute(key)
else 0.0
for key in ("cx", "cy", "rx", "ry")
]
return Circle().scale(rx * RIGHT + ry * UP).shift(x * RIGHT + y * DOWN)
def rect_to_mobject(self, rect_element):
fill_color = rect_element.getAttribute("fill")
stroke_color = rect_element.getAttribute("stroke")
stroke_width = rect_element.getAttribute("stroke-width")
corner_radius = rect_element.getAttribute("rx")
# input preprocessing
if fill_color in ["", "none", "#FFF", "#FFFFFF"] or Color(fill_color) == Color(WHITE):
opacity = 0
fill_color = BLACK # shdn't be necessary but avoids error msgs
if fill_color in ["#000", "#000000"]:
fill_color = WHITE
if stroke_color in ["", "none", "#FFF", "#FFFFFF"] or Color(stroke_color) == Color(WHITE):
stroke_width = 0
stroke_color = BLACK
if stroke_color in ["#000", "#000000"]:
stroke_color = WHITE
if stroke_width in ["", "none", "0"]:
stroke_width = 0
if corner_radius in ["", "0", "none"]:
corner_radius = 0
corner_radius = float(corner_radius)
if corner_radius == 0:
mob = Rectangle(
width=self.attribute_to_float(
rect_element.getAttribute("width")
),
height=self.attribute_to_float(
rect_element.getAttribute("height")
),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity
)
else:
mob = RoundedRectangle(
width=self.attribute_to_float(
rect_element.getAttribute("width")
),
height=self.attribute_to_float(
rect_element.getAttribute("height")
),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity,
corner_radius=corner_radius
)
mob.shift(mob.get_center() - mob.get_corner(UP + LEFT))
return mob
def handle_transforms(self, element, mobject):
x, y = 0, 0
try:
x = self.attribute_to_float(element.getAttribute('x'))
y = -self.attribute_to_float(element.getAttribute('y'))
mobject.shift([x, y, 0])
except Exception:
pass
transform = element.getAttribute('transform')
try:
prefix = "matrix("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
transform = string_to_numbers(transform)
transform = np.array(transform).reshape([3, 2])
x = transform[2][0]
y = -transform[2][1]
matrix = np.identity(self.dim)
matrix[:2, :2] = transform[:2, :]
matrix[1] *= -1
matrix[:, 1] *= -1
for mob in mobject.family_members_with_points():
mob.apply_matrix(matrix.T)
mobject.shift(x * RIGHT + y * UP)
except:
pass
try:
prefix = "scale("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
scale_values = string_to_numbers(transform)
if len(scale_values) == 2:
scale_x, scale_y = scale_values
mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN)
elif len(scale_values) == 1:
scale = scale_values[0]
mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)
except:
pass
try:
prefix = "translate("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
x, y = string_to_numbers(transform)
mobject.shift(x * RIGHT + y * DOWN)
except:
pass
def flatten(self, input_list):
output_list = []
for i in input_list:
if isinstance(i, list):
output_list.extend(self.flatten(i))
else:
output_list.append(i)
return output_list
def get_all_childNodes_have_id(self, element):
all_childNodes_have_id = []
if not isinstance(element, minidom.Element):
return
if element.hasAttribute('id'):
return [element]
for e in element.childNodes:
all_childNodes_have_id.append(self.get_all_childNodes_have_id(e))
return self.flatten([e for e in all_childNodes_have_id if e])
def update_ref_to_element(self, defs):
new_refs = dict([(e.getAttribute('id'), e) for e in self.get_all_childNodes_have_id(defs)])
self.ref_to_element.update(new_refs)
class VMobjectFromSVGPathstring(VMobject):
CONFIG = {
"long_lines": True,
"should_subdivide_sharp_curves": False,
"should_remove_null_curves": False,
}
def __init__(self, path_string, **kwargs):
self.path_string = path_string
super().__init__(**kwargs)
def init_points(self):
hasher = hashlib.sha256(self.path_string.encode())
path_hash = hasher.hexdigest()[:16]
points_filepath = os.path.join(get_mobject_data_dir(), f"{path_hash}_points.npy")
tris_filepath = os.path.join(get_mobject_data_dir(), f"{path_hash}_tris.npy")
if os.path.exists(points_filepath) and os.path.exists(tris_filepath):
self.set_points(np.load(points_filepath))
else:
self.relative_point = np.array(ORIGIN)
for command, coord_string in self.get_commands_and_coord_strings():
new_points = self.string_to_points(command, coord_string)
self.handle_command(command, new_points)
if self.should_subdivide_sharp_curves:
# For a healthy triangulation later
self.subdivide_sharp_curves()
if self.should_remove_null_curves:
# Get rid of any null curves
self.set_points(self.get_points_without_null_curves())
# SVG treats y-coordinate differently
self.stretch(-1, 1, about_point=ORIGIN)
# Save to a file for future use
np.save(points_filepath, self.get_points())
check_and_fix_percent_bug(self)
def get_commands_and_coord_strings(self):
all_commands = list(self.get_command_to_function_map().keys())
all_commands += [c.lower() for c in all_commands]
pattern = "[{}]".format("".join(all_commands))
return zip(
re.findall(pattern, self.path_string),
re.split(pattern, self.path_string)[1:]
)
def handle_command(self, command, new_points):
if command.islower():
# Treat it as a relative command
new_points += self.relative_point
func, n_points = self.command_to_function(command)
func(*new_points[:n_points])
leftover_points = new_points[n_points:]
# Recursively handle the rest of the points
if len(leftover_points) > 0:
if command.upper() == "M":
# Treat following points as relative line coordinates
command = "l"
if command.islower():
leftover_points -= self.relative_point
self.relative_point = self.get_last_point()
self.handle_command(command, leftover_points)
else:
# Command is over, reset for future relative commands
self.relative_point = self.get_last_point()
def string_to_points(self, command, coord_string):
numbers = string_to_numbers(coord_string)
if command.upper() in ["H", "V"]:
i = {"H": 0, "V": 1}[command.upper()]
xy = np.zeros((len(numbers), 2))
xy[:, i] = numbers
if command.isupper():
xy[:, 1 - i] = self.relative_point[1 - i]
elif command.upper() == "A":
raise Exception("Not implemented")
else:
xy = np.array(numbers).reshape((len(numbers) // 2, 2))
result = np.zeros((xy.shape[0], self.dim))
result[:, :2] = xy
return result
def command_to_function(self, command):
return self.get_command_to_function_map()[command.upper()]
def get_command_to_function_map(self):
return {
"M": (self.start_new_path, 1),
"L": (self.add_line_to, 1),
"H": (self.add_line_to, 1),
"V": (self.add_line_to, 1),
"C": (self.add_cubic_bezier_curve_to, 3),
"S": (self.add_smooth_cubic_curve_to, 2),
"Q": (self.add_quadratic_bezier_curve_to, 2),
"T": (self.add_smooth_curve_to, 1),
"A": (self.add_quadratic_bezier_curve_to, 2), # TODO
"Z": (self.close_path, 0),
}
def get_original_path_string(self):
return self.path_string
| true | true |
1c2b3fef6027163a9008d96f75b22e02d4bff261 | 704 | py | Python | 3_complex_deps/setup.py | fracpete/python-console-scripts | d453e492fc19ebc25dee75a2921b27772f9247b3 | [
"MIT"
] | null | null | null | 3_complex_deps/setup.py | fracpete/python-console-scripts | d453e492fc19ebc25dee75a2921b27772f9247b3 | [
"MIT"
] | null | null | null | 3_complex_deps/setup.py | fracpete/python-console-scripts | d453e492fc19ebc25dee75a2921b27772f9247b3 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name="mysuperduperproject",
description="My super duper Project.",
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering',
'Programming Language :: Python :: 3',
],
license='MIT License',
package_dir={
'': 'src'
},
packages=[
'msdp',
],
version="0.0.1",
author='Peter Reutemann',
author_email='fracpete@gmail.com',
install_requires=[
"numpy",
"docker-banner-gen",
],
entry_points={
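        # installs an `msdp-hello` console command that calls msdp.hello:sys_main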
"console_scripts": [
"msdp-hello=msdp.hello:sys_main",
]
}
)
| 21.333333 | 49 | 0.548295 | from setuptools import setup
setup(
name="mysuperduperproject",
description="My super duper Project.",
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering',
'Programming Language :: Python :: 3',
],
license='MIT License',
package_dir={
'': 'src'
},
packages=[
'msdp',
],
version="0.0.1",
author='Peter Reutemann',
author_email='fracpete@gmail.com',
install_requires=[
"numpy",
"docker-banner-gen",
],
entry_points={
"console_scripts": [
"msdp-hello=msdp.hello:sys_main",
]
}
)
| true | true |
1c2b4033a165cb65c328c3308770545073b6325e | 3,458 | py | Python | sphinx/util/images.py | pvcraven/sphinx | b103b3c24ac8d983498f1170d8e104f8cd72c3df | [
"BSD-2-Clause"
] | null | null | null | sphinx/util/images.py | pvcraven/sphinx | b103b3c24ac8d983498f1170d8e104f8cd72c3df | [
"BSD-2-Clause"
] | null | null | null | sphinx/util/images.py | pvcraven/sphinx | b103b3c24ac8d983498f1170d8e104f8cd72c3df | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
sphinx.util.images
~~~~~~~~~~~~~~~~~~
Image utility functions for Sphinx.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
import base64
import imghdr
import imagesize
from os import path
from collections import OrderedDict
from six import PY3, BytesIO, iteritems
from typing import NamedTuple
try:
from PIL import Image # check for the Python Imaging Library
except ImportError:
try:
import Image
except ImportError:
Image = None
if False:
# For type annotation
from typing import Dict, IO, List, Tuple # NOQA
if PY3:
unicode = str # special alias for static typing...
mime_suffixes = OrderedDict([
('.gif', 'image/gif'),
('.jpg', 'image/jpeg'),
('.png', 'image/png'),
('.pdf', 'application/pdf'),
('.svg', 'image/svg+xml'),
('.svgz', 'image/svg+xml'),
]) # type: Dict[unicode, unicode]
DataURI = NamedTuple('DataURI', [('mimetype', unicode),
('charset', unicode),
('data', bytes)])
def get_image_size(filename):
# type: (unicode) -> Tuple[int, int]
try:
size = imagesize.get(filename)
if size[0] == -1:
size = None
if size is None and Image: # fallback to PIL
im = Image.open(filename)
size = im.size
try:
im.fp.close()
except Exception:
pass
return size
except Exception:
return None
def guess_mimetype_for_stream(stream, default=None):
# type: (IO, unicode) -> unicode
imgtype = imghdr.what(stream)
if imgtype:
return 'image/' + imgtype
else:
return default
def guess_mimetype(filename='', content=None, default=None):
# type: (unicode, unicode, unicode) -> unicode
_, ext = path.splitext(filename.lower())
if ext in mime_suffixes:
return mime_suffixes[ext]
elif content:
return guess_mimetype_for_stream(BytesIO(content), default=default)
elif path.exists(filename):
with open(filename, 'rb') as f:
return guess_mimetype_for_stream(f, default=default)
return default
def get_image_extension(mimetype):
# type: (unicode) -> unicode
for ext, _mimetype in iteritems(mime_suffixes):
if mimetype == _mimetype:
return ext
return None
def parse_data_uri(uri):
# type: (unicode) -> DataURI
if not uri.startswith('data:'):
return None
# data:[<MIME-type>][;charset=<encoding>][;base64],<data>
mimetype = u'text/plain'
charset = u'US-ASCII'
properties, data = uri[5:].split(',', 1)
for prop in properties.split(';'):
if prop == 'base64':
pass # skip
elif prop.startswith('charset='):
charset = prop[8:]
elif prop:
mimetype = prop
image_data = base64.b64decode(data)
return DataURI(mimetype, charset, image_data)
def test_svg(h, f):
"""An additional imghdr library helper; test the header is SVG's or not."""
try:
if '<svg' in h.decode('utf-8').lower():
return 'svg+xml'
except UnicodeDecodeError:
pass
# install test_svg() to imghdr
# refs: https://docs.python.org/3.6/library/imghdr.html#imghdr.tests
imghdr.tests.append(test_svg)
| 25.240876 | 79 | 0.600925 |
from __future__ import absolute_import
import base64
import imghdr
import imagesize
from os import path
from collections import OrderedDict
from six import PY3, BytesIO, iteritems
from typing import NamedTuple
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
Image = None
if False:
from typing import Dict, IO, List, Tuple
if PY3:
unicode = str
mime_suffixes = OrderedDict([
('.gif', 'image/gif'),
('.jpg', 'image/jpeg'),
('.png', 'image/png'),
('.pdf', 'application/pdf'),
('.svg', 'image/svg+xml'),
('.svgz', 'image/svg+xml'),
])
DataURI = NamedTuple('DataURI', [('mimetype', unicode),
('charset', unicode),
('data', bytes)])
def get_image_size(filename):
try:
size = imagesize.get(filename)
if size[0] == -1:
size = None
if size is None and Image:
im = Image.open(filename)
size = im.size
try:
im.fp.close()
except Exception:
pass
return size
except Exception:
return None
def guess_mimetype_for_stream(stream, default=None):
imgtype = imghdr.what(stream)
if imgtype:
return 'image/' + imgtype
else:
return default
def guess_mimetype(filename='', content=None, default=None):
_, ext = path.splitext(filename.lower())
if ext in mime_suffixes:
return mime_suffixes[ext]
elif content:
return guess_mimetype_for_stream(BytesIO(content), default=default)
elif path.exists(filename):
with open(filename, 'rb') as f:
return guess_mimetype_for_stream(f, default=default)
return default
def get_image_extension(mimetype):
for ext, _mimetype in iteritems(mime_suffixes):
if mimetype == _mimetype:
return ext
return None
def parse_data_uri(uri):
if not uri.startswith('data:'):
return None
mimetype = u'text/plain'
charset = u'US-ASCII'
properties, data = uri[5:].split(',', 1)
for prop in properties.split(';'):
if prop == 'base64':
pass
elif prop.startswith('charset='):
charset = prop[8:]
elif prop:
mimetype = prop
image_data = base64.b64decode(data)
return DataURI(mimetype, charset, image_data)
def test_svg(h, f):
try:
if '<svg' in h.decode('utf-8').lower():
return 'svg+xml'
except UnicodeDecodeError:
pass
imghdr.tests.append(test_svg)
| true | true |
1c2b403d8f76a046dcd02a038b6389cdf69f814c | 446 | py | Python | sacrerouge/data/dataset_readers/__init__.py | danieldeutsch/decomposed-rouge | 0d723be8e3359f0bdcc9c7940336800895e46dbb | [
"Apache-2.0"
] | 81 | 2020-07-10T15:45:08.000Z | 2022-03-30T12:19:11.000Z | sacrerouge/data/dataset_readers/__init__.py | danieldeutsch/decomposed-rouge | 0d723be8e3359f0bdcc9c7940336800895e46dbb | [
"Apache-2.0"
] | 29 | 2020-08-03T21:50:45.000Z | 2022-02-23T14:34:16.000Z | sacrerouge/data/dataset_readers/__init__.py | danieldeutsch/decomposed-rouge | 0d723be8e3359f0bdcc9c7940336800895e46dbb | [
"Apache-2.0"
] | 7 | 2020-08-14T09:54:08.000Z | 2022-03-30T12:19:25.000Z | from sacrerouge.data.dataset_readers.dataset_reader import DatasetReader
from sacrerouge.data.dataset_readers.document_based import DocumentBasedDatasetReader, SplitDocumentBasedDatasetReader
from sacrerouge.data.dataset_readers.pyramid_based import PyramidBasedDatasetReader
from sacrerouge.data.dataset_readers.reference_based import ReferenceBasedDatasetReader
from sacrerouge.data.dataset_readers.summary_only import SummaryOnlyDatasetReader
| 74.333333 | 118 | 0.91704 | from sacrerouge.data.dataset_readers.dataset_reader import DatasetReader
from sacrerouge.data.dataset_readers.document_based import DocumentBasedDatasetReader, SplitDocumentBasedDatasetReader
from sacrerouge.data.dataset_readers.pyramid_based import PyramidBasedDatasetReader
from sacrerouge.data.dataset_readers.reference_based import ReferenceBasedDatasetReader
from sacrerouge.data.dataset_readers.summary_only import SummaryOnlyDatasetReader
| true | true |
1c2b4074fba3021590b0a2b809fcd8d7de83cb64 | 887 | py | Python | webdriver.py | aLily11/xmu-daily-report | ee99d9669d7c318de20d88f8d6723693f9b48e7b | [
"MIT"
] | null | null | null | webdriver.py | aLily11/xmu-daily-report | ee99d9669d7c318de20d88f8d6723693f9b48e7b | [
"MIT"
] | null | null | null | webdriver.py | aLily11/xmu-daily-report | ee99d9669d7c318de20d88f8d6723693f9b48e7b | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.webdriver import WebDriver
from utils import debug
chrome_options = Options()
# Google's documentation mentions this option is needed to work around a bug
chrome_options.add_argument('--disable-gpu')
# Hide scrollbars to cope with some special pages
chrome_options.add_argument('--hide-scrollbars')
# Skip loading images to speed things up
chrome_options.add_argument('blink-settings=imagesEnabled=false')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument('--headless')
driver = None
def refresh():
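    # recreate the global driver: a visible Edge session when debugging, headless Chrome otherwise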
close()
global driver
if debug:
driver = webdriver.Edge()
else:
driver = webdriver.Chrome(options=chrome_options)
driver.maximize_window()
def get() -> WebDriver:
global driver
return driver
def close():
if driver is not None:
driver.close()
| 22.175 | 65 | 0.742954 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.webdriver import WebDriver
from utils import debug
chrome_options = Options()
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--hide-scrollbars')
chrome_options.add_argument('blink-settings=imagesEnabled=false')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument('--headless')
driver = None
def refresh():
close()
global driver
if debug:
driver = webdriver.Edge()
else:
driver = webdriver.Chrome(options=chrome_options)
driver.maximize_window()
def get() -> WebDriver:
global driver
return driver
def close():
if driver is not None:
driver.close()
| true | true |
1c2b4110f88f48555cdc775f285c52646d4c6b49 | 1,081 | py | Python | bvs/background_verification/doctype/verify_address_check4/verify_address_check4.py | vhrspvl/vhrs-bvs | 56667039d9cc09ad0b092e5e6c5dd6598ff41e7b | [
"MIT"
] | 1 | 2021-08-19T11:16:47.000Z | 2021-08-19T11:16:47.000Z | bvs/background_verification/doctype/verify_address_check4/verify_address_check4.py | vhrspvl/vhrs-bvs | 56667039d9cc09ad0b092e5e6c5dd6598ff41e7b | [
"MIT"
] | null | null | null | bvs/background_verification/doctype/verify_address_check4/verify_address_check4.py | vhrspvl/vhrs-bvs | 56667039d9cc09ad0b092e5e6c5dd6598ff41e7b | [
"MIT"
] | 4 | 2018-03-21T05:57:54.000Z | 2020-11-26T00:37:29.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from datetime import date
class VerifyAddressCheck4(Document):
pass
@frappe.whitelist()
def get_check(applicant_id):
address_check4_id = frappe.get_list("Address Check4", filters={"applicant_id":applicant_id}, fields=("name"))
# frappe.errprint(employment_check1_id)
return address_check4_id
@frappe.whitelist()
def get_tat():
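	# decrement each pending check's remaining turn-around time (TAT) by the days elapsed since its in_date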
aadhar = frappe.db.sql(""" select name from `tabVerify Address Check4` where status = 'Pending'""", as_dict = 1)
for a in aadhar:
aadhar_id = frappe.get_doc("Verify Address Check4",a["name"])
tat = aadhar_id.tat
in_date = aadhar_id.in_date
if in_date:
today = date.today()
day = (today - in_date).days
tat = tat - day
aadhar_id.update({
"tat": tat
})
aadhar_id.save(ignore_permissions=True)
frappe.db.commit() | 30.885714 | 116 | 0.653099 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from datetime import date
class VerifyAddressCheck4(Document):
pass
@frappe.whitelist()
def get_check(applicant_id):
address_check4_id = frappe.get_list("Address Check4", filters={"applicant_id":applicant_id}, fields=("name"))
return address_check4_id
@frappe.whitelist()
def get_tat():
aadhar = frappe.db.sql(""" select name from `tabVerify Address Check4` where status = 'Pending'""", as_dict = 1)
for a in aadhar:
aadhar_id = frappe.get_doc("Verify Address Check4",a["name"])
tat = aadhar_id.tat
in_date = aadhar_id.in_date
if in_date:
today = date.today()
day = (today - in_date).days
tat = tat - day
aadhar_id.update({
"tat": tat
})
aadhar_id.save(ignore_permissions=True)
frappe.db.commit() | true | true |
1c2b42828634a45d43436a3c4ea189c43a454a0f | 1,510 | py | Python | backend/KeywordMatch.py | jonathanjameswatson/web-app | af4a0f54a06fcd4dfabd19c05b83369533116c7b | [
"MIT"
] | null | null | null | backend/KeywordMatch.py | jonathanjameswatson/web-app | af4a0f54a06fcd4dfabd19c05b83369533116c7b | [
"MIT"
] | 1 | 2022-01-22T15:49:41.000Z | 2022-01-22T15:49:41.000Z | backend/KeywordMatch.py | jonathanjameswatson/web-app | af4a0f54a06fcd4dfabd19c05b83369533116c7b | [
"MIT"
] | 3 | 2022-01-22T14:23:15.000Z | 2022-01-22T18:01:39.000Z | import yake
from nltk.stem import PorterStemmer
class KeywordMatch:
def __init__(self):
language = "en"
max_ngram_size = 3
deduplication_threshold = 0.9
numOfKeywords = 5
self.custom_kw_extractor = yake.KeywordExtractor(
lan=language,
n=max_ngram_size,
dedupLim=deduplication_threshold,
top=numOfKeywords,
features=None,
)
def stem_phrases(self, words):
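        # Stem every word of each phrase so near-duplicate keywords compare equal.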
stemmed = set()
stemmer = PorterStemmer()
for word in words:
stemmed.add(" ".join([stemmer.stem(x) for x in word.split(" ")]))
return stemmed
def find_keyword_match(self, text1, text2):
keywords1 = [
x[0]
for x in sorted(
self.custom_kw_extractor.extract_keywords(text1),
key=lambda x: x[1],
reverse=True,
)
]
keywords2 = [
x[0]
for x in sorted(
self.custom_kw_extractor.extract_keywords(text2),
key=lambda x: x[1],
reverse=True,
)
]
keyword_set_1 = self.stem_phrases(keywords1)
keyword_set_2 = self.stem_phrases(keywords2)
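        # With only a few keywords overall, a single shared keyword counts as a match; otherwise require two.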
if len(keyword_set_1) + len(keyword_set_2) <= 6:
threshold = 1
else:
threshold = 2
score = len(set.intersection(keyword_set_1, keyword_set_2))
return score if score >= threshold else None
| 27.454545 | 77 | 0.543709 | import yake
from nltk.stem import PorterStemmer
class KeywordMatch:
def __init__(self):
language = "en"
max_ngram_size = 3
deduplication_threshold = 0.9
numOfKeywords = 5
self.custom_kw_extractor = yake.KeywordExtractor(
lan=language,
n=max_ngram_size,
dedupLim=deduplication_threshold,
top=numOfKeywords,
features=None,
)
def stem_phrases(self, words):
stemmed = set()
stemmer = PorterStemmer()
for word in words:
stemmed.add(" ".join([stemmer.stem(x) for x in word.split(" ")]))
return stemmed
def find_keyword_match(self, text1, text2):
keywords1 = [
x[0]
for x in sorted(
self.custom_kw_extractor.extract_keywords(text1),
key=lambda x: x[1],
reverse=True,
)
]
keywords2 = [
x[0]
for x in sorted(
self.custom_kw_extractor.extract_keywords(text2),
key=lambda x: x[1],
reverse=True,
)
]
keyword_set_1 = self.stem_phrases(keywords1)
keyword_set_2 = self.stem_phrases(keywords2)
if len(keyword_set_1) + len(keyword_set_2) <= 6:
threshold = 1
else:
threshold = 2
score = len(set.intersection(keyword_set_1, keyword_set_2))
return score if score >= threshold else None
| true | true |
1c2b428b594614f8cb16beb47d54c5472904cb6b | 1,819 | py | Python | vsts/vsts/test/v4_1/models/test_result_model_base.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/test/v4_1/models/test_result_model_base.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/test/v4_1/models/test_result_model_base.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TestResultModelBase(Model):
"""TestResultModelBase.
:param comment:
:type comment: str
:param completed_date:
:type completed_date: datetime
:param duration_in_ms:
:type duration_in_ms: float
:param error_message:
:type error_message: str
:param outcome:
:type outcome: str
:param started_date:
:type started_date: datetime
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'outcome': {'key': 'outcome', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'}
}
def __init__(self, comment=None, completed_date=None, duration_in_ms=None, error_message=None, outcome=None, started_date=None):
super(TestResultModelBase, self).__init__()
self.comment = comment
self.completed_date = completed_date
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.outcome = outcome
self.started_date = started_date
| 39.543478 | 132 | 0.563496 |
from msrest.serialization import Model
class TestResultModelBase(Model):
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'float'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'outcome': {'key': 'outcome', 'type': 'str'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'}
}
def __init__(self, comment=None, completed_date=None, duration_in_ms=None, error_message=None, outcome=None, started_date=None):
super(TestResultModelBase, self).__init__()
self.comment = comment
self.completed_date = completed_date
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.outcome = outcome
self.started_date = started_date
| true | true |
1c2b43d5f5cacb1854bbd69115dbf2fdc09465c1 | 1,109 | py | Python | utils/samplers.py | nvvaulin/medical_imaging | ff00fc43ac0edcfb2151478f89e6c82be40af433 | [
"Apache-2.0"
] | null | null | null | utils/samplers.py | nvvaulin/medical_imaging | ff00fc43ac0edcfb2151478f89e6c82be40af433 | [
"Apache-2.0"
] | null | null | null | utils/samplers.py | nvvaulin/medical_imaging | ff00fc43ac0edcfb2151478f89e6c82be40af433 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
class WeightedClassRandomSampler(torch.utils.data.WeightedRandomSampler):
def __init__(self, labels, class_weights=None, label_names=None, names_weights=None):
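        # labels: one-hot array of shape [n_samples, n_classes]; classes without an
        # explicit weight are collapsed into a catch-all column, then each sample is
        # drawn with probability proportional to class_weight / class_frequency.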
if class_weights is None:
class_weights = [names_weights.get(i, None) for i in label_names]
mask = np.array([not (i is None) for i in class_weights])
if mask.sum() < len(mask):
labels = labels[:, mask]
labels = np.concatenate((labels, (labels.max(1) == 0)[:, None]), 1)
assert (labels.sum(1).max() != 1).sum() == 0, 'for weighted classes labels should be one hot encoded'
class_ratios = labels.mean(0)
class_weights = np.array(class_weights, dtype=np.float32)
if mask.sum() < len(mask):
class_weights = class_weights[mask]
class_weights = np.concatenate((class_weights, np.array([1. - class_weights.sum()])))
else:
            class_weights /= class_weights.sum()
weights = ((class_weights / class_ratios)[None, :] * labels).max(1)
super().__init__(weights, len(labels)) | 46.208333 | 109 | 0.633904 | import numpy as np
import torch
class WeightedClassRandomSampler(torch.utils.data.WeightedRandomSampler):
def __init__(self, labels, class_weights=None, label_names=None, names_weights=None):
if class_weights is None:
class_weights = [names_weights.get(i, None) for i in label_names]
mask = np.array([not (i is None) for i in class_weights])
if mask.sum() < len(mask):
labels = labels[:, mask]
labels = np.concatenate((labels, (labels.max(1) == 0)[:, None]), 1)
assert (labels.sum(1).max() != 1).sum() == 0, 'for weighted classes labels should be one hot encoded'
class_ratios = labels.mean(0)
class_weights = np.array(class_weights, dtype=np.float32)
if mask.sum() < len(mask):
class_weights = class_weights[mask]
class_weights = np.concatenate((class_weights, np.array([1. - class_weights.sum()])))
else:
            class_weights /= class_weights.sum()
weights = ((class_weights / class_ratios)[None, :] * labels).max(1)
super().__init__(weights, len(labels)) | true | true |
1c2b445c7e3b23d11298841c4f31e3e72c0b3203 | 90,293 | py | Python | tests/unit/gapic/dialogflow_v2beta1/test_versions.py | LaudateCorpus1/python-dialogflow | 0d6bebd2c28d46bfd06d42da30778d3b55a1878e | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/dialogflow_v2beta1/test_versions.py | LaudateCorpus1/python-dialogflow | 0d6bebd2c28d46bfd06d42da30778d3b55a1878e | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/dialogflow_v2beta1/test_versions.py | LaudateCorpus1/python-dialogflow | 0d6bebd2c28d46bfd06d42da30778d3b55a1878e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2beta1.services.versions import VersionsAsyncClient
from google.cloud.dialogflow_v2beta1.services.versions import VersionsClient
from google.cloud.dialogflow_v2beta1.services.versions import pagers
from google.cloud.dialogflow_v2beta1.services.versions import transports
from google.cloud.dialogflow_v2beta1.types import version
from google.cloud.dialogflow_v2beta1.types import version as gcd_version
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert VersionsClient._get_default_mtls_endpoint(None) is None
assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.VersionsGrpcTransport, "grpc"),
(transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_versions_client_get_transport_class():
transport = VersionsClient.get_transport_class()
available_transports = [
transports.VersionsGrpcTransport,
]
assert transport in available_transports
transport = VersionsClient.get_transport_class("grpc")
assert transport == transports.VersionsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_client_options(client_class, transport_class, transport_name):
    # Check that if a transport instance is provided, we don't create a new one.
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
    # Check that if the transport is specified by name (str), we do create one.
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
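# Quick reference for the environment variables exercised above and below
# (behavior as asserted in these tests):
#   GOOGLE_API_USE_MTLS_ENDPOINT:
#       "never"  -> DEFAULT_ENDPOINT
#       "always" -> DEFAULT_MTLS_ENDPOINT
#       "auto"   -> mTLS endpoint only when a client certificate is available
#       anything else -> MutualTLSChannelError
#   GOOGLE_API_USE_CLIENT_CERTIFICATE:
#       "true"/"false" -> whether a provided client certificate is used
#       anything else  -> ValueError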
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_versions_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
    # This tests the endpoint auto-switch behavior: the endpoint is switched to
    # the default mTLS endpoint when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"
    # and a client certificate is available.
    # Check the case where client_cert_source is provided. Whether the client
    # cert is actually used depends on the GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient])
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
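# Illustrative (not executed) sketch of using the classmethod above to pick an
# endpoint up front, assuming default environment configuration:
#
#   endpoint, cert_source = VersionsClient.get_mtls_endpoint_and_cert_source()
#   client = VersionsClient(
#       client_options=client_options.ClientOptions(api_endpoint=endpoint)
#   )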
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_versions_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.versions.transports.VersionsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("request_type", [version.ListVersionsRequest, dict,])
def test_list_versions(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.ListVersionsResponse(
next_page_token="next_page_token_value",
)
response = client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListVersionsPager)
assert response.next_page_token == "next_page_token_value"
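# Illustrative (not executed) usage sketch for list_versions against the real
# service, assuming ADC credentials; the parent path is hypothetical:
#
#   client = VersionsClient()
#   for v in client.list_versions(parent="projects/my-project/agent"):
#       print(v.name, v.version_number)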
def test_list_versions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
client.list_versions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
@pytest.mark.asyncio
async def test_list_versions_async(
transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListVersionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_versions_async_from_dict():
await test_list_versions_async(request_type=dict)
def test_list_versions_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.ListVersionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = version.ListVersionsResponse()
client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_versions_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.ListVersionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse()
)
await client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_versions_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.ListVersionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_versions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_versions_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_versions(
version.ListVersionsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_versions_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_versions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_versions_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_versions(
version.ListVersionsRequest(), parent="parent_value",
)
def test_list_versions_pager(transport_name: str = "grpc"):
client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_versions(request={})
        assert pager._metadata == expected_metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, version.Version) for i in results)
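# Note on the pager above: iteration transparently issues follow-up RPCs,
# consuming pages until a response with an empty next_page_token is returned,
# which is why the trailing RuntimeError sentinel in side_effect is never hit.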
def test_list_versions_pages(transport_name: str = "grpc"):
client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
pages = list(client.list_versions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_versions_async_pager():
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
async_pager = await client.list_versions(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, version.Version) for i in responses)
@pytest.mark.asyncio
async def test_list_versions_async_pages():
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_versions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [version.GetVersionRequest, dict,])
def test_get_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=version.Version.VersionStatus.IN_PROGRESS,
)
response = client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == version.Version.VersionStatus.IN_PROGRESS
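# Illustrative (not executed) usage sketch for get_version; the resource name
# below is hypothetical:
#
#   client = VersionsClient()
#   v = client.get_version(name="projects/my-project/agent/versions/1")
#   print(v.version_number, v.status)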
def test_get_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
client.get_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
@pytest.mark.asyncio
async def test_get_version_async(
transport: str = "grpc_asyncio", request_type=version.GetVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_get_version_async_from_dict():
await test_get_version_async(request_type=dict)
def test_get_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.GetVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = version.Version()
client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.GetVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
await client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_version(
version.GetVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_version(
version.GetVersionRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [gcd_version.CreateVersionRequest, dict,])
def test_create_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
response = client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
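# Illustrative (not executed) usage sketch for create_version; the parent path
# and description are hypothetical:
#
#   client = VersionsClient()
#   created = client.create_version(
#       parent="projects/my-project/agent",
#       version=gcd_version.Version(description="nightly build"),
#   )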
def test_create_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
client.create_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
@pytest.mark.asyncio
async def test_create_version_async(
transport: str = "grpc_asyncio", request_type=gcd_version.CreateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_create_version_async_from_dict():
await test_create_version_async(request_type=dict)
def test_create_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.CreateVersionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.CreateVersionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
await client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_version(
parent="parent_value", version=gcd_version.Version(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
def test_create_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_version(
gcd_version.CreateVersionRequest(),
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_version(
parent="parent_value", version=gcd_version.Version(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_version(
gcd_version.CreateVersionRequest(),
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
@pytest.mark.parametrize("request_type", [gcd_version.UpdateVersionRequest, dict,])
def test_update_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
response = client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
def test_update_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
client.update_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
@pytest.mark.asyncio
async def test_update_version_async(
transport: str = "grpc_asyncio", request_type=gcd_version.UpdateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_update_version_async_from_dict():
await test_update_version_async(request_type=dict)
def test_update_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.UpdateVersionRequest()
request.version.name = "version.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_version.UpdateVersionRequest()
request.version.name = "version.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
await client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
"metadata"
]
def test_update_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_version(
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
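# The FieldMask above limits which Version fields the update touches.
# Illustrative (not executed) sketch of a description-only update
# ("existing_name" is a placeholder for the version's resource name):
#
#   client.update_version(
#       version=gcd_version.Version(name=existing_name, description="v2"),
#       update_mask=field_mask_pb2.FieldMask(paths=["description"]),
#   )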
def test_update_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_version(
gcd_version.UpdateVersionRequest(),
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_version(
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_version(
gcd_version.UpdateVersionRequest(),
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [version.DeleteVersionRequest, dict,])
def test_delete_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
client.delete_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
@pytest.mark.asyncio
async def test_delete_version_async(
transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_version_async_from_dict():
await test_delete_version_async(request_type=dict)
def test_delete_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.DeleteVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = None
client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.DeleteVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_version(
version.DeleteVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_version(
version.DeleteVersionRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = VersionsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.VersionsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.VersionsGrpcTransport,)
def test_versions_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_versions_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.versions.transports.VersionsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_versions",
"get_version",
"create_version",
"update_version",
"delete_version",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_versions_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2beta1.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_versions_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2beta1.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport()
adc.assert_called_once()
def test_versions_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
VersionsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_versions_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VersionsGrpcTransport, grpc_helpers),
(transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_versions_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_versions_host_no_port():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_versions_host_with_port():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_versions_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VersionsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_versions_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VersionsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
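# Resource path helpers: construction and parsing should round-trip.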
def test_version_path():
project = "squid"
version = "clam"
expected = "projects/{project}/agent/versions/{version}".format(
project=project, version=version,
)
actual = VersionsClient.version_path(project, version)
assert expected == actual
def test_parse_version_path():
expected = {
"project": "whelk",
"version": "octopus",
}
path = VersionsClient.version_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_version_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = VersionsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = VersionsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = VersionsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = VersionsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = VersionsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = VersionsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = VersionsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = VersionsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = VersionsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = VersionsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = VersionsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(VersionsClient, transports.VersionsGrpcTransport),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2beta1.services.versions import VersionsAsyncClient
from google.cloud.dialogflow_v2beta1.services.versions import VersionsClient
from google.cloud.dialogflow_v2beta1.services.versions import pagers
from google.cloud.dialogflow_v2beta1.services.versions import transports
from google.cloud.dialogflow_v2beta1.types import version
from google.cloud.dialogflow_v2beta1.types import version as gcd_version
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
import google.auth
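# Helper used by the mTLS tests below: returns a canned (certificate, key) pair.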
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
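# Used when patching DEFAULT_ENDPOINT: substitutes a non-localhost host so the
# endpoint-selection logic can be exercised even against a local emulator.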
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
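# _get_default_mtls_endpoint should map googleapis.com hosts to their mTLS
# equivalents and leave non-Google hosts untouched.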
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert VersionsClient._get_default_mtls_endpoint(None) is None
assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
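# Clients built via from_service_account_info should use the parsed credentials
# and the default dialogflow host.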
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
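# always_use_jwt_access should only be applied to service account credentials
# when it is explicitly enabled.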
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.VersionsGrpcTransport, "grpc"),
(transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
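# from_service_account_file and its from_service_account_json alias should
# behave the same way as from_service_account_info.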
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
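# get_transport_class should default to the gRPC transport and also resolve it by name.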
def test_versions_client_get_transport_class():
transport = VersionsClient.get_transport_class()
available_transports = [
transports.VersionsGrpcTransport,
]
assert transport in available_transports
transport = VersionsClient.get_transport_class("grpc")
assert transport == transports.VersionsGrpcTransport
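# Client construction should honour client_options: explicit endpoints, the
# GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE environment
# variables, and quota_project_id are all forwarded to the transport.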
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_client_options(client_class, transport_class, transport_name):
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_versions_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient])
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
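# Scopes passed through client_options should be handed to the transport unchanged.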
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_client_options_scopes(
client_class, transport_class, transport_name
):
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
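# A credentials file named in client_options should be forwarded to the
# transport in place of explicit credentials.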
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
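# client_options may also be supplied as a plain dict.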
def test_versions_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.versions.transports.VersionsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
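# ListVersions: the request should reach the gRPC stub unchanged and the
# response should come back wrapped in a pager.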
@pytest.mark.parametrize("request_type", [version.ListVersionsRequest, dict,])
def test_list_versions(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = version.ListVersionsResponse(
next_page_token="next_page_token_value",
)
response = client.list_versions(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
assert isinstance(response, pagers.ListVersionsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_versions_empty_call():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
client.list_versions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
@pytest.mark.asyncio
async def test_list_versions_async(
transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_versions(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
assert isinstance(response, pagers.ListVersionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_versions_async_from_dict():
await test_list_versions_async(request_type=dict)
def test_list_versions_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
request = version.ListVersionsRequest()
request.parent = "parent/value"
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = version.ListVersionsResponse()
client.list_versions(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_versions_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
request = version.ListVersionsRequest()
request.parent = "parent/value"
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse()
)
await client.list_versions(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_versions_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = version.ListVersionsResponse()
client.list_versions(parent="parent_value",)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_versions_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
client.list_versions(
version.ListVersionsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_versions_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = version.ListVersionsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse()
)
response = await client.list_versions(parent="parent_value",)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_versions_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
await client.list_versions(
version.ListVersionsRequest(), parent="parent_value",
)
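# The pager should keep requesting pages until the final response carries no
# next_page_token.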
def test_list_versions_pager(transport_name: str = "grpc"):
client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_versions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, version.Version) for i in results)
def test_list_versions_pages(transport_name: str = "grpc"):
client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
pages = list(client.list_versions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_versions_async_pager():
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
async_pager = await client.list_versions(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, version.Version) for i in responses)
@pytest.mark.asyncio
async def test_list_versions_async_pages():
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_versions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
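# GetVersion: the stub receives the request as-is and the typed Version fields
# are surfaced on the response.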
@pytest.mark.parametrize("request_type", [version.GetVersionRequest, dict,])
def test_get_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=version.Version.VersionStatus.IN_PROGRESS,
)
response = client.get_version(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == version.Version.VersionStatus.IN_PROGRESS
def test_get_version_empty_call():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
client.get_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
@pytest.mark.asyncio
async def test_get_version_async(
transport: str = "grpc_asyncio", request_type=version.GetVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.get_version(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_get_version_async_from_dict():
await test_get_version_async(request_type=dict)
def test_get_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
request = version.GetVersionRequest()
request.name = "name/value"
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = version.Version()
client.get_version(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
request = version.GetVersionRequest()
request.name = "name/value"
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
await client.get_version(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = version.Version()
client.get_version(name="name_value",)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
client.get_version(
version.GetVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = version.Version()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
response = await client.get_version(name="name_value",)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
await client.get_version(
version.GetVersionRequest(), name="name_value",
)
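# CreateVersion mirrors GetVersion but routes on `parent` and returns gcd_version.Version.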
@pytest.mark.parametrize("request_type", [gcd_version.CreateVersionRequest, dict,])
def test_create_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
response = client.create_version(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
def test_create_version_empty_call():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
client.create_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
@pytest.mark.asyncio
async def test_create_version_async(
transport: str = "grpc_asyncio", request_type=gcd_version.CreateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.create_version(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.CreateVersionRequest()
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_create_version_async_from_dict():
await test_create_version_async(request_type=dict)
def test_create_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
request = gcd_version.CreateVersionRequest()
request.parent = "parent/value"
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.create_version(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
request = gcd_version.CreateVersionRequest()
request.parent = "parent/value"
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
await client.create_version(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.create_version(
parent="parent_value", version=gcd_version.Version(name="name_value"),
)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
def test_create_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
client.create_version(
gcd_version.CreateVersionRequest(),
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = gcd_version.Version()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
response = await client.create_version(
parent="parent_value", version=gcd_version.Version(name="name_value"),
)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
await client.create_version(
gcd_version.CreateVersionRequest(),
parent="parent_value",
version=gcd_version.Version(name="name_value"),
)
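# UpdateVersion: routing headers are derived from version.name and flattened
# calls accept an update field mask.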
@pytest.mark.parametrize("request_type", [gcd_version.UpdateVersionRequest, dict,])
def test_update_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
response = client.update_version(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
def test_update_version_empty_call():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
client.update_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
@pytest.mark.asyncio
async def test_update_version_async(
transport: str = "grpc_asyncio", request_type=gcd_version.UpdateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_version.Version(
name="name_value",
description="description_value",
version_number=1518,
status=gcd_version.Version.VersionStatus.IN_PROGRESS,
)
)
response = await client.update_version(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_version.UpdateVersionRequest()
assert isinstance(response, gcd_version.Version)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.version_number == 1518
assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS
@pytest.mark.asyncio
async def test_update_version_async_from_dict():
await test_update_version_async(request_type=dict)
def test_update_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
request = gcd_version.UpdateVersionRequest()
request.version.name = "version.name/value"
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.update_version(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
request = gcd_version.UpdateVersionRequest()
request.version.name = "version.name/value"
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
await client.update_version(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
"metadata"
]
def test_update_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = gcd_version.Version()
client.update_version(
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
client.update_version(
gcd_version.UpdateVersionRequest(),
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = gcd_version.Version()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version())
response = await client.update_version(
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcd_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
await client.update_version(
gcd_version.UpdateVersionRequest(),
version=gcd_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
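# DeleteVersion returns an empty response; the tests check the request type,
# routing headers, and flattened-argument handling.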
@pytest.mark.parametrize("request_type", [version.DeleteVersionRequest, dict,])
def test_delete_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = None
response = client.delete_version(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
assert response is None
def test_delete_version_empty_call():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
client.delete_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
@pytest.mark.asyncio
async def test_delete_version_async(
transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
request = request_type()
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_version(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
assert response is None
@pytest.mark.asyncio
async def test_delete_version_async_from_dict():
await test_delete_version_async(request_type=dict)
def test_delete_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
request = version.DeleteVersionRequest()
request.name = "name/value"
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = None
client.delete_version(request)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
request = version.DeleteVersionRequest()
request.name = "name/value"
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_version(request)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = None
client.delete_version(name="name_value",)
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
client.delete_version(
version.DeleteVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = None
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_version(name="name_value",)
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
with pytest.raises(ValueError):
await client.delete_version(
version.DeleteVersionRequest(), name="name_value",
)
def test_credentials_transport_error():
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(client_options=options, transport=transport,)
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = VersionsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.VersionsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.VersionsGrpcTransport,)
def test_versions_base_transport_error():
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_versions_base_transport():
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.versions.transports.VersionsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
methods = (
"list_versions",
"get_version",
"create_version",
"update_version",
"delete_version",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_versions_base_transport_with_credentials_file():
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2beta1.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_versions_base_transport_with_adc():
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2beta1.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport()
adc.assert_called_once()
def test_versions_auth_adc():
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
VersionsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_versions_transport_auth_adc(transport_class):
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VersionsGrpcTransport, grpc_helpers),
(transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_versions_transport_create_channel(transport_class, grpc_helpers):
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_versions_host_no_port():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_versions_host_with_port():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_versions_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
transport = transports.VersionsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
def test_versions_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
transport = transports.VersionsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_version_path():
project = "squid"
version = "clam"
expected = "projects/{project}/agent/versions/{version}".format(
project=project, version=version,
)
actual = VersionsClient.version_path(project, version)
assert expected == actual
def test_parse_version_path():
expected = {
"project": "whelk",
"version": "octopus",
}
path = VersionsClient.version_path(**expected)
actual = VersionsClient.parse_version_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = VersionsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = VersionsClient.common_billing_account_path(**expected)
actual = VersionsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = VersionsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = VersionsClient.common_folder_path(**expected)
actual = VersionsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = VersionsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = VersionsClient.common_organization_path(**expected)
actual = VersionsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = VersionsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = VersionsClient.common_project_path(**expected)
actual = VersionsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = VersionsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = VersionsClient.common_location_path(**expected)
actual = VersionsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = VersionsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(VersionsClient, transports.VersionsGrpcTransport),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| true | true |
1c2b446ecc23d924dba0c07b4632ad36b37b677c | 522 | py | Python | python/blender/render_minimal_demo.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 23 | 2015-06-08T13:01:00.000Z | 2021-12-30T08:20:04.000Z | python/blender/render_minimal_demo.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 1 | 2020-10-22T02:36:10.000Z | 2020-10-22T02:36:10.000Z | python/blender/render_minimal_demo.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 7 | 2017-10-31T09:48:14.000Z | 2022-01-04T15:59:45.000Z | # A minimal example of rendering the default scene.
# The Blender default scene contains a cube, a lamp and a camera.
# USAGE: blender --background --python render_minimal_demo.py
# See http://wiki.blender.org/index.php/Doc:2.6/Manual/Extensions/Python
import bpy
# Alias
render = bpy.context.scene.render
# Set render resolution
render.resolution_x = 800
render.resolution_y = 600
# Set Scenes output filename
render.filepath = 'out.png'
# Render Scene and store the scene
bpy.ops.render.render(write_still=True)
| 23.727273 | 72 | 0.764368 |
import bpy
render = bpy.context.scene.render
render.resolution_x = 800
render.resolution_y = 600
render.filepath = 'out.png'
bpy.ops.render.render(write_still=True)
| true | true |
1c2b44b17c7a42098dd9c09f3dbf8d2d50c5c78a | 8,629 | py | Python | tests/test_split.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | [
"BSD-3-Clause"
] | 2,382 | 2015-01-04T03:16:59.000Z | 2021-12-10T15:48:56.000Z | tests/test_split.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | [
"BSD-3-Clause"
] | 1,009 | 2015-01-03T23:44:02.000Z | 2021-12-10T16:02:42.000Z | tests/test_split.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | [
"BSD-3-Clause"
] | 467 | 2015-01-19T23:18:33.000Z | 2021-12-09T18:31:28.000Z | from shapely.ops import split
from . import unittest
from shapely.errors import GeometryTypeError
from shapely.geometry import Point, MultiPoint, LineString, MultiLineString, Polygon, MultiPolygon, GeometryCollection
from shapely.ops import linemerge, unary_union
class TestSplitGeometry(unittest.TestCase):
# helper class for testing below
def helper(self, geom, splitter, expected_chunks):
s = split(geom, splitter)
self.assertEqual(s.type, "GeometryCollection")
self.assertEqual(len(s.geoms), expected_chunks)
if expected_chunks > 1:
# split --> expected collection that when merged is again equal to original geometry
if s.geoms[0].type == 'LineString':
self.assertTrue(linemerge(s).simplify(0.000001).equals(geom))
elif s.geoms[0].type == 'Polygon':
union = unary_union(s).simplify(0.000001)
self.assertTrue(union.equals(geom))
self.assertEqual(union.area, geom.area)
else:
raise ValueError
elif expected_chunks == 1:
# not split --> expected equal to line
self.assertTrue(s.geoms[0].equals(geom))
def test_split_closed_line_with_point(self):
# point at start/end of closed ring -> return equal
# see GH #524
ls = LineString([(0,0), (0, 1), (1, 1), (1, 0), (0, 0)])
splitter = Point(0, 0)
self.helper(ls, splitter, 1)
class TestSplitPolygon(TestSplitGeometry):
poly_simple = Polygon([(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)])
poly_hole = Polygon([(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)], [[(0.5, 0.5), (0.5, 1.5), (1.5, 1.5), (1.5, 0.5), (0.5, 0.5)]])
def test_split_poly_with_line(self):
# crossing at 2 points --> return 2 segments
splitter = LineString([(1, 3), (1, -3)])
self.helper(self.poly_simple, splitter, 2)
self.helper(self.poly_hole, splitter, 2)
# touching the boundary--> return equal
splitter = LineString([(0, 2), (5, 2)])
self.helper(self.poly_simple, splitter, 1)
self.helper(self.poly_hole, splitter, 1)
# inside the polygon --> return equal
splitter = LineString([(0.2, 0.2), (1.7, 1.7), (3, 2)])
self.helper(self.poly_simple, splitter, 1)
self.helper(self.poly_hole, splitter, 1)
# outside the polygon --> return equal
splitter = LineString([(0, 3), (3, 3) , (3, 0)])
self.helper(self.poly_simple, splitter, 1)
self.helper(self.poly_hole, splitter, 1)
def test_split_poly_with_other(self):
with self.assertRaises(GeometryTypeError):
split(self.poly_simple, Point(1, 1))
with self.assertRaises(GeometryTypeError):
split(self.poly_simple, MultiPoint([(1, 1), (3, 4)]))
with self.assertRaises(GeometryTypeError):
split(self.poly_simple, self.poly_hole)
class TestSplitLine(TestSplitGeometry):
ls = LineString([(0, 0), (1.5, 1.5), (3.0, 4.0)])
def test_split_line_with_point(self):
# point on line interior --> return 2 segments
splitter = Point(1, 1)
self.helper(self.ls, splitter, 2)
# point on line point --> return 2 segments
splitter = Point(1.5, 1.5)
self.helper(self.ls, splitter, 2)
# point on boundary --> return equal
splitter = Point(3, 4)
self.helper(self.ls, splitter, 1)
# point on exterior of line --> return equal
splitter = Point(2, 2)
self.helper(self.ls, splitter, 1)
def test_split_line_with_multipoint(self):
# points on line interior --> return 4 segments
splitter = MultiPoint([(1,1), (1.5, 1.5), (0.5, 0.5)])
self.helper(self.ls, splitter, 4)
# points on line interior and boundary -> return 2 segments
splitter = MultiPoint([(1, 1), (3, 4)])
self.helper(self.ls, splitter, 2)
# point on linear interior but twice --> return 2 segments
splitter = MultiPoint([(1, 1), (1.5, 1.5), (1, 1)])
self.helper(self.ls, splitter, 3)
def test_split_line_with_line(self):
# crosses at one point --> return 2 segments
splitter = LineString([(0, 1), (1, 0)])
self.helper(self.ls, splitter, 2)
# crosses at two points --> return 3 segments
splitter = LineString([(0, 1), (1, 0), (1, 2)])
self.helper(self.ls, splitter, 3)
# overlaps --> raise
splitter = LineString([(0, 0), (15, 15)])
with self.assertRaises(ValueError):
self.helper(self.ls, splitter, 1)
# does not cross --> return equal
splitter = LineString([(0, 1), (0, 2)])
self.helper(self.ls, splitter, 1)
# is touching the boundary --> return equal
splitter = LineString([(-1, 1), (1, -1)])
self.assertTrue(splitter.touches(self.ls))
self.helper(self.ls, splitter, 1)
# splitter boundary touches interior of line --> return 2 segments
splitter = LineString([(0, 1), (1, 1)]) # touches at (1, 1)
self.assertTrue(splitter.touches(self.ls))
self.helper(self.ls, splitter, 2)
def test_split_line_with_multiline(self):
# crosses at one point --> return 2 segments
splitter = MultiLineString([[(0, 1), (1, 0)], [(0, 0), (2, -2)]])
self.helper(self.ls, splitter, 2)
# crosses at two points --> return 3 segments
splitter = MultiLineString([[(0, 1), (1, 0)], [(0, 2), (2, 0)]])
self.helper(self.ls, splitter, 3)
# crosses at three points --> return 4 segments
splitter = MultiLineString([[(0, 1), (1, 0)], [(0, 2), (2, 0), (2.2, 3.2)]])
self.helper(self.ls, splitter, 4)
# overlaps --> raise
splitter = MultiLineString([[(0, 0), (1.5, 1.5)], [(1.5, 1.5), (3, 4)]])
with self.assertRaises(ValueError):
self.helper(self.ls, splitter, 1)
# does not cross --> return equal
splitter = MultiLineString([[(0, 1), (0, 2)], [(1, 0), (2, 0)]])
self.helper(self.ls, splitter, 1)
def test_split_line_with_polygon(self):
# crosses at two points --> return 3 segments
splitter = Polygon([(1, 0), (1, 2), (2, 2), (2, 0), (1, 0)])
self.helper(self.ls, splitter, 3)
# crosses at one point and touches boundary --> return 2 segments
splitter = Polygon([(0, 0), (1, 2), (2, 2), (1, 0), (0, 0)])
self.helper(self.ls, splitter, 2)
# exterior crosses at one point and touches at (0, 0)
# interior crosses at two points
splitter = Polygon([(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)], [[(0.5, 0.5), (0.5, 1.5), (1.5, 1.5), (1.5, 0.5), (0.5, 0.5)]])
self.helper(self.ls, splitter, 4)
def test_split_line_with_multipolygon(self):
poly1 = Polygon([(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)]) # crosses at one point and touches at (0, 0)
poly2 = Polygon([(0.5, 0.5), (0.5, 1.5), (1.5, 1.5), (1.5, 0.5), (0.5, 0.5)]) # crosses at two points
poly3 = Polygon([(0, 0), (0, -2), (-2, -2), (-2, 0), (0, 0)]) # not crossing
splitter = MultiPolygon([poly1, poly2, poly3])
self.helper(self.ls, splitter, 4)
class TestSplitClosedRing(TestSplitGeometry):
ls = LineString([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])
def test_split_closed_ring_with_point(self):
splitter = Point([0.0, 0.0])
self.helper(self.ls, splitter, 1)
splitter = Point([0.0, 0.5])
self.helper(self.ls, splitter, 2)
result = split(self.ls, splitter)
assert result.geoms[0].coords[:] == [(0, 0), (0.0, 0.5)]
assert result.geoms[1].coords[:] == [(0.0, 0.5), (0, 1), (1, 1), (1, 0), (0, 0)]
# previously failed, see GH#585
splitter = Point([0.5, 0.0])
self.helper(self.ls, splitter, 2)
result = split(self.ls, splitter)
assert result.geoms[0].coords[:] == [(0, 0), (0, 1), (1, 1), (1, 0), (0.5, 0)]
assert result.geoms[1].coords[:] == [(0.5, 0), (0, 0)]
splitter = Point([2.0, 2.0])
self.helper(self.ls, splitter, 1)
class TestSplitMulti(TestSplitGeometry):
def test_split_multiline_with_point(self):
# a cross-like multilinestring with a point in the middle --> return 4 line segments
l1 = LineString([(0, 1), (2, 1)])
l2 = LineString([(1, 0), (1, 2)])
ml = MultiLineString([l1, l2])
splitter = Point((1, 1))
self.helper(ml, splitter, 4)
def test_split_multiline_with_multipoint(self):
# a cross-like multilinestring with a point in middle, a point on one of the lines and a point in the exterior
# --> return 4+1 line segments
l1 = LineString([(0, 1), (3, 1)])
l2 = LineString([(1, 0), (1, 2)])
ml = MultiLineString([l1, l2])
splitter = MultiPoint([(1, 1), (2, 1), (4, 2)])
self.helper(ml, splitter, 5)
def test_split_multipolygon_with_line(self):
# two polygons with a crossing line --> return 4 triangles
poly1 = Polygon([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])
poly2 = Polygon([(1, 1), (1, 2), (2, 2), (2, 1), (1, 1)])
mpoly = MultiPolygon([poly1, poly2])
ls = LineString([(-1, -1), (3, 3)])
self.helper(mpoly, ls, 4)
# two polygons away from the crossing line --> return identity
poly1 = Polygon([(10, 10), (10, 11), (11, 11), (11, 10), (10, 10)])
poly2 = Polygon([(-10, -10), (-10, -11), (-11, -11), (-11, -10), (-10, -10)])
mpoly = MultiPolygon([poly1, poly2])
ls = LineString([(-1, -1), (3, 3)])
self.helper(mpoly, ls, 2)
| 37.354978 | 126 | 0.636343 | from shapely.ops import split
from . import unittest
from shapely.errors import GeometryTypeError
from shapely.geometry import Point, MultiPoint, LineString, MultiLineString, Polygon, MultiPolygon, GeometryCollection
from shapely.ops import linemerge, unary_union
class TestSplitGeometry(unittest.TestCase):
def helper(self, geom, splitter, expected_chunks):
s = split(geom, splitter)
self.assertEqual(s.type, "GeometryCollection")
self.assertEqual(len(s.geoms), expected_chunks)
if expected_chunks > 1:
if s.geoms[0].type == 'LineString':
self.assertTrue(linemerge(s).simplify(0.000001).equals(geom))
elif s.geoms[0].type == 'Polygon':
union = unary_union(s).simplify(0.000001)
self.assertTrue(union.equals(geom))
self.assertEqual(union.area, geom.area)
else:
raise ValueError
elif expected_chunks == 1:
self.assertTrue(s.geoms[0].equals(geom))
def test_split_closed_line_with_point(self):
        ls = LineString([(0,0), (0, 1), (1, 1), (1, 0), (0, 0)])
splitter = Point(0, 0)
self.helper(ls, splitter, 1)
class TestSplitPolygon(TestSplitGeometry):
poly_simple = Polygon([(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)])
poly_hole = Polygon([(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)], [[(0.5, 0.5), (0.5, 1.5), (1.5, 1.5), (1.5, 0.5), (0.5, 0.5)]])
def test_split_poly_with_line(self):
splitter = LineString([(1, 3), (1, -3)])
self.helper(self.poly_simple, splitter, 2)
self.helper(self.poly_hole, splitter, 2)
splitter = LineString([(0, 2), (5, 2)])
self.helper(self.poly_simple, splitter, 1)
self.helper(self.poly_hole, splitter, 1)
splitter = LineString([(0.2, 0.2), (1.7, 1.7), (3, 2)])
self.helper(self.poly_simple, splitter, 1)
self.helper(self.poly_hole, splitter, 1)
splitter = LineString([(0, 3), (3, 3) , (3, 0)])
self.helper(self.poly_simple, splitter, 1)
self.helper(self.poly_hole, splitter, 1)
def test_split_poly_with_other(self):
with self.assertRaises(GeometryTypeError):
split(self.poly_simple, Point(1, 1))
with self.assertRaises(GeometryTypeError):
split(self.poly_simple, MultiPoint([(1, 1), (3, 4)]))
with self.assertRaises(GeometryTypeError):
split(self.poly_simple, self.poly_hole)
class TestSplitLine(TestSplitGeometry):
ls = LineString([(0, 0), (1.5, 1.5), (3.0, 4.0)])
def test_split_line_with_point(self):
splitter = Point(1, 1)
self.helper(self.ls, splitter, 2)
splitter = Point(1.5, 1.5)
self.helper(self.ls, splitter, 2)
splitter = Point(3, 4)
self.helper(self.ls, splitter, 1)
splitter = Point(2, 2)
self.helper(self.ls, splitter, 1)
def test_split_line_with_multipoint(self):
splitter = MultiPoint([(1,1), (1.5, 1.5), (0.5, 0.5)])
self.helper(self.ls, splitter, 4)
splitter = MultiPoint([(1, 1), (3, 4)])
self.helper(self.ls, splitter, 2)
splitter = MultiPoint([(1, 1), (1.5, 1.5), (1, 1)])
self.helper(self.ls, splitter, 3)
def test_split_line_with_line(self):
splitter = LineString([(0, 1), (1, 0)])
self.helper(self.ls, splitter, 2)
splitter = LineString([(0, 1), (1, 0), (1, 2)])
self.helper(self.ls, splitter, 3)
splitter = LineString([(0, 0), (15, 15)])
with self.assertRaises(ValueError):
self.helper(self.ls, splitter, 1)
splitter = LineString([(0, 1), (0, 2)])
self.helper(self.ls, splitter, 1)
splitter = LineString([(-1, 1), (1, -1)])
self.assertTrue(splitter.touches(self.ls))
self.helper(self.ls, splitter, 1)
splitter = LineString([(0, 1), (1, 1)])
self.assertTrue(splitter.touches(self.ls))
self.helper(self.ls, splitter, 2)
def test_split_line_with_multiline(self):
splitter = MultiLineString([[(0, 1), (1, 0)], [(0, 0), (2, -2)]])
self.helper(self.ls, splitter, 2)
splitter = MultiLineString([[(0, 1), (1, 0)], [(0, 2), (2, 0)]])
self.helper(self.ls, splitter, 3)
splitter = MultiLineString([[(0, 1), (1, 0)], [(0, 2), (2, 0), (2.2, 3.2)]])
self.helper(self.ls, splitter, 4)
splitter = MultiLineString([[(0, 0), (1.5, 1.5)], [(1.5, 1.5), (3, 4)]])
with self.assertRaises(ValueError):
self.helper(self.ls, splitter, 1)
splitter = MultiLineString([[(0, 1), (0, 2)], [(1, 0), (2, 0)]])
self.helper(self.ls, splitter, 1)
def test_split_line_with_polygon(self):
splitter = Polygon([(1, 0), (1, 2), (2, 2), (2, 0), (1, 0)])
self.helper(self.ls, splitter, 3)
splitter = Polygon([(0, 0), (1, 2), (2, 2), (1, 0), (0, 0)])
self.helper(self.ls, splitter, 2)
splitter = Polygon([(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)], [[(0.5, 0.5), (0.5, 1.5), (1.5, 1.5), (1.5, 0.5), (0.5, 0.5)]])
self.helper(self.ls, splitter, 4)
def test_split_line_with_multipolygon(self):
poly1 = Polygon([(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)])
poly2 = Polygon([(0.5, 0.5), (0.5, 1.5), (1.5, 1.5), (1.5, 0.5), (0.5, 0.5)])
poly3 = Polygon([(0, 0), (0, -2), (-2, -2), (-2, 0), (0, 0)])
splitter = MultiPolygon([poly1, poly2, poly3])
self.helper(self.ls, splitter, 4)
class TestSplitClosedRing(TestSplitGeometry):
ls = LineString([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])
def test_split_closed_ring_with_point(self):
splitter = Point([0.0, 0.0])
self.helper(self.ls, splitter, 1)
splitter = Point([0.0, 0.5])
self.helper(self.ls, splitter, 2)
result = split(self.ls, splitter)
assert result.geoms[0].coords[:] == [(0, 0), (0.0, 0.5)]
assert result.geoms[1].coords[:] == [(0.0, 0.5), (0, 1), (1, 1), (1, 0), (0, 0)]
        splitter = Point([0.5, 0.0])
self.helper(self.ls, splitter, 2)
result = split(self.ls, splitter)
assert result.geoms[0].coords[:] == [(0, 0), (0, 1), (1, 1), (1, 0), (0.5, 0)]
assert result.geoms[1].coords[:] == [(0.5, 0), (0, 0)]
splitter = Point([2.0, 2.0])
self.helper(self.ls, splitter, 1)
class TestSplitMulti(TestSplitGeometry):
def test_split_multiline_with_point(self):
l1 = LineString([(0, 1), (2, 1)])
l2 = LineString([(1, 0), (1, 2)])
ml = MultiLineString([l1, l2])
splitter = Point((1, 1))
self.helper(ml, splitter, 4)
def test_split_multiline_with_multipoint(self):
l1 = LineString([(0, 1), (3, 1)])
l2 = LineString([(1, 0), (1, 2)])
ml = MultiLineString([l1, l2])
splitter = MultiPoint([(1, 1), (2, 1), (4, 2)])
self.helper(ml, splitter, 5)
def test_split_multipolygon_with_line(self):
poly1 = Polygon([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])
poly2 = Polygon([(1, 1), (1, 2), (2, 2), (2, 1), (1, 1)])
mpoly = MultiPolygon([poly1, poly2])
ls = LineString([(-1, -1), (3, 3)])
self.helper(mpoly, ls, 4)
poly1 = Polygon([(10, 10), (10, 11), (11, 11), (11, 10), (10, 10)])
poly2 = Polygon([(-10, -10), (-10, -11), (-11, -11), (-11, -10), (-10, -10)])
mpoly = MultiPolygon([poly1, poly2])
ls = LineString([(-1, -1), (3, 3)])
self.helper(mpoly, ls, 2)
| true | true |
1c2b44ecfb13bdce6c05318e53447706b0408ecb | 124,747 | py | Python | nikola/nikola.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | [
"MIT"
] | null | null | null | nikola/nikola.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | [
"MIT"
] | null | null | null | nikola/nikola.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2012-2020 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""The main Nikola site object."""
import datetime
import io
import json
import functools
import logging
import operator
import os
import sys
import mimetypes
from collections import defaultdict
from copy import copy
from urllib.parse import urlparse, urlsplit, urlunsplit, urljoin, unquote, parse_qs
import dateutil.tz
import lxml.etree
import lxml.html
import natsort
import PyRSS2Gen as rss
from pkg_resources import resource_filename
from blinker import signal
from yapsy.PluginManager import PluginManager
from . import DEBUG, SHOW_TRACEBACKS, filters, utils, hierarchy_utils, shortcodes
from . import metadata_extractors
from .metadata_extractors import default_metadata_extractors_by
from .post import Post # NOQA
from .plugin_categories import (
Command,
LateTask,
PageCompiler,
CompilerExtension,
MarkdownExtension,
RestExtension,
MetadataExtractor,
ShortcodePlugin,
Task,
TaskMultiplier,
TemplateSystem,
SignalHandler,
ConfigPlugin,
PostScanner,
Taxonomy,
)
from .state import Persistor
try:
import pyphen
except ImportError:
pyphen = None
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
# Default "Read more..." link
DEFAULT_INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
DEFAULT_FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
config_changed = utils.config_changed
__all__ = ('Nikola',)
# We store legal values for some settings here. For internal use.
LEGAL_VALUES = {
'DEFAULT_THEME': 'bootblog4',
'COMMENT_SYSTEM': [
'disqus',
'facebook',
'intensedebate',
'isso',
'muut',
'commento',
],
'TRANSLATIONS': {
'af': 'Afrikaans',
'ar': 'Arabic',
'az': 'Azerbaijani',
'bg': 'Bulgarian',
'bs': 'Bosnian',
'ca': 'Catalan',
('cs', 'cz'): 'Czech',
'da': 'Danish',
'de': 'German',
('el', '!gr'): 'Greek',
'en': 'English',
'eo': 'Esperanto',
'es': 'Spanish',
'et': 'Estonian',
'eu': 'Basque',
'fa': 'Persian',
'fi': 'Finnish',
'fr': 'French',
'fur': 'Friulian',
'gl': 'Galician',
'he': 'Hebrew',
'hi': 'Hindi',
'hr': 'Croatian',
'hu': 'Hungarian',
'ia': 'Interlingua',
'id': 'Indonesian',
'it': 'Italian',
('ja', '!jp'): 'Japanese',
'ko': 'Korean',
'lt': 'Lithuanian',
'ml': 'Malayalam',
'mr': 'Marathi',
'nb': 'Norwegian (Bokmål)',
'nl': 'Dutch',
'pa': 'Punjabi',
'pl': 'Polish',
'pt': 'Portuguese',
'pt_br': 'Portuguese (Brazil)',
'ru': 'Russian',
'sk': 'Slovak',
'sl': 'Slovene',
'sq': 'Albanian',
'sr': 'Serbian (Cyrillic)',
'sr_latin': 'Serbian (Latin)',
'sv': 'Swedish',
'te': 'Telugu',
'th': 'Thai',
('tr', '!tr_TR'): 'Turkish',
'uk': 'Ukrainian',
'ur': 'Urdu',
'vi': 'Vietnamese',
'zh_cn': 'Chinese (Simplified)',
'zh_tw': 'Chinese (Traditional)'
},
'_TRANSLATIONS_WITH_COUNTRY_SPECIFIERS': {
# This dict is used in `init` in case of locales that exist with a
# country specifier. If there is no other locale that has the same
# language with a different country, ``nikola init`` (but nobody else!)
# will accept it, warning the user about it.
# This dict is currently empty.
},
'LOCALES_BASE': {
# A list of locale mappings to apply for every site. Can be overridden in the config.
'sr_latin': 'sr_Latn',
},
'RTL_LANGUAGES': ('ar', 'fa', 'he', 'ur'),
'LUXON_LOCALES': defaultdict(lambda: 'en', **{
'af': 'af',
'ar': 'ar',
'az': 'az',
'bg': 'bg',
'bn': 'bn',
'bs': 'bs',
'ca': 'ca',
'cs': 'cs',
'cz': 'cs',
'da': 'da',
'de': 'de',
'el': 'el',
'en': 'en',
'eo': 'eo',
'es': 'es',
'et': 'et',
'eu': 'eu',
'fa': 'fa',
'fi': 'fi',
'fr': 'fr',
'fur': 'fur',
'gl': 'gl',
'hi': 'hi',
'he': 'he',
'hr': 'hr',
'hu': 'hu',
'ia': 'ia',
'id': 'id',
'it': 'it',
'ja': 'ja',
'ko': 'ko',
'lt': 'lt',
'ml': 'ml',
'mr': 'mr',
'nb': 'nb',
'nl': 'nl',
'pa': 'pa',
'pl': 'pl',
'pt': 'pt',
'pt_br': 'pt-BR',
'ru': 'ru',
'sk': 'sk',
'sl': 'sl',
'sq': 'sq',
'sr': 'sr-Cyrl',
'sr_latin': 'sr-Latn',
'sv': 'sv',
'te': 'te',
'tr': 'tr',
'th': 'th',
'uk': 'uk',
'ur': 'ur',
'vi': 'vi',
'zh_cn': 'zh-CN',
'zh_tw': 'zh-TW'
}),
# TODO: remove in v9
'MOMENTJS_LOCALES': defaultdict(lambda: 'en', **{
'af': 'af',
'ar': 'ar',
'az': 'az',
'bg': 'bg',
'bn': 'bn',
'bs': 'bs',
'ca': 'ca',
'cs': 'cs',
'cz': 'cs',
'da': 'da',
'de': 'de',
'el': 'el',
'en': 'en',
'eo': 'eo',
'es': 'es',
'et': 'et',
'eu': 'eu',
'fa': 'fa',
'fi': 'fi',
'fr': 'fr',
'gl': 'gl',
'hi': 'hi',
'he': 'he',
'hr': 'hr',
'hu': 'hu',
'id': 'id',
'it': 'it',
'ja': 'ja',
'ko': 'ko',
'lt': 'lt',
'ml': 'ml',
'mr': 'mr',
'nb': 'nb',
'nl': 'nl',
'pa': 'pa-in',
'pl': 'pl',
'pt': 'pt',
'pt_br': 'pt-br',
'ru': 'ru',
'sk': 'sk',
'sl': 'sl',
'sq': 'sq',
'sr': 'sr-cyrl',
'sr_latin': 'sr',
'sv': 'sv',
'te': 'te',
'tr': 'tr',
'th': 'th',
'uk': 'uk',
'ur': 'ur',
'vi': 'vi',
'zh_cn': 'zh-cn',
'zh_tw': 'zh-tw'
}),
'PYPHEN_LOCALES': {
'af': 'af',
'bg': 'bg',
'ca': 'ca',
'cs': 'cs',
'cz': 'cs',
'da': 'da',
'de': 'de',
'el': 'el',
'en': 'en_US',
'es': 'es',
'et': 'et',
'fr': 'fr',
'hr': 'hr',
'hu': 'hu',
'it': 'it',
'lt': 'lt',
'nb': 'nb',
'nl': 'nl',
'pl': 'pl',
'pt': 'pt',
'pt_br': 'pt_BR',
'ru': 'ru',
'sk': 'sk',
'sl': 'sl',
'sr': 'sr',
'sv': 'sv',
'te': 'te',
'uk': 'uk',
},
'DOCUTILS_LOCALES': {
'af': 'af',
'ca': 'ca',
'da': 'da',
'de': 'de',
'en': 'en',
'eo': 'eo',
'es': 'es',
'fa': 'fa',
'fi': 'fi',
'fr': 'fr',
'gl': 'gl',
'he': 'he',
'it': 'it',
'ja': 'ja',
'lt': 'lt',
'nl': 'nl',
'pl': 'pl',
'pt': 'pt_br', # hope nobody will mind
'pt_br': 'pt_br',
'ru': 'ru',
'sk': 'sk',
'sv': 'sv',
'zh_cn': 'zh_cn',
'zh_tw': 'zh_tw'
},
"METADATA_MAPPING": ["yaml", "toml", "rest_docinfo", "markdown_metadata"],
}
# Mapping old pre-taxonomy plugin names to new post-taxonomy plugin names
TAXONOMY_COMPATIBILITY_PLUGIN_NAME_MAP = {
"render_archive": ["classify_archive"],
"render_authors": ["classify_authors"],
"render_indexes": ["classify_page_index", "classify_sections"], # "classify_indexes" removed from list (see #2591 and special-case logic below)
"render_tags": ["classify_categories", "classify_tags"],
}
# Default value for the pattern used to name translated files
DEFAULT_TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}'
def _enclosure(post, lang):
"""Add an enclosure to RSS."""
enclosure = post.meta('enclosure', lang)
if enclosure:
try:
length = int(post.meta('enclosure_length', lang) or 0)
except KeyError:
length = 0
except ValueError:
utils.LOGGER.warning("Invalid enclosure length for post {0}".format(post.source_path))
length = 0
url = enclosure
mime = mimetypes.guess_type(url)[0]
return url, length, mime
class Nikola(object):
"""Class that handles site generation.
Takes a site config as argument on creation.
"""
def __init__(self, **config):
"""Initialize proper environment for running tasks."""
# Register our own path handlers
self.path_handlers = {
'slug': self.slug_path,
'post_path': self.post_path,
'root': self.root_path,
'filename': self.filename_path,
}
self.strict = False
self.posts = []
self.all_posts = []
self.posts_per_year = defaultdict(list)
self.posts_per_month = defaultdict(list)
self.posts_per_tag = defaultdict(list)
self.posts_per_category = defaultdict(list)
self.tags_per_language = defaultdict(list)
self.post_per_file = {}
self.timeline = []
self.pages = []
self._scanned = False
self._template_system = None
self._THEMES = None
self._MESSAGES = None
self.filters = {}
self.debug = DEBUG
self.show_tracebacks = SHOW_TRACEBACKS
self.colorful = config.pop('__colorful__', False)
self.invariant = config.pop('__invariant__', False)
self.quiet = config.pop('__quiet__', False)
self._doit_config = config.pop('DOIT_CONFIG', {})
self.original_cwd = config.pop('__cwd__', False)
self.configuration_filename = config.pop('__configuration_filename__', False)
self.configured = bool(config)
self.injected_deps = defaultdict(list)
self.shortcode_registry = {}
self.metadata_extractors_by = default_metadata_extractors_by()
self.rst_transforms = []
self.template_hooks = {
'extra_head': utils.TemplateHookRegistry('extra_head', self),
'body_end': utils.TemplateHookRegistry('body_end', self),
'page_header': utils.TemplateHookRegistry('page_header', self),
'menu': utils.TemplateHookRegistry('menu', self),
'menu_alt': utils.TemplateHookRegistry('menu_alt', self),
'page_footer': utils.TemplateHookRegistry('page_footer', self),
}
# Maintain API
utils.generic_rss_renderer = self.generic_rss_renderer
# This is the default config
self.config = {
'ARCHIVE_PATH': "",
'ARCHIVE_FILENAME': "archive.html",
'ARCHIVES_ARE_INDEXES': False,
'AUTHOR_PATH': 'authors',
'AUTHOR_PAGES_ARE_INDEXES': False,
'AUTHOR_PAGES_DESCRIPTIONS': {},
'AUTHORLIST_MINIMUM_POSTS': 1,
'BLOG_AUTHOR': 'Default Author',
'BLOG_TITLE': 'Default Title',
'BLOG_EMAIL': '',
'BLOG_DESCRIPTION': 'Default Description',
'BODY_END': "",
'CACHE_FOLDER': 'cache',
'CATEGORIES_INDEX_PATH': '',
'CATEGORY_PATH': None, # None means: same as TAG_PATH
'CATEGORY_PAGES_ARE_INDEXES': None, # None means: same as TAG_PAGES_ARE_INDEXES
'CATEGORY_DESCRIPTIONS': {},
'CATEGORY_TITLES': {},
'CATEGORY_PREFIX': 'cat_',
'CATEGORY_ALLOW_HIERARCHIES': False,
'CATEGORY_OUTPUT_FLAT_HIERARCHY': False,
'CATEGORY_DESTPATH_AS_DEFAULT': False,
'CATEGORY_DESTPATH_TRIM_PREFIX': False,
'CATEGORY_DESTPATH_FIRST_DIRECTORY_ONLY': True,
'CATEGORY_DESTPATH_NAMES': {},
'CATEGORY_PAGES_FOLLOW_DESTPATH': False,
'CATEGORY_TRANSLATIONS': [],
'CATEGORY_TRANSLATIONS_ADD_DEFAULTS': False,
'CODE_COLOR_SCHEME': 'default',
'COMMENT_SYSTEM': 'disqus',
'COMMENTS_IN_GALLERIES': False,
'COMMENTS_IN_PAGES': False,
'COMPILERS': {
"rest": ('.txt', '.rst'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm')
},
'CONTENT_FOOTER': '',
'CONTENT_FOOTER_FORMATS': {},
'RSS_COPYRIGHT': '',
'RSS_COPYRIGHT_PLAIN': '',
'RSS_COPYRIGHT_FORMATS': {},
'COPY_SOURCES': True,
'CREATE_ARCHIVE_NAVIGATION': False,
'CREATE_MONTHLY_ARCHIVE': False,
'CREATE_SINGLE_ARCHIVE': False,
'CREATE_FULL_ARCHIVES': False,
'CREATE_DAILY_ARCHIVE': False,
'DATE_FORMAT': 'yyyy-MM-dd HH:mm',
'DISABLE_INDEXES': False,
'DISABLE_MAIN_ATOM_FEED': False,
'DISABLE_MAIN_RSS_FEED': False,
'MOMENTJS_DATE_FORMAT': 'YYYY-MM-DD HH:mm',
'LUXON_DATE_FORMAT': {},
'DATE_FANCINESS': 0,
'DEFAULT_LANG': "en",
'DEPLOY_COMMANDS': {'default': []},
'DISABLED_PLUGINS': [],
'EXTRA_PLUGINS_DIRS': [],
'EXTRA_THEMES_DIRS': [],
'COMMENT_SYSTEM_ID': 'nikolademo',
'ENABLE_AUTHOR_PAGES': True,
'EXIF_WHITELIST': {},
'EXTRA_HEAD_DATA': '',
'FAVICONS': (),
'FEED_LENGTH': 10,
'FILE_METADATA_REGEXP': None,
'FILE_METADATA_UNSLUGIFY_TITLES': True,
'ADDITIONAL_METADATA': {},
'FILES_FOLDERS': {'files': ''},
'FILTERS': {},
'FORCE_ISO8601': False,
'FRONT_INDEX_HEADER': '',
'GALLERY_FOLDERS': {'galleries': 'galleries'},
'GALLERY_SORT_BY_DATE': True,
'GALLERIES_USE_THUMBNAIL': False,
'GALLERIES_DEFAULT_THUMBNAIL': None,
'GLOBAL_CONTEXT_FILLER': [],
'GZIP_COMMAND': None,
'GZIP_FILES': False,
'GZIP_EXTENSIONS': ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml'),
'HIDDEN_AUTHORS': [],
'HIDDEN_TAGS': [],
'HIDE_REST_DOCINFO': False,
'HIDDEN_CATEGORIES': [],
'HYPHENATE': False,
'IMAGE_FOLDERS': {'images': ''},
'INDEX_DISPLAY_POST_COUNT': 10,
'INDEX_FILE': 'index.html',
'INDEX_TEASERS': False,
'IMAGE_THUMBNAIL_SIZE': 400,
'IMAGE_THUMBNAIL_FORMAT': '{name}.thumbnail{ext}',
'INDEXES_TITLE': "",
'INDEXES_PAGES': "",
'INDEXES_PAGES_MAIN': False,
'INDEXES_PRETTY_PAGE_URL': False,
'INDEXES_STATIC': True,
'INDEX_PATH': '',
'IPYNB_CONFIG': {},
'KATEX_AUTO_RENDER': '',
'LICENSE': '',
'LINK_CHECK_WHITELIST': [],
'LISTINGS_FOLDERS': {'listings': 'listings'},
'LOGO_URL': '',
'DEFAULT_PREVIEW_IMAGE': None,
'NAVIGATION_LINKS': {},
'NAVIGATION_ALT_LINKS': {},
'MARKDOWN_EXTENSIONS': ['fenced_code', 'codehilite', 'extra'],
'MARKDOWN_EXTENSION_CONFIGS': {},
'MAX_IMAGE_SIZE': 1280,
'MATHJAX_CONFIG': '',
'METADATA_FORMAT': 'nikola',
'METADATA_MAPPING': {},
'NEW_POST_DATE_PATH': False,
'NEW_POST_DATE_PATH_FORMAT': '%Y/%m/%d',
'OLD_THEME_SUPPORT': True,
'OUTPUT_FOLDER': 'output',
'POSTS': (("posts/*.txt", "posts", "post.tmpl"),),
'PRESERVE_EXIF_DATA': False,
'PRESERVE_ICC_PROFILES': False,
'PAGES': (("pages/*.txt", "pages", "page.tmpl"),),
'PANDOC_OPTIONS': [],
'PRETTY_URLS': True,
'FUTURE_IS_NOW': False,
'INDEX_READ_MORE_LINK': DEFAULT_INDEX_READ_MORE_LINK,
'REDIRECTIONS': [],
'ROBOTS_EXCLUSIONS': [],
'GENERATE_ATOM': False,
'ATOM_EXTENSION': '.atom',
'ATOM_PATH': '',
'ATOM_FILENAME_BASE': 'index',
'FEED_TEASERS': True,
'FEED_PLAIN': False,
'FEED_READ_MORE_LINK': DEFAULT_FEED_READ_MORE_LINK,
'FEED_LINKS_APPEND_QUERY': False,
'GENERATE_RSS': True,
'RSS_EXTENSION': '.xml',
'RSS_LINK': None,
'RSS_PATH': '',
'RSS_FILENAME_BASE': 'rss',
'SEARCH_FORM': '',
'SHOW_BLOG_TITLE': True,
'SHOW_INDEX_PAGE_NAVIGATION': False,
'SHOW_SOURCELINK': True,
'SHOW_UNTRANSLATED_POSTS': True,
'SLUG_AUTHOR_PATH': True,
'SLUG_TAG_PATH': True,
'SOCIAL_BUTTONS_CODE': '',
'SITE_URL': 'https://example.com/',
'PAGE_INDEX': False,
'SECTION_PATH': '',
'STRIP_INDEXES': True,
'TAG_PATH': 'categories',
'TAG_PAGES_ARE_INDEXES': False,
'TAG_DESCRIPTIONS': {},
'TAG_TITLES': {},
'TAG_TRANSLATIONS': [],
'TAG_TRANSLATIONS_ADD_DEFAULTS': False,
'TAGS_INDEX_PATH': '',
'TAGLIST_MINIMUM_POSTS': 1,
'TEMPLATE_FILTERS': {},
'THEME': LEGAL_VALUES['DEFAULT_THEME'],
'THEME_COLOR': '#5670d4', # light "corporate blue"
'THEME_CONFIG': {},
'THUMBNAIL_SIZE': 180,
'TRANSLATIONS_PATTERN': DEFAULT_TRANSLATIONS_PATTERN,
'URL_TYPE': 'rel_path',
'USE_BUNDLES': True,
'USE_CDN': False,
'USE_CDN_WARNING': True,
'USE_REST_DOCINFO_METADATA': False,
'USE_FILENAME_AS_TITLE': True,
'USE_KATEX': False,
'USE_SLUGIFY': True,
'USE_TAG_METADATA': True,
'TIMEZONE': 'UTC',
'WARN_ABOUT_TAG_METADATA': True,
'DEPLOY_DRAFTS': True,
'DEPLOY_FUTURE': False,
'SCHEDULE_ALL': False,
'SCHEDULE_RULE': '',
'DEMOTE_HEADERS': 1,
'GITHUB_SOURCE_BRANCH': 'master',
'GITHUB_DEPLOY_BRANCH': 'gh-pages',
'GITHUB_REMOTE_NAME': 'origin',
'GITHUB_COMMIT_SOURCE': False, # WARNING: conf.py.in overrides this with True for backwards compatibility
'META_GENERATOR_TAG': True,
'REST_FILE_INSERTION_ENABLED': True,
'TYPES_TO_HIDE_TITLE': [],
}
# set global_context for template rendering
self._GLOBAL_CONTEXT = {}
# dependencies for all pages, not included in global context
self.ALL_PAGE_DEPS = {}
self.config.update(config)
# __builtins__ contains useless cruft
if '__builtins__' in self.config:
try:
del self.config['__builtins__']
except KeyError:
del self.config[b'__builtins__']
self.config['__colorful__'] = self.colorful
self.config['__invariant__'] = self.invariant
self.config['__quiet__'] = self.quiet
# Use ATOM_PATH when set
self.config['ATOM_PATH'] = self.config['ATOM_PATH'] or self.config['INDEX_PATH']
# Make sure we have sane NAVIGATION_LINKS and NAVIGATION_ALT_LINKS.
if not self.config['NAVIGATION_LINKS']:
self.config['NAVIGATION_LINKS'] = {self.config['DEFAULT_LANG']: ()}
if not self.config['NAVIGATION_ALT_LINKS']:
self.config['NAVIGATION_ALT_LINKS'] = {self.config['DEFAULT_LANG']: ()}
# Translatability configuration.
self.config['TRANSLATIONS'] = self.config.get('TRANSLATIONS',
{self.config['DEFAULT_LANG']: ''})
for k, v in self.config['TRANSLATIONS'].items():
if os.path.isabs(v):
self.config['TRANSLATIONS'][k] = os.path.relpath(v, '/')
utils.TranslatableSetting.default_lang = self.config['DEFAULT_LANG']
self.TRANSLATABLE_SETTINGS = ('BLOG_AUTHOR',
'BLOG_TITLE',
'BLOG_DESCRIPTION',
'LICENSE',
'CONTENT_FOOTER',
'SOCIAL_BUTTONS_CODE',
'SEARCH_FORM',
'BODY_END',
'EXTRA_HEAD_DATA',
'NAVIGATION_LINKS',
'NAVIGATION_ALT_LINKS',
'FRONT_INDEX_HEADER',
'INDEX_READ_MORE_LINK',
'FEED_READ_MORE_LINK',
'INDEXES_TITLE',
'CATEGORY_DESTPATH_NAMES',
'INDEXES_PAGES',
'INDEXES_PRETTY_PAGE_URL',
'THEME_CONFIG',
# PATH options (Issue #1914)
'ARCHIVE_PATH',
'ARCHIVE_FILENAME',
'TAG_PATH',
'TAGS_INDEX_PATH',
'CATEGORY_PATH',
'CATEGORIES_INDEX_PATH',
'SECTION_PATH',
'INDEX_PATH',
'ATOM_PATH',
'RSS_PATH',
'RSS_FILENAME_BASE',
'ATOM_FILENAME_BASE',
'AUTHOR_PATH',
'DATE_FORMAT',
'LUXON_DATE_FORMAT',
'MOMENTJS_DATE_FORMAT', # TODO: remove in v9
'RSS_COPYRIGHT',
'RSS_COPYRIGHT_PLAIN',
# Issue #2970
'MARKDOWN_EXTENSION_CONFIGS',
)
self._GLOBAL_CONTEXT_TRANSLATABLE = ('blog_author',
'blog_title',
'blog_description',
'license',
'content_footer',
'social_buttons_code',
'search_form',
'body_end',
'extra_head_data',
'date_format',
'js_date_format',
'luxon_date_format',
'front_index_header',
'theme_config',
)
self._ALL_PAGE_DEPS_TRANSLATABLE = ('atom_path',
'rss_path',
'rss_filename_base',
'atom_filename_base',
)
# WARNING: navigation_(alt_)links SHOULD NOT be added to the list above.
# Themes ask for [lang] there and we should provide it.
# Luxon setup is a dict of dicts, so we need to set up the default here.
if not self.config['LUXON_DATE_FORMAT']:
self.config['LUXON_DATE_FORMAT'] = {self.config['DEFAULT_LANG']: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}}
# TODO: remove Moment.js stuff in v9
if 'JS_DATE_FORMAT' in self.config:
utils.LOGGER.warning("Moment.js was replaced by Luxon in the default themes, which uses different date formats.")
utils.LOGGER.warning("If you’re using a built-in theme, set LUXON_DATE_FORMAT. If your theme uses Moment.js, you can silence this warning by renaming JS_DATE_FORMAT to MOMENTJS_DATE_FORMAT.")
utils.LOGGER.warning("Sample Luxon config: LUXON_DATE_FORMAT = " + str(self.config['LUXON_DATE_FORMAT']))
self.config['MOMENTJS_DATE_FORMAT'] = self.config['LUXON_DATE_FORMAT']
# We first have to massage MOMENTJS_DATE_FORMAT and LUXON_DATE_FORMAT, otherwise we run into trouble
if 'MOMENTJS_DATE_FORMAT' in self.config:
if isinstance(self.config['MOMENTJS_DATE_FORMAT'], dict):
for k in self.config['MOMENTJS_DATE_FORMAT']:
self.config['MOMENTJS_DATE_FORMAT'][k] = json.dumps(self.config['MOMENTJS_DATE_FORMAT'][k])
else:
self.config['MOMENTJS_DATE_FORMAT'] = json.dumps(self.config['MOMENTJS_DATE_FORMAT'])
if 'LUXON_DATE_FORMAT' in self.config:
for k in self.config['LUXON_DATE_FORMAT']:
self.config['LUXON_DATE_FORMAT'][k] = json.dumps(self.config['LUXON_DATE_FORMAT'][k])
for i in self.TRANSLATABLE_SETTINGS:
try:
self.config[i] = utils.TranslatableSetting(i, self.config[i], self.config['TRANSLATIONS'])
except KeyError:
pass
        # An EXIF_WHITELIST implies you want to keep EXIF data
if self.config['EXIF_WHITELIST'] and not self.config['PRESERVE_EXIF_DATA']:
utils.LOGGER.warning('Setting EXIF_WHITELIST implies PRESERVE_EXIF_DATA is set to True')
self.config['PRESERVE_EXIF_DATA'] = True
# Setting PRESERVE_EXIF_DATA with an empty EXIF_WHITELIST implies 'keep everything'
if self.config['PRESERVE_EXIF_DATA'] and not self.config['EXIF_WHITELIST']:
utils.LOGGER.warning('You are setting PRESERVE_EXIF_DATA and not EXIF_WHITELIST so EXIF data is not really kept.')
if 'UNSLUGIFY_TITLES' in self.config:
utils.LOGGER.warning('The UNSLUGIFY_TITLES setting was renamed to FILE_METADATA_UNSLUGIFY_TITLES.')
self.config['FILE_METADATA_UNSLUGIFY_TITLES'] = self.config['UNSLUGIFY_TITLES']
if 'TAG_PAGES_TITLES' in self.config:
utils.LOGGER.warning('The TAG_PAGES_TITLES setting was renamed to TAG_TITLES.')
self.config['TAG_TITLES'] = self.config['TAG_PAGES_TITLES']
if 'TAG_PAGES_DESCRIPTIONS' in self.config:
utils.LOGGER.warning('The TAG_PAGES_DESCRIPTIONS setting was renamed to TAG_DESCRIPTIONS.')
self.config['TAG_DESCRIPTIONS'] = self.config['TAG_PAGES_DESCRIPTIONS']
if 'CATEGORY_PAGES_TITLES' in self.config:
utils.LOGGER.warning('The CATEGORY_PAGES_TITLES setting was renamed to CATEGORY_TITLES.')
self.config['CATEGORY_TITLES'] = self.config['CATEGORY_PAGES_TITLES']
if 'CATEGORY_PAGES_DESCRIPTIONS' in self.config:
utils.LOGGER.warning('The CATEGORY_PAGES_DESCRIPTIONS setting was renamed to CATEGORY_DESCRIPTIONS.')
self.config['CATEGORY_DESCRIPTIONS'] = self.config['CATEGORY_PAGES_DESCRIPTIONS']
if 'DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED' in self.config:
utils.LOGGER.warning('The DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED setting was renamed and split to DISABLE_INDEXES and DISABLE_MAIN_ATOM_FEED.')
self.config['DISABLE_INDEXES'] = self.config['DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED']
self.config['DISABLE_MAIN_ATOM_FEED'] = self.config['DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED']
if 'DISABLE_INDEXES_PLUGIN_RSS_FEED' in self.config:
utils.LOGGER.warning('The DISABLE_INDEXES_PLUGIN_RSS_FEED setting was renamed to DISABLE_MAIN_RSS_FEED.')
self.config['DISABLE_MAIN_RSS_FEED'] = self.config['DISABLE_INDEXES_PLUGIN_RSS_FEED']
for val in self.config['DATE_FORMAT'].values.values():
if '%' in val:
utils.LOGGER.error('The DATE_FORMAT setting needs to be upgraded.')
utils.LOGGER.warning("Nikola now uses CLDR-style date strings. http://cldr.unicode.org/translation/date-time")
utils.LOGGER.warning("Example: %Y-%m-%d %H:%M ==> yyyy-MM-dd HH:mm")
utils.LOGGER.warning("(note it’s different to what moment.js uses!)")
sys.exit(1)
# Silently upgrade LOCALES (remove encoding)
locales = LEGAL_VALUES['LOCALES_BASE']
if 'LOCALES' in self.config:
for k, v in self.config['LOCALES'].items():
self.config['LOCALES'][k] = v.split('.')[0]
locales.update(self.config['LOCALES'])
self.config['LOCALES'] = locales
if self.config.get('POSTS_SECTIONS'):
utils.LOGGER.warning("The sections feature has been removed and its functionality has been merged into categories.")
utils.LOGGER.warning("For more information on how to migrate, please read: https://getnikola.com/blog/upgrading-to-nikola-v8.html#sections-were-replaced-by-categories")
for section_config_suffix, cat_config_suffix in (
('DESCRIPTIONS', 'DESCRIPTIONS'),
('TITLE', 'TITLES'),
('TRANSLATIONS', 'TRANSLATIONS')
):
section_config = 'POSTS_SECTION_' + section_config_suffix
cat_config = 'CATEGORY_' + cat_config_suffix
if section_config in self.config:
self.config[section_config].update(self.config[cat_config])
self.config[cat_config] = self.config[section_config]
self.config['CATEGORY_DESTPATH_NAMES'] = self.config.get('POSTS_SECTION_NAME', {})
# Need to mark this translatable manually.
self.config['CATEGORY_DESTPATH_NAMES'] = utils.TranslatableSetting('CATEGORY_DESTPATH_NAMES', self.config['CATEGORY_DESTPATH_NAMES'], self.config['TRANSLATIONS'])
self.config['CATEGORY_DESTPATH_AS_DEFAULT'] = not self.config.get('POSTS_SECTION_FROM_META')
utils.LOGGER.info("Setting CATEGORY_DESTPATH_AS_DEFAULT = " + str(self.config['CATEGORY_DESTPATH_AS_DEFAULT']))
if self.config.get('CATEGORY_PAGES_FOLLOW_DESTPATH') and (not self.config.get('CATEGORY_ALLOW_HIERARCHIES') or self.config.get('CATEGORY_OUTPUT_FLAT_HIERARCHY')):
utils.LOGGER.error('CATEGORY_PAGES_FOLLOW_DESTPATH requires CATEGORY_ALLOW_HIERARCHIES = True, CATEGORY_OUTPUT_FLAT_HIERARCHY = False.')
sys.exit(1)
# Handle CONTENT_FOOTER and RSS_COPYRIGHT* properly.
# We provide the arguments to format in CONTENT_FOOTER_FORMATS and RSS_COPYRIGHT_FORMATS.
self.config['CONTENT_FOOTER'].langformat(self.config['CONTENT_FOOTER_FORMATS'])
self.config['RSS_COPYRIGHT'].langformat(self.config['RSS_COPYRIGHT_FORMATS'])
self.config['RSS_COPYRIGHT_PLAIN'].langformat(self.config['RSS_COPYRIGHT_FORMATS'])
# propagate USE_SLUGIFY
utils.USE_SLUGIFY = self.config['USE_SLUGIFY']
# Make sure we have pyphen installed if we are using it
if self.config.get('HYPHENATE') and pyphen is None:
utils.LOGGER.warning('To use the hyphenation, you have to install '
'the "pyphen" package.')
utils.LOGGER.warning('Setting HYPHENATE to False.')
self.config['HYPHENATE'] = False
# FIXME: Internally, we still use post_pages because it's a pain to change it
self.config['post_pages'] = []
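        # Each post_pages entry is [wildcard, destination, template, flag]; the final flag
        # marks entries coming from POSTS (True) as opposed to PAGES (False).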
for i1, i2, i3 in self.config['POSTS']:
self.config['post_pages'].append([i1, i2, i3, True])
for i1, i2, i3 in self.config['PAGES']:
self.config['post_pages'].append([i1, i2, i3, False])
# Handle old plugin names (from before merging the taxonomy PR #2535)
for old_plugin_name, new_plugin_names in TAXONOMY_COMPATIBILITY_PLUGIN_NAME_MAP.items():
if old_plugin_name in self.config['DISABLED_PLUGINS']:
missing_plugins = []
for plugin_name in new_plugin_names:
if plugin_name not in self.config['DISABLED_PLUGINS']:
missing_plugins.append(plugin_name)
if missing_plugins:
utils.LOGGER.warning('The "{}" plugin was replaced by several taxonomy plugins (see PR #2535): {}'.format(old_plugin_name, ', '.join(new_plugin_names)))
utils.LOGGER.warning('You are currently disabling "{}", but not the following new taxonomy plugins: {}'.format(old_plugin_name, ', '.join(missing_plugins)))
utils.LOGGER.warning('Please also disable these new plugins or remove "{}" from the DISABLED_PLUGINS list.'.format(old_plugin_name))
self.config['DISABLED_PLUGINS'].extend(missing_plugins)
# Special-case logic for "render_indexes" to fix #2591
if 'render_indexes' in self.config['DISABLED_PLUGINS']:
if 'generate_rss' in self.config['DISABLED_PLUGINS'] or self.config['GENERATE_RSS'] is False:
if 'classify_indexes' not in self.config['DISABLED_PLUGINS']:
utils.LOGGER.warning('You are disabling the "render_indexes" plugin, as well as disabling the "generate_rss" plugin or setting GENERATE_RSS to False. To achieve the same effect, please disable the "classify_indexes" plugin in the future.')
self.config['DISABLED_PLUGINS'].append('classify_indexes')
else:
if not self.config['DISABLE_INDEXES']:
utils.LOGGER.warning('You are disabling the "render_indexes" plugin, but not the generation of RSS feeds. Please put "DISABLE_INDEXES = True" into your configuration instead.')
self.config['DISABLE_INDEXES'] = True
# Disable RSS. For a successful disable, we must have both the option
# false and the plugin disabled through the official means.
if 'generate_rss' in self.config['DISABLED_PLUGINS'] and self.config['GENERATE_RSS'] is True:
utils.LOGGER.warning('Please use GENERATE_RSS to disable RSS feed generation, instead of mentioning generate_rss in DISABLED_PLUGINS.')
self.config['GENERATE_RSS'] = False
self.config['DISABLE_MAIN_RSS_FEED'] = True
# PRETTY_URLS defaults to enabling STRIP_INDEXES unless explicitly disabled
if self.config.get('PRETTY_URLS') and 'STRIP_INDEXES' not in config:
self.config['STRIP_INDEXES'] = True
if not self.config.get('COPY_SOURCES'):
self.config['SHOW_SOURCELINK'] = False
if self.config['CATEGORY_PATH']._inp is None:
self.config['CATEGORY_PATH'] = self.config['TAG_PATH']
if self.config['CATEGORY_PAGES_ARE_INDEXES'] is None:
self.config['CATEGORY_PAGES_ARE_INDEXES'] = self.config['TAG_PAGES_ARE_INDEXES']
self.default_lang = self.config['DEFAULT_LANG']
self.translations = self.config['TRANSLATIONS']
utils.LocaleBorg.initialize(self.config.get('LOCALES', {}), self.default_lang)
# BASE_URL defaults to SITE_URL
if 'BASE_URL' not in self.config:
self.config['BASE_URL'] = self.config.get('SITE_URL')
# BASE_URL should *always* end in /
if self.config['BASE_URL'] and self.config['BASE_URL'][-1] != '/':
utils.LOGGER.warning("Your BASE_URL doesn't end in / -- adding it, but please fix it in your config file!")
self.config['BASE_URL'] += '/'
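        # Make sure the BASE_URL / SITE_URL host names are plain ASCII; IDNs must be
        # given in their Punycode form.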
try:
_bnl = urlsplit(self.config['BASE_URL']).netloc
_bnl.encode('ascii')
urlsplit(self.config['SITE_URL']).netloc.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
utils.LOGGER.error("Your BASE_URL or SITE_URL contains an IDN expressed in Unicode. Please convert it to Punycode.")
utils.LOGGER.error("Punycode of {}: {}".format(_bnl, _bnl.encode('idna')))
sys.exit(1)
# Load built-in metadata extractors
metadata_extractors.load_defaults(self, self.metadata_extractors_by)
if metadata_extractors.DEFAULT_EXTRACTOR is None:
utils.LOGGER.error("Could not find default meta extractor ({})".format(
metadata_extractors.DEFAULT_EXTRACTOR_NAME))
sys.exit(1)
# The Pelican metadata format requires a markdown extension
if config.get('METADATA_FORMAT', 'nikola').lower() == 'pelican':
if 'markdown.extensions.meta' not in config.get('MARKDOWN_EXTENSIONS', []) \
and 'markdown' in self.config['COMPILERS']:
utils.LOGGER.warning(
'To use the Pelican metadata format, you need to add '
'"markdown.extensions.meta" to your MARKDOWN_EXTENSIONS setting.')
# We use one global tzinfo object all over Nikola.
try:
self.tzinfo = dateutil.tz.gettz(self.config['TIMEZONE'])
except Exception as exc:
utils.LOGGER.warning("Error getting TZ: {}", exc)
self.tzinfo = dateutil.tz.gettz()
self.config['__tzinfo__'] = self.tzinfo
# Store raw compilers for internal use (need a copy for that)
self.config['_COMPILERS_RAW'] = {}
for k, v in self.config['COMPILERS'].items():
self.config['_COMPILERS_RAW'][k] = list(v)
# Get search path for themes
self.themes_dirs = ['themes'] + self.config['EXTRA_THEMES_DIRS']
# Register default filters
filter_name_format = 'filters.{0}'
for filter_name, filter_definition in filters.__dict__.items():
# Ignore objects whose name starts with an underscore, or which are not callable
if filter_name.startswith('_') or not callable(filter_definition):
continue
# Register all other objects as filters
self.register_filter(filter_name_format.format(filter_name), filter_definition)
self._set_global_context_from_config()
self._set_all_page_deps_from_config()
# Read data files only if a site exists (Issue #2708)
if self.configured:
self._set_global_context_from_data()
# Set persistent state facility
self.state = Persistor('state_data.json')
# Set cache facility
self.cache = Persistor(os.path.join(self.config['CACHE_FOLDER'], 'cache_data.json'))
# Create directories for persistors only if a site exists (Issue #2334)
if self.configured:
self.state._set_site(self)
self.cache._set_site(self)
def _filter_duplicate_plugins(self, plugin_list):
"""Find repeated plugins and discard the less local copy."""
def plugin_position_in_places(plugin):
# plugin here is a tuple:
# (path to the .plugin file, path to plugin module w/o .py, plugin metadata)
for i, place in enumerate(self._plugin_places):
if plugin[0].startswith(place):
return i
            utils.LOGGER.warning("Duplicate plugin found in unexpected location: {}".format(plugin[0]))
return len(self._plugin_places)
plugin_dict = defaultdict(list)
for data in plugin_list:
plugin_dict[data[2].name].append(data)
result = []
for _, plugins in plugin_dict.items():
if len(plugins) > 1:
# Sort by locality
plugins.sort(key=plugin_position_in_places)
utils.LOGGER.debug("Plugin {} exists in multiple places, using {}".format(
plugins[-1][2].name, plugins[-1][0]))
result.append(plugins[-1])
return result
def init_plugins(self, commands_only=False, load_all=False):
"""Load plugins as needed."""
self.plugin_manager = PluginManager(categories_filter={
"Command": Command,
"Task": Task,
"LateTask": LateTask,
"TemplateSystem": TemplateSystem,
"PageCompiler": PageCompiler,
"TaskMultiplier": TaskMultiplier,
"CompilerExtension": CompilerExtension,
"MarkdownExtension": MarkdownExtension,
"RestExtension": RestExtension,
"MetadataExtractor": MetadataExtractor,
"ShortcodePlugin": ShortcodePlugin,
"SignalHandler": SignalHandler,
"ConfigPlugin": ConfigPlugin,
"PostScanner": PostScanner,
"Taxonomy": Taxonomy,
})
self.plugin_manager.getPluginLocator().setPluginInfoExtension('plugin')
extra_plugins_dirs = self.config['EXTRA_PLUGINS_DIRS']
self._plugin_places = [
resource_filename('nikola', 'plugins'),
os.path.expanduser(os.path.join('~', '.nikola', 'plugins')),
os.path.join(os.getcwd(), 'plugins'),
] + [path for path in extra_plugins_dirs if path]
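        # Plugins are looked up in the built-in plugin directory, ~/.nikola/plugins,
        # ./plugins and any EXTRA_PLUGINS_DIRS; later entries count as "more local"
        # when duplicates are resolved by _filter_duplicate_plugins.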
compilers = defaultdict(set)
# Also add aliases for combinations with TRANSLATIONS_PATTERN
for compiler, exts in self.config['COMPILERS'].items():
for ext in exts:
compilers[compiler].add(ext)
for lang in self.config['TRANSLATIONS'].keys():
candidate = utils.get_translation_candidate(self.config, "f" + ext, lang)
compilers[compiler].add(candidate)
# Avoid redundant compilers (if load_all is False):
# Remove compilers (and corresponding compiler extensions) that are not marked as
# needed by any PostScanner plugin and put them into self.disabled_compilers
# (respectively self.disabled_compiler_extensions).
self.config['COMPILERS'] = {}
self.disabled_compilers = {}
self.disabled_compiler_extensions = defaultdict(list)
self.plugin_manager.getPluginLocator().setPluginPlaces(self._plugin_places)
self.plugin_manager.locatePlugins()
bad_candidates = set([])
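        # Unless everything should be loaded, collect candidates to skip (disabled
        # plugins, unneeded compilers, ...) before actually loading plugins.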
if not load_all:
for p in self.plugin_manager._candidates:
if commands_only:
if p[-1].details.has_option('Nikola', 'PluginCategory'):
# FIXME TemplateSystem should not be needed
if p[-1].details.get('Nikola', 'PluginCategory') not in {'Command', 'Template'}:
bad_candidates.add(p)
else:
bad_candidates.add(p)
elif self.configured: # Not commands-only, and configured
# Remove blacklisted plugins
if p[-1].name in self.config['DISABLED_PLUGINS']:
bad_candidates.add(p)
utils.LOGGER.debug('Not loading disabled plugin {}', p[-1].name)
# Remove compilers we don't use
if p[-1].details.has_option('Nikola', 'PluginCategory') and p[-1].details.get('Nikola', 'PluginCategory') in ('Compiler', 'PageCompiler'):
bad_candidates.add(p)
self.disabled_compilers[p[-1].name] = p
# Remove compiler extensions we don't need
if p[-1].details.has_option('Nikola', 'compiler') and p[-1].details.get('Nikola', 'compiler') in self.disabled_compilers:
bad_candidates.add(p)
self.disabled_compiler_extensions[p[-1].details.get('Nikola', 'compiler')].append(p)
self.plugin_manager._candidates = list(set(self.plugin_manager._candidates) - bad_candidates)
self.plugin_manager._candidates = self._filter_duplicate_plugins(self.plugin_manager._candidates)
self.plugin_manager.loadPlugins()
# Search for compiler plugins which we disabled but shouldn't have
self._activate_plugins_of_category("PostScanner")
if not load_all:
file_extensions = set()
for post_scanner in [p.plugin_object for p in self.plugin_manager.getPluginsOfCategory('PostScanner')]:
exts = post_scanner.supported_extensions()
if exts is not None:
file_extensions.update(exts)
else:
# Stop scanning for more: once we get None, we have to load all compilers anyway
utils.LOGGER.debug("Post scanner {0!r} does not implement `supported_extensions`, loading all compilers".format(post_scanner))
file_extensions = None
break
to_add = []
for k, v in compilers.items():
if file_extensions is None or file_extensions.intersection(v):
self.config['COMPILERS'][k] = sorted(list(v))
p = self.disabled_compilers.pop(k, None)
if p:
to_add.append(p)
for p in self.disabled_compiler_extensions.pop(k, []):
to_add.append(p)
for _, p in self.disabled_compilers.items():
utils.LOGGER.debug('Not loading unneeded compiler {}', p[-1].name)
for _, plugins in self.disabled_compiler_extensions.items():
for p in plugins:
utils.LOGGER.debug('Not loading compiler extension {}', p[-1].name)
if to_add:
self.plugin_manager._candidates = self._filter_duplicate_plugins(to_add)
self.plugin_manager.loadPlugins()
# Jupyter theme configuration. If a website has ipynb enabled in post_pages
# we should enable the Jupyter CSS (leaving that up to the theme itself).
if 'needs_ipython_css' not in self._GLOBAL_CONTEXT:
self._GLOBAL_CONTEXT['needs_ipython_css'] = 'ipynb' in self.config['COMPILERS']
# Activate metadata extractors and prepare them for use
for p in self._activate_plugins_of_category("MetadataExtractor"):
metadata_extractors.classify_extractor(p.plugin_object, self.metadata_extractors_by)
self._activate_plugins_of_category("Taxonomy")
self.taxonomy_plugins = {}
for taxonomy in [p.plugin_object for p in self.plugin_manager.getPluginsOfCategory('Taxonomy')]:
if not taxonomy.is_enabled():
continue
if taxonomy.classification_name in self.taxonomy_plugins:
utils.LOGGER.error("Found more than one taxonomy with classification name '{}'!".format(taxonomy.classification_name))
sys.exit(1)
self.taxonomy_plugins[taxonomy.classification_name] = taxonomy
self._activate_plugins_of_category("SignalHandler")
# Emit signal for SignalHandlers which need to start running immediately.
signal('sighandlers_loaded').send(self)
self._commands = {}
command_plugins = self._activate_plugins_of_category("Command")
for plugin_info in command_plugins:
plugin_info.plugin_object.short_help = plugin_info.description
self._commands[plugin_info.name] = plugin_info.plugin_object
self._activate_plugins_of_category("Task")
self._activate_plugins_of_category("LateTask")
self._activate_plugins_of_category("TaskMultiplier")
# Activate all required compiler plugins
self.compiler_extensions = self._activate_plugins_of_category("CompilerExtension")
for plugin_info in self.plugin_manager.getPluginsOfCategory("PageCompiler"):
if plugin_info.name in self.config["COMPILERS"].keys():
self.plugin_manager.activatePluginByName(plugin_info.name)
plugin_info.plugin_object.set_site(self)
# Activate shortcode plugins
self._activate_plugins_of_category("ShortcodePlugin")
# Load compiler plugins
self.compilers = {}
self.inverse_compilers = {}
for plugin_info in self.plugin_manager.getPluginsOfCategory(
"PageCompiler"):
self.compilers[plugin_info.name] = \
plugin_info.plugin_object
# Load config plugins and register templated shortcodes
self._activate_plugins_of_category("ConfigPlugin")
self._register_templated_shortcodes()
# Check with registered filters and configure filters
for actions in self.config['FILTERS'].values():
for i, f in enumerate(actions):
if isinstance(f, str):
# Check whether this denotes a registered filter
_f = self.filters.get(f)
if _f is not None:
f = _f
actions[i] = f
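                # Filters may declare configuration_variables (keyword argument name ->
                # config key); bind any matching config values with functools.partial.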
if hasattr(f, 'configuration_variables'):
args = {}
for arg, config in f.configuration_variables.items():
if config in self.config:
args[arg] = self.config[config]
if args:
actions[i] = functools.partial(f, **args)
# Signal that we are configured
signal('configured').send(self)
def _set_global_context_from_config(self):
"""Create global context from configuration.
These are options that are used by templates, so they always need to be
available.
"""
self._GLOBAL_CONTEXT['url_type'] = self.config['URL_TYPE']
self._GLOBAL_CONTEXT['timezone'] = self.tzinfo
self._GLOBAL_CONTEXT['_link'] = self.link
try:
self._GLOBAL_CONTEXT['set_locale'] = utils.LocaleBorg().set_locale
except utils.LocaleBorgUninitializedException:
self._GLOBAL_CONTEXT['set_locale'] = None
self._GLOBAL_CONTEXT['rel_link'] = self.rel_link
self._GLOBAL_CONTEXT['abs_link'] = self.abs_link
self._GLOBAL_CONTEXT['exists'] = self.file_exists
self._GLOBAL_CONTEXT['index_display_post_count'] = self.config[
'INDEX_DISPLAY_POST_COUNT']
self._GLOBAL_CONTEXT['index_file'] = self.config['INDEX_FILE']
self._GLOBAL_CONTEXT['use_bundles'] = self.config['USE_BUNDLES']
self._GLOBAL_CONTEXT['use_cdn'] = self.config.get("USE_CDN")
self._GLOBAL_CONTEXT['theme_color'] = self.config.get("THEME_COLOR")
self._GLOBAL_CONTEXT['theme_config'] = self.config.get("THEME_CONFIG")
self._GLOBAL_CONTEXT['favicons'] = self.config['FAVICONS']
self._GLOBAL_CONTEXT['date_format'] = self.config.get('DATE_FORMAT')
self._GLOBAL_CONTEXT['blog_author'] = self.config.get('BLOG_AUTHOR')
self._GLOBAL_CONTEXT['blog_title'] = self.config.get('BLOG_TITLE')
self._GLOBAL_CONTEXT['blog_email'] = self.config.get('BLOG_EMAIL')
self._GLOBAL_CONTEXT['show_blog_title'] = self.config.get('SHOW_BLOG_TITLE')
self._GLOBAL_CONTEXT['logo_url'] = self.config.get('LOGO_URL')
self._GLOBAL_CONTEXT['blog_description'] = self.config.get('BLOG_DESCRIPTION')
self._GLOBAL_CONTEXT['front_index_header'] = self.config.get('FRONT_INDEX_HEADER')
self._GLOBAL_CONTEXT['color_hsl_adjust_hex'] = utils.color_hsl_adjust_hex
self._GLOBAL_CONTEXT['colorize_str_from_base_color'] = utils.colorize_str_from_base_color
self._GLOBAL_CONTEXT['blog_url'] = self.config.get('SITE_URL')
self._GLOBAL_CONTEXT['template_hooks'] = self.template_hooks
self._GLOBAL_CONTEXT['body_end'] = self.config.get('BODY_END')
self._GLOBAL_CONTEXT['social_buttons_code'] = self.config.get('SOCIAL_BUTTONS_CODE')
self._GLOBAL_CONTEXT['translations'] = self.config.get('TRANSLATIONS')
self._GLOBAL_CONTEXT['license'] = self.config.get('LICENSE')
self._GLOBAL_CONTEXT['search_form'] = self.config.get('SEARCH_FORM')
self._GLOBAL_CONTEXT['comment_system'] = self.config.get('COMMENT_SYSTEM')
self._GLOBAL_CONTEXT['comment_system_id'] = self.config.get('COMMENT_SYSTEM_ID')
self._GLOBAL_CONTEXT['site_has_comments'] = bool(self.config.get('COMMENT_SYSTEM'))
self._GLOBAL_CONTEXT['mathjax_config'] = self.config.get(
'MATHJAX_CONFIG')
self._GLOBAL_CONTEXT['use_katex'] = self.config.get('USE_KATEX')
self._GLOBAL_CONTEXT['katex_auto_render'] = self.config.get('KATEX_AUTO_RENDER')
self._GLOBAL_CONTEXT['content_footer'] = self.config.get(
'CONTENT_FOOTER')
self._GLOBAL_CONTEXT['generate_atom'] = self.config.get('GENERATE_ATOM')
self._GLOBAL_CONTEXT['generate_rss'] = self.config.get('GENERATE_RSS')
self._GLOBAL_CONTEXT['rss_link'] = self.config.get('RSS_LINK')
self._GLOBAL_CONTEXT['navigation_links'] = self.config.get('NAVIGATION_LINKS')
self._GLOBAL_CONTEXT['navigation_alt_links'] = self.config.get('NAVIGATION_ALT_LINKS')
self._GLOBAL_CONTEXT['twitter_card'] = self.config.get(
'TWITTER_CARD', {})
self._GLOBAL_CONTEXT['hide_sourcelink'] = not self.config.get(
'SHOW_SOURCELINK')
self._GLOBAL_CONTEXT['show_sourcelink'] = self.config.get(
'SHOW_SOURCELINK')
self._GLOBAL_CONTEXT['extra_head_data'] = self.config.get('EXTRA_HEAD_DATA')
self._GLOBAL_CONTEXT['date_fanciness'] = self.config.get('DATE_FANCINESS')
self._GLOBAL_CONTEXT['luxon_locales'] = LEGAL_VALUES['LUXON_LOCALES']
self._GLOBAL_CONTEXT['luxon_date_format'] = self.config.get('LUXON_DATE_FORMAT')
# TODO: remove in v9
self._GLOBAL_CONTEXT['js_date_format'] = self.config.get('MOMENTJS_DATE_FORMAT')
self._GLOBAL_CONTEXT['momentjs_locales'] = LEGAL_VALUES['MOMENTJS_LOCALES']
# Patch missing locales into momentjs defaulting to English (Issue #3216)
for l in self._GLOBAL_CONTEXT['translations']:
if l not in self._GLOBAL_CONTEXT['momentjs_locales']:
self._GLOBAL_CONTEXT['momentjs_locales'][l] = ""
self._GLOBAL_CONTEXT['hidden_tags'] = self.config.get('HIDDEN_TAGS')
self._GLOBAL_CONTEXT['hidden_categories'] = self.config.get('HIDDEN_CATEGORIES')
self._GLOBAL_CONTEXT['hidden_authors'] = self.config.get('HIDDEN_AUTHORS')
self._GLOBAL_CONTEXT['url_replacer'] = self.url_replacer
self._GLOBAL_CONTEXT['sort_posts'] = utils.sort_posts
self._GLOBAL_CONTEXT['smartjoin'] = utils.smartjoin
self._GLOBAL_CONTEXT['colorize_str'] = utils.colorize_str
self._GLOBAL_CONTEXT['meta_generator_tag'] = self.config.get('META_GENERATOR_TAG')
self._GLOBAL_CONTEXT.update(self.config.get('GLOBAL_CONTEXT', {}))
def _set_global_context_from_data(self):
"""Load files from data/ and put them in the global context."""
self._GLOBAL_CONTEXT['data'] = {}
for root, dirs, files in os.walk('data', followlinks=True):
for fname in files:
fname = os.path.join(root, fname)
data = utils.load_data(fname)
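                # The context key is the file's path relative to data/, without its extension.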
key = os.path.splitext(fname.split(os.sep, 1)[1])[0]
self._GLOBAL_CONTEXT['data'][key] = data
# Offer global_data as an alias for data (Issue #2488)
self._GLOBAL_CONTEXT['global_data'] = self._GLOBAL_CONTEXT['data']
def _set_all_page_deps_from_config(self):
"""Save dependencies for all pages from configuration.
Changes of values in this dict will force a rebuild of all pages.
Unlike global context, contents are NOT available to templates.
"""
self.ALL_PAGE_DEPS['atom_extension'] = self.config.get('ATOM_EXTENSION')
self.ALL_PAGE_DEPS['atom_path'] = self.config.get('ATOM_PATH')
self.ALL_PAGE_DEPS['rss_extension'] = self.config.get('RSS_EXTENSION')
self.ALL_PAGE_DEPS['rss_path'] = self.config.get('RSS_PATH')
self.ALL_PAGE_DEPS['rss_filename_base'] = self.config.get('RSS_FILENAME_BASE')
self.ALL_PAGE_DEPS['atom_filename_base'] = self.config.get('ATOM_FILENAME_BASE')
self.ALL_PAGE_DEPS['slug_author_path'] = self.config.get('SLUG_AUTHOR_PATH')
self.ALL_PAGE_DEPS['slug_tag_path'] = self.config.get('SLUG_TAG_PATH')
self.ALL_PAGE_DEPS['locale'] = self.config.get('LOCALE')
def _activate_plugins_of_category(self, category):
"""Activate all the plugins of a given category and return them."""
# this code duplicated in tests/base.py
plugins = []
for plugin_info in self.plugin_manager.getPluginsOfCategory(category):
self.plugin_manager.activatePluginByName(plugin_info.name)
plugin_info.plugin_object.set_site(self)
plugins.append(plugin_info)
return plugins
def _get_themes(self):
if self._THEMES is None:
try:
self._THEMES = utils.get_theme_chain(self.config['THEME'], self.themes_dirs)
except Exception:
if self.config['THEME'] != LEGAL_VALUES['DEFAULT_THEME']:
utils.LOGGER.warning('''Cannot load theme "{0}", using '{1}' instead.'''.format(
self.config['THEME'], LEGAL_VALUES['DEFAULT_THEME']))
self.config['THEME'] = LEGAL_VALUES['DEFAULT_THEME']
return self._get_themes()
raise
# Check consistency of USE_CDN and the current THEME (Issue #386)
if self.config['USE_CDN'] and self.config['USE_CDN_WARNING']:
bootstrap_path = utils.get_asset_path(os.path.join(
'assets', 'css', 'bootstrap.min.css'), self._THEMES)
if bootstrap_path and bootstrap_path.split(os.sep)[-4] not in ['bootstrap', 'bootstrap3', 'bootstrap4']:
utils.LOGGER.warning('The USE_CDN option may be incompatible with your theme, because it uses a hosted version of bootstrap.')
return self._THEMES
THEMES = property(_get_themes)
def _get_messages(self):
try:
if self._MESSAGES is None:
self._MESSAGES = utils.load_messages(self.THEMES,
self.translations,
self.default_lang,
themes_dirs=self.themes_dirs)
return self._MESSAGES
except utils.LanguageNotFoundError as e:
utils.LOGGER.error('''Cannot load language "{0}". Please make sure it is supported by Nikola itself, or that you have the appropriate messages files in your themes.'''.format(e.lang))
sys.exit(1)
MESSAGES = property(_get_messages)
def _get_global_context(self):
"""Initialize some parts of GLOBAL_CONTEXT only when it's queried."""
if 'messages' not in self._GLOBAL_CONTEXT:
self._GLOBAL_CONTEXT['messages'] = self.MESSAGES
if 'has_custom_css' not in self._GLOBAL_CONTEXT:
# check if custom css exist and is not empty
custom_css_path = utils.get_asset_path(
'assets/css/custom.css',
self.THEMES,
self.config['FILES_FOLDERS']
)
if custom_css_path and self.file_exists(custom_css_path, not_empty=True):
self._GLOBAL_CONTEXT['has_custom_css'] = True
else:
self._GLOBAL_CONTEXT['has_custom_css'] = False
return self._GLOBAL_CONTEXT
GLOBAL_CONTEXT = property(_get_global_context)
def _get_template_system(self):
if self._template_system is None:
# Load template plugin
template_sys_name = utils.get_template_engine(self.THEMES)
pi = self.plugin_manager.getPluginByName(
template_sys_name, "TemplateSystem")
if pi is None:
sys.stderr.write("Error loading {0} template system "
"plugin\n".format(template_sys_name))
sys.exit(1)
self._template_system = pi.plugin_object
lookup_dirs = ['templates'] + [os.path.join(utils.get_theme_path(name), "templates")
for name in self.THEMES]
self._template_system.set_directories(lookup_dirs,
self.config['CACHE_FOLDER'])
self._template_system.set_site(self)
return self._template_system
template_system = property(_get_template_system)
def get_compiler(self, source_name):
"""Get the correct compiler for a post from `conf.COMPILERS`.
To make things easier for users, the mapping in conf.py is
compiler->[extensions], although this is less convenient for us.
The majority of this function is reversing that dictionary and error checking.
"""
ext = os.path.splitext(source_name)[1]
try:
compiler = self.inverse_compilers[ext]
except KeyError:
            # Find the correct compiler for this file's extension
lang_exts_tab = list(self.config['COMPILERS'].items())
langs = [lang for lang, exts in lang_exts_tab if ext in exts or
len([ext_ for ext_ in exts if source_name.endswith(ext_)]) > 0]
if len(langs) != 1:
if len(set(langs)) > 1:
sys.exit("Your file extension->compiler definition is "
"ambiguous.\nPlease remove one of the file "
"extensions from 'COMPILERS' in conf.py\n(The "
"error is in one of {0})".format(', '.join(langs)))
elif len(langs) > 1:
langs = langs[:1]
else:
sys.exit("COMPILERS in conf.py does not tell me how to "
"handle '{0}' extensions.".format(ext))
lang = langs[0]
try:
compiler = self.compilers[lang]
except KeyError:
sys.exit("Cannot find '{0}' compiler; "
"it might require an extra plugin -- "
"do you have it installed?".format(lang))
self.inverse_compilers[ext] = compiler
return compiler
def render_template(self, template_name, output_name, context, url_type=None, is_fragment=False):
"""Render a template with the global context.
If ``output_name`` is None, will return a string and all URL
normalization will be ignored (including the link:// scheme).
If ``output_name`` is a string, URLs will be normalized and
the resultant HTML will be saved to the named file (path must
start with OUTPUT_FOLDER).
        The argument ``url_type`` allows overriding the ``URL_TYPE``
        configuration.
        If ``is_fragment`` is set to ``True``, an HTML fragment will
        be rendered instead of a whole HTML document.
"""
local_context = {}
local_context["template_name"] = template_name
local_context.update(self.GLOBAL_CONTEXT)
local_context.update(context)
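        # Resolve translatable settings to their values for the language being rendered.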
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
local_context[k] = local_context[k](local_context['lang'])
local_context['is_rtl'] = local_context['lang'] in LEGAL_VALUES['RTL_LANGUAGES']
local_context['url_type'] = self.config['URL_TYPE'] if url_type is None else url_type
local_context["translations_feedorder"] = sorted(
local_context["translations"],
key=lambda x: (int(x != local_context['lang']), x)
)
# string, arguments
local_context["formatmsg"] = lambda s, *a: s % a
for h in local_context['template_hooks'].values():
h.context = context
for func in self.config['GLOBAL_CONTEXT_FILLER']:
func(local_context, template_name)
data = self.template_system.render_template(
template_name, None, local_context)
if output_name is None:
return data
if not output_name.startswith(self.config["OUTPUT_FOLDER"]):
raise ValueError("Output path for templates must start with OUTPUT_FOLDER")
url_part = output_name[len(self.config["OUTPUT_FOLDER"]) + 1:]
# Treat our site as if output/ is "/" and then make all URLs relative,
# making the site "relocatable"
src = os.sep + url_part
src = os.path.normpath(src)
        # The os.sep split is needed because normpath changes "/" to "\" on Windows
src = "/".join(src.split(os.sep))
utils.makedirs(os.path.dirname(output_name))
parser = lxml.html.HTMLParser(remove_blank_text=True)
if is_fragment:
doc = lxml.html.fragment_fromstring(data.strip(), parser)
else:
doc = lxml.html.document_fromstring(data.strip(), parser)
self.rewrite_links(doc, src, context['lang'], url_type)
if is_fragment:
            # doc.text contains the text before the first HTML element, or None if there was none.
            # The text after each HTML element is added by tostring() (because its implicit
            # argument with_tail defaults to True).
data = (doc.text or '').encode('utf-8') + b''.join([lxml.html.tostring(child, encoding='utf-8', method='html') for child in doc.iterchildren()])
else:
data = lxml.html.tostring(doc, encoding='utf8', method='html', pretty_print=True, doctype='<!DOCTYPE html>')
with open(output_name, "wb+") as post_file:
post_file.write(data)
def rewrite_links(self, doc, src, lang, url_type=None):
"""Replace links in document to point to the right places."""
# First let lxml replace most of them
doc.rewrite_links(lambda dst: self.url_replacer(src, dst, lang, url_type), resolve_base_href=False)
# lxml ignores srcset in img and source elements, so do that by hand
objs = list(doc.xpath('(//img|//source)'))
for obj in objs:
if 'srcset' in obj.attrib:
urls = [u.strip() for u in obj.attrib['srcset'].split(',')]
urls = [self.url_replacer(src, dst, lang, url_type) for dst in urls]
obj.set('srcset', ', '.join(urls))
def url_replacer(self, src, dst, lang=None, url_type=None):
"""Mangle URLs.
* Replaces link:// URLs with real links
* Makes dst relative to src
* Leaves fragments unchanged
* Leaves full URLs unchanged
* Avoids empty links
src is the URL where this link is used
dst is the link to be mangled
lang is used for language-sensitive URLs in link://
url_type is used to determine final link appearance, defaulting to URL_TYPE from config
"""
# Avoid mangling links within the page
if dst.startswith('#'):
return dst
parsed_src = urlsplit(src)
src_elems = parsed_src.path.split('/')[1:]
dst_url = urlparse(dst)
if lang is None:
lang = self.default_lang
if url_type is None:
url_type = self.config.get('URL_TYPE')
if dst_url.scheme and dst_url.scheme not in ['http', 'https', 'link']:
return dst
# Refuse to replace links that are full URLs.
if dst_url.netloc:
if dst_url.scheme == 'link': # Magic link
if dst_url.query:
# If query strings are used in magic link, they will be
# passed to the path handler as keyword arguments (strings)
link_kwargs = {unquote(k): unquote(v[-1]) for k, v in parse_qs(dst_url.query).items()}
else:
link_kwargs = {}
# unquote from issue #2934
dst = self.link(dst_url.netloc, unquote(dst_url.path.lstrip('/')), lang, **link_kwargs)
if dst_url.fragment:
dst += '#' + dst_url.fragment
# Assuming the site is served over one of these, and
# since those are the only URLs we want to rewrite...
else:
if '%' in dst_url.netloc:
# convert lxml percent-encoded garbage to punycode
nl = unquote(dst_url.netloc)
try:
nl = nl.decode('utf-8')
except AttributeError:
# python 3: already unicode
pass
nl = nl.encode('idna')
if isinstance(nl, bytes):
nl = nl.decode('latin-1') # so idna stays unchanged
dst = urlunsplit((dst_url.scheme,
nl,
dst_url.path,
dst_url.query,
dst_url.fragment))
return dst
elif dst_url.scheme == 'link': # Magic absolute path link:
dst = dst_url.path
return dst
# Refuse to replace links that consist of a fragment only
if ((not dst_url.scheme) and (not dst_url.netloc) and
(not dst_url.path) and (not dst_url.params) and
(not dst_url.query) and dst_url.fragment):
return dst
# Normalize
dst = urljoin(src, dst)
# Avoid empty links.
if src == dst:
if url_type == 'absolute':
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
return dst
elif url_type == 'full_path':
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
return utils.full_path_from_urlparse(urlparse(dst))
else:
return "#"
# Check that link can be made relative, otherwise return dest
parsed_dst = urlsplit(dst)
if parsed_src[:2] != parsed_dst[:2]:
if url_type == 'absolute':
dst = urljoin(self.config['BASE_URL'], dst)
return dst
if url_type in ('full_path', 'absolute'):
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
if url_type == 'full_path':
parsed = urlparse(urljoin(self.config['BASE_URL'], dst.lstrip('/')))
dst = utils.full_path_from_urlparse(parsed)
return dst
# Now both paths are on the same site and absolute
dst_elems = parsed_dst.path.split('/')[1:]
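        # Find the longest common prefix of path components, then climb up with ".."
        # and descend into the remaining destination components.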
i = 0
for (i, s), d in zip(enumerate(src_elems), dst_elems):
if s != d:
break
# Now i is the longest common prefix
result = '/'.join(['..'] * (len(src_elems) - i - 1) + dst_elems[i:])
if not result and not parsed_dst.fragment:
result = "."
# Don't forget the query part of the link
if parsed_dst.query:
result += "?" + parsed_dst.query
# Don't forget the fragment (anchor) part of the link
if parsed_dst.fragment:
result += "#" + parsed_dst.fragment
if not result:
raise ValueError("Failed to parse link: {0}".format((src, dst, i, src_elems, dst_elems)))
return result
def _make_renderfunc(self, t_data, fname=None):
"""Return a function that can be registered as a template shortcode.
The returned function has access to the passed template data and
accepts any number of positional and keyword arguments. Positional
        argument values are added as a tuple under the key ``_args`` to the
keyword argument dict and then the latter provides the template
context.
Global context keys are made available as part of the context,
respecting locale.
As a special quirk, the "data" key from global_context is
available only as "global_data" because of name clobbering.
"""
def render_shortcode(*args, **kw):
context = self.GLOBAL_CONTEXT.copy()
context.update(kw)
context['_args'] = args
context['lang'] = utils.LocaleBorg().current_lang
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
context[k] = context[k](context['lang'])
output = self.template_system.render_template_to_string(t_data, context)
if fname is not None:
dependencies = [fname] + self.template_system.get_deps(fname)
else:
dependencies = []
return output, dependencies
return render_shortcode
def _register_templated_shortcodes(self):
"""Register shortcodes based on templates.
This will register a shortcode for any template found in shortcodes/
folders and a generic "template" shortcode which will consider the
content in the shortcode as a template in itself.
"""
self.register_shortcode('template', self._template_shortcode_handler)
builtin_sc_dir = resource_filename(
'nikola',
os.path.join('data', 'shortcodes', utils.get_template_engine(self.THEMES)))
for sc_dir in [builtin_sc_dir, 'shortcodes']:
if not os.path.isdir(sc_dir):
continue
for fname in os.listdir(sc_dir):
name, ext = os.path.splitext(fname)
if ext != '.tmpl':
continue
with open(os.path.join(sc_dir, fname)) as fd:
self.register_shortcode(name, self._make_renderfunc(
fd.read(), os.path.join(sc_dir, fname)))
def _template_shortcode_handler(self, *args, **kw):
t_data = kw.pop('data', '')
context = self.GLOBAL_CONTEXT.copy()
context.update(kw)
context['_args'] = args
context['lang'] = utils.LocaleBorg().current_lang
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
context[k] = context[k](context['lang'])
output = self.template_system.render_template_to_string(t_data, context)
dependencies = self.template_system.get_string_deps(t_data)
return output, dependencies
def register_shortcode(self, name, f):
"""Register function f to handle shortcode "name"."""
if name in self.shortcode_registry:
utils.LOGGER.warning('Shortcode name conflict: {}', name)
return
self.shortcode_registry[name] = f
def apply_shortcodes(self, data, filename=None, lang=None, extra_context=None):
"""Apply shortcodes from the registry on data."""
if extra_context is None:
extra_context = {}
if lang is None:
lang = utils.LocaleBorg().current_lang
return shortcodes.apply_shortcodes(data, self.shortcode_registry, self, filename, lang=lang, extra_context=extra_context)
def apply_shortcodes_uuid(self, data, _shortcodes, filename=None, lang=None, extra_context=None):
"""Apply shortcodes from the registry on data."""
if lang is None:
lang = utils.LocaleBorg().current_lang
if extra_context is None:
extra_context = {}
deps = []
for k, v in _shortcodes.items():
replacement, _deps = shortcodes.apply_shortcodes(v, self.shortcode_registry, self, filename, lang=lang, extra_context=extra_context)
data = data.replace(k, replacement)
deps.extend(_deps)
return data, deps
def _get_rss_copyright(self, lang, rss_plain):
if rss_plain:
return (
self.config['RSS_COPYRIGHT_PLAIN'](lang) or
lxml.html.fromstring(self.config['RSS_COPYRIGHT'](lang)).text_content().strip())
else:
return self.config['RSS_COPYRIGHT'](lang)
def generic_rss_feed(self, lang, title, link, description, timeline,
rss_teasers, rss_plain, feed_length=10, feed_url=None,
enclosure=_enclosure, rss_links_append_query=None, copyright_=None):
"""Generate an ExtendedRSS2 feed object for later use."""
rss_obj = utils.ExtendedRSS2(
title=title,
link=utils.encodelink(link),
description=description,
lastBuildDate=datetime.datetime.utcnow(),
generator='Nikola (getnikola.com)',
language=lang
)
if copyright_ is None:
copyright_ = self._get_rss_copyright(lang, rss_plain)
# Use the configured or specified copyright string if present.
if copyright_:
rss_obj.copyright = copyright_
if feed_url:
absurl = '/' + feed_url[len(self.config['BASE_URL']):]
rss_obj.xsl_stylesheet_href = self.url_replacer(absurl, "/assets/xml/rss.xsl")
items = []
feed_append_query = None
if rss_links_append_query:
if rss_links_append_query is True:
raise ValueError("RSS_LINKS_APPEND_QUERY (or FEED_LINKS_APPEND_QUERY) cannot be True. Valid values are False or a formattable string.")
feed_append_query = rss_links_append_query.format(
feedRelUri='/' + feed_url[len(self.config['BASE_URL']):],
feedFormat="rss")
for post in timeline[:feed_length]:
data = post.text(lang, teaser_only=rss_teasers, strip_html=rss_plain,
feed_read_more_link=True, feed_links_append_query=feed_append_query)
if feed_url is not None and data:
# Massage the post's HTML (unless plain)
if not rss_plain:
if 'previewimage' in post.meta[lang] and post.meta[lang]['previewimage'] not in data:
data = "<figure><img src=\"{}\"></figure> {}".format(post.meta[lang]['previewimage'], data)
# FIXME: this is duplicated with code in Post.text()
try:
doc = lxml.html.document_fromstring(data)
doc.rewrite_links(lambda dst: self.url_replacer(post.permalink(), dst, lang, 'absolute'))
try:
body = doc.body
data = (body.text or '') + ''.join(
[lxml.html.tostring(child, encoding='unicode')
for child in body.iterchildren()])
except IndexError: # No body there, it happens sometimes
data = ''
except lxml.etree.ParserError as e:
if str(e) == "Document is empty":
data = ""
else: # let other errors raise
raise
args = {
'title': post.title(lang) if post.should_show_title() else None,
'link': post.permalink(lang, absolute=True, query=feed_append_query),
'description': data,
# PyRSS2Gen's pubDate is GMT time.
'pubDate': (post.date if post.date.tzinfo is None else
post.date.astimezone(dateutil.tz.tzutc())),
'categories': post._tags.get(lang, []),
'creator': post.author(lang),
'guid': post.guid(lang),
}
if post.author(lang):
rss_obj.rss_attrs["xmlns:dc"] = "http://purl.org/dc/elements/1.1/"
if enclosure:
# enclosure callback returns None if post has no enclosure, or a
# 3-tuple of (url, length (0 is valid), mimetype)
enclosure_details = enclosure(post=post, lang=lang)
if enclosure_details is not None:
args['enclosure'] = rss.Enclosure(*enclosure_details)
items.append(utils.ExtendedItem(**args))
rss_obj.items = items
rss_obj.self_url = feed_url
rss_obj.rss_attrs["xmlns:atom"] = "http://www.w3.org/2005/Atom"
return rss_obj
def generic_rss_renderer(self, lang, title, link, description, timeline, output_path,
rss_teasers, rss_plain, feed_length=10, feed_url=None,
enclosure=_enclosure, rss_links_append_query=None, copyright_=None):
"""Take all necessary data, and render a RSS feed in output_path."""
rss_obj = self.generic_rss_feed(lang, title, link, description, timeline,
rss_teasers, rss_plain, feed_length=feed_length, feed_url=feed_url,
enclosure=enclosure, rss_links_append_query=rss_links_append_query, copyright_=copyright_)
utils.rss_writer(rss_obj, output_path)
def path(self, kind, name, lang=None, is_link=False, **kwargs):
r"""Build the path to a certain kind of page.
These are mostly defined by plugins by registering via the
register_path_handler method, except for slug, post_path, root
and filename which are defined in this class' init method.
        Here are some of the others, for historical reasons:
* root (name is ignored)
* tag_index (name is ignored)
* tag (and name is the tag name)
* tag_rss (name is the tag name)
* category (and name is the category name)
* category_rss (and name is the category name)
* archive (and name is the year, or None for the main archive index)
* index (name is the number in index-number)
* rss (name is ignored)
* gallery (name is the gallery name)
* listing (name is the source code file name)
* post_path (name is 1st element in a POSTS/PAGES tuple)
* slug (name is the slug of a post or page)
* filename (name is the source filename of a post/page, in DEFAULT_LANG, relative to conf.py)
The returned value is either a path relative to output, like "categories/whatever.html", or
an absolute URL ("https://getnikola.com/"), if path handler returns a string.
If is_link is True, the path is absolute and uses "/" as separator
(ex: "/archive/index.html").
If is_link is False, the path is relative to output and uses the
platform's separator.
(ex: "archive\index.html")
        If the registered path handler returns a string instead of a path component list,
        it is considered to be an absolute URL and returned as is.
"""
if lang is None:
lang = utils.LocaleBorg().current_lang
try:
path = self.path_handlers[kind](name, lang, **kwargs)
except KeyError:
utils.LOGGER.warning("Unknown path request of kind: {0}".format(kind))
return ""
        # If the path handler returns a string, we consider it to be an absolute URL not requiring any
        # further processing, i.e. 'https://getnikola.com/'. See Issue #2876.
if isinstance(path, str):
return path
if path is None:
path = "#"
else:
path = [os.path.normpath(p) for p in path if p != '.'] # Fix Issue #1028
if is_link:
link = '/' + ('/'.join(path))
index_len = len(self.config['INDEX_FILE'])
if self.config['STRIP_INDEXES'] and \
link[-(1 + index_len):] == '/' + self.config['INDEX_FILE']:
return link[:-index_len]
else:
return link
else:
return os.path.join(*path)
def post_path(self, name, lang):
"""Link to the destination of an element in the POSTS/PAGES settings.
Example:
link://post_path/posts => /blog
"""
return [_f for _f in [self.config['TRANSLATIONS'][lang],
os.path.dirname(name),
self.config['INDEX_FILE']] if _f]
def root_path(self, name, lang):
"""Link to the current language's root.
Example:
link://root_path => /
link://root_path => /translations/spanish/
"""
d = self.config['TRANSLATIONS'][lang]
if d:
return [d, '']
else:
return []
def slug_path(self, name, lang):
"""Return a link to a post with given slug, if not ambiguous.
Example:
link://slug/yellow-camaro => /posts/cars/awful/yellow-camaro/index.html
"""
results = [p for p in self.timeline if p.meta('slug') == name]
if not results:
utils.LOGGER.warning("Cannot resolve path request for slug: {0}".format(name))
else:
if len(results) > 1:
utils.LOGGER.warning('Ambiguous path request for slug: {0}'.format(name))
return [_f for _f in results[0].permalink(lang).split('/')]
def filename_path(self, name, lang):
"""Link to post or page by source filename.
Example:
link://filename/manual.txt => /docs/handbook.html
"""
results = [p for p in self.timeline if p.source_path == name]
if not results:
utils.LOGGER.warning("Cannot resolve path request for filename: {0}".format(name))
else:
if len(results) > 1:
utils.LOGGER.error("Ambiguous path request for filename: {0}".format(name))
return [_f for _f in results[0].permalink(lang).split('/') if _f]
def register_path_handler(self, kind, f):
"""Register a path handler."""
if kind in self.path_handlers:
utils.LOGGER.warning('Conflicting path handlers for kind: {0}'.format(kind))
else:
self.path_handlers[kind] = f
def link(self, *args, **kwargs):
"""Create a link."""
url = self.path(*args, is_link=True, **kwargs)
url = utils.encodelink(url)
return url
def abs_link(self, dst, protocol_relative=False):
"""Get an absolute link."""
# Normalize
if dst: # Mako templates and empty strings evaluate to False
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
else:
dst = self.config['BASE_URL']
url = urlparse(dst).geturl()
if protocol_relative:
url = url.split(":", 1)[1]
url = utils.encodelink(url)
return url
def rel_link(self, src, dst):
"""Get a relative link."""
# Normalize
src = urljoin(self.config['BASE_URL'], src)
dst = urljoin(src, dst)
# Avoid empty links.
if src == dst:
return "#"
# Check that link can be made relative, otherwise return dest
parsed_src = urlsplit(src)
parsed_dst = urlsplit(dst)
if parsed_src[:2] != parsed_dst[:2]:
return utils.encodelink(dst)
# Now both paths are on the same site and absolute
src_elems = parsed_src.path.split('/')[1:]
dst_elems = parsed_dst.path.split('/')[1:]
i = 0
for (i, s), d in zip(enumerate(src_elems), dst_elems):
if s != d:
break
else:
i += 1
# Now i is the longest common prefix
url = '/'.join(['..'] * (len(src_elems) - i - 1) + dst_elems[i:])
url = utils.encodelink(url)
return url
def register_filter(self, filter_name, filter_definition):
"""Register a filter.
filter_name should be a name not confusable with an actual
executable. filter_definition should be a callable accepting
one argument (the filename).
"""
if filter_name in self.filters:
utils.LOGGER.warning('''The filter "{0}" is defined more than once.'''.format(filter_name))
self.filters[filter_name] = filter_definition
def file_exists(self, path, not_empty=False):
"""Check if the file exists. If not_empty is True, it also must not be empty."""
exists = os.path.exists(path)
if exists and not_empty:
exists = os.stat(path).st_size > 0
return exists
def clean_task_paths(self, task):
"""Normalize target paths in the task."""
targets = task.get('targets', None)
if targets is not None:
task['targets'] = [os.path.normpath(t) for t in targets]
return task
def gen_tasks(self, name, plugin_category, doc=''):
"""Generate tasks."""
def flatten(task):
"""Flatten lists of tasks."""
if isinstance(task, dict):
yield task
else:
for t in task:
for ft in flatten(t):
yield ft
task_dep = []
for pluginInfo in self.plugin_manager.getPluginsOfCategory(plugin_category):
for task in flatten(pluginInfo.plugin_object.gen_tasks()):
if 'basename' not in task:
raise ValueError("Task {0} does not have a basename".format(task))
task = self.clean_task_paths(task)
if 'task_dep' not in task:
task['task_dep'] = []
task['task_dep'].extend(self.injected_deps[task['basename']])
yield task
for multi in self.plugin_manager.getPluginsOfCategory("TaskMultiplier"):
flag = False
for task in multi.plugin_object.process(task, name):
flag = True
yield self.clean_task_paths(task)
if flag:
task_dep.append('{0}_{1}'.format(name, multi.plugin_object.name))
if pluginInfo.plugin_object.is_default:
task_dep.append(pluginInfo.plugin_object.name)
yield {
'basename': name,
'doc': doc,
'actions': None,
'clean': True,
'task_dep': task_dep
}
def parse_category_name(self, category_name):
"""Parse a category name into a hierarchy."""
if self.config['CATEGORY_ALLOW_HIERARCHIES']:
try:
return hierarchy_utils.parse_escaped_hierarchical_category_name(category_name)
except Exception as e:
utils.LOGGER.error(str(e))
sys.exit(1)
else:
return [category_name] if len(category_name) > 0 else []
def category_path_to_category_name(self, category_path):
"""Translate a category path to a category name."""
if self.config['CATEGORY_ALLOW_HIERARCHIES']:
return hierarchy_utils.join_hierarchical_category_path(category_path)
else:
return ''.join(category_path)
def _add_post_to_category(self, post, category_name):
"""Add a post to a category."""
category_path = self.parse_category_name(category_name)
current_path = []
current_subtree = self.category_hierarchy
for current in category_path:
current_path.append(current)
if current not in current_subtree:
current_subtree[current] = {}
current_subtree = current_subtree[current]
self.posts_per_category[self.category_path_to_category_name(current_path)].append(post)
def _sort_category_hierarchy(self):
"""Sort category hierarchy."""
# First create a hierarchy of TreeNodes
self.category_hierarchy_lookup = {}
def create_hierarchy(cat_hierarchy, parent=None):
"""Create category hierarchy."""
result = []
for name, children in cat_hierarchy.items():
node = hierarchy_utils.TreeNode(name, parent)
node.children = create_hierarchy(children, node)
node.category_path = [pn.name for pn in node.get_path()]
node.category_name = self.category_path_to_category_name(node.category_path)
self.category_hierarchy_lookup[node.category_name] = node
if node.category_name not in self.config.get('HIDDEN_CATEGORIES'):
result.append(node)
return natsort.natsorted(result, key=lambda e: e.name, alg=natsort.ns.F | natsort.ns.IC)
root_list = create_hierarchy(self.category_hierarchy)
# Next, flatten the hierarchy
self.category_hierarchy = hierarchy_utils.flatten_tree_structure(root_list)
@staticmethod
def sort_posts_chronologically(posts, lang=None):
"""Sort a list of posts chronologically.
This function also takes priority, title and source path into account.
"""
# Last tie breaker: sort by source path (A-Z)
posts = sorted(posts, key=lambda p: p.source_path)
# Next tie breaker: sort by title if language is given (A-Z)
if lang is not None:
posts = natsort.natsorted(posts, key=lambda p: p.title(lang), alg=natsort.ns.F | natsort.ns.IC)
# Next tie breaker: sort by date (reverse chronological order)
posts = sorted(posts, key=lambda p: p.date, reverse=True)
# Finally, sort by priority meta value (descending)
posts = sorted(posts, key=lambda p: int(p.meta('priority')) if p.meta('priority') else 0, reverse=True)
# Return result
return posts
def scan_posts(self, really=False, ignore_quit=False, quiet=False):
"""Scan all the posts.
The `quiet` option is ignored.
"""
if self._scanned and not really:
return
# Reset things
self.posts = []
self.all_posts = []
self.posts_per_year = defaultdict(list)
self.posts_per_month = defaultdict(list)
self.posts_per_tag = defaultdict(list)
self.posts_per_category = defaultdict(list)
self.tags_per_language = defaultdict(list)
self.category_hierarchy = {}
self.post_per_file = {}
self.post_per_input_file = {}
self.timeline = []
self.pages = []
for p in sorted(self.plugin_manager.getPluginsOfCategory('PostScanner'), key=operator.attrgetter('name')):
try:
timeline = p.plugin_object.scan()
except Exception:
utils.LOGGER.error('Error reading timeline')
raise
# FIXME: can there be conflicts here?
self.timeline.extend(timeline)
quit = False
# Classify posts per year/tag/month/whatever
slugged_tags = defaultdict(set)
for post in self.timeline:
if post.use_in_feeds:
self.posts.append(post)
self.posts_per_year[str(post.date.year)].append(post)
self.posts_per_month[
'{0}/{1:02d}'.format(post.date.year, post.date.month)].append(post)
for lang in self.config['TRANSLATIONS'].keys():
for tag in post.tags_for_language(lang):
_tag_slugified = utils.slugify(tag, lang)
slugged_tags[lang].add(_tag_slugified)
if post not in self.posts_per_tag[tag]:
self.posts_per_tag[tag].append(post)
self.tags_per_language[lang].extend(post.tags_for_language(lang))
self._add_post_to_category(post, post.meta('category'))
if post.is_post:
                # all_posts also includes unpublished posts
self.all_posts.append(post)
else:
self.pages.append(post)
for lang in self.config['TRANSLATIONS'].keys():
dest = post.destination_path(lang=lang)
src_dest = post.destination_path(lang=lang, extension=post.source_ext())
src_file = post.translated_source_path(lang=lang)
if dest in self.post_per_file:
utils.LOGGER.error('Two posts are trying to generate {0}: {1} and {2}'.format(
dest,
self.post_per_file[dest].source_path,
post.source_path))
quit = True
if (src_dest in self.post_per_file) and self.config['COPY_SOURCES']:
utils.LOGGER.error('Two posts are trying to generate {0}: {1} and {2}'.format(
src_dest,
self.post_per_file[dest].source_path,
post.source_path))
quit = True
self.post_per_file[dest] = post
self.post_per_file[src_dest] = post
if src_file is not None:
self.post_per_input_file[src_file] = post
# deduplicate tags_per_language
self.tags_per_language[lang] = list(set(self.tags_per_language[lang]))
# Sort everything.
self.timeline = self.sort_posts_chronologically(self.timeline)
self.posts = self.sort_posts_chronologically(self.posts)
self.all_posts = self.sort_posts_chronologically(self.all_posts)
self.pages = self.sort_posts_chronologically(self.pages)
self._sort_category_hierarchy()
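        # self.posts is sorted newest first, so posts[i] is newer than posts[i + 1]:
        # next_post points to the newer neighbour, prev_post to the older one.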
for i, p in enumerate(self.posts[1:]):
p.next_post = self.posts[i]
for i, p in enumerate(self.posts[:-1]):
p.prev_post = self.posts[i + 1]
self._scanned = True
if not self.quiet:
print("done!", file=sys.stderr)
if quit and not ignore_quit:
sys.exit(1)
signal('scanned').send(self)
def generic_renderer(self, lang, output_name, template_name, filters, file_deps=None, uptodate_deps=None, context=None, context_deps_remove=None, post_deps_dict=None, url_type=None, is_fragment=False):
"""Create tasks for rendering pages and post lists and other related pages.
lang is the current language.
output_name is the destination file name.
template_name is the template to be used.
filters is the list of filters (usually site.config['FILTERS']) which will be used to post-process the result.
file_deps (optional) is a list of additional file dependencies (next to template and its dependencies).
uptodate_deps (optional) is a list of additional entries added to the task's uptodate list.
context (optional) a dict used as a basis for the template context. The lang parameter will always be added.
context_deps_remove (optional) is a list of keys to remove from the context after using it as an uptodate dependency. This should name all keys containing non-trivial Python objects; they can be replaced by adding JSON-style dicts in post_deps_dict.
post_deps_dict (optional) is a dict merged into the copy of context which is used as an uptodate dependency.
        url_type (optional) allows overriding the ``URL_TYPE`` configuration.
        is_fragment (optional) allows writing an HTML fragment instead of a full HTML document.
"""
utils.LocaleBorg().set_locale(lang)
file_deps = copy(file_deps) if file_deps else []
file_deps += self.template_system.template_deps(template_name)
file_deps = sorted(list(filter(None, file_deps)))
context = copy(context) if context else {}
context["lang"] = lang
deps_dict = copy(context)
if context_deps_remove:
for key in context_deps_remove:
deps_dict.pop(key)
deps_dict['OUTPUT_FOLDER'] = self.config['OUTPUT_FOLDER']
deps_dict['TRANSLATIONS'] = self.config['TRANSLATIONS']
deps_dict['global'] = self.GLOBAL_CONTEXT
deps_dict['all_page_deps'] = self.ALL_PAGE_DEPS
if post_deps_dict:
deps_dict.update(post_deps_dict)
for k, v in self.GLOBAL_CONTEXT['template_hooks'].items():
deps_dict['||template_hooks|{0}||'.format(k)] = v.calculate_deps()
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
deps_dict[k] = deps_dict['global'][k](lang)
for k in self._ALL_PAGE_DEPS_TRANSLATABLE:
deps_dict[k] = deps_dict['all_page_deps'][k](lang)
deps_dict['navigation_links'] = deps_dict['global']['navigation_links'](lang)
deps_dict['navigation_alt_links'] = deps_dict['global']['navigation_alt_links'](lang)
task = {
'name': os.path.normpath(output_name),
'targets': [output_name],
'file_dep': file_deps,
'actions': [(self.render_template, [template_name, output_name,
context, url_type, is_fragment])],
'clean': True,
'uptodate': [config_changed(deps_dict, 'nikola.nikola.Nikola.generic_renderer')] + ([] if uptodate_deps is None else uptodate_deps)
}
return utils.apply_filters(task, filters)
def generic_page_renderer(self, lang, post, filters, context=None):
"""Render post fragments to final HTML pages."""
extension = post.compiler.extension()
output_name = os.path.join(self.config['OUTPUT_FOLDER'],
post.destination_path(lang, extension))
deps = post.deps(lang)
uptodate_deps = post.deps_uptodate(lang)
deps.extend(utils.get_asset_path(x, self.THEMES) for x in ('bundles', 'parent', 'engine'))
_theme_ini = utils.get_asset_path(self.config['THEME'] + '.theme', self.THEMES)
if _theme_ini:
deps.append(_theme_ini)
context = copy(context) if context else {}
context['post'] = post
context['title'] = post.title(lang)
context['description'] = post.description(lang)
context['permalink'] = post.permalink(lang)
if 'crumbs' not in context:
crumb_path = post.permalink(lang).lstrip('/')
if crumb_path.endswith(self.config['INDEX_FILE']):
crumb_path = crumb_path[:-len(self.config['INDEX_FILE'])]
if crumb_path.endswith('/'):
context['crumbs'] = utils.get_crumbs(crumb_path.rstrip('/'), is_file=False)
else:
context['crumbs'] = utils.get_crumbs(crumb_path, is_file=True)
if 'pagekind' not in context:
context['pagekind'] = ['generic_page']
if post.use_in_feeds:
context['enable_comments'] = True
else:
context['enable_comments'] = self.config['COMMENTS_IN_PAGES']
deps_dict = {}
if post.prev_post:
deps_dict['PREV_LINK'] = [post.prev_post.permalink(lang)]
if post.next_post:
deps_dict['NEXT_LINK'] = [post.next_post.permalink(lang)]
deps_dict['comments'] = context['enable_comments']
if post:
deps_dict['post_translations'] = post.translated_to
signal('render_post').send({
'site': self,
'post': post,
'lang': lang,
'context': context,
'deps_dict': deps_dict,
})
yield self.generic_renderer(lang, output_name, post.template_name, filters,
file_deps=deps,
uptodate_deps=uptodate_deps,
context=context,
context_deps_remove=['post'],
post_deps_dict=deps_dict,
url_type=post.url_type)
def generic_post_list_renderer(self, lang, posts, output_name, template_name, filters, extra_context):
"""Render pages with lists of posts."""
deps = []
uptodate_deps = []
for post in posts:
deps += post.deps(lang)
uptodate_deps += post.deps_uptodate(lang)
context = {}
context["posts"] = posts
context["title"] = self.config['BLOG_TITLE'](lang)
context["description"] = self.config['BLOG_DESCRIPTION'](lang)
context["prevlink"] = None
context["nextlink"] = None
if extra_context:
context.update(extra_context)
if 'has_other_languages' not in context:
context['has_other_languages'] = False
post_deps_dict = {}
post_deps_dict["posts"] = [(p.meta[lang]['title'], p.permalink(lang)) for p in posts]
return self.generic_renderer(lang, output_name, template_name, filters,
file_deps=deps,
uptodate_deps=uptodate_deps,
context=context,
post_deps_dict=post_deps_dict)
def atom_feed_renderer(self, lang, posts, output_path, filters,
extra_context):
"""Render Atom feeds and archives with lists of posts.
Feeds are considered archives when no future updates to them are expected.
"""
def atom_link(link_rel, link_type, link_href):
link = lxml.etree.Element("link")
link.set("rel", link_rel)
link.set("type", link_type)
link.set("href", utils.encodelink(link_href))
return link
utils.LocaleBorg().set_locale(lang)
deps = []
uptodate_deps = []
for post in posts:
deps += post.deps(lang)
uptodate_deps += post.deps_uptodate(lang)
context = {}
blog_title = self.config['BLOG_TITLE'](lang)
context["posts"] = posts
context["title"] = blog_title
context["description"] = self.config['BLOG_DESCRIPTION'](lang)
context["lang"] = lang
context.update(extra_context)
context["title"] = "{0} ({1})".format(blog_title, context["title"]) if blog_title != context["title"] else blog_title
deps_context = copy(context)
deps_context["posts"] = [(p.meta[lang]['title'], p.permalink(lang)) for p in
posts]
deps_context["global"] = self.GLOBAL_CONTEXT
deps_context["all_page_deps"] = self.ALL_PAGE_DEPS
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
deps_context[k] = deps_context['global'][k](lang)
for k in self._ALL_PAGE_DEPS_TRANSLATABLE:
deps_context[k] = deps_context['all_page_deps'][k](lang)
feed_xsl_link = self.abs_link("/assets/xml/atom.xsl")
feed_root = lxml.etree.Element("feed")
feed_root.addprevious(lxml.etree.ProcessingInstruction(
"xml-stylesheet",
'href="' + utils.encodelink(feed_xsl_link) + '" type="text/xsl media="all"'))
feed_root.set("{http://www.w3.org/XML/1998/namespace}lang", lang)
feed_root.set("xmlns", "http://www.w3.org/2005/Atom")
feed_title = lxml.etree.SubElement(feed_root, "title")
feed_title.text = context["title"]
feed_id = lxml.etree.SubElement(feed_root, "id")
feed_id.text = self.abs_link(context["feedlink"])
feed_updated = lxml.etree.SubElement(feed_root, "updated")
feed_updated.text = utils.LocaleBorg().formatted_date('webiso', datetime.datetime.now(tz=dateutil.tz.tzutc()))
feed_author = lxml.etree.SubElement(feed_root, "author")
feed_author_name = lxml.etree.SubElement(feed_author, "name")
feed_author_name.text = self.config["BLOG_AUTHOR"](lang)
feed_root.append(atom_link("self", "application/atom+xml",
self.abs_link(context["feedlink"])))
feed_root.append(atom_link("alternate", "text/html",
self.abs_link(context["permalink"])))
feed_generator = lxml.etree.SubElement(feed_root, "generator")
feed_generator.set("uri", "https://getnikola.com/")
feed_generator.text = "Nikola"
feed_append_query = None
if self.config["FEED_LINKS_APPEND_QUERY"]:
feed_append_query = self.config["FEED_LINKS_APPEND_QUERY"].format(
feedRelUri=context["feedlink"],
feedFormat="atom")
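# For example, a conf.py value such as
# FEED_LINKS_APPEND_QUERY = "utm_source={feedRelUri}&utm_medium=feed&utm_campaign={feedFormat}"
# (illustrative) would be expanded here with the feed's relative URI and "atom".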
def atom_post_text(post, text):
if not self.config["FEED_PLAIN"]:
if 'previewimage' in post.meta[lang] and post.meta[lang]['previewimage'] not in text:
text = "<figure><img src=\"{}\"></figure> {}".format(post.meta[lang]['previewimage'], text)
# FIXME: this is duplicated with code in Post.text() and generic_rss_renderer
try:
doc = lxml.html.document_fromstring(text)
doc.rewrite_links(lambda dst: self.url_replacer(post.permalink(lang), dst, lang, 'absolute'))
try:
body = doc.body
text = (body.text or '') + ''.join(
[lxml.html.tostring(child, encoding='unicode')
for child in body.iterchildren()])
except IndexError: # No body there, it happens sometimes
text = ''
except lxml.etree.ParserError as e:
if str(e) == "Document is empty":
text = ""
else: # let other errors raise
raise
return text.strip()
for post in posts:
summary = atom_post_text(post, post.text(lang, teaser_only=True,
strip_html=self.config["FEED_PLAIN"],
feed_read_more_link=True,
feed_links_append_query=feed_append_query))
content = None
if not self.config["FEED_TEASERS"]:
content = atom_post_text(post, post.text(lang, teaser_only=self.config["FEED_TEASERS"],
strip_html=self.config["FEED_PLAIN"],
feed_read_more_link=True,
feed_links_append_query=feed_append_query))
entry_root = lxml.etree.SubElement(feed_root, "entry")
entry_title = lxml.etree.SubElement(entry_root, "title")
entry_title.text = post.title(lang)
entry_id = lxml.etree.SubElement(entry_root, "id")
entry_id.text = post.permalink(lang, absolute=True)
entry_updated = lxml.etree.SubElement(entry_root, "updated")
entry_updated.text = post.formatted_updated('webiso')
entry_published = lxml.etree.SubElement(entry_root, "published")
entry_published.text = post.formatted_date('webiso')
entry_author = lxml.etree.SubElement(entry_root, "author")
entry_author_name = lxml.etree.SubElement(entry_author, "name")
entry_author_name.text = post.author(lang)
entry_root.append(atom_link("alternate", "text/html",
post.permalink(lang, absolute=True,
query=feed_append_query)))
entry_summary = lxml.etree.SubElement(entry_root, "summary")
if not self.config["FEED_PLAIN"]:
entry_summary.set("type", "html")
else:
entry_summary.set("type", "text")
entry_summary.text = summary
if content:
entry_content = lxml.etree.SubElement(entry_root, "content")
if not self.config["FEED_PLAIN"]:
entry_content.set("type", "html")
else:
entry_content.set("type", "text")
entry_content.text = content
for category in post.tags_for_language(lang):
entry_category = lxml.etree.SubElement(entry_root, "category")
entry_category.set("term", utils.slugify(category, lang))
entry_category.set("label", category)
dst_dir = os.path.dirname(output_path)
utils.makedirs(dst_dir)
with io.open(output_path, "w+", encoding="utf-8") as atom_file:
data = lxml.etree.tostring(feed_root.getroottree(), encoding="UTF-8", pretty_print=True, xml_declaration=True)
if isinstance(data, bytes):
data = data.decode('utf-8')
atom_file.write(data)
def generic_index_renderer(self, lang, posts, indexes_title, template_name, context_source, kw, basename, page_link, page_path, additional_dependencies=None):
"""Create an index page.
lang: The language
posts: A list of posts
indexes_title: Title
template_name: Name of template file
context_source: This will be copied and extended and used as every
page's context
kw: An extended version will be used for uptodate dependencies
basename: Basename for task
page_link: A function accepting an index i, the displayed page number,
the number of pages, and a boolean force_addition,
which creates a link to the i-th page (where i ranges
between 0 and num_pages-1). The displayed page (between 1
and num_pages) is the number (optionally) displayed as
'page %d' on the rendered page. If force_addition is True,
the suffix (inserting '-%d' etc.) should be appended even for
i == 0.
page_path: A function accepting an index i, the displayed page number,
the number of pages, and a boolean force_addition,
which creates a path to the i-th page. All arguments are
as the ones for page_link.
additional_dependencies: a list of dependencies which will be added
to task['uptodate']
Note: if context['featured'] is present, it must be a list of posts,
whose dependencies will be added to task['uptodate'].
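An illustrative pair of callbacks with the expected signatures (the URL
scheme is hypothetical; real plugins usually build these from self.link and
self.path):
    def page_link(i, displayed_i, num_pages, force_addition, extension=None):
        suffix = '-{0}'.format(displayed_i) if (i > 0 or force_addition) else ''
        return '/blog/index{0}{1}'.format(suffix, extension or '.html')
    def page_path(i, displayed_i, num_pages, force_addition):
        return page_link(i, displayed_i, num_pages, force_addition).lstrip('/')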
"""
# Update kw
kw = kw.copy()
kw["tag_pages_are_indexes"] = self.config['TAG_PAGES_ARE_INDEXES']
kw["index_display_post_count"] = self.config['INDEX_DISPLAY_POST_COUNT']
kw["index_teasers"] = self.config['INDEX_TEASERS']
kw["indexes_pages"] = self.config['INDEXES_PAGES'](lang)
kw["indexes_pages_main"] = self.config['INDEXES_PAGES_MAIN']
kw["indexes_static"] = self.config['INDEXES_STATIC']
kw['indexes_pretty_page_url'] = self.config["INDEXES_PRETTY_PAGE_URL"]
kw['show_index_page_navigation'] = self.config['SHOW_INDEX_PAGE_NAVIGATION']
if additional_dependencies is None:
additional_dependencies = []
# Split in smaller lists
lists = []
if kw["indexes_static"]:
lists.append(posts[:kw["index_display_post_count"]])
posts = posts[kw["index_display_post_count"]:]
while posts:
lists.append(posts[-kw["index_display_post_count"]:])
posts = posts[:-kw["index_display_post_count"]]
else:
while posts:
lists.append(posts[:kw["index_display_post_count"]])
posts = posts[kw["index_display_post_count"]:]
if not lists:
lists.append([])
num_pages = len(lists)
displayed_page_numbers = [utils.get_displayed_page_number(i, num_pages, self) for i in range(num_pages)]
page_links = [page_link(i, page_number, num_pages, False) for i, page_number in enumerate(displayed_page_numbers)]
if kw['show_index_page_navigation']:
# Since the list displayed_page_numbers is not necessarily
# sorted -- in case INDEXES_STATIC is True, it is of the
# form [num_pages, 1, 2, ..., num_pages - 1] -- we order it
# via a map. This avoids replicating the logic of
# utils.get_displayed_page_number() here.
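# For example, with num_pages == 3 and INDEXES_STATIC, displayed_page_numbers is
# [3, 1, 2]; temp_map then maps 0, 1 and 2 to the links for pages 1, 2 and 3, so
# page_links_context ends up ordered by displayed page number.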
if not kw["indexes_pages_main"] and not kw["indexes_static"]:
temp_map = {page_number: link for page_number, link in zip(displayed_page_numbers, page_links)}
else:
temp_map = {page_number - 1: link for page_number, link in zip(displayed_page_numbers, page_links)}
page_links_context = [temp_map[i] for i in range(num_pages)]
for i, post_list in enumerate(lists):
context = context_source.copy()
if 'pagekind' not in context:
context['pagekind'] = ['index']
if 'has_other_languages' not in context:
context['has_other_languages'] = False
ipages_i = displayed_page_numbers[i]
if kw["indexes_pages"]:
indexes_pages = kw["indexes_pages"] % ipages_i
else:
if kw["indexes_pages_main"]:
ipages_msg = "page %d"
else:
ipages_msg = "old posts, page %d"
indexes_pages = " (" + \
kw["messages"][lang][ipages_msg] % ipages_i + ")"
if i > 0 or kw["indexes_pages_main"]:
context["title"] = indexes_title + indexes_pages
else:
context["title"] = indexes_title
context["prevlink"] = None
context["nextlink"] = None
context['index_teasers'] = kw['index_teasers']
prevlink = None
nextlink = None
if kw["indexes_static"]:
if i > 0:
if i < num_pages - 1:
prevlink = i + 1
elif i == num_pages - 1:
prevlink = 0
if num_pages > 1:
if i > 1:
nextlink = i - 1
elif i == 0:
nextlink = num_pages - 1
else:
if i >= 1:
prevlink = i - 1
if i < num_pages - 1:
nextlink = i + 1
if prevlink is not None:
context["prevlink"] = page_links[prevlink]
context["prevfeedlink"] = page_link(prevlink, displayed_page_numbers[prevlink],
num_pages, False, extension=".atom")
if nextlink is not None:
context["nextlink"] = page_links[nextlink]
context["nextfeedlink"] = page_link(nextlink, displayed_page_numbers[nextlink],
num_pages, False, extension=".atom")
context['show_index_page_navigation'] = kw['show_index_page_navigation']
if kw['show_index_page_navigation']:
context['page_links'] = page_links_context
if not kw["indexes_pages_main"] and not kw["indexes_static"]:
context['current_page'] = ipages_i
else:
context['current_page'] = ipages_i - 1
context['prev_next_links_reversed'] = kw['indexes_static']
context["permalink"] = page_links[i]
context["is_frontmost_index"] = i == 0
# Add dependencies to featured posts
if 'featured' in context:
for post in context['featured']:
additional_dependencies += post.deps_uptodate(lang)
output_name = os.path.join(kw['output_folder'], page_path(i, ipages_i, num_pages, False))
task = self.generic_post_list_renderer(
lang,
post_list,
output_name,
template_name,
kw['filters'],
context,
)
task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.nikola.Nikola.generic_index_renderer')] + additional_dependencies
task['basename'] = basename
yield task
if kw["indexes_pages_main"] and kw['indexes_pretty_page_url'](lang):
# create redirection
output_name = os.path.join(kw['output_folder'], page_path(0, displayed_page_numbers[0], num_pages, True))
link = page_links[0]
yield utils.apply_filters({
'basename': basename,
'name': output_name,
'targets': [output_name],
'actions': [(utils.create_redirect, (output_name, link))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.nikola.Nikola.generic_index_renderer')],
}, kw["filters"])
def generic_atom_renderer(self, lang, posts, context_source, kw, basename, classification, kind, additional_dependencies=None):
"""Create an Atom feed.
lang: The language
posts: A list of posts
context_source: This will be copied and extended and used as every
page's context
kw: An extended version will be used for uptodate dependencies
basename: Basename for task
classification: name of current classification (used to generate links)
kind: classification kind (used to generate links)
additional_dependencies: a list of dependencies which will be added
to task['uptodate']
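A sketch of a typical call from a taxonomy-style plugin (the classification
value 'python' and the exact kw contents are hypothetical; kw must at least
carry 'output_folder' and 'filters'):
    yield from site.generic_atom_renderer(
        'en', posts, context, kw, 'render_tag_atom', 'python', 'tag')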
"""
# Update kw
kw = kw.copy()
kw["feed_length"] = self.config['FEED_LENGTH']
kw['generate_atom'] = self.config["GENERATE_ATOM"]
kw['feed_links_append_query'] = self.config["FEED_LINKS_APPEND_QUERY"]
kw['feed_teasers'] = self.config['FEED_TEASERS']
kw['feed_plain'] = self.config['FEED_PLAIN']
if additional_dependencies is None:
additional_dependencies = []
post_list = posts[:kw["feed_length"]]
feedlink = self.link(kind + "_atom", classification, lang)
feedpath = self.path(kind + "_atom", classification, lang)
context = context_source.copy()
if 'has_other_languages' not in context:
context['has_other_languages'] = False
output_name = os.path.join(kw['output_folder'], feedpath)
context["feedlink"] = feedlink
task = {
"basename": basename,
"name": output_name,
"file_dep": sorted([_.base_path for _ in post_list]),
"task_dep": ['render_posts'],
"targets": [output_name],
"actions": [(self.atom_feed_renderer,
(lang,
post_list,
output_name,
kw['filters'],
context,))],
"clean": True,
"uptodate": [utils.config_changed(kw, 'nikola.nikola.Nikola.atom_feed_renderer')] + additional_dependencies
}
yield utils.apply_filters(task, kw['filters'])
def __repr__(self):
"""Representation of a Nikola site."""
return '<Nikola Site: {0!r}>'.format(self.config['BLOG_TITLE'](self.config['DEFAULT_LANG']))
| 45.39556 | 259 | 0.577136 |
import datetime
import io
import json
import functools
import logging
import operator
import os
import sys
import mimetypes
from collections import defaultdict
from copy import copy
from urllib.parse import urlparse, urlsplit, urlunsplit, urljoin, unquote, parse_qs
import dateutil.tz
import lxml.etree
import lxml.html
import natsort
import PyRSS2Gen as rss
from pkg_resources import resource_filename
from blinker import signal
from yapsy.PluginManager import PluginManager
from . import DEBUG, SHOW_TRACEBACKS, filters, utils, hierarchy_utils, shortcodes
from . import metadata_extractors
from .metadata_extractors import default_metadata_extractors_by
from .post import Post
from .plugin_categories import (
Command,
LateTask,
PageCompiler,
CompilerExtension,
MarkdownExtension,
RestExtension,
MetadataExtractor,
ShortcodePlugin,
Task,
TaskMultiplier,
TemplateSystem,
SignalHandler,
ConfigPlugin,
PostScanner,
Taxonomy,
)
from .state import Persistor
try:
import pyphen
except ImportError:
pyphen = None
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
DEFAULT_INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
DEFAULT_FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
config_changed = utils.config_changed
__all__ = ('Nikola',)
LEGAL_VALUES = {
'DEFAULT_THEME': 'bootblog4',
'COMMENT_SYSTEM': [
'disqus',
'facebook',
'intensedebate',
'isso',
'muut',
'commento',
],
'TRANSLATIONS': {
'af': 'Afrikaans',
'ar': 'Arabic',
'az': 'Azerbaijani',
'bg': 'Bulgarian',
'bs': 'Bosnian',
'ca': 'Catalan',
('cs', 'cz'): 'Czech',
'da': 'Danish',
'de': 'German',
('el', '!gr'): 'Greek',
'en': 'English',
'eo': 'Esperanto',
'es': 'Spanish',
'et': 'Estonian',
'eu': 'Basque',
'fa': 'Persian',
'fi': 'Finnish',
'fr': 'French',
'fur': 'Friulian',
'gl': 'Galician',
'he': 'Hebrew',
'hi': 'Hindi',
'hr': 'Croatian',
'hu': 'Hungarian',
'ia': 'Interlingua',
'id': 'Indonesian',
'it': 'Italian',
('ja', '!jp'): 'Japanese',
'ko': 'Korean',
'lt': 'Lithuanian',
'ml': 'Malayalam',
'mr': 'Marathi',
'nb': 'Norwegian (Bokmål)',
'nl': 'Dutch',
'pa': 'Punjabi',
'pl': 'Polish',
'pt': 'Portuguese',
'pt_br': 'Portuguese (Brazil)',
'ru': 'Russian',
'sk': 'Slovak',
'sl': 'Slovene',
'sq': 'Albanian',
'sr': 'Serbian (Cyrillic)',
'sr_latin': 'Serbian (Latin)',
'sv': 'Swedish',
'te': 'Telugu',
'th': 'Thai',
('tr', '!tr_TR'): 'Turkish',
'uk': 'Ukrainian',
'ur': 'Urdu',
'vi': 'Vietnamese',
'zh_cn': 'Chinese (Simplified)',
'zh_tw': 'Chinese (Traditional)'
},
'_TRANSLATIONS_WITH_COUNTRY_SPECIFIERS': {
},
'LOCALES_BASE': {
'sr_latin': 'sr_Latn',
},
'RTL_LANGUAGES': ('ar', 'fa', 'he', 'ur'),
'LUXON_LOCALES': defaultdict(lambda: 'en', **{
'af': 'af',
'ar': 'ar',
'az': 'az',
'bg': 'bg',
'bn': 'bn',
'bs': 'bs',
'ca': 'ca',
'cs': 'cs',
'cz': 'cs',
'da': 'da',
'de': 'de',
'el': 'el',
'en': 'en',
'eo': 'eo',
'es': 'es',
'et': 'et',
'eu': 'eu',
'fa': 'fa',
'fi': 'fi',
'fr': 'fr',
'fur': 'fur',
'gl': 'gl',
'hi': 'hi',
'he': 'he',
'hr': 'hr',
'hu': 'hu',
'ia': 'ia',
'id': 'id',
'it': 'it',
'ja': 'ja',
'ko': 'ko',
'lt': 'lt',
'ml': 'ml',
'mr': 'mr',
'nb': 'nb',
'nl': 'nl',
'pa': 'pa',
'pl': 'pl',
'pt': 'pt',
'pt_br': 'pt-BR',
'ru': 'ru',
'sk': 'sk',
'sl': 'sl',
'sq': 'sq',
'sr': 'sr-Cyrl',
'sr_latin': 'sr-Latn',
'sv': 'sv',
'te': 'te',
'tr': 'tr',
'th': 'th',
'uk': 'uk',
'ur': 'ur',
'vi': 'vi',
'zh_cn': 'zh-CN',
'zh_tw': 'zh-TW'
}),
'MOMENTJS_LOCALES': defaultdict(lambda: 'en', **{
'af': 'af',
'ar': 'ar',
'az': 'az',
'bg': 'bg',
'bn': 'bn',
'bs': 'bs',
'ca': 'ca',
'cs': 'cs',
'cz': 'cs',
'da': 'da',
'de': 'de',
'el': 'el',
'en': 'en',
'eo': 'eo',
'es': 'es',
'et': 'et',
'eu': 'eu',
'fa': 'fa',
'fi': 'fi',
'fr': 'fr',
'gl': 'gl',
'hi': 'hi',
'he': 'he',
'hr': 'hr',
'hu': 'hu',
'id': 'id',
'it': 'it',
'ja': 'ja',
'ko': 'ko',
'lt': 'lt',
'ml': 'ml',
'mr': 'mr',
'nb': 'nb',
'nl': 'nl',
'pa': 'pa-in',
'pl': 'pl',
'pt': 'pt',
'pt_br': 'pt-br',
'ru': 'ru',
'sk': 'sk',
'sl': 'sl',
'sq': 'sq',
'sr': 'sr-cyrl',
'sr_latin': 'sr',
'sv': 'sv',
'te': 'te',
'tr': 'tr',
'th': 'th',
'uk': 'uk',
'ur': 'ur',
'vi': 'vi',
'zh_cn': 'zh-cn',
'zh_tw': 'zh-tw'
}),
'PYPHEN_LOCALES': {
'af': 'af',
'bg': 'bg',
'ca': 'ca',
'cs': 'cs',
'cz': 'cs',
'da': 'da',
'de': 'de',
'el': 'el',
'en': 'en_US',
'es': 'es',
'et': 'et',
'fr': 'fr',
'hr': 'hr',
'hu': 'hu',
'it': 'it',
'lt': 'lt',
'nb': 'nb',
'nl': 'nl',
'pl': 'pl',
'pt': 'pt',
'pt_br': 'pt_BR',
'ru': 'ru',
'sk': 'sk',
'sl': 'sl',
'sr': 'sr',
'sv': 'sv',
'te': 'te',
'uk': 'uk',
},
'DOCUTILS_LOCALES': {
'af': 'af',
'ca': 'ca',
'da': 'da',
'de': 'de',
'en': 'en',
'eo': 'eo',
'es': 'es',
'fa': 'fa',
'fi': 'fi',
'fr': 'fr',
'gl': 'gl',
'he': 'he',
'it': 'it',
'ja': 'ja',
'lt': 'lt',
'nl': 'nl',
'pl': 'pl',
'pt': 'pt_br',
'pt_br': 'pt_br',
'ru': 'ru',
'sk': 'sk',
'sv': 'sv',
'zh_cn': 'zh_cn',
'zh_tw': 'zh_tw'
},
"METADATA_MAPPING": ["yaml", "toml", "rest_docinfo", "markdown_metadata"],
}
TAXONOMY_COMPATIBILITY_PLUGIN_NAME_MAP = {
"render_archive": ["classify_archive"],
"render_authors": ["classify_authors"],
"render_indexes": ["classify_page_index", "classify_sections"], gories", "classify_tags"],
}
DEFAULT_TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}'
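# With this pattern, a French translation of posts/hello.rst is expected at
# posts/hello.fr.rst.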
def _enclosure(post, lang):
enclosure = post.meta('enclosure', lang)
if enclosure:
try:
length = int(post.meta('enclosure_length', lang) or 0)
except KeyError:
length = 0
except ValueError:
utils.LOGGER.warning("Invalid enclosure length for post {0}".format(post.source_path))
length = 0
url = enclosure
mime = mimetypes.guess_type(url)[0]
return url, length, mime
class Nikola(object):
def __init__(self, **config):
self.path_handlers = {
'slug': self.slug_path,
'post_path': self.post_path,
'root': self.root_path,
'filename': self.filename_path,
}
self.strict = False
self.posts = []
self.all_posts = []
self.posts_per_year = defaultdict(list)
self.posts_per_month = defaultdict(list)
self.posts_per_tag = defaultdict(list)
self.posts_per_category = defaultdict(list)
self.tags_per_language = defaultdict(list)
self.post_per_file = {}
self.timeline = []
self.pages = []
self._scanned = False
self._template_system = None
self._THEMES = None
self._MESSAGES = None
self.filters = {}
self.debug = DEBUG
self.show_tracebacks = SHOW_TRACEBACKS
self.colorful = config.pop('__colorful__', False)
self.invariant = config.pop('__invariant__', False)
self.quiet = config.pop('__quiet__', False)
self._doit_config = config.pop('DOIT_CONFIG', {})
self.original_cwd = config.pop('__cwd__', False)
self.configuration_filename = config.pop('__configuration_filename__', False)
self.configured = bool(config)
self.injected_deps = defaultdict(list)
self.shortcode_registry = {}
self.metadata_extractors_by = default_metadata_extractors_by()
self.rst_transforms = []
self.template_hooks = {
'extra_head': utils.TemplateHookRegistry('extra_head', self),
'body_end': utils.TemplateHookRegistry('body_end', self),
'page_header': utils.TemplateHookRegistry('page_header', self),
'menu': utils.TemplateHookRegistry('menu', self),
'menu_alt': utils.TemplateHookRegistry('menu_alt', self),
'page_footer': utils.TemplateHookRegistry('page_footer', self),
}
utils.generic_rss_renderer = self.generic_rss_renderer
self.config = {
'ARCHIVE_PATH': "",
'ARCHIVE_FILENAME': "archive.html",
'ARCHIVES_ARE_INDEXES': False,
'AUTHOR_PATH': 'authors',
'AUTHOR_PAGES_ARE_INDEXES': False,
'AUTHOR_PAGES_DESCRIPTIONS': {},
'AUTHORLIST_MINIMUM_POSTS': 1,
'BLOG_AUTHOR': 'Default Author',
'BLOG_TITLE': 'Default Title',
'BLOG_EMAIL': '',
'BLOG_DESCRIPTION': 'Default Description',
'BODY_END': "",
'CACHE_FOLDER': 'cache',
'CATEGORIES_INDEX_PATH': '',
'CATEGORY_PATH': None,
'CATEGORY_PAGES_ARE_INDEXES': None,
'CATEGORY_DESCRIPTIONS': {},
'CATEGORY_TITLES': {},
'CATEGORY_PREFIX': 'cat_',
'CATEGORY_ALLOW_HIERARCHIES': False,
'CATEGORY_OUTPUT_FLAT_HIERARCHY': False,
'CATEGORY_DESTPATH_AS_DEFAULT': False,
'CATEGORY_DESTPATH_TRIM_PREFIX': False,
'CATEGORY_DESTPATH_FIRST_DIRECTORY_ONLY': True,
'CATEGORY_DESTPATH_NAMES': {},
'CATEGORY_PAGES_FOLLOW_DESTPATH': False,
'CATEGORY_TRANSLATIONS': [],
'CATEGORY_TRANSLATIONS_ADD_DEFAULTS': False,
'CODE_COLOR_SCHEME': 'default',
'COMMENT_SYSTEM': 'disqus',
'COMMENTS_IN_GALLERIES': False,
'COMMENTS_IN_PAGES': False,
'COMPILERS': {
"rest": ('.txt', '.rst'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm')
},
'CONTENT_FOOTER': '',
'CONTENT_FOOTER_FORMATS': {},
'RSS_COPYRIGHT': '',
'RSS_COPYRIGHT_PLAIN': '',
'RSS_COPYRIGHT_FORMATS': {},
'COPY_SOURCES': True,
'CREATE_ARCHIVE_NAVIGATION': False,
'CREATE_MONTHLY_ARCHIVE': False,
'CREATE_SINGLE_ARCHIVE': False,
'CREATE_FULL_ARCHIVES': False,
'CREATE_DAILY_ARCHIVE': False,
'DATE_FORMAT': 'yyyy-MM-dd HH:mm',
'DISABLE_INDEXES': False,
'DISABLE_MAIN_ATOM_FEED': False,
'DISABLE_MAIN_RSS_FEED': False,
'MOMENTJS_DATE_FORMAT': 'YYYY-MM-DD HH:mm',
'LUXON_DATE_FORMAT': {},
'DATE_FANCINESS': 0,
'DEFAULT_LANG': "en",
'DEPLOY_COMMANDS': {'default': []},
'DISABLED_PLUGINS': [],
'EXTRA_PLUGINS_DIRS': [],
'EXTRA_THEMES_DIRS': [],
'COMMENT_SYSTEM_ID': 'nikolademo',
'ENABLE_AUTHOR_PAGES': True,
'EXIF_WHITELIST': {},
'EXTRA_HEAD_DATA': '',
'FAVICONS': (),
'FEED_LENGTH': 10,
'FILE_METADATA_REGEXP': None,
'FILE_METADATA_UNSLUGIFY_TITLES': True,
'ADDITIONAL_METADATA': {},
'FILES_FOLDERS': {'files': ''},
'FILTERS': {},
'FORCE_ISO8601': False,
'FRONT_INDEX_HEADER': '',
'GALLERY_FOLDERS': {'galleries': 'galleries'},
'GALLERY_SORT_BY_DATE': True,
'GALLERIES_USE_THUMBNAIL': False,
'GALLERIES_DEFAULT_THUMBNAIL': None,
'GLOBAL_CONTEXT_FILLER': [],
'GZIP_COMMAND': None,
'GZIP_FILES': False,
'GZIP_EXTENSIONS': ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml'),
'HIDDEN_AUTHORS': [],
'HIDDEN_TAGS': [],
'HIDE_REST_DOCINFO': False,
'HIDDEN_CATEGORIES': [],
'HYPHENATE': False,
'IMAGE_FOLDERS': {'images': ''},
'INDEX_DISPLAY_POST_COUNT': 10,
'INDEX_FILE': 'index.html',
'INDEX_TEASERS': False,
'IMAGE_THUMBNAIL_SIZE': 400,
'IMAGE_THUMBNAIL_FORMAT': '{name}.thumbnail{ext}',
'INDEXES_TITLE': "",
'INDEXES_PAGES': "",
'INDEXES_PAGES_MAIN': False,
'INDEXES_PRETTY_PAGE_URL': False,
'INDEXES_STATIC': True,
'INDEX_PATH': '',
'IPYNB_CONFIG': {},
'KATEX_AUTO_RENDER': '',
'LICENSE': '',
'LINK_CHECK_WHITELIST': [],
'LISTINGS_FOLDERS': {'listings': 'listings'},
'LOGO_URL': '',
'DEFAULT_PREVIEW_IMAGE': None,
'NAVIGATION_LINKS': {},
'NAVIGATION_ALT_LINKS': {},
'MARKDOWN_EXTENSIONS': ['fenced_code', 'codehilite', 'extra'],
'MARKDOWN_EXTENSION_CONFIGS': {},
'MAX_IMAGE_SIZE': 1280,
'MATHJAX_CONFIG': '',
'METADATA_FORMAT': 'nikola',
'METADATA_MAPPING': {},
'NEW_POST_DATE_PATH': False,
'NEW_POST_DATE_PATH_FORMAT': '%Y/%m/%d',
'OLD_THEME_SUPPORT': True,
'OUTPUT_FOLDER': 'output',
'POSTS': (("posts/*.txt", "posts", "post.tmpl"),),
'PRESERVE_EXIF_DATA': False,
'PRESERVE_ICC_PROFILES': False,
'PAGES': (("pages/*.txt", "pages", "page.tmpl"),),
'PANDOC_OPTIONS': [],
'PRETTY_URLS': True,
'FUTURE_IS_NOW': False,
'INDEX_READ_MORE_LINK': DEFAULT_INDEX_READ_MORE_LINK,
'REDIRECTIONS': [],
'ROBOTS_EXCLUSIONS': [],
'GENERATE_ATOM': False,
'ATOM_EXTENSION': '.atom',
'ATOM_PATH': '',
'ATOM_FILENAME_BASE': 'index',
'FEED_TEASERS': True,
'FEED_PLAIN': False,
'FEED_READ_MORE_LINK': DEFAULT_FEED_READ_MORE_LINK,
'FEED_LINKS_APPEND_QUERY': False,
'GENERATE_RSS': True,
'RSS_EXTENSION': '.xml',
'RSS_LINK': None,
'RSS_PATH': '',
'RSS_FILENAME_BASE': 'rss',
'SEARCH_FORM': '',
'SHOW_BLOG_TITLE': True,
'SHOW_INDEX_PAGE_NAVIGATION': False,
'SHOW_SOURCELINK': True,
'SHOW_UNTRANSLATED_POSTS': True,
'SLUG_AUTHOR_PATH': True,
'SLUG_TAG_PATH': True,
'SOCIAL_BUTTONS_CODE': '',
'SITE_URL': 'https://example.com/',
'PAGE_INDEX': False,
'SECTION_PATH': '',
'STRIP_INDEXES': True,
'TAG_PATH': 'categories',
'TAG_PAGES_ARE_INDEXES': False,
'TAG_DESCRIPTIONS': {},
'TAG_TITLES': {},
'TAG_TRANSLATIONS': [],
'TAG_TRANSLATIONS_ADD_DEFAULTS': False,
'TAGS_INDEX_PATH': '',
'TAGLIST_MINIMUM_POSTS': 1,
'TEMPLATE_FILTERS': {},
'THEME': LEGAL_VALUES['DEFAULT_THEME'],
'THEME_COLOR': '#5670d4',
'THEME_CONFIG': {},
'THUMBNAIL_SIZE': 180,
'TRANSLATIONS_PATTERN': DEFAULT_TRANSLATIONS_PATTERN,
'URL_TYPE': 'rel_path',
'USE_BUNDLES': True,
'USE_CDN': False,
'USE_CDN_WARNING': True,
'USE_REST_DOCINFO_METADATA': False,
'USE_FILENAME_AS_TITLE': True,
'USE_KATEX': False,
'USE_SLUGIFY': True,
'USE_TAG_METADATA': True,
'TIMEZONE': 'UTC',
'WARN_ABOUT_TAG_METADATA': True,
'DEPLOY_DRAFTS': True,
'DEPLOY_FUTURE': False,
'SCHEDULE_ALL': False,
'SCHEDULE_RULE': '',
'DEMOTE_HEADERS': 1,
'GITHUB_SOURCE_BRANCH': 'master',
'GITHUB_DEPLOY_BRANCH': 'gh-pages',
'GITHUB_REMOTE_NAME': 'origin',
'GITHUB_COMMIT_SOURCE': False,
'META_GENERATOR_TAG': True,
'REST_FILE_INSERTION_ENABLED': True,
'TYPES_TO_HIDE_TITLE': [],
}
self._GLOBAL_CONTEXT = {}
self.ALL_PAGE_DEPS = {}
self.config.update(config)
if '__builtins__' in self.config:
try:
del self.config['__builtins__']
except KeyError:
del self.config[b'__builtins__']
self.config['__colorful__'] = self.colorful
self.config['__invariant__'] = self.invariant
self.config['__quiet__'] = self.quiet
self.config['ATOM_PATH'] = self.config['ATOM_PATH'] or self.config['INDEX_PATH']
if not self.config['NAVIGATION_LINKS']:
self.config['NAVIGATION_LINKS'] = {self.config['DEFAULT_LANG']: ()}
if not self.config['NAVIGATION_ALT_LINKS']:
self.config['NAVIGATION_ALT_LINKS'] = {self.config['DEFAULT_LANG']: ()}
self.config['TRANSLATIONS'] = self.config.get('TRANSLATIONS',
{self.config['DEFAULT_LANG']: ''})
for k, v in self.config['TRANSLATIONS'].items():
if os.path.isabs(v):
self.config['TRANSLATIONS'][k] = os.path.relpath(v, '/')
utils.TranslatableSetting.default_lang = self.config['DEFAULT_LANG']
self.TRANSLATABLE_SETTINGS = ('BLOG_AUTHOR',
'BLOG_TITLE',
'BLOG_DESCRIPTION',
'LICENSE',
'CONTENT_FOOTER',
'SOCIAL_BUTTONS_CODE',
'SEARCH_FORM',
'BODY_END',
'EXTRA_HEAD_DATA',
'NAVIGATION_LINKS',
'NAVIGATION_ALT_LINKS',
'FRONT_INDEX_HEADER',
'INDEX_READ_MORE_LINK',
'FEED_READ_MORE_LINK',
'INDEXES_TITLE',
'CATEGORY_DESTPATH_NAMES',
'INDEXES_PAGES',
'INDEXES_PRETTY_PAGE_URL',
'THEME_CONFIG',
'ARCHIVE_PATH',
'ARCHIVE_FILENAME',
'TAG_PATH',
'TAGS_INDEX_PATH',
'CATEGORY_PATH',
'CATEGORIES_INDEX_PATH',
'SECTION_PATH',
'INDEX_PATH',
'ATOM_PATH',
'RSS_PATH',
'RSS_FILENAME_BASE',
'ATOM_FILENAME_BASE',
'AUTHOR_PATH',
'DATE_FORMAT',
'LUXON_DATE_FORMAT',
'MOMENTJS_DATE_FORMAT',
'RSS_COPYRIGHT',
'RSS_COPYRIGHT_PLAIN',
'MARKDOWN_EXTENSION_CONFIGS',
)
self._GLOBAL_CONTEXT_TRANSLATABLE = ('blog_author',
'blog_title',
'blog_description',
'license',
'content_footer',
'social_buttons_code',
'search_form',
'body_end',
'extra_head_data',
'date_format',
'js_date_format',
'luxon_date_format',
'front_index_header',
'theme_config',
)
self._ALL_PAGE_DEPS_TRANSLATABLE = ('atom_path',
'rss_path',
'rss_filename_base',
'atom_filename_base',
)
if not self.config['LUXON_DATE_FORMAT']:
self.config['LUXON_DATE_FORMAT'] = {self.config['DEFAULT_LANG']: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}}
if 'JS_DATE_FORMAT' in self.config:
utils.LOGGER.warning("Moment.js was replaced by Luxon in the default themes, which uses different date formats.")
utils.LOGGER.warning("If you’re using a built-in theme, set LUXON_DATE_FORMAT. If your theme uses Moment.js, you can silence this warning by renaming JS_DATE_FORMAT to MOMENTJS_DATE_FORMAT.")
utils.LOGGER.warning("Sample Luxon config: LUXON_DATE_FORMAT = " + str(self.config['LUXON_DATE_FORMAT']))
self.config['MOMENTJS_DATE_FORMAT'] = self.config['LUXON_DATE_FORMAT']
if 'MOMENTJS_DATE_FORMAT' in self.config:
if isinstance(self.config['MOMENTJS_DATE_FORMAT'], dict):
for k in self.config['MOMENTJS_DATE_FORMAT']:
self.config['MOMENTJS_DATE_FORMAT'][k] = json.dumps(self.config['MOMENTJS_DATE_FORMAT'][k])
else:
self.config['MOMENTJS_DATE_FORMAT'] = json.dumps(self.config['MOMENTJS_DATE_FORMAT'])
if 'LUXON_DATE_FORMAT' in self.config:
for k in self.config['LUXON_DATE_FORMAT']:
self.config['LUXON_DATE_FORMAT'][k] = json.dumps(self.config['LUXON_DATE_FORMAT'][k])
for i in self.TRANSLATABLE_SETTINGS:
try:
self.config[i] = utils.TranslatableSetting(i, self.config[i], self.config['TRANSLATIONS'])
except KeyError:
pass
if self.config['EXIF_WHITELIST'] and not self.config['PRESERVE_EXIF_DATA']:
utils.LOGGER.warning('Setting EXIF_WHITELIST implies PRESERVE_EXIF_DATA is set to True')
self.config['PRESERVE_EXIF_DATA'] = True
if self.config['PRESERVE_EXIF_DATA'] and not self.config['EXIF_WHITELIST']:
utils.LOGGER.warning('You are setting PRESERVE_EXIF_DATA and not EXIF_WHITELIST so EXIF data is not really kept.')
if 'UNSLUGIFY_TITLES' in self.config:
utils.LOGGER.warning('The UNSLUGIFY_TITLES setting was renamed to FILE_METADATA_UNSLUGIFY_TITLES.')
self.config['FILE_METADATA_UNSLUGIFY_TITLES'] = self.config['UNSLUGIFY_TITLES']
if 'TAG_PAGES_TITLES' in self.config:
utils.LOGGER.warning('The TAG_PAGES_TITLES setting was renamed to TAG_TITLES.')
self.config['TAG_TITLES'] = self.config['TAG_PAGES_TITLES']
if 'TAG_PAGES_DESCRIPTIONS' in self.config:
utils.LOGGER.warning('The TAG_PAGES_DESCRIPTIONS setting was renamed to TAG_DESCRIPTIONS.')
self.config['TAG_DESCRIPTIONS'] = self.config['TAG_PAGES_DESCRIPTIONS']
if 'CATEGORY_PAGES_TITLES' in self.config:
utils.LOGGER.warning('The CATEGORY_PAGES_TITLES setting was renamed to CATEGORY_TITLES.')
self.config['CATEGORY_TITLES'] = self.config['CATEGORY_PAGES_TITLES']
if 'CATEGORY_PAGES_DESCRIPTIONS' in self.config:
utils.LOGGER.warning('The CATEGORY_PAGES_DESCRIPTIONS setting was renamed to CATEGORY_DESCRIPTIONS.')
self.config['CATEGORY_DESCRIPTIONS'] = self.config['CATEGORY_PAGES_DESCRIPTIONS']
if 'DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED' in self.config:
utils.LOGGER.warning('The DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED setting was renamed and split to DISABLE_INDEXES and DISABLE_MAIN_ATOM_FEED.')
self.config['DISABLE_INDEXES'] = self.config['DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED']
self.config['DISABLE_MAIN_ATOM_FEED'] = self.config['DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED']
if 'DISABLE_INDEXES_PLUGIN_RSS_FEED' in self.config:
utils.LOGGER.warning('The DISABLE_INDEXES_PLUGIN_RSS_FEED setting was renamed to DISABLE_MAIN_RSS_FEED.')
self.config['DISABLE_MAIN_RSS_FEED'] = self.config['DISABLE_INDEXES_PLUGIN_RSS_FEED']
for val in self.config['DATE_FORMAT'].values.values():
if '%' in val:
utils.LOGGER.error('The DATE_FORMAT setting needs to be upgraded.')
utils.LOGGER.warning("Nikola now uses CLDR-style date strings. http://cldr.unicode.org/translation/date-time")
utils.LOGGER.warning("Example: %Y-%m-%d %H:%M ==> yyyy-MM-dd HH:mm")
utils.LOGGER.warning("(note it’s different to what moment.js uses!)")
sys.exit(1)
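# Merge user-supplied LOCALES (with any encoding suffix such as '.UTF-8' stripped)
# over the base aliases from LEGAL_VALUES['LOCALES_BASE'].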
locales = LEGAL_VALUES['LOCALES_BASE']
if 'LOCALES' in self.config:
for k, v in self.config['LOCALES'].items():
self.config['LOCALES'][k] = v.split('.')[0]
locales.update(self.config['LOCALES'])
self.config['LOCALES'] = locales
if self.config.get('POSTS_SECTIONS'):
utils.LOGGER.warning("The sections feature has been removed and its functionality has been merged into categories.")
utils.LOGGER.warning("For more information on how to migrate, please read: https://getnikola.com/blog/upgrading-to-nikola-v8.html#sections-were-replaced-by-categories")
for section_config_suffix, cat_config_suffix in (
('DESCRIPTIONS', 'DESCRIPTIONS'),
('TITLE', 'TITLES'),
('TRANSLATIONS', 'TRANSLATIONS')
):
section_config = 'POSTS_SECTION_' + section_config_suffix
cat_config = 'CATEGORY_' + cat_config_suffix
if section_config in self.config:
self.config[section_config].update(self.config[cat_config])
self.config[cat_config] = self.config[section_config]
self.config['CATEGORY_DESTPATH_NAMES'] = self.config.get('POSTS_SECTION_NAME', {})
self.config['CATEGORY_DESTPATH_NAMES'] = utils.TranslatableSetting('CATEGORY_DESTPATH_NAMES', self.config['CATEGORY_DESTPATH_NAMES'], self.config['TRANSLATIONS'])
self.config['CATEGORY_DESTPATH_AS_DEFAULT'] = not self.config.get('POSTS_SECTION_FROM_META')
utils.LOGGER.info("Setting CATEGORY_DESTPATH_AS_DEFAULT = " + str(self.config['CATEGORY_DESTPATH_AS_DEFAULT']))
if self.config.get('CATEGORY_PAGES_FOLLOW_DESTPATH') and (not self.config.get('CATEGORY_ALLOW_HIERARCHIES') or self.config.get('CATEGORY_OUTPUT_FLAT_HIERARCHY')):
utils.LOGGER.error('CATEGORY_PAGES_FOLLOW_DESTPATH requires CATEGORY_ALLOW_HIERARCHIES = True, CATEGORY_OUTPUT_FLAT_HIERARCHY = False.')
sys.exit(1)
self.config['CONTENT_FOOTER'].langformat(self.config['CONTENT_FOOTER_FORMATS'])
self.config['RSS_COPYRIGHT'].langformat(self.config['RSS_COPYRIGHT_FORMATS'])
self.config['RSS_COPYRIGHT_PLAIN'].langformat(self.config['RSS_COPYRIGHT_FORMATS'])
utils.USE_SLUGIFY = self.config['USE_SLUGIFY']
if self.config.get('HYPHENATE') and pyphen is None:
utils.LOGGER.warning('To use hyphenation, you have to install '
'the "pyphen" package.')
utils.LOGGER.warning('Setting HYPHENATE to False.')
self.config['HYPHENATE'] = False
self.config['post_pages'] = []
for i1, i2, i3 in self.config['POSTS']:
self.config['post_pages'].append([i1, i2, i3, True])
for i1, i2, i3 in self.config['PAGES']:
self.config['post_pages'].append([i1, i2, i3, False])
# Handle old plugin names (from before merging the taxonomy PR #2535)
for old_plugin_name, new_plugin_names in TAXONOMY_COMPATIBILITY_PLUGIN_NAME_MAP.items():
if old_plugin_name in self.config['DISABLED_PLUGINS']:
missing_plugins = []
for plugin_name in new_plugin_names:
if plugin_name not in self.config['DISABLED_PLUGINS']:
missing_plugins.append(plugin_name)
if missing_plugins:
utils.LOGGER.warning('The "{}" plugin was replaced by several taxonomy plugins (see PR
utils.LOGGER.warning('You are currently disabling "{}", but not the following new taxonomy plugins: {}'.format(old_plugin_name, ', '.join(missing_plugins)))
utils.LOGGER.warning('Please also disable these new plugins or remove "{}" from the DISABLED_PLUGINS list.'.format(old_plugin_name))
self.config['DISABLED_PLUGINS'].extend(missing_plugins)
# Special-case logic for "render_indexes" to fix #2591
if 'render_indexes' in self.config['DISABLED_PLUGINS']:
if 'generate_rss' in self.config['DISABLED_PLUGINS'] or self.config['GENERATE_RSS'] is False:
if 'classify_indexes' not in self.config['DISABLED_PLUGINS']:
utils.LOGGER.warning('You are disabling the "render_indexes" plugin, as well as disabling the "generate_rss" plugin or setting GENERATE_RSS to False. To achieve the same effect, please disable the "classify_indexes" plugin in the future.')
self.config['DISABLED_PLUGINS'].append('classify_indexes')
else:
if not self.config['DISABLE_INDEXES']:
utils.LOGGER.warning('You are disabling the "render_indexes" plugin, but not the generation of RSS feeds. Please put "DISABLE_INDEXES = True" into your configuration instead.')
self.config['DISABLE_INDEXES'] = True
# Disable RSS. For a successful disable, we must have both the option
# false and the plugin disabled through the official means.
if 'generate_rss' in self.config['DISABLED_PLUGINS'] and self.config['GENERATE_RSS'] is True:
utils.LOGGER.warning('Please use GENERATE_RSS to disable RSS feed generation, instead of mentioning generate_rss in DISABLED_PLUGINS.')
self.config['GENERATE_RSS'] = False
self.config['DISABLE_MAIN_RSS_FEED'] = True
# PRETTY_URLS defaults to enabling STRIP_INDEXES unless explicitly disabled
if self.config.get('PRETTY_URLS') and 'STRIP_INDEXES' not in config:
self.config['STRIP_INDEXES'] = True
if not self.config.get('COPY_SOURCES'):
self.config['SHOW_SOURCELINK'] = False
if self.config['CATEGORY_PATH']._inp is None:
self.config['CATEGORY_PATH'] = self.config['TAG_PATH']
if self.config['CATEGORY_PAGES_ARE_INDEXES'] is None:
self.config['CATEGORY_PAGES_ARE_INDEXES'] = self.config['TAG_PAGES_ARE_INDEXES']
self.default_lang = self.config['DEFAULT_LANG']
self.translations = self.config['TRANSLATIONS']
utils.LocaleBorg.initialize(self.config.get('LOCALES', {}), self.default_lang)
# BASE_URL defaults to SITE_URL
if 'BASE_URL' not in self.config:
self.config['BASE_URL'] = self.config.get('SITE_URL')
# BASE_URL should *always* end in /
if self.config['BASE_URL'] and self.config['BASE_URL'][-1] != '/':
utils.LOGGER.warning("Your BASE_URL doesn't end in / -- adding it, but please fix it in your config file!")
self.config['BASE_URL'] += '/'
try:
_bnl = urlsplit(self.config['BASE_URL']).netloc
_bnl.encode('ascii')
urlsplit(self.config['SITE_URL']).netloc.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
utils.LOGGER.error("Your BASE_URL or SITE_URL contains an IDN expressed in Unicode. Please convert it to Punycode.")
utils.LOGGER.error("Punycode of {}: {}".format(_bnl, _bnl.encode('idna')))
sys.exit(1)
metadata_extractors.load_defaults(self, self.metadata_extractors_by)
if metadata_extractors.DEFAULT_EXTRACTOR is None:
utils.LOGGER.error("Could not find default meta extractor ({})".format(
metadata_extractors.DEFAULT_EXTRACTOR_NAME))
sys.exit(1)
if config.get('METADATA_FORMAT', 'nikola').lower() == 'pelican':
if 'markdown.extensions.meta' not in config.get('MARKDOWN_EXTENSIONS', []) \
and 'markdown' in self.config['COMPILERS']:
utils.LOGGER.warning(
'To use the Pelican metadata format, you need to add '
'"markdown.extensions.meta" to your MARKDOWN_EXTENSIONS setting.')
try:
self.tzinfo = dateutil.tz.gettz(self.config['TIMEZONE'])
except Exception as exc:
utils.LOGGER.warning("Error getting TZ: {}", exc)
self.tzinfo = dateutil.tz.gettz()
self.config['__tzinfo__'] = self.tzinfo
self.config['_COMPILERS_RAW'] = {}
for k, v in self.config['COMPILERS'].items():
self.config['_COMPILERS_RAW'][k] = list(v)
self.themes_dirs = ['themes'] + self.config['EXTRA_THEMES_DIRS']
filter_name_format = 'filters.{0}'
for filter_name, filter_definition in filters.__dict__.items():
if filter_name.startswith('_') or not callable(filter_definition):
continue
self.register_filter(filter_name_format.format(filter_name), filter_definition)
self._set_global_context_from_config()
self._set_all_page_deps_from_config()
if self.configured:
self._set_global_context_from_data()
self.state = Persistor('state_data.json')
self.cache = Persistor(os.path.join(self.config['CACHE_FOLDER'], 'cache_data.json'))
if self.configured:
self.state._set_site(self)
self.cache._set_site(self)
def _filter_duplicate_plugins(self, plugin_list):
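# Keep a single copy of each plugin; when the same plugin exists in several
# plugin places, the copy from the later (more site-specific) place wins.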
def plugin_position_in_places(plugin):
for i, place in enumerate(self._plugin_places):
if plugin[0].startswith(place):
return i
utils.LOGGER.warn("Duplicate plugin found in unexpected location: {}".format(plugin[0]))
return len(self._plugin_places)
plugin_dict = defaultdict(list)
for data in plugin_list:
plugin_dict[data[2].name].append(data)
result = []
for _, plugins in plugin_dict.items():
if len(plugins) > 1:
plugins.sort(key=plugin_position_in_places)
utils.LOGGER.debug("Plugin {} exists in multiple places, using {}".format(
plugins[-1][2].name, plugins[-1][0]))
result.append(plugins[-1])
return result
def init_plugins(self, commands_only=False, load_all=False):
self.plugin_manager = PluginManager(categories_filter={
"Command": Command,
"Task": Task,
"LateTask": LateTask,
"TemplateSystem": TemplateSystem,
"PageCompiler": PageCompiler,
"TaskMultiplier": TaskMultiplier,
"CompilerExtension": CompilerExtension,
"MarkdownExtension": MarkdownExtension,
"RestExtension": RestExtension,
"MetadataExtractor": MetadataExtractor,
"ShortcodePlugin": ShortcodePlugin,
"SignalHandler": SignalHandler,
"ConfigPlugin": ConfigPlugin,
"PostScanner": PostScanner,
"Taxonomy": Taxonomy,
})
self.plugin_manager.getPluginLocator().setPluginInfoExtension('plugin')
extra_plugins_dirs = self.config['EXTRA_PLUGINS_DIRS']
self._plugin_places = [
resource_filename('nikola', 'plugins'),
os.path.expanduser(os.path.join('~', '.nikola', 'plugins')),
os.path.join(os.getcwd(), 'plugins'),
] + [path for path in extra_plugins_dirs if path]
compilers = defaultdict(set)
for compiler, exts in self.config['COMPILERS'].items():
for ext in exts:
compilers[compiler].add(ext)
for lang in self.config['TRANSLATIONS'].keys():
candidate = utils.get_translation_candidate(self.config, "f" + ext, lang)
compilers[compiler].add(candidate)
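# Also register translated-filename candidates for every extension (with the
# default TRANSLATIONS_PATTERN this yields names like 'f.fr.rst'), so that
# get_compiler() can match translated sources by suffix.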
self.config['COMPILERS'] = {}
self.disabled_compilers = {}
self.disabled_compiler_extensions = defaultdict(list)
self.plugin_manager.getPluginLocator().setPluginPlaces(self._plugin_places)
self.plugin_manager.locatePlugins()
bad_candidates = set([])
if not load_all:
for p in self.plugin_manager._candidates:
if commands_only:
if p[-1].details.has_option('Nikola', 'PluginCategory'):
if p[-1].details.get('Nikola', 'PluginCategory') not in {'Command', 'Template'}:
bad_candidates.add(p)
else:
bad_candidates.add(p)
elif self.configured:
if p[-1].name in self.config['DISABLED_PLUGINS']:
bad_candidates.add(p)
utils.LOGGER.debug('Not loading disabled plugin {}', p[-1].name)
if p[-1].details.has_option('Nikola', 'PluginCategory') and p[-1].details.get('Nikola', 'PluginCategory') in ('Compiler', 'PageCompiler'):
bad_candidates.add(p)
self.disabled_compilers[p[-1].name] = p
# Remove compiler extensions we don't need
if p[-1].details.has_option('Nikola', 'compiler') and p[-1].details.get('Nikola', 'compiler') in self.disabled_compilers:
bad_candidates.add(p)
self.disabled_compiler_extensions[p[-1].details.get('Nikola', 'compiler')].append(p)
self.plugin_manager._candidates = list(set(self.plugin_manager._candidates) - bad_candidates)
self.plugin_manager._candidates = self._filter_duplicate_plugins(self.plugin_manager._candidates)
self.plugin_manager.loadPlugins()
self._activate_plugins_of_category("PostScanner")
if not load_all:
file_extensions = set()
for post_scanner in [p.plugin_object for p in self.plugin_manager.getPluginsOfCategory('PostScanner')]:
exts = post_scanner.supported_extensions()
if exts is not None:
file_extensions.update(exts)
else:
# Stop scanning for more: once we get None, we have to load all compilers anyway
utils.LOGGER.debug("Post scanner {0!r} does not implement `supported_extensions`, loading all compilers".format(post_scanner))
file_extensions = None
break
to_add = []
for k, v in compilers.items():
if file_extensions is None or file_extensions.intersection(v):
self.config['COMPILERS'][k] = sorted(list(v))
p = self.disabled_compilers.pop(k, None)
if p:
to_add.append(p)
for p in self.disabled_compiler_extensions.pop(k, []):
to_add.append(p)
for _, p in self.disabled_compilers.items():
utils.LOGGER.debug('Not loading unneeded compiler {}', p[-1].name)
for _, plugins in self.disabled_compiler_extensions.items():
for p in plugins:
utils.LOGGER.debug('Not loading compiler extension {}', p[-1].name)
if to_add:
self.plugin_manager._candidates = self._filter_duplicate_plugins(to_add)
self.plugin_manager.loadPlugins()
# Jupyter theme configuration. If a website has ipynb enabled in post_pages
# we should enable the Jupyter CSS (leaving that up to the theme itself).
if 'needs_ipython_css' not in self._GLOBAL_CONTEXT:
self._GLOBAL_CONTEXT['needs_ipython_css'] = 'ipynb' in self.config['COMPILERS']
# Activate metadata extractors and prepare them for use
for p in self._activate_plugins_of_category("MetadataExtractor"):
metadata_extractors.classify_extractor(p.plugin_object, self.metadata_extractors_by)
self._activate_plugins_of_category("Taxonomy")
self.taxonomy_plugins = {}
for taxonomy in [p.plugin_object for p in self.plugin_manager.getPluginsOfCategory('Taxonomy')]:
if not taxonomy.is_enabled():
continue
if taxonomy.classification_name in self.taxonomy_plugins:
utils.LOGGER.error("Found more than one taxonomy with classification name '{}'!".format(taxonomy.classification_name))
sys.exit(1)
self.taxonomy_plugins[taxonomy.classification_name] = taxonomy
self._activate_plugins_of_category("SignalHandler")
# Emit signal for SignalHandlers which need to start running immediately.
signal('sighandlers_loaded').send(self)
self._commands = {}
command_plugins = self._activate_plugins_of_category("Command")
for plugin_info in command_plugins:
plugin_info.plugin_object.short_help = plugin_info.description
self._commands[plugin_info.name] = plugin_info.plugin_object
self._activate_plugins_of_category("Task")
self._activate_plugins_of_category("LateTask")
self._activate_plugins_of_category("TaskMultiplier")
# Activate all required compiler plugins
self.compiler_extensions = self._activate_plugins_of_category("CompilerExtension")
for plugin_info in self.plugin_manager.getPluginsOfCategory("PageCompiler"):
if plugin_info.name in self.config["COMPILERS"].keys():
self.plugin_manager.activatePluginByName(plugin_info.name)
plugin_info.plugin_object.set_site(self)
# Activate shortcode plugins
self._activate_plugins_of_category("ShortcodePlugin")
# Load compiler plugins
self.compilers = {}
self.inverse_compilers = {}
for plugin_info in self.plugin_manager.getPluginsOfCategory(
"PageCompiler"):
self.compilers[plugin_info.name] = \
plugin_info.plugin_object
# Load config plugins and register templated shortcodes
self._activate_plugins_of_category("ConfigPlugin")
self._register_templated_shortcodes()
# Check with registered filters and configure filters
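# For example (hypothetical conf.py): FILTERS = {'.html': ['filters.typogrify']}.
# String entries are replaced by the callables registered above, and filters that
# declare configuration_variables get those config values bound via functools.partial.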
for actions in self.config['FILTERS'].values():
for i, f in enumerate(actions):
if isinstance(f, str):
# Check whether this denotes a registered filter
_f = self.filters.get(f)
if _f is not None:
f = _f
actions[i] = f
if hasattr(f, 'configuration_variables'):
args = {}
for arg, config in f.configuration_variables.items():
if config in self.config:
args[arg] = self.config[config]
if args:
actions[i] = functools.partial(f, **args)
# Signal that we are configured
signal('configured').send(self)
def _set_global_context_from_config(self):
self._GLOBAL_CONTEXT['url_type'] = self.config['URL_TYPE']
self._GLOBAL_CONTEXT['timezone'] = self.tzinfo
self._GLOBAL_CONTEXT['_link'] = self.link
try:
self._GLOBAL_CONTEXT['set_locale'] = utils.LocaleBorg().set_locale
except utils.LocaleBorgUninitializedException:
self._GLOBAL_CONTEXT['set_locale'] = None
self._GLOBAL_CONTEXT['rel_link'] = self.rel_link
self._GLOBAL_CONTEXT['abs_link'] = self.abs_link
self._GLOBAL_CONTEXT['exists'] = self.file_exists
self._GLOBAL_CONTEXT['index_display_post_count'] = self.config[
'INDEX_DISPLAY_POST_COUNT']
self._GLOBAL_CONTEXT['index_file'] = self.config['INDEX_FILE']
self._GLOBAL_CONTEXT['use_bundles'] = self.config['USE_BUNDLES']
self._GLOBAL_CONTEXT['use_cdn'] = self.config.get("USE_CDN")
self._GLOBAL_CONTEXT['theme_color'] = self.config.get("THEME_COLOR")
self._GLOBAL_CONTEXT['theme_config'] = self.config.get("THEME_CONFIG")
self._GLOBAL_CONTEXT['favicons'] = self.config['FAVICONS']
self._GLOBAL_CONTEXT['date_format'] = self.config.get('DATE_FORMAT')
self._GLOBAL_CONTEXT['blog_author'] = self.config.get('BLOG_AUTHOR')
self._GLOBAL_CONTEXT['blog_title'] = self.config.get('BLOG_TITLE')
self._GLOBAL_CONTEXT['blog_email'] = self.config.get('BLOG_EMAIL')
self._GLOBAL_CONTEXT['show_blog_title'] = self.config.get('SHOW_BLOG_TITLE')
self._GLOBAL_CONTEXT['logo_url'] = self.config.get('LOGO_URL')
self._GLOBAL_CONTEXT['blog_description'] = self.config.get('BLOG_DESCRIPTION')
self._GLOBAL_CONTEXT['front_index_header'] = self.config.get('FRONT_INDEX_HEADER')
self._GLOBAL_CONTEXT['color_hsl_adjust_hex'] = utils.color_hsl_adjust_hex
self._GLOBAL_CONTEXT['colorize_str_from_base_color'] = utils.colorize_str_from_base_color
self._GLOBAL_CONTEXT['blog_url'] = self.config.get('SITE_URL')
self._GLOBAL_CONTEXT['template_hooks'] = self.template_hooks
self._GLOBAL_CONTEXT['body_end'] = self.config.get('BODY_END')
self._GLOBAL_CONTEXT['social_buttons_code'] = self.config.get('SOCIAL_BUTTONS_CODE')
self._GLOBAL_CONTEXT['translations'] = self.config.get('TRANSLATIONS')
self._GLOBAL_CONTEXT['license'] = self.config.get('LICENSE')
self._GLOBAL_CONTEXT['search_form'] = self.config.get('SEARCH_FORM')
self._GLOBAL_CONTEXT['comment_system'] = self.config.get('COMMENT_SYSTEM')
self._GLOBAL_CONTEXT['comment_system_id'] = self.config.get('COMMENT_SYSTEM_ID')
self._GLOBAL_CONTEXT['site_has_comments'] = bool(self.config.get('COMMENT_SYSTEM'))
self._GLOBAL_CONTEXT['mathjax_config'] = self.config.get(
'MATHJAX_CONFIG')
self._GLOBAL_CONTEXT['use_katex'] = self.config.get('USE_KATEX')
self._GLOBAL_CONTEXT['katex_auto_render'] = self.config.get('KATEX_AUTO_RENDER')
self._GLOBAL_CONTEXT['content_footer'] = self.config.get(
'CONTENT_FOOTER')
self._GLOBAL_CONTEXT['generate_atom'] = self.config.get('GENERATE_ATOM')
self._GLOBAL_CONTEXT['generate_rss'] = self.config.get('GENERATE_RSS')
self._GLOBAL_CONTEXT['rss_link'] = self.config.get('RSS_LINK')
self._GLOBAL_CONTEXT['navigation_links'] = self.config.get('NAVIGATION_LINKS')
self._GLOBAL_CONTEXT['navigation_alt_links'] = self.config.get('NAVIGATION_ALT_LINKS')
self._GLOBAL_CONTEXT['twitter_card'] = self.config.get(
'TWITTER_CARD', {})
self._GLOBAL_CONTEXT['hide_sourcelink'] = not self.config.get(
'SHOW_SOURCELINK')
self._GLOBAL_CONTEXT['show_sourcelink'] = self.config.get(
'SHOW_SOURCELINK')
self._GLOBAL_CONTEXT['extra_head_data'] = self.config.get('EXTRA_HEAD_DATA')
self._GLOBAL_CONTEXT['date_fanciness'] = self.config.get('DATE_FANCINESS')
self._GLOBAL_CONTEXT['luxon_locales'] = LEGAL_VALUES['LUXON_LOCALES']
self._GLOBAL_CONTEXT['luxon_date_format'] = self.config.get('LUXON_DATE_FORMAT')
# TODO: remove in v9
self._GLOBAL_CONTEXT['js_date_format'] = self.config.get('MOMENTJS_DATE_FORMAT')
self._GLOBAL_CONTEXT['momentjs_locales'] = LEGAL_VALUES['MOMENTJS_LOCALES']
# Patch missing locales into momentjs defaulting to English (Issue #3216)
for l in self._GLOBAL_CONTEXT['translations']:
if l not in self._GLOBAL_CONTEXT['momentjs_locales']:
self._GLOBAL_CONTEXT['momentjs_locales'][l] = ""
self._GLOBAL_CONTEXT['hidden_tags'] = self.config.get('HIDDEN_TAGS')
self._GLOBAL_CONTEXT['hidden_categories'] = self.config.get('HIDDEN_CATEGORIES')
self._GLOBAL_CONTEXT['hidden_authors'] = self.config.get('HIDDEN_AUTHORS')
self._GLOBAL_CONTEXT['url_replacer'] = self.url_replacer
self._GLOBAL_CONTEXT['sort_posts'] = utils.sort_posts
self._GLOBAL_CONTEXT['smartjoin'] = utils.smartjoin
self._GLOBAL_CONTEXT['colorize_str'] = utils.colorize_str
self._GLOBAL_CONTEXT['meta_generator_tag'] = self.config.get('META_GENERATOR_TAG')
self._GLOBAL_CONTEXT.update(self.config.get('GLOBAL_CONTEXT', {}))
def _set_global_context_from_data(self):
self._GLOBAL_CONTEXT['data'] = {}
for root, dirs, files in os.walk('data', followlinks=True):
for fname in files:
fname = os.path.join(root, fname)
data = utils.load_data(fname)
key = os.path.splitext(fname.split(os.sep, 1)[1])[0]
self._GLOBAL_CONTEXT['data'][key] = data
# Offer global_data as an alias for data (Issue #2488)
self._GLOBAL_CONTEXT['global_data'] = self._GLOBAL_CONTEXT['data']
def _set_all_page_deps_from_config(self):
self.ALL_PAGE_DEPS['atom_extension'] = self.config.get('ATOM_EXTENSION')
self.ALL_PAGE_DEPS['atom_path'] = self.config.get('ATOM_PATH')
self.ALL_PAGE_DEPS['rss_extension'] = self.config.get('RSS_EXTENSION')
self.ALL_PAGE_DEPS['rss_path'] = self.config.get('RSS_PATH')
self.ALL_PAGE_DEPS['rss_filename_base'] = self.config.get('RSS_FILENAME_BASE')
self.ALL_PAGE_DEPS['atom_filename_base'] = self.config.get('ATOM_FILENAME_BASE')
self.ALL_PAGE_DEPS['slug_author_path'] = self.config.get('SLUG_AUTHOR_PATH')
self.ALL_PAGE_DEPS['slug_tag_path'] = self.config.get('SLUG_TAG_PATH')
self.ALL_PAGE_DEPS['locale'] = self.config.get('LOCALE')
def _activate_plugins_of_category(self, category):
# this code duplicated in tests/base.py
plugins = []
for plugin_info in self.plugin_manager.getPluginsOfCategory(category):
self.plugin_manager.activatePluginByName(plugin_info.name)
plugin_info.plugin_object.set_site(self)
plugins.append(plugin_info)
return plugins
def _get_themes(self):
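        # Lazily resolve the theme inheritance chain, falling back to the default theme
        # (with a warning) if the configured theme cannot be loaded.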
if self._THEMES is None:
try:
self._THEMES = utils.get_theme_chain(self.config['THEME'], self.themes_dirs)
except Exception:
if self.config['THEME'] != LEGAL_VALUES['DEFAULT_THEME']:
utils.LOGGER.warning('''Cannot load theme "{0}", using '{1}' instead.'''.format(
self.config['THEME'], LEGAL_VALUES['DEFAULT_THEME']))
self.config['THEME'] = LEGAL_VALUES['DEFAULT_THEME']
return self._get_themes()
raise
# Check consistency of USE_CDN and the current THEME (Issue #386)
if self.config['USE_CDN'] and self.config['USE_CDN_WARNING']:
bootstrap_path = utils.get_asset_path(os.path.join(
'assets', 'css', 'bootstrap.min.css'), self._THEMES)
if bootstrap_path and bootstrap_path.split(os.sep)[-4] not in ['bootstrap', 'bootstrap3', 'bootstrap4']:
utils.LOGGER.warning('The USE_CDN option may be incompatible with your theme, because it uses a hosted version of bootstrap.')
return self._THEMES
THEMES = property(_get_themes)
def _get_messages(self):
try:
if self._MESSAGES is None:
self._MESSAGES = utils.load_messages(self.THEMES,
self.translations,
self.default_lang,
themes_dirs=self.themes_dirs)
return self._MESSAGES
except utils.LanguageNotFoundError as e:
utils.LOGGER.error('''Cannot load language "{0}". Please make sure it is supported by Nikola itself, or that you have the appropriate messages files in your themes.'''.format(e.lang))
sys.exit(1)
MESSAGES = property(_get_messages)
def _get_global_context(self):
if 'messages' not in self._GLOBAL_CONTEXT:
self._GLOBAL_CONTEXT['messages'] = self.MESSAGES
if 'has_custom_css' not in self._GLOBAL_CONTEXT:
# check if custom css exist and is not empty
custom_css_path = utils.get_asset_path(
'assets/css/custom.css',
self.THEMES,
self.config['FILES_FOLDERS']
)
if custom_css_path and self.file_exists(custom_css_path, not_empty=True):
self._GLOBAL_CONTEXT['has_custom_css'] = True
else:
self._GLOBAL_CONTEXT['has_custom_css'] = False
return self._GLOBAL_CONTEXT
GLOBAL_CONTEXT = property(_get_global_context)
def _get_template_system(self):
if self._template_system is None:
# Load template plugin
template_sys_name = utils.get_template_engine(self.THEMES)
pi = self.plugin_manager.getPluginByName(
template_sys_name, "TemplateSystem")
if pi is None:
sys.stderr.write("Error loading {0} template system "
"plugin\n".format(template_sys_name))
sys.exit(1)
self._template_system = pi.plugin_object
lookup_dirs = ['templates'] + [os.path.join(utils.get_theme_path(name), "templates")
for name in self.THEMES]
self._template_system.set_directories(lookup_dirs,
self.config['CACHE_FOLDER'])
self._template_system.set_site(self)
return self._template_system
template_system = property(_get_template_system)
def get_compiler(self, source_name):
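        # Find and cache the page compiler for this source file, using the
        # extension mapping from the COMPILERS setting.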
ext = os.path.splitext(source_name)[1]
try:
compiler = self.inverse_compilers[ext]
except KeyError:
            # Find the correct compiler for this file's extension
lang_exts_tab = list(self.config['COMPILERS'].items())
langs = [lang for lang, exts in lang_exts_tab if ext in exts or
len([ext_ for ext_ in exts if source_name.endswith(ext_)]) > 0]
if len(langs) != 1:
if len(set(langs)) > 1:
sys.exit("Your file extension->compiler definition is "
"ambiguous.\nPlease remove one of the file "
"extensions from 'COMPILERS' in conf.py\n(The "
"error is in one of {0})".format(', '.join(langs)))
elif len(langs) > 1:
langs = langs[:1]
else:
sys.exit("COMPILERS in conf.py does not tell me how to "
"handle '{0}' extensions.".format(ext))
lang = langs[0]
try:
compiler = self.compilers[lang]
except KeyError:
sys.exit("Cannot find '{0}' compiler; "
"it might require an extra plugin -- "
"do you have it installed?".format(lang))
self.inverse_compilers[ext] = compiler
return compiler
def render_template(self, template_name, output_name, context, url_type=None, is_fragment=False):
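        # Render template_name with the merged global + local context; when output_name is
        # given, rewrite all links relative to it and write the HTML (or fragment) to disk.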
local_context = {}
local_context["template_name"] = template_name
local_context.update(self.GLOBAL_CONTEXT)
local_context.update(context)
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
local_context[k] = local_context[k](local_context['lang'])
local_context['is_rtl'] = local_context['lang'] in LEGAL_VALUES['RTL_LANGUAGES']
local_context['url_type'] = self.config['URL_TYPE'] if url_type is None else url_type
local_context["translations_feedorder"] = sorted(
local_context["translations"],
key=lambda x: (int(x != local_context['lang']), x)
)
# string, arguments
local_context["formatmsg"] = lambda s, *a: s % a
for h in local_context['template_hooks'].values():
h.context = context
for func in self.config['GLOBAL_CONTEXT_FILLER']:
func(local_context, template_name)
data = self.template_system.render_template(
template_name, None, local_context)
if output_name is None:
return data
if not output_name.startswith(self.config["OUTPUT_FOLDER"]):
raise ValueError("Output path for templates must start with OUTPUT_FOLDER")
url_part = output_name[len(self.config["OUTPUT_FOLDER"]) + 1:]
# Treat our site as if output/ is "/" and then make all URLs relative,
# making the site "relocatable"
src = os.sep + url_part
src = os.path.normpath(src)
# The os.sep is because normpath will change "/" to "\" on windows
src = "/".join(src.split(os.sep))
utils.makedirs(os.path.dirname(output_name))
parser = lxml.html.HTMLParser(remove_blank_text=True)
if is_fragment:
doc = lxml.html.fragment_fromstring(data.strip(), parser)
else:
doc = lxml.html.document_fromstring(data.strip(), parser)
self.rewrite_links(doc, src, context['lang'], url_type)
if is_fragment:
# doc.text contains text before the first HTML, or None if there was no text
# The text after HTML elements is added by tostring() (because its implicit
# argument with_tail has default value True).
data = (doc.text or '').encode('utf-8') + b''.join([lxml.html.tostring(child, encoding='utf-8', method='html') for child in doc.iterchildren()])
else:
data = lxml.html.tostring(doc, encoding='utf8', method='html', pretty_print=True, doctype='<!DOCTYPE html>')
with open(output_name, "wb+") as post_file:
post_file.write(data)
def rewrite_links(self, doc, src, lang, url_type=None):
# First let lxml replace most of them
doc.rewrite_links(lambda dst: self.url_replacer(src, dst, lang, url_type), resolve_base_href=False)
# lxml ignores srcset in img and source elements, so do that by hand
objs = list(doc.xpath('(//img|//source)'))
for obj in objs:
if 'srcset' in obj.attrib:
urls = [u.strip() for u in obj.attrib['srcset'].split(',')]
urls = [self.url_replacer(src, dst, lang, url_type) for dst in urls]
obj.set('srcset', ', '.join(urls))
def url_replacer(self, src, dst, lang=None, url_type=None):
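        # Rewrite one link on page src so that it points at dst according to URL_TYPE
        # (rel_path, full_path or absolute), resolving the magic link: scheme on the way.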
# Avoid mangling links within the page
        if dst.startswith('#'):
            return dst
parsed_src = urlsplit(src)
src_elems = parsed_src.path.split('/')[1:]
dst_url = urlparse(dst)
if lang is None:
lang = self.default_lang
if url_type is None:
url_type = self.config.get('URL_TYPE')
if dst_url.scheme and dst_url.scheme not in ['http', 'https', 'link']:
return dst
# Refuse to replace links that are full URLs.
if dst_url.netloc:
if dst_url.scheme == 'link': # Magic link
if dst_url.query:
# If query strings are used in magic link, they will be
# passed to the path handler as keyword arguments (strings)
link_kwargs = {unquote(k): unquote(v[-1]) for k, v in parse_qs(dst_url.query).items()}
else:
link_kwargs = {}
# unquote from issue #2934
dst = self.link(dst_url.netloc, unquote(dst_url.path.lstrip('/')), lang, **link_kwargs)
if dst_url.fragment:
                    dst += '#' + dst_url.fragment
# Assuming the site is served over one of these, and
# since those are the only URLs we want to rewrite...
else:
if '%' in dst_url.netloc:
# convert lxml percent-encoded garbage to punycode
nl = unquote(dst_url.netloc)
try:
nl = nl.decode('utf-8')
except AttributeError:
# python 3: already unicode
pass
nl = nl.encode('idna')
if isinstance(nl, bytes):
nl = nl.decode('latin-1') # so idna stays unchanged
dst = urlunsplit((dst_url.scheme,
nl,
dst_url.path,
dst_url.query,
dst_url.fragment))
return dst
elif dst_url.scheme == 'link': # Magic absolute path link:
dst = dst_url.path
return dst
# Refuse to replace links that consist of a fragment only
if ((not dst_url.scheme) and (not dst_url.netloc) and
(not dst_url.path) and (not dst_url.params) and
(not dst_url.query) and dst_url.fragment):
return dst
# Normalize
dst = urljoin(src, dst)
# Avoid empty links.
if src == dst:
if url_type == 'absolute':
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
return dst
elif url_type == 'full_path':
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
return utils.full_path_from_urlparse(urlparse(dst))
else:
return "#"
# Check that link can be made relative, otherwise return dest
parsed_dst = urlsplit(dst)
if parsed_src[:2] != parsed_dst[:2]:
if url_type == 'absolute':
dst = urljoin(self.config['BASE_URL'], dst)
return dst
if url_type in ('full_path', 'absolute'):
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
if url_type == 'full_path':
parsed = urlparse(urljoin(self.config['BASE_URL'], dst.lstrip('/')))
dst = utils.full_path_from_urlparse(parsed)
return dst
# Now both paths are on the same site and absolute
dst_elems = parsed_dst.path.split('/')[1:]
i = 0
for (i, s), d in zip(enumerate(src_elems), dst_elems):
if s != d:
break
# Now i is the longest common prefix
result = '/'.join(['..'] * (len(src_elems) - i - 1) + dst_elems[i:])
if not result and not parsed_dst.fragment:
result = "."
# Don't forget the query part of the link
if parsed_dst.query:
result += "?" + parsed_dst.query
if parsed_dst.fragment:
result += "#" + parsed_dst.fragment
if not result:
raise ValueError("Failed to parse link: {0}".format((src, dst, i, src_elems, dst_elems)))
return result
def _make_renderfunc(self, t_data, fname=None):
def render_shortcode(*args, **kw):
context = self.GLOBAL_CONTEXT.copy()
context.update(kw)
context['_args'] = args
context['lang'] = utils.LocaleBorg().current_lang
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
context[k] = context[k](context['lang'])
output = self.template_system.render_template_to_string(t_data, context)
if fname is not None:
dependencies = [fname] + self.template_system.get_deps(fname)
else:
dependencies = []
return output, dependencies
return render_shortcode
def _register_templated_shortcodes(self):
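        # Register the built-in "template" shortcode plus every *.tmpl file bundled with
        # Nikola for the active template engine or found in the site's shortcodes/ folder.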
self.register_shortcode('template', self._template_shortcode_handler)
builtin_sc_dir = resource_filename(
'nikola',
os.path.join('data', 'shortcodes', utils.get_template_engine(self.THEMES)))
for sc_dir in [builtin_sc_dir, 'shortcodes']:
if not os.path.isdir(sc_dir):
continue
for fname in os.listdir(sc_dir):
name, ext = os.path.splitext(fname)
if ext != '.tmpl':
continue
with open(os.path.join(sc_dir, fname)) as fd:
self.register_shortcode(name, self._make_renderfunc(
fd.read(), os.path.join(sc_dir, fname)))
def _template_shortcode_handler(self, *args, **kw):
t_data = kw.pop('data', '')
context = self.GLOBAL_CONTEXT.copy()
context.update(kw)
context['_args'] = args
context['lang'] = utils.LocaleBorg().current_lang
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
context[k] = context[k](context['lang'])
output = self.template_system.render_template_to_string(t_data, context)
dependencies = self.template_system.get_string_deps(t_data)
return output, dependencies
def register_shortcode(self, name, f):
if name in self.shortcode_registry:
utils.LOGGER.warning('Shortcode name conflict: {}', name)
return
self.shortcode_registry[name] = f
def apply_shortcodes(self, data, filename=None, lang=None, extra_context=None):
if extra_context is None:
extra_context = {}
if lang is None:
lang = utils.LocaleBorg().current_lang
return shortcodes.apply_shortcodes(data, self.shortcode_registry, self, filename, lang=lang, extra_context=extra_context)
def apply_shortcodes_uuid(self, data, _shortcodes, filename=None, lang=None, extra_context=None):
if lang is None:
lang = utils.LocaleBorg().current_lang
if extra_context is None:
extra_context = {}
deps = []
for k, v in _shortcodes.items():
replacement, _deps = shortcodes.apply_shortcodes(v, self.shortcode_registry, self, filename, lang=lang, extra_context=extra_context)
data = data.replace(k, replacement)
deps.extend(_deps)
return data, deps
def _get_rss_copyright(self, lang, rss_plain):
if rss_plain:
return (
self.config['RSS_COPYRIGHT_PLAIN'](lang) or
lxml.html.fromstring(self.config['RSS_COPYRIGHT'](lang)).text_content().strip())
else:
return self.config['RSS_COPYRIGHT'](lang)
def generic_rss_feed(self, lang, title, link, description, timeline,
rss_teasers, rss_plain, feed_length=10, feed_url=None,
enclosure=_enclosure, rss_links_append_query=None, copyright_=None):
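        # Build an ExtendedRSS2 object for the newest feed_length posts: links in each post's
        # HTML are made absolute (unless rss_plain) and enclosures are attached when provided.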
rss_obj = utils.ExtendedRSS2(
title=title,
link=utils.encodelink(link),
description=description,
lastBuildDate=datetime.datetime.utcnow(),
generator='Nikola (getnikola.com)',
language=lang
)
if copyright_ is None:
copyright_ = self._get_rss_copyright(lang, rss_plain)
# Use the configured or specified copyright string if present.
if copyright_:
rss_obj.copyright = copyright_
if feed_url:
absurl = '/' + feed_url[len(self.config['BASE_URL']):]
rss_obj.xsl_stylesheet_href = self.url_replacer(absurl, "/assets/xml/rss.xsl")
items = []
feed_append_query = None
if rss_links_append_query:
if rss_links_append_query is True:
raise ValueError("RSS_LINKS_APPEND_QUERY (or FEED_LINKS_APPEND_QUERY) cannot be True. Valid values are False or a formattable string.")
feed_append_query = rss_links_append_query.format(
feedRelUri='/' + feed_url[len(self.config['BASE_URL']):],
feedFormat="rss")
for post in timeline[:feed_length]:
data = post.text(lang, teaser_only=rss_teasers, strip_html=rss_plain,
feed_read_more_link=True, feed_links_append_query=feed_append_query)
if feed_url is not None and data:
# Massage the post's HTML (unless plain)
if not rss_plain:
if 'previewimage' in post.meta[lang] and post.meta[lang]['previewimage'] not in data:
data = "<figure><img src=\"{}\"></figure> {}".format(post.meta[lang]['previewimage'], data)
try:
doc = lxml.html.document_fromstring(data)
doc.rewrite_links(lambda dst: self.url_replacer(post.permalink(), dst, lang, 'absolute'))
try:
body = doc.body
data = (body.text or '') + ''.join(
[lxml.html.tostring(child, encoding='unicode')
for child in body.iterchildren()])
except IndexError:
data = ''
except lxml.etree.ParserError as e:
if str(e) == "Document is empty":
data = ""
else:
raise
args = {
'title': post.title(lang) if post.should_show_title() else None,
'link': post.permalink(lang, absolute=True, query=feed_append_query),
'description': data,
'pubDate': (post.date if post.date.tzinfo is None else
post.date.astimezone(dateutil.tz.tzutc())),
'categories': post._tags.get(lang, []),
'creator': post.author(lang),
'guid': post.guid(lang),
}
if post.author(lang):
rss_obj.rss_attrs["xmlns:dc"] = "http://purl.org/dc/elements/1.1/"
if enclosure:
# enclosure callback returns None if post has no enclosure, or a
# 3-tuple of (url, length (0 is valid), mimetype)
enclosure_details = enclosure(post=post, lang=lang)
if enclosure_details is not None:
args['enclosure'] = rss.Enclosure(*enclosure_details)
items.append(utils.ExtendedItem(**args))
rss_obj.items = items
rss_obj.self_url = feed_url
rss_obj.rss_attrs["xmlns:atom"] = "http://www.w3.org/2005/Atom"
return rss_obj
def generic_rss_renderer(self, lang, title, link, description, timeline, output_path,
rss_teasers, rss_plain, feed_length=10, feed_url=None,
enclosure=_enclosure, rss_links_append_query=None, copyright_=None):
rss_obj = self.generic_rss_feed(lang, title, link, description, timeline,
rss_teasers, rss_plain, feed_length=feed_length, feed_url=feed_url,
enclosure=enclosure, rss_links_append_query=rss_links_append_query, copyright_=copyright_)
utils.rss_writer(rss_obj, output_path)
def path(self, kind, name, lang=None, is_link=False, **kwargs):
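        # Build the path for kind via its registered handler and return it either as a
        # filesystem path or, with is_link=True, as a site link (honouring STRIP_INDEXES).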
if lang is None:
lang = utils.LocaleBorg().current_lang
try:
path = self.path_handlers[kind](name, lang, **kwargs)
except KeyError:
utils.LOGGER.warning("Unknown path request of kind: {0}".format(kind))
return ""
# If path handler returns a string we consider it to be an absolute URL not requiring any
# further processing, i.e 'https://getnikola.com/'. See Issue #2876.
if isinstance(path, str):
return path
if path is None:
path = "#"
else:
path = [os.path.normpath(p) for p in path if p != '.'] # Fix Issue #1028
if is_link:
link = '/' + ('/'.join(path))
index_len = len(self.config['INDEX_FILE'])
if self.config['STRIP_INDEXES'] and \
link[-(1 + index_len):] == '/' + self.config['INDEX_FILE']:
return link[:-index_len]
else:
return link
else:
return os.path.join(*path)
def post_path(self, name, lang):
return [_f for _f in [self.config['TRANSLATIONS'][lang],
os.path.dirname(name),
self.config['INDEX_FILE']] if _f]
def root_path(self, name, lang):
d = self.config['TRANSLATIONS'][lang]
if d:
return [d, '']
else:
return []
def slug_path(self, name, lang):
results = [p for p in self.timeline if p.meta('slug') == name]
if not results:
utils.LOGGER.warning("Cannot resolve path request for slug: {0}".format(name))
else:
if len(results) > 1:
utils.LOGGER.warning('Ambiguous path request for slug: {0}'.format(name))
return [_f for _f in results[0].permalink(lang).split('/')]
def filename_path(self, name, lang):
results = [p for p in self.timeline if p.source_path == name]
if not results:
utils.LOGGER.warning("Cannot resolve path request for filename: {0}".format(name))
else:
if len(results) > 1:
utils.LOGGER.error("Ambiguous path request for filename: {0}".format(name))
return [_f for _f in results[0].permalink(lang).split('/') if _f]
def register_path_handler(self, kind, f):
if kind in self.path_handlers:
utils.LOGGER.warning('Conflicting path handlers for kind: {0}'.format(kind))
else:
self.path_handlers[kind] = f
def link(self, *args, **kwargs):
url = self.path(*args, is_link=True, **kwargs)
url = utils.encodelink(url)
return url
def abs_link(self, dst, protocol_relative=False):
# Normalize
if dst: # Mako templates and empty strings evaluate to False
dst = urljoin(self.config['BASE_URL'], dst.lstrip('/'))
else:
dst = self.config['BASE_URL']
url = urlparse(dst).geturl()
if protocol_relative:
url = url.split(":", 1)[1]
url = utils.encodelink(url)
return url
def rel_link(self, src, dst):
# Normalize
src = urljoin(self.config['BASE_URL'], src)
dst = urljoin(src, dst)
# Avoid empty links.
if src == dst:
return "#"
# Check that link can be made relative, otherwise return dest
parsed_src = urlsplit(src)
parsed_dst = urlsplit(dst)
if parsed_src[:2] != parsed_dst[:2]:
return utils.encodelink(dst)
# Now both paths are on the same site and absolute
src_elems = parsed_src.path.split('/')[1:]
dst_elems = parsed_dst.path.split('/')[1:]
i = 0
for (i, s), d in zip(enumerate(src_elems), dst_elems):
if s != d:
break
else:
i += 1
# Now i is the longest common prefix
url = '/'.join(['..'] * (len(src_elems) - i - 1) + dst_elems[i:])
url = utils.encodelink(url)
return url
def register_filter(self, filter_name, filter_definition):
if filter_name in self.filters:
utils.LOGGER.warning('''The filter "{0}" is defined more than once.'''.format(filter_name))
self.filters[filter_name] = filter_definition
def file_exists(self, path, not_empty=False):
exists = os.path.exists(path)
if exists and not_empty:
exists = os.stat(path).st_size > 0
return exists
def clean_task_paths(self, task):
targets = task.get('targets', None)
if targets is not None:
task['targets'] = [os.path.normpath(t) for t in targets]
return task
def gen_tasks(self, name, plugin_category, doc=''):
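        # Yield the (flattened) doit tasks from every plugin of plugin_category, run the
        # TaskMultiplier plugins over them, and finish with a grouping task named after name.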
def flatten(task):
if isinstance(task, dict):
yield task
else:
for t in task:
for ft in flatten(t):
yield ft
task_dep = []
for pluginInfo in self.plugin_manager.getPluginsOfCategory(plugin_category):
for task in flatten(pluginInfo.plugin_object.gen_tasks()):
if 'basename' not in task:
raise ValueError("Task {0} does not have a basename".format(task))
task = self.clean_task_paths(task)
if 'task_dep' not in task:
task['task_dep'] = []
task['task_dep'].extend(self.injected_deps[task['basename']])
yield task
for multi in self.plugin_manager.getPluginsOfCategory("TaskMultiplier"):
flag = False
for task in multi.plugin_object.process(task, name):
flag = True
yield self.clean_task_paths(task)
if flag:
task_dep.append('{0}_{1}'.format(name, multi.plugin_object.name))
if pluginInfo.plugin_object.is_default:
task_dep.append(pluginInfo.plugin_object.name)
yield {
'basename': name,
'doc': doc,
'actions': None,
'clean': True,
'task_dep': task_dep
}
def parse_category_name(self, category_name):
if self.config['CATEGORY_ALLOW_HIERARCHIES']:
try:
return hierarchy_utils.parse_escaped_hierarchical_category_name(category_name)
except Exception as e:
utils.LOGGER.error(str(e))
sys.exit(1)
else:
return [category_name] if len(category_name) > 0 else []
def category_path_to_category_name(self, category_path):
if self.config['CATEGORY_ALLOW_HIERARCHIES']:
return hierarchy_utils.join_hierarchical_category_path(category_path)
else:
return ''.join(category_path)
def _add_post_to_category(self, post, category_name):
category_path = self.parse_category_name(category_name)
current_path = []
current_subtree = self.category_hierarchy
for current in category_path:
current_path.append(current)
if current not in current_subtree:
current_subtree[current] = {}
current_subtree = current_subtree[current]
self.posts_per_category[self.category_path_to_category_name(current_path)].append(post)
def _sort_category_hierarchy(self):
# First create a hierarchy of TreeNodes
self.category_hierarchy_lookup = {}
def create_hierarchy(cat_hierarchy, parent=None):
result = []
for name, children in cat_hierarchy.items():
node = hierarchy_utils.TreeNode(name, parent)
node.children = create_hierarchy(children, node)
node.category_path = [pn.name for pn in node.get_path()]
node.category_name = self.category_path_to_category_name(node.category_path)
self.category_hierarchy_lookup[node.category_name] = node
if node.category_name not in self.config.get('HIDDEN_CATEGORIES'):
result.append(node)
return natsort.natsorted(result, key=lambda e: e.name, alg=natsort.ns.F | natsort.ns.IC)
root_list = create_hierarchy(self.category_hierarchy)
# Next, flatten the hierarchy
self.category_hierarchy = hierarchy_utils.flatten_tree_structure(root_list)
@staticmethod
def sort_posts_chronologically(posts, lang=None):
# Last tie breaker: sort by source path (A-Z)
posts = sorted(posts, key=lambda p: p.source_path)
# Next tie breaker: sort by title if language is given (A-Z)
if lang is not None:
posts = natsort.natsorted(posts, key=lambda p: p.title(lang), alg=natsort.ns.F | natsort.ns.IC)
# Next tie breaker: sort by date (reverse chronological order)
posts = sorted(posts, key=lambda p: p.date, reverse=True)
# Finally, sort by priority meta value (descending)
posts = sorted(posts, key=lambda p: int(p.meta('priority')) if p.meta('priority') else 0, reverse=True)
# Return result
return posts
def scan_posts(self, really=False, ignore_quit=False, quiet=False):
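        # Run every PostScanner plugin, then classify the timeline per year, month, tag and
        # category, flagging posts that would generate the same output file.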
if self._scanned and not really:
return
# Reset things
self.posts = []
self.all_posts = []
self.posts_per_year = defaultdict(list)
self.posts_per_month = defaultdict(list)
self.posts_per_tag = defaultdict(list)
self.posts_per_category = defaultdict(list)
self.tags_per_language = defaultdict(list)
self.category_hierarchy = {}
self.post_per_file = {}
self.post_per_input_file = {}
self.timeline = []
self.pages = []
for p in sorted(self.plugin_manager.getPluginsOfCategory('PostScanner'), key=operator.attrgetter('name')):
try:
timeline = p.plugin_object.scan()
except Exception:
utils.LOGGER.error('Error reading timeline')
raise
# FIXME: can there be conflicts here?
self.timeline.extend(timeline)
quit = False
# Classify posts per year/tag/month/whatever
slugged_tags = defaultdict(set)
for post in self.timeline:
if post.use_in_feeds:
self.posts.append(post)
self.posts_per_year[str(post.date.year)].append(post)
self.posts_per_month[
'{0}/{1:02d}'.format(post.date.year, post.date.month)].append(post)
for lang in self.config['TRANSLATIONS'].keys():
for tag in post.tags_for_language(lang):
_tag_slugified = utils.slugify(tag, lang)
slugged_tags[lang].add(_tag_slugified)
if post not in self.posts_per_tag[tag]:
self.posts_per_tag[tag].append(post)
self.tags_per_language[lang].extend(post.tags_for_language(lang))
self._add_post_to_category(post, post.meta('category'))
if post.is_post:
# unpublished posts
self.all_posts.append(post)
else:
self.pages.append(post)
for lang in self.config['TRANSLATIONS'].keys():
dest = post.destination_path(lang=lang)
src_dest = post.destination_path(lang=lang, extension=post.source_ext())
src_file = post.translated_source_path(lang=lang)
if dest in self.post_per_file:
utils.LOGGER.error('Two posts are trying to generate {0}: {1} and {2}'.format(
dest,
self.post_per_file[dest].source_path,
post.source_path))
quit = True
if (src_dest in self.post_per_file) and self.config['COPY_SOURCES']:
utils.LOGGER.error('Two posts are trying to generate {0}: {1} and {2}'.format(
src_dest,
self.post_per_file[dest].source_path,
post.source_path))
quit = True
self.post_per_file[dest] = post
self.post_per_file[src_dest] = post
if src_file is not None:
self.post_per_input_file[src_file] = post
# deduplicate tags_per_language
self.tags_per_language[lang] = list(set(self.tags_per_language[lang]))
# Sort everything.
self.timeline = self.sort_posts_chronologically(self.timeline)
self.posts = self.sort_posts_chronologically(self.posts)
self.all_posts = self.sort_posts_chronologically(self.all_posts)
self.pages = self.sort_posts_chronologically(self.pages)
self._sort_category_hierarchy()
for i, p in enumerate(self.posts[1:]):
p.next_post = self.posts[i]
for i, p in enumerate(self.posts[:-1]):
p.prev_post = self.posts[i + 1]
self._scanned = True
if not self.quiet:
print("done!", file=sys.stderr)
if quit and not ignore_quit:
sys.exit(1)
signal('scanned').send(self)
def generic_renderer(self, lang, output_name, template_name, filters, file_deps=None, uptodate_deps=None, context=None, context_deps_remove=None, post_deps_dict=None, url_type=None, is_fragment=False):
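        # Build a single doit task that renders template_name into output_name, wiring up
        # file and config dependencies so the task only re-runs when its inputs change.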
utils.LocaleBorg().set_locale(lang)
file_deps = copy(file_deps) if file_deps else []
file_deps += self.template_system.template_deps(template_name)
file_deps = sorted(list(filter(None, file_deps)))
context = copy(context) if context else {}
context["lang"] = lang
deps_dict = copy(context)
if context_deps_remove:
for key in context_deps_remove:
deps_dict.pop(key)
deps_dict['OUTPUT_FOLDER'] = self.config['OUTPUT_FOLDER']
deps_dict['TRANSLATIONS'] = self.config['TRANSLATIONS']
deps_dict['global'] = self.GLOBAL_CONTEXT
deps_dict['all_page_deps'] = self.ALL_PAGE_DEPS
if post_deps_dict:
deps_dict.update(post_deps_dict)
for k, v in self.GLOBAL_CONTEXT['template_hooks'].items():
deps_dict['||template_hooks|{0}||'.format(k)] = v.calculate_deps()
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
deps_dict[k] = deps_dict['global'][k](lang)
for k in self._ALL_PAGE_DEPS_TRANSLATABLE:
deps_dict[k] = deps_dict['all_page_deps'][k](lang)
deps_dict['navigation_links'] = deps_dict['global']['navigation_links'](lang)
deps_dict['navigation_alt_links'] = deps_dict['global']['navigation_alt_links'](lang)
task = {
'name': os.path.normpath(output_name),
'targets': [output_name],
'file_dep': file_deps,
'actions': [(self.render_template, [template_name, output_name,
context, url_type, is_fragment])],
'clean': True,
'uptodate': [config_changed(deps_dict, 'nikola.nikola.Nikola.generic_renderer')] + ([] if uptodate_deps is None else uptodate_deps)
}
return utils.apply_filters(task, filters)
def generic_page_renderer(self, lang, post, filters, context=None):
extension = post.compiler.extension()
output_name = os.path.join(self.config['OUTPUT_FOLDER'],
post.destination_path(lang, extension))
deps = post.deps(lang)
uptodate_deps = post.deps_uptodate(lang)
deps.extend(utils.get_asset_path(x, self.THEMES) for x in ('bundles', 'parent', 'engine'))
_theme_ini = utils.get_asset_path(self.config['THEME'] + '.theme', self.THEMES)
if _theme_ini:
deps.append(_theme_ini)
context = copy(context) if context else {}
context['post'] = post
context['title'] = post.title(lang)
context['description'] = post.description(lang)
context['permalink'] = post.permalink(lang)
if 'crumbs' not in context:
crumb_path = post.permalink(lang).lstrip('/')
if crumb_path.endswith(self.config['INDEX_FILE']):
crumb_path = crumb_path[:-len(self.config['INDEX_FILE'])]
if crumb_path.endswith('/'):
context['crumbs'] = utils.get_crumbs(crumb_path.rstrip('/'), is_file=False)
else:
context['crumbs'] = utils.get_crumbs(crumb_path, is_file=True)
if 'pagekind' not in context:
context['pagekind'] = ['generic_page']
if post.use_in_feeds:
context['enable_comments'] = True
else:
context['enable_comments'] = self.config['COMMENTS_IN_PAGES']
deps_dict = {}
if post.prev_post:
deps_dict['PREV_LINK'] = [post.prev_post.permalink(lang)]
if post.next_post:
deps_dict['NEXT_LINK'] = [post.next_post.permalink(lang)]
deps_dict['comments'] = context['enable_comments']
if post:
deps_dict['post_translations'] = post.translated_to
signal('render_post').send({
'site': self,
'post': post,
'lang': lang,
'context': context,
'deps_dict': deps_dict,
})
yield self.generic_renderer(lang, output_name, post.template_name, filters,
file_deps=deps,
uptodate_deps=uptodate_deps,
context=context,
context_deps_remove=['post'],
post_deps_dict=deps_dict,
url_type=post.url_type)
def generic_post_list_renderer(self, lang, posts, output_name, template_name, filters, extra_context):
deps = []
uptodate_deps = []
for post in posts:
deps += post.deps(lang)
uptodate_deps += post.deps_uptodate(lang)
context = {}
context["posts"] = posts
context["title"] = self.config['BLOG_TITLE'](lang)
context["description"] = self.config['BLOG_DESCRIPTION'](lang)
context["prevlink"] = None
context["nextlink"] = None
if extra_context:
context.update(extra_context)
if 'has_other_languages' not in context:
context['has_other_languages'] = False
post_deps_dict = {}
post_deps_dict["posts"] = [(p.meta[lang]['title'], p.permalink(lang)) for p in posts]
return self.generic_renderer(lang, output_name, template_name, filters,
file_deps=deps,
uptodate_deps=uptodate_deps,
context=context,
post_deps_dict=post_deps_dict)
def atom_feed_renderer(self, lang, posts, output_path, filters,
extra_context):
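        # Build an Atom feed by hand with lxml: one <entry> per post, with summary/content
        # governed by FEED_TEASERS and FEED_PLAIN, and all links made absolute.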
def atom_link(link_rel, link_type, link_href):
link = lxml.etree.Element("link")
link.set("rel", link_rel)
link.set("type", link_type)
link.set("href", utils.encodelink(link_href))
return link
utils.LocaleBorg().set_locale(lang)
deps = []
uptodate_deps = []
for post in posts:
deps += post.deps(lang)
uptodate_deps += post.deps_uptodate(lang)
context = {}
blog_title = self.config['BLOG_TITLE'](lang)
context["posts"] = posts
context["title"] = blog_title
context["description"] = self.config['BLOG_DESCRIPTION'](lang)
context["lang"] = lang
context.update(extra_context)
context["title"] = "{0} ({1})".format(blog_title, context["title"]) if blog_title != context["title"] else blog_title
deps_context = copy(context)
deps_context["posts"] = [(p.meta[lang]['title'], p.permalink(lang)) for p in
posts]
deps_context["global"] = self.GLOBAL_CONTEXT
deps_context["all_page_deps"] = self.ALL_PAGE_DEPS
for k in self._GLOBAL_CONTEXT_TRANSLATABLE:
deps_context[k] = deps_context['global'][k](lang)
for k in self._ALL_PAGE_DEPS_TRANSLATABLE:
deps_context[k] = deps_context['all_page_deps'][k](lang)
feed_xsl_link = self.abs_link("/assets/xml/atom.xsl")
feed_root = lxml.etree.Element("feed")
feed_root.addprevious(lxml.etree.ProcessingInstruction(
"xml-stylesheet",
            'href="' + utils.encodelink(feed_xsl_link) + '" type="text/xsl" media="all"'))
feed_root.set("{http://www.w3.org/XML/1998/namespace}lang", lang)
feed_root.set("xmlns", "http://www.w3.org/2005/Atom")
feed_title = lxml.etree.SubElement(feed_root, "title")
feed_title.text = context["title"]
feed_id = lxml.etree.SubElement(feed_root, "id")
feed_id.text = self.abs_link(context["feedlink"])
feed_updated = lxml.etree.SubElement(feed_root, "updated")
feed_updated.text = utils.LocaleBorg().formatted_date('webiso', datetime.datetime.now(tz=dateutil.tz.tzutc()))
feed_author = lxml.etree.SubElement(feed_root, "author")
feed_author_name = lxml.etree.SubElement(feed_author, "name")
feed_author_name.text = self.config["BLOG_AUTHOR"](lang)
feed_root.append(atom_link("self", "application/atom+xml",
self.abs_link(context["feedlink"])))
feed_root.append(atom_link("alternate", "text/html",
self.abs_link(context["permalink"])))
feed_generator = lxml.etree.SubElement(feed_root, "generator")
feed_generator.set("uri", "https://getnikola.com/")
feed_generator.text = "Nikola"
feed_append_query = None
if self.config["FEED_LINKS_APPEND_QUERY"]:
feed_append_query = self.config["FEED_LINKS_APPEND_QUERY"].format(
feedRelUri=context["feedlink"],
feedFormat="atom")
def atom_post_text(post, text):
if not self.config["FEED_PLAIN"]:
if 'previewimage' in post.meta[lang] and post.meta[lang]['previewimage'] not in text:
text = "<figure><img src=\"{}\"></figure> {}".format(post.meta[lang]['previewimage'], text)
# FIXME: this is duplicated with code in Post.text() and generic_rss_renderer
try:
doc = lxml.html.document_fromstring(text)
doc.rewrite_links(lambda dst: self.url_replacer(post.permalink(lang), dst, lang, 'absolute'))
try:
body = doc.body
text = (body.text or '') + ''.join(
[lxml.html.tostring(child, encoding='unicode')
for child in body.iterchildren()])
except IndexError: # No body there, it happens sometimes
text = ''
except lxml.etree.ParserError as e:
if str(e) == "Document is empty":
text = ""
else: # let other errors raise
raise
return text.strip()
for post in posts:
summary = atom_post_text(post, post.text(lang, teaser_only=True,
strip_html=self.config["FEED_PLAIN"],
feed_read_more_link=True,
feed_links_append_query=feed_append_query))
content = None
if not self.config["FEED_TEASERS"]:
content = atom_post_text(post, post.text(lang, teaser_only=self.config["FEED_TEASERS"],
strip_html=self.config["FEED_PLAIN"],
feed_read_more_link=True,
feed_links_append_query=feed_append_query))
entry_root = lxml.etree.SubElement(feed_root, "entry")
entry_title = lxml.etree.SubElement(entry_root, "title")
entry_title.text = post.title(lang)
entry_id = lxml.etree.SubElement(entry_root, "id")
entry_id.text = post.permalink(lang, absolute=True)
entry_updated = lxml.etree.SubElement(entry_root, "updated")
entry_updated.text = post.formatted_updated('webiso')
entry_published = lxml.etree.SubElement(entry_root, "published")
entry_published.text = post.formatted_date('webiso')
entry_author = lxml.etree.SubElement(entry_root, "author")
entry_author_name = lxml.etree.SubElement(entry_author, "name")
entry_author_name.text = post.author(lang)
entry_root.append(atom_link("alternate", "text/html",
post.permalink(lang, absolute=True,
query=feed_append_query)))
entry_summary = lxml.etree.SubElement(entry_root, "summary")
if not self.config["FEED_PLAIN"]:
entry_summary.set("type", "html")
else:
entry_summary.set("type", "text")
entry_summary.text = summary
if content:
entry_content = lxml.etree.SubElement(entry_root, "content")
if not self.config["FEED_PLAIN"]:
entry_content.set("type", "html")
else:
entry_content.set("type", "text")
entry_content.text = content
for category in post.tags_for_language(lang):
entry_category = lxml.etree.SubElement(entry_root, "category")
entry_category.set("term", utils.slugify(category, lang))
entry_category.set("label", category)
dst_dir = os.path.dirname(output_path)
utils.makedirs(dst_dir)
with io.open(output_path, "w+", encoding="utf-8") as atom_file:
data = lxml.etree.tostring(feed_root.getroottree(), encoding="UTF-8", pretty_print=True, xml_declaration=True)
if isinstance(data, bytes):
data = data.decode('utf-8')
atom_file.write(data)
def generic_index_renderer(self, lang, posts, indexes_title, template_name, context_source, kw, basename, page_link, page_path, additional_dependencies=None):
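        # Split posts into index pages of INDEX_DISPLAY_POST_COUNT entries (using "static"
        # numbering when INDEXES_STATIC), yield one render task per page and, if configured,
        # a redirect for the pretty page URL of the first page.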
# Update kw
kw = kw.copy()
kw["tag_pages_are_indexes"] = self.config['TAG_PAGES_ARE_INDEXES']
kw["index_display_post_count"] = self.config['INDEX_DISPLAY_POST_COUNT']
kw["index_teasers"] = self.config['INDEX_TEASERS']
kw["indexes_pages"] = self.config['INDEXES_PAGES'](lang)
kw["indexes_pages_main"] = self.config['INDEXES_PAGES_MAIN']
kw["indexes_static"] = self.config['INDEXES_STATIC']
kw['indexes_pretty_page_url'] = self.config["INDEXES_PRETTY_PAGE_URL"]
kw['show_index_page_navigation'] = self.config['SHOW_INDEX_PAGE_NAVIGATION']
if additional_dependencies is None:
additional_dependencies = []
# Split in smaller lists
lists = []
if kw["indexes_static"]:
lists.append(posts[:kw["index_display_post_count"]])
posts = posts[kw["index_display_post_count"]:]
while posts:
lists.append(posts[-kw["index_display_post_count"]:])
posts = posts[:-kw["index_display_post_count"]]
else:
while posts:
lists.append(posts[:kw["index_display_post_count"]])
posts = posts[kw["index_display_post_count"]:]
if not lists:
lists.append([])
num_pages = len(lists)
displayed_page_numbers = [utils.get_displayed_page_number(i, num_pages, self) for i in range(num_pages)]
page_links = [page_link(i, page_number, num_pages, False) for i, page_number in enumerate(displayed_page_numbers)]
if kw['show_index_page_navigation']:
# Since the list displayed_page_numbers is not necessarily
# sorted -- in case INDEXES_STATIC is True, it is of the
# form [num_pages, 1, 2, ..., num_pages - 1] -- we order it
# via a map. This allows to not replicate the logic of
# utils.get_displayed_page_number() here.
if not kw["indexes_pages_main"] and not kw["indexes_static"]:
temp_map = {page_number: link for page_number, link in zip(displayed_page_numbers, page_links)}
else:
temp_map = {page_number - 1: link for page_number, link in zip(displayed_page_numbers, page_links)}
page_links_context = [temp_map[i] for i in range(num_pages)]
for i, post_list in enumerate(lists):
context = context_source.copy()
if 'pagekind' not in context:
context['pagekind'] = ['index']
if 'has_other_languages' not in context:
context['has_other_languages'] = False
ipages_i = displayed_page_numbers[i]
if kw["indexes_pages"]:
indexes_pages = kw["indexes_pages"] % ipages_i
else:
if kw["indexes_pages_main"]:
ipages_msg = "page %d"
else:
ipages_msg = "old posts, page %d"
indexes_pages = " (" + \
kw["messages"][lang][ipages_msg] % ipages_i + ")"
if i > 0 or kw["indexes_pages_main"]:
context["title"] = indexes_title + indexes_pages
else:
context["title"] = indexes_title
context["prevlink"] = None
context["nextlink"] = None
context['index_teasers'] = kw['index_teasers']
prevlink = None
nextlink = None
if kw["indexes_static"]:
if i > 0:
if i < num_pages - 1:
prevlink = i + 1
elif i == num_pages - 1:
prevlink = 0
if num_pages > 1:
if i > 1:
nextlink = i - 1
elif i == 0:
nextlink = num_pages - 1
else:
if i >= 1:
prevlink = i - 1
if i < num_pages - 1:
nextlink = i + 1
if prevlink is not None:
context["prevlink"] = page_links[prevlink]
context["prevfeedlink"] = page_link(prevlink, displayed_page_numbers[prevlink],
num_pages, False, extension=".atom")
if nextlink is not None:
context["nextlink"] = page_links[nextlink]
context["nextfeedlink"] = page_link(nextlink, displayed_page_numbers[nextlink],
num_pages, False, extension=".atom")
context['show_index_page_navigation'] = kw['show_index_page_navigation']
if kw['show_index_page_navigation']:
context['page_links'] = page_links_context
if not kw["indexes_pages_main"] and not kw["indexes_static"]:
context['current_page'] = ipages_i
else:
context['current_page'] = ipages_i - 1
context['prev_next_links_reversed'] = kw['indexes_static']
context["permalink"] = page_links[i]
context["is_frontmost_index"] = i == 0
# Add dependencies to featured posts
if 'featured' in context:
for post in context['featured']:
additional_dependencies += post.deps_uptodate(lang)
output_name = os.path.join(kw['output_folder'], page_path(i, ipages_i, num_pages, False))
task = self.generic_post_list_renderer(
lang,
post_list,
output_name,
template_name,
kw['filters'],
context,
)
task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.nikola.Nikola.generic_index_renderer')] + additional_dependencies
task['basename'] = basename
yield task
if kw["indexes_pages_main"] and kw['indexes_pretty_page_url'](lang):
# create redirection
output_name = os.path.join(kw['output_folder'], page_path(0, displayed_page_numbers[0], num_pages, True))
link = page_links[0]
yield utils.apply_filters({
'basename': basename,
'name': output_name,
'targets': [output_name],
'actions': [(utils.create_redirect, (output_name, link))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.nikola.Nikola.generic_index_renderer')],
}, kw["filters"])
def generic_atom_renderer(self, lang, posts, context_source, kw, basename, classification, kind, additional_dependencies=None):
# Update kw
kw = kw.copy()
kw["feed_length"] = self.config['FEED_LENGTH']
kw['generate_atom'] = self.config["GENERATE_ATOM"]
kw['feed_links_append_query'] = self.config["FEED_LINKS_APPEND_QUERY"]
kw['feed_teasers'] = self.config['FEED_TEASERS']
kw['feed_plain'] = self.config['FEED_PLAIN']
if additional_dependencies is None:
additional_dependencies = []
post_list = posts[:kw["feed_length"]]
feedlink = self.link(kind + "_atom", classification, lang)
feedpath = self.path(kind + "_atom", classification, lang)
context = context_source.copy()
if 'has_other_languages' not in context:
context['has_other_languages'] = False
output_name = os.path.join(kw['output_folder'], feedpath)
context["feedlink"] = feedlink
task = {
"basename": basename,
"name": output_name,
"file_dep": sorted([_.base_path for _ in post_list]),
"task_dep": ['render_posts'],
"targets": [output_name],
"actions": [(self.atom_feed_renderer,
(lang,
post_list,
output_name,
kw['filters'],
context,))],
"clean": True,
"uptodate": [utils.config_changed(kw, 'nikola.nikola.Nikola.atom_feed_renderer')] + additional_dependencies
}
yield utils.apply_filters(task, kw['filters'])
def __repr__(self):
return '<Nikola Site: {0!r}>'.format(self.config['BLOG_TITLE'](self.config['DEFAULT_LANG']))
| true | true |
1c2b460701bd3142ad4a0f4b29e7973ff50a5b70 | 1,584 | py | Python | setup.py | anton44eg/fixturegen | dde56578911efaf802a11fe7341becda4febb15d | [
"MIT"
] | null | null | null | setup.py | anton44eg/fixturegen | dde56578911efaf802a11fe7341becda4febb15d | [
"MIT"
] | null | null | null | setup.py | anton44eg/fixturegen | dde56578911efaf802a11fe7341becda4febb15d | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import os
from setuptools import setup, find_packages
VERSION = '0.8'
BASEDIR = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(BASEDIR, 'README.rst')).read()
setup(
name='fixturegen',
version=VERSION,
packages=find_packages(),
include_package_data=True,
install_requires=[
"mako >= 1.0",
"click >= 3.0",
"sqlalchemy >= 0.6"
],
entry_points={
'console_scripts':
['fixturegen-sqlalchemy = fixturegen.cli:sqlalchemy'],
},
url='https://github.com/anton44eg/fixturegen',
download_url='https://github.com/anton44eg/fixturegen/archive/{0}.tar.gz'
.format(VERSION),
license='MIT',
author='Anton Simernia',
author_email='anton.simernya@gmail.com',
keywords=['fixture', 'sqlalchemy', 'testing'],
description='Fixture generator for fixture module',
long_description=README,
package_data={
'fixturegen': ['templates/*.mako'],
},
zip_safe=False,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing',
'Topic :: Database',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='test_fixturegen',
setup_requires=[
"flake8",
"nose>=1.0",
"coverage"
]
)
| 28.8 | 77 | 0.611111 | from __future__ import absolute_import
import os
from setuptools import setup, find_packages
VERSION = '0.8'
BASEDIR = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(BASEDIR, 'README.rst')).read()
setup(
name='fixturegen',
version=VERSION,
packages=find_packages(),
include_package_data=True,
install_requires=[
"mako >= 1.0",
"click >= 3.0",
"sqlalchemy >= 0.6"
],
entry_points={
'console_scripts':
['fixturegen-sqlalchemy = fixturegen.cli:sqlalchemy'],
},
url='https://github.com/anton44eg/fixturegen',
download_url='https://github.com/anton44eg/fixturegen/archive/{0}.tar.gz'
.format(VERSION),
license='MIT',
author='Anton Simernia',
author_email='anton.simernya@gmail.com',
keywords=['fixture', 'sqlalchemy', 'testing'],
description='Fixture generator for fixture module',
long_description=README,
package_data={
'fixturegen': ['templates/*.mako'],
},
zip_safe=False,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing',
'Topic :: Database',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='test_fixturegen',
setup_requires=[
"flake8",
"nose>=1.0",
"coverage"
]
)
| true | true |
1c2b462f393bafc01b1dadb21b016768535775d3 | 580 | py | Python | tuples.py | vikhyatprabhu/python-intermediate | c318c515eb13376f5dfd0f4c3b3d74c8c403ab44 | [
"MIT"
] | null | null | null | tuples.py | vikhyatprabhu/python-intermediate | c318c515eb13376f5dfd0f4c3b3d74c8c403ab44 | [
"MIT"
] | null | null | null | tuples.py | vikhyatprabhu/python-intermediate | c318c515eb13376f5dfd0f4c3b3d74c8c403ab44 | [
"MIT"
] | null | null | null | #Tuple : ordered , immutable , allows duplicate
mytuple = ("Vikhyat" , 28 , "Shirali")
print(mytuple)
singletuple = ("Vikhyat",)
print(singletuple)
#Tuple from iterable
mytuple = tuple(["Vikhyat" , 28 , "Shirali"])
print(mytuple)
item = mytuple[0]
print(item)
# NOT SUPPORTED assignment
# mytuple[1] = 29
#Loop
for x in mytuple:
print(x)
letters = ('a' , 'b' , 'b' , 'c' , 'a', 'e', 'e')
print(letters.count('b'))
print(letters.index('e'))
import timeit
print(timeit.timeit(stmt="[0,1,2,3,4,5]", number=10000))
print(timeit.timeit(stmt="(0,1,2,3,4,5)", number=10000))
| 20 | 56 | 0.648276 |
mytuple = ("Vikhyat" , 28 , "Shirali")
print(mytuple)
singletuple = ("Vikhyat",)
print(singletuple)
mytuple = tuple(["Vikhyat" , 28 , "Shirali"])
print(mytuple)
item = mytuple[0]
print(item)
for x in mytuple:
print(x)
letters = ('a' , 'b' , 'b' , 'c' , 'a', 'e', 'e')
print(letters.count('b'))
print(letters.index('e'))
import timeit
print(timeit.timeit(stmt="[0,1,2,3,4,5]", number=10000))
print(timeit.timeit(stmt="(0,1,2,3,4,5)", number=10000))
| true | true |
1c2b47d3c39b4194cfb90ff049a80f39236b1d76 | 1,403 | py | Python | trackash/users/tests/test_views.py | black-redoc/trackash | 99ded8445eaaa1bdf616d43c36ba402356e2f9d3 | [
"MIT"
] | null | null | null | trackash/users/tests/test_views.py | black-redoc/trackash | 99ded8445eaaa1bdf616d43c36ba402356e2f9d3 | [
"MIT"
] | null | null | null | trackash/users/tests/test_views.py | black-redoc/trackash | 99ded8445eaaa1bdf616d43c36ba402356e2f9d3 | [
"MIT"
] | null | null | null | import pytest
from django.test import RequestFactory
from trackash.users.models import User
from trackash.users.views import UserRedirectView, UserUpdateView
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def test_get_success_url(self, user: User, request_factory: RequestFactory):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(self, user: User, request_factory: RequestFactory):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
class TestUserRedirectView:
def test_get_redirect_url(self, user: User, request_factory: RequestFactory):
view = UserRedirectView()
request = request_factory.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
| 29.851064 | 81 | 0.684248 | import pytest
from django.test import RequestFactory
from trackash.users.models import User
from trackash.users.views import UserRedirectView, UserUpdateView
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
def test_get_success_url(self, user: User, request_factory: RequestFactory):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(self, user: User, request_factory: RequestFactory):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
class TestUserRedirectView:
def test_get_redirect_url(self, user: User, request_factory: RequestFactory):
view = UserRedirectView()
request = request_factory.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
| true | true |
1c2b486f75bc1d0322615792bab4ef66e4af1fc3 | 1,843 | py | Python | project_euler/python/058_spiral_primes.py | Sabihxh/ProjectEuler | 8ab1387f41cbce0d5216ed98fa06d754cbc324c1 | [
"MIT"
] | 1 | 2018-03-20T12:04:06.000Z | 2018-03-20T12:04:06.000Z | project_euler/python/058_spiral_primes.py | Sabihxh/ProjectEuler | 8ab1387f41cbce0d5216ed98fa06d754cbc324c1 | [
"MIT"
] | null | null | null | project_euler/python/058_spiral_primes.py | Sabihxh/ProjectEuler | 8ab1387f41cbce0d5216ed98fa06d754cbc324c1 | [
"MIT"
] | null | null | null | from utils import is_prime
problem = """
Starting with 1 and spiralling anticlockwise in the following way,
a square spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along the bottom right diagonal,
but what is more interesting is that 8 out of the 13 numbers lying along both diagonals
are prime; that is, a ratio of 8/13 ≈ 62%.
If one complete new layer is wrapped around the spiral above, a square spiral
with side length 9 will be formed. If this process is continued, what is the side
length of the square spiral for which the ratio of primes along both diagonals first
falls below 10%?
"""
def is_prime(n):
if n == 2 or n == 3:
return True
if n < 2 or n % 2 == 0:
return False
if n < 9:
return True
if n % 3 == 0:
return False
r = int(n ** 0.5)
f = 5
while f <= r:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
def solution():
target_ratio = 0.1
# coefficients of the 3 quadratic equations for non-squared diagonal numbers
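    # For side length 2n-1 the bottom-right corner is (2n-1)^2; the other three corners
    # are 4n^2 - 6n + 3, 4n^2 - 8n + 5 and 4n^2 - 10n + 7, i.e. (2n-1)^2 minus k*(2n-2) for k = 1..3.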
coefficients = [(4, -10, 7), (4, -8, 5), (4, -6, 3)]
primes_count = 0
for n in range(1, 100000):
side_length = (2*n) - 1
diagonal_count = (4*n) - 3
print(f'n: {n}, side_length: {side_length}, diagonal_count: {diagonal_count}')
for coeff in coefficients:
a, b, c = coeff
s = a*(n**2) + (b*n) + c
print(f'coeff: {coeff}', s)
if is_prime(s):
primes_count += 1
ratio = primes_count/diagonal_count
print(f'primes_count: {primes_count}, diagonal_count: {diagonal_count} ratio: {ratio}')
if n > 2 and ratio < target_ratio:
print(f'side_length: {side_length}')
break
print('*'*100)
if __name__ == "__main__":
solution()
| 26.328571 | 89 | 0.66522 | from utils import is_prime
problem = """
Starting with 1 and spiralling anticlockwise in the following way,
a square spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along the bottom right diagonal,
but what is more interesting is that 8 out of the 13 numbers lying along both diagonals
are prime; that is, a ratio of 8/13 ≈ 62%.
If one complete new layer is wrapped around the spiral above, a square spiral
with side length 9 will be formed. If this process is continued, what is the side
length of the square spiral for which the ratio of primes along both diagonals first
falls below 10%?
"""
def is_prime(n):
if n == 2 or n == 3:
return True
if n < 2 or n % 2 == 0:
return False
if n < 9:
return True
if n % 3 == 0:
return False
r = int(n ** 0.5)
f = 5
while f <= r:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
def solution():
target_ratio = 0.1
coefficients = [(4, -10, 7), (4, -8, 5), (4, -6, 3)]
primes_count = 0
for n in range(1, 100000):
side_length = (2*n) - 1
diagonal_count = (4*n) - 3
print(f'n: {n}, side_length: {side_length}, diagonal_count: {diagonal_count}')
for coeff in coefficients:
a, b, c = coeff
s = a*(n**2) + (b*n) + c
print(f'coeff: {coeff}', s)
if is_prime(s):
primes_count += 1
ratio = primes_count/diagonal_count
print(f'primes_count: {primes_count}, diagonal_count: {diagonal_count} ratio: {ratio}')
if n > 2 and ratio < target_ratio:
print(f'side_length: {side_length}')
break
print('*'*100)
if __name__ == "__main__":
solution()
| true | true |
1c2b49b6074b2fa09d9c940499b2b83155e28ee7 | 2,394 | py | Python | chaospy/distributions/operators/arccos.py | krystophny/chaospy | e09f8e3f6dfc26145f15774edd5b03665140712f | [
"MIT"
] | 1 | 2019-12-20T00:32:44.000Z | 2019-12-20T00:32:44.000Z | chaospy/distributions/operators/arccos.py | QianWanghhu/chaospy | 18ff6c4fc56c632825e53fb24e17de51a7febd7d | [
"MIT"
] | null | null | null | chaospy/distributions/operators/arccos.py | QianWanghhu/chaospy | 18ff6c4fc56c632825e53fb24e17de51a7febd7d | [
"MIT"
] | null | null | null | """Arc-Cosine."""
import numpy
from ..baseclass import Dist
from .. import evaluation, approximation
class Arccos(Dist):
"""
Arc-Cosine.
Args:
dist (Dist): Distribution to perform transformation on.
Example:
>>> distribution = chaospy.Arccos(chaospy.Uniform(0, 1))
>>> print(distribution)
Arccos(Uniform(lower=0, upper=1))
>>> q = numpy.linspace(0, 1, 6)[1:-1]
>>> print(numpy.around(distribution.inv(q), 4))
[0.6435 0.9273 1.1593 1.3694]
>>> print(numpy.around(distribution.fwd(distribution.inv(q)), 4))
[0.2 0.4 0.6 0.8]
>>> print(numpy.around(distribution.pdf(distribution.inv(q)), 4))
[0.6 0.8 0.9165 0.9798]
>>> print(numpy.around(distribution.sample(4), 4))
[1.2171 0.4843 1.5211 1.0265]
>>> print(numpy.around(distribution.mom(1), 4))
1.0
>>> print(numpy.around(distribution.ttr([0, 1, 2]), 4))
[[1. 0.8406 0.8083]
[1. 0.1416 0.1492]]
"""
def __init__(self, dist):
assert isinstance(dist, Dist)
assert numpy.all(dist.range() >= -1)
assert numpy.all(dist.range() <= 1)
Dist.__init__(self, dist=dist)
def _pdf(self, x, dist, cache):
return evaluation.evaluate_density(
dist, numpy.cos(x), cache=cache)*numpy.sin(x)
def _cdf(self, x, dist, cache):
return 1-evaluation.evaluate_forward(dist, numpy.cos(x), cache=cache)
def _ppf(self, q, dist, cache):
return numpy.arccos(evaluation.evaluate_inverse(dist, 1-q, cache=cache))
def _bnd(self, x, dist, cache):
return numpy.arccos(evaluation.evaluate_bound(
dist, numpy.cos(x), cache=cache))[::-1]
def _mom(self, x, dist, cache):
return approximation.approximate_moment(self, x)
def __len__(self):
return len(self.prm["dist"])
def __str__(self):
return self.__class__.__name__ + "(" + str(self.prm["dist"]) + ")"
def _fwd_cache(self, cache):
dist = evaluation.get_forward_cache(self.prm["dist"], cache)
if not isinstance(dist, Dist):
return numpy.arccos(dist)
return self
def _inv_cache(self, cache):
dist = evaluation.get_forward_cache(self.prm["dist"], cache)
if not isinstance(dist, Dist):
return numpy.cos(dist)
return self
| 31.92 | 80 | 0.591061 | import numpy
from ..baseclass import Dist
from .. import evaluation, approximation
class Arccos(Dist):
def __init__(self, dist):
assert isinstance(dist, Dist)
assert numpy.all(dist.range() >= -1)
assert numpy.all(dist.range() <= 1)
Dist.__init__(self, dist=dist)
def _pdf(self, x, dist, cache):
return evaluation.evaluate_density(
dist, numpy.cos(x), cache=cache)*numpy.sin(x)
def _cdf(self, x, dist, cache):
return 1-evaluation.evaluate_forward(dist, numpy.cos(x), cache=cache)
def _ppf(self, q, dist, cache):
return numpy.arccos(evaluation.evaluate_inverse(dist, 1-q, cache=cache))
def _bnd(self, x, dist, cache):
return numpy.arccos(evaluation.evaluate_bound(
dist, numpy.cos(x), cache=cache))[::-1]
def _mom(self, x, dist, cache):
return approximation.approximate_moment(self, x)
def __len__(self):
return len(self.prm["dist"])
def __str__(self):
return self.__class__.__name__ + "(" + str(self.prm["dist"]) + ")"
def _fwd_cache(self, cache):
dist = evaluation.get_forward_cache(self.prm["dist"], cache)
if not isinstance(dist, Dist):
return numpy.arccos(dist)
return self
def _inv_cache(self, cache):
dist = evaluation.get_forward_cache(self.prm["dist"], cache)
if not isinstance(dist, Dist):
return numpy.cos(dist)
return self
| true | true |
1c2b4a02dbba8f5c98568033d3c3db8e69c4b68a | 393 | py | Python | sdnantwr/wsgi.py | eewinkk/sdnantwr | 3fc395a2ae268efd7db4d4d4e3424c0c4252ed5b | [
"MIT"
] | null | null | null | sdnantwr/wsgi.py | eewinkk/sdnantwr | 3fc395a2ae268efd7db4d4d4e3424c0c4252ed5b | [
"MIT"
] | null | null | null | sdnantwr/wsgi.py | eewinkk/sdnantwr | 3fc395a2ae268efd7db4d4d4e3424c0c4252ed5b | [
"MIT"
] | null | null | null | """
WSGI config for sdnantwr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sdnantwr.settings')
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sdnantwr.settings')
application = get_wsgi_application()
| true | true |
1c2b4a1c07a03c84645790de2fd147b0a49af942 | 779 | py | Python | Python Files/Dataset_Formating/Audio_splicing.py | brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch | 7834fe5d709e894322ad76ef118067febaa78bce | [
"MIT"
] | 1 | 2021-04-13T16:22:27.000Z | 2021-04-13T16:22:27.000Z | Python Files/Dataset_Formating/Audio_splicing.py | brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch | 7834fe5d709e894322ad76ef118067febaa78bce | [
"MIT"
] | null | null | null | Python Files/Dataset_Formating/Audio_splicing.py | brennanMosher/Music-Genre-Recognition-using-a-Machine-Learning-Appraoch | 7834fe5d709e894322ad76ef118067febaa78bce | [
"MIT"
] | null | null | null | from pydub import AudioSegment
import os
import math
from pathlib import Path
'''
Splice wav files into multiple segments.
'''
LENGTH = 3 # Set splice length in seconds
def splice(audioPath, outputPath):
# try:
# os.mkdir('Spliced Spectrogram training') # Need to figure out where to put this
# except OSError:
# print("Creation of the directory failed")
audio = AudioSegment.from_wav(audioPath)
count = math.ceil(audio.duration_seconds/LENGTH) # Do we want the last part of audio?
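    # pydub indexes AudioSegment objects in milliseconds, so the window
    # endpoints t1/t2 below are tracked in ms (LENGTH seconds * 1000).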
t1 = 0
t2 = LENGTH*1000
for i in range(count):
newAudio = audio[t1:t2]
newPath = outputPath+Path(audioPath).stem+'_splice'+str(i)+'.wav'
newAudio.export(newPath, format="wav")
t1 = t2
t2 = t2 + LENGTH*1000
| 25.129032 | 89 | 0.65982 | from pydub import AudioSegment
import os
import math
from pathlib import Path
LENGTH = 3
def splice(audioPath, outputPath):
    audio = AudioSegment.from_wav(audioPath)
count = math.ceil(audio.duration_seconds/LENGTH)
t1 = 0
t2 = LENGTH*1000
for i in range(count):
newAudio = audio[t1:t2]
newPath = outputPath+Path(audioPath).stem+'_splice'+str(i)+'.wav'
newAudio.export(newPath, format="wav")
t1 = t2
t2 = t2 + LENGTH*1000
| true | true |
1c2b4a412455052ce8ddb06dd979e5dc0bf88080 | 4,941 | py | Python | mstools/molecule/molecule.py | Xiangyan93/mstools | 7143dbfc2eb4e82e6631652a0c1b38a793dcc678 | [
"MIT"
] | null | null | null | mstools/molecule/molecule.py | Xiangyan93/mstools | 7143dbfc2eb4e82e6631652a0c1b38a793dcc678 | [
"MIT"
] | null | null | null | mstools/molecule/molecule.py | Xiangyan93/mstools | 7143dbfc2eb4e82e6631652a0c1b38a793dcc678 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Dict, Iterator, List, Optional, Union, Literal, Tuple
import warnings
import random
from openbabel import openbabel
import openbabel.pybel as pybel
from .saved_mol2 import get_smiles_mol2_dict
class Molecule:
"""This class is used to create molecular 3D structure from SMILES.
Parameters
----------
smiles: str
SMILES string of input molecule.
algorithm: 'openbabel'
The algorithm used to generate molecular 3D structure.
read_saved: bool
Set to True, if the molecule is saved in saved_mol2, then the structure in it will directly used.
seed: int
Random seed.
"""
def __init__(self, smiles: str,
algorithm: Literal['openbabel'] = 'openbabel',
read_saved: bool = True,
seed: int = 0):
self.smiles = smiles
self.algorithm = algorithm
self.read_saved = read_saved
self.seed = seed
if algorithm == 'openbabel':
self.mol = self._mol_openbabel(minimize=True)
elif algorithm == 'rdkit':
# TODO
pass
else:
raise RuntimeError(f'Unknown 3D coordinates generate algorithm {algorithm}')
@property
def charge(self) -> int:
if self.algorithm == 'openbabel':
return self.mol.charge
@property
def spin(self) -> int:
if self.algorithm == 'openbabel':
return self.mol.spin
@property
def n_atoms(self) -> int:
if self.algorithm == 'openbabel':
return len(self.mol.atoms)
@property
def molwt(self) -> float:
if self.algorithm == 'openbabel':
return self.mol.molwt
@property
def formula(self) -> str:
if self.algorithm == 'openbabel':
return self.mol.formula
@property
def smiles2mol2(self) -> Dict[str, str]:
if not hasattr(self, '__smiles2mol2_dict'):
self.__smiles2mol2_dict = get_smiles_mol2_dict()
return self.__smiles2mol2_dict
def write(self, file: str = None, filetype: Literal['pdb', 'mol2', 'xyz'] = 'mol2'):
if self.algorithm == 'openbabel':
mol = self.mol
if file is not None:
mol.write(filetype, file, overwrite=True)
else:
return mol.write(filetype)
def _conformers_openbabel(self, n_select: int = 10, n_try: int = 10) -> List[pybel.Molecule]:
"""Generate a list of conformers using openbabel.
Parameters
----------
n_select: int
The number of conformers to be returned.
n_try
The number of conformers try to generated by random noise to coordinates.
Returns
-------
conformers: List[pybel.Molecule]
A list consists of n_select pybel.Molecule objects.
"""
if n_select == 0:
return []
random.seed(self.seed)
ff = openbabel.OBForceField.FindForceField('mmff94')
if n_try is None:
n_try = n_select
if n_try < n_select:
warnings.warn(
f'n_try={n_try} is set to be smaller than n_select={n_select}. '
f'n_try is set to {n_select}')
n_try = n_select
x_list = []
for atom in self.mol.atoms:
for x in atom.coords:
x_list.append(x)
xmin, xmax = min(x_list), max(x_list)
xspan = xmax - xmin
conformers = []
for i in range(n_try):
conformer = self._mol_openbabel(minimize=False)
for atom in conformer.atoms:
obatom = atom.OBAtom
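                # Scatter each atom to a random position within the molecule's
                # coordinate span (scaled differently per axis) before local optimisation.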
random_coord = [(random.random() * xspan + xmin) * k for k in [2, 1, 0.5]]
obatom.SetVector(*random_coord)
conformer.localopt()
ff.Setup(conformer.OBMol)
conformer.OBMol.SetEnergy(ff.Energy())
conformers.append(conformer)
conformers.sort(key=lambda x: x.energy)
return conformers[:n_select]
def _mol_openbabel(self, minimize: bool = False) -> pybel.Molecule:
"""Generate a openbabel conformer.
Parameters
----------
minimize: bool
If True, the molecular coordinates will be optimized using classical force field.
Returns
-------
mol: pybel.Molecule
A pybel.Molecule object.
"""
try:
mol = pybel.readstring('smi', self.smiles)
except:
raise RuntimeError('Cannot create molecule from SMILES using openbabel.')
if self.read_saved and self.smiles in self.smiles2mol2:
mol = next(pybel.readfile('mol2', self.smiles2mol2[self.smiles]))
else:
mol.addh()
mol.make3D()
if minimize:
mol.localopt()
return mol
| 31.272152 | 105 | 0.571747 |
from typing import Dict, Iterator, List, Optional, Union, Literal, Tuple
import warnings
import random
from openbabel import openbabel
import openbabel.pybel as pybel
from .saved_mol2 import get_smiles_mol2_dict
class Molecule:
def __init__(self, smiles: str,
algorithm: Literal['openbabel'] = 'openbabel',
read_saved: bool = True,
seed: int = 0):
self.smiles = smiles
self.algorithm = algorithm
self.read_saved = read_saved
self.seed = seed
if algorithm == 'openbabel':
self.mol = self._mol_openbabel(minimize=True)
elif algorithm == 'rdkit':
pass
else:
raise RuntimeError(f'Unknown 3D coordinates generate algorithm {algorithm}')
@property
def charge(self) -> int:
if self.algorithm == 'openbabel':
return self.mol.charge
@property
def spin(self) -> int:
if self.algorithm == 'openbabel':
return self.mol.spin
@property
def n_atoms(self) -> int:
if self.algorithm == 'openbabel':
return len(self.mol.atoms)
@property
def molwt(self) -> float:
if self.algorithm == 'openbabel':
return self.mol.molwt
@property
def formula(self) -> str:
if self.algorithm == 'openbabel':
return self.mol.formula
@property
def smiles2mol2(self) -> Dict[str, str]:
if not hasattr(self, '__smiles2mol2_dict'):
self.__smiles2mol2_dict = get_smiles_mol2_dict()
return self.__smiles2mol2_dict
def write(self, file: str = None, filetype: Literal['pdb', 'mol2', 'xyz'] = 'mol2'):
if self.algorithm == 'openbabel':
mol = self.mol
if file is not None:
mol.write(filetype, file, overwrite=True)
else:
return mol.write(filetype)
def _conformers_openbabel(self, n_select: int = 10, n_try: int = 10) -> List[pybel.Molecule]:
if n_select == 0:
return []
random.seed(self.seed)
ff = openbabel.OBForceField.FindForceField('mmff94')
if n_try is None:
n_try = n_select
if n_try < n_select:
warnings.warn(
f'n_try={n_try} is set to be smaller than n_select={n_select}. '
f'n_try is set to {n_select}')
n_try = n_select
x_list = []
for atom in self.mol.atoms:
for x in atom.coords:
x_list.append(x)
xmin, xmax = min(x_list), max(x_list)
xspan = xmax - xmin
conformers = []
for i in range(n_try):
conformer = self._mol_openbabel(minimize=False)
for atom in conformer.atoms:
obatom = atom.OBAtom
random_coord = [(random.random() * xspan + xmin) * k for k in [2, 1, 0.5]]
obatom.SetVector(*random_coord)
conformer.localopt()
ff.Setup(conformer.OBMol)
conformer.OBMol.SetEnergy(ff.Energy())
conformers.append(conformer)
conformers.sort(key=lambda x: x.energy)
return conformers[:n_select]
def _mol_openbabel(self, minimize: bool = False) -> pybel.Molecule:
try:
mol = pybel.readstring('smi', self.smiles)
except:
raise RuntimeError('Cannot create molecule from SMILES using openbabel.')
if self.read_saved and self.smiles in self.smiles2mol2:
mol = next(pybel.readfile('mol2', self.smiles2mol2[self.smiles]))
else:
mol.addh()
mol.make3D()
if minimize:
mol.localopt()
return mol
| true | true |
1c2b4a485b7b9fbeba082c6fd516e9b17e38a7db | 4,142 | py | Python | Week3-Web-Development-Using-Python/fastapi/service.py | gdgedmonton/Python-Bootcamp-2020 | 2d5e78608c5e94d4db97e084c2f71ac0eefb213f | [
"MIT"
] | 3 | 2021-01-15T23:24:37.000Z | 2021-08-13T04:01:11.000Z | Week3-Web-Development-Using-Python/fastapi/service.py | gdgedmonton/Python-Bootcamp-2020 | 2d5e78608c5e94d4db97e084c2f71ac0eefb213f | [
"MIT"
] | null | null | null | Week3-Web-Development-Using-Python/fastapi/service.py | gdgedmonton/Python-Bootcamp-2020 | 2d5e78608c5e94d4db97e084c2f71ac0eefb213f | [
"MIT"
] | 1 | 2021-01-31T20:11:49.000Z | 2021-01-31T20:11:49.000Z | from datetime import datetime
from typing import List
from urllib.parse import urlunparse
import uuid
import aiohttp
from fastapi import FastAPI, HTTPException
from fuzzywuzzy import process
from pydantic import AnyHttpUrl, BaseModel, Field
from starlette.requests import Request
async def fetch_team_wins() -> dict:
"""Use the NHL API to get teams and goals this season"""
standings_url = "https://statsapi.web.nhl.com/api/v1/standings?season=20192020"
session = aiohttp.ClientSession()
resp = await session.get(standings_url)
await session.close()
standings = await resp.json()
teams = {}
try:
for record in standings["records"]:
for team_record in record["teamRecords"]:
teams.update({team_record["team"]["name"]: team_record["goalsScored"]})
except KeyError:
raise HTTPException(status_code=400, detail="Invalid standings response")
return teams
DB = {} # Yikes! Please use a real database, not just a dictionary...
NAMESPACE_UUID = uuid.uuid4()
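# db_uid() gives deterministic per-name identifiers: uuid3 hashes the name under a
# namespace created once at startup, so the same name always maps to the same uid.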
def db_uid(name: str) -> str:
return str(uuid.uuid3(NAMESPACE_UUID, name))
app = FastAPI(name="Franklin's hockey pool")
class Links(BaseModel):
self: AnyHttpUrl
submissions: AnyHttpUrl
rules: AnyHttpUrl
@classmethod
def from_url(cls, url):
links = cls(
self=str(url),
submissions=urlunparse(
(url.scheme, url.netloc, "/submissions", "", "", "")
),
rules=urlunparse((url.scheme, url.netloc, "/rules", "", "", "")),
)
return links
class RulesResponse(BaseModel):
rules: str
links: Links
@app.get("/rules", response_model=RulesResponse)
async def get_the_pool_rules(request: Request):
"""Get the rules and links to submissions"""
rules = (
"Pick three teams, guess how many combined goals they will have "
"at the end of the season, closest guess takes all!"
)
return {"rules": rules, "links": Links.from_url(request.url)}
class SubmissionsResponse(BaseModel):
submissions: List[AnyHttpUrl]
links: Links
@app.get("/submissions", response_model=SubmissionsResponse)
async def get_submissions(request: Request):
"""Get links to all submissions"""
submissions = [f"{str(request.url)}/{db_uid(sub.name)}" for sub in DB.values()]
return {"submissions": submissions, "links": Links.from_url(request.url)}
class Submission(BaseModel):
name: str = Field(
...,
description="user name",
)
teams: List[str] = Field(
...,
description="team choices",
min_items=3,
max_items=3,
)
prediction: int = Field(
...,
description="predicted total points at end of season",
ge=0,
)
class SubmissionDB(Submission):
uid: str
time: datetime
class SubmissionPostResponse(Submission):
links: Links
@app.post("/submissions", response_model=SubmissionPostResponse)
async def post_submission(submission: Submission, request: Request):
"""Add your submission to the pool"""
uid = db_uid(submission.name)
if uid in DB:
raise HTTPException(
status_code=422, detail=f"Entry already exists for {submission.name}"
)
DB[uid] = SubmissionDB(uid=uid, time=datetime.utcnow(), **submission.dict())
links = Links.from_url(request.url)
links.self += f"/{uid}"
return dict(links=links, **submission.dict())
class SubmissionGetResponse(SubmissionPostResponse):
current_score: int
@app.get("/submissions/{uid}", response_model=SubmissionGetResponse)
async def get_submission(uid: str, request: Request):
"""Get a submission with the current score"""
if uid not in DB:
raise HTTPException(status_code=404, detail="Not found")
submission = DB[uid]
standings = await fetch_team_wins()
current_score = 0
for team in submission.teams:
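        # Fuzzy-match the submitted team name against the official names returned by the NHL API.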
match = process.extractOne(team, standings.keys())
current_score += standings[match[0]]
links = Links.from_url(request.url)
return dict(links=links, current_score=current_score, **submission.dict())
| 28.965035 | 87 | 0.667069 | from datetime import datetime
from typing import List
from urllib.parse import urlunparse
import uuid
import aiohttp
from fastapi import FastAPI, HTTPException
from fuzzywuzzy import process
from pydantic import AnyHttpUrl, BaseModel, Field
from starlette.requests import Request
async def fetch_team_wins() -> dict:
standings_url = "https://statsapi.web.nhl.com/api/v1/standings?season=20192020"
session = aiohttp.ClientSession()
resp = await session.get(standings_url)
await session.close()
standings = await resp.json()
teams = {}
try:
for record in standings["records"]:
for team_record in record["teamRecords"]:
teams.update({team_record["team"]["name"]: team_record["goalsScored"]})
except KeyError:
raise HTTPException(status_code=400, detail="Invalid standings response")
return teams
DB = {}
NAMESPACE_UUID = uuid.uuid4()
def db_uid(name: str) -> str:
return str(uuid.uuid3(NAMESPACE_UUID, name))
app = FastAPI(name="Franklin's hockey pool")
class Links(BaseModel):
self: AnyHttpUrl
submissions: AnyHttpUrl
rules: AnyHttpUrl
@classmethod
def from_url(cls, url):
links = cls(
self=str(url),
submissions=urlunparse(
(url.scheme, url.netloc, "/submissions", "", "", "")
),
rules=urlunparse((url.scheme, url.netloc, "/rules", "", "", "")),
)
return links
class RulesResponse(BaseModel):
rules: str
links: Links
@app.get("/rules", response_model=RulesResponse)
async def get_the_pool_rules(request: Request):
rules = (
"Pick three teams, guess how many combined goals they will have "
"at the end of the season, closest guess takes all!"
)
return {"rules": rules, "links": Links.from_url(request.url)}
class SubmissionsResponse(BaseModel):
submissions: List[AnyHttpUrl]
links: Links
@app.get("/submissions", response_model=SubmissionsResponse)
async def get_submissions(request: Request):
submissions = [f"{str(request.url)}/{db_uid(sub.name)}" for sub in DB.values()]
return {"submissions": submissions, "links": Links.from_url(request.url)}
class Submission(BaseModel):
name: str = Field(
...,
description="user name",
)
teams: List[str] = Field(
...,
description="team choices",
min_items=3,
max_items=3,
)
prediction: int = Field(
...,
description="predicted total points at end of season",
ge=0,
)
class SubmissionDB(Submission):
uid: str
time: datetime
class SubmissionPostResponse(Submission):
links: Links
@app.post("/submissions", response_model=SubmissionPostResponse)
async def post_submission(submission: Submission, request: Request):
uid = db_uid(submission.name)
if uid in DB:
raise HTTPException(
status_code=422, detail=f"Entry already exists for {submission.name}"
)
DB[uid] = SubmissionDB(uid=uid, time=datetime.utcnow(), **submission.dict())
links = Links.from_url(request.url)
links.self += f"/{uid}"
return dict(links=links, **submission.dict())
class SubmissionGetResponse(SubmissionPostResponse):
current_score: int
@app.get("/submissions/{uid}", response_model=SubmissionGetResponse)
async def get_submission(uid: str, request: Request):
if uid not in DB:
raise HTTPException(status_code=404, detail="Not found")
submission = DB[uid]
standings = await fetch_team_wins()
current_score = 0
for team in submission.teams:
match = process.extractOne(team, standings.keys())
current_score += standings[match[0]]
links = Links.from_url(request.url)
return dict(links=links, current_score=current_score, **submission.dict())
| true | true |
1c2b4b0457b868b3077a5a8dbc6d0f7f70328b3f | 12,842 | py | Python | lib/net/fcn.py | Guo-Xiaoqing/ThresholdNet | 460026bdacd9d5e577e9b4ae1370e8c9924fcfc3 | [
"MIT"
] | 7 | 2020-12-29T14:09:27.000Z | 2021-07-08T07:12:24.000Z | lib/net/fcn.py | CityU-AIM-Group/ThresholdNet | e82da9f1266c07518c4037d0a0b3afd6290ca33d | [
"MIT"
] | null | null | null | lib/net/fcn.py | CityU-AIM-Group/ThresholdNet | e82da9f1266c07518c4037d0a0b3afd6290ca33d | [
"MIT"
] | 2 | 2021-04-08T11:59:07.000Z | 2021-08-09T15:16:41.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models
from torchvision.models.vgg import VGG
class FCN32s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
score = self.bn1(self.relu(self.deconv1(x5))) # size=(N, 512, x.H/16, x.W/16)
score = self.bn2(self.relu(self.deconv2(score))) # size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
return score # size=(N, n_class, x.H/1, x.W/1)
class FCN16s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)
score = self.relu(self.deconv1(x5)) # size=(N, 512, x.H/16, x.W/16)
score = self.bn1(score + x4) # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.bn2(self.relu(self.deconv2(score))) # size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
return score # size=(N, n_class, x.H/1, x.W/1)
class FCN8s(nn.Module):
def __init__(self, cfg):
super().__init__()
self.n_class = cfg.MODEL_NUM_CLASSES
self.pretrained_net = VGGNet(requires_grad=True, remove_fc=True)
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, cfg.MODEL_NUM_CLASSES, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)
x3 = output['x3'] # size=(N, 256, x.H/8, x.W/8)
score = self.relu(self.deconv1(x5)) # size=(N, 512, x.H/16, x.W/16)
score = self.bn1(score + x4) # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.relu(self.deconv2(score)) # size=(N, 256, x.H/8, x.W/8)
score = self.bn2(score + x3) # element-wise add, size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
return score # size=(N, n_class, x.H/1, x.W/1)
class FCNs(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)
x3 = output['x3'] # size=(N, 256, x.H/8, x.W/8)
x2 = output['x2'] # size=(N, 128, x.H/4, x.W/4)
x1 = output['x1'] # size=(N, 64, x.H/2, x.W/2)
score = self.bn1(self.relu(self.deconv1(x5))) # size=(N, 512, x.H/16, x.W/16)
score = score + x4 # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.bn2(self.relu(self.deconv2(score))) # size=(N, 256, x.H/8, x.W/8)
score = score + x3 # element-wise add, size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = score + x2 # element-wise add, size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = score + x1 # element-wise add, size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
return score # size=(N, n_class, x.H/1, x.W/1)
class VGGNet(VGG):
def __init__(self, pretrained=False, model='vgg16', requires_grad=True, remove_fc=True, show_params=False):
super().__init__(make_layers(cfg[model]))
self.ranges = ranges[model]
if pretrained:
exec("self.load_state_dict(models.%s(pretrained=True).state_dict())" % model)
if not requires_grad:
for param in super().parameters():
param.requires_grad = False
if remove_fc: # delete redundant fully-connected layer params, can save memory
del self.classifier
if show_params:
for name, param in self.named_parameters():
print(name, param.size())
def forward(self, x):
output = {}
# get the output of each maxpooling layer (5 maxpool in VGG net)
for idx in range(len(self.ranges)):
for layer in range(self.ranges[idx][0], self.ranges[idx][1]):
x = self.features[layer](x)
output["x%d"%(idx+1)] = x
return output
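# Each (start, stop) pair below indexes into VGG's "features" sequential module and
# covers one conv block up to and including its max-pooling layer, so the activation
# captured at the end of range k is the k-th pooled feature map (x1..x5 above).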
ranges = {
'vgg11': ((0, 3), (3, 6), (6, 11), (11, 16), (16, 21)),
'vgg13': ((0, 5), (5, 10), (10, 15), (15, 20), (20, 25)),
'vgg16': ((0, 5), (5, 10), (10, 17), (17, 24), (24, 31)),
'vgg19': ((0, 5), (5, 10), (10, 19), (19, 28), (28, 37))
}
# cropped version from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
cfg = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
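# In the configs above, each integer is a conv layer's output channel count and 'M'
# marks a 2x2 max-pooling layer; make_layers() below expands them into nn modules.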
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
if __name__ == "__main__":
batch_size, n_class, h, w = 10, 20, 160, 160
# test output size
vgg_model = VGGNet(requires_grad=True)
input = torch.autograd.Variable(torch.randn(batch_size, 3, 224, 224))
output = vgg_model(input)
assert output['x5'].size() == torch.Size([batch_size, 512, 7, 7])
fcn_model = FCN32s(pretrained_net=vgg_model, n_class=n_class)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
output = fcn_model(input)
assert output.size() == torch.Size([batch_size, n_class, h, w])
fcn_model = FCN16s(pretrained_net=vgg_model, n_class=n_class)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
output = fcn_model(input)
assert output.size() == torch.Size([batch_size, n_class, h, w])
fcn_model = FCN8s(pretrained_net=vgg_model, n_class=n_class)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
output = fcn_model(input)
assert output.size() == torch.Size([batch_size, n_class, h, w])
fcn_model = FCNs(pretrained_net=vgg_model, n_class=n_class)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
output = fcn_model(input)
assert output.size() == torch.Size([batch_size, n_class, h, w])
print("Pass size check")
# test a random batch, loss should decrease
fcn_model = FCNs(pretrained_net=vgg_model, n_class=n_class)
criterion = nn.BCELoss()
optimizer = optim.SGD(fcn_model.parameters(), lr=1e-3, momentum=0.9)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
y = torch.autograd.Variable(torch.randn(batch_size, n_class, h, w), requires_grad=False)
for iter in range(10):
optimizer.zero_grad()
output = fcn_model(input)
output = nn.functional.sigmoid(output)
loss = criterion(output, y)
loss.backward()
print("iter{}, loss {}".format(iter, loss.data[0]))
optimizer.step()
| 48.460377 | 117 | 0.592275 |
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models
from torchvision.models.vgg import VGG
class FCN32s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5']
score = self.bn1(self.relu(self.deconv1(x5)))
score = self.bn2(self.relu(self.deconv2(score)))
score = self.bn3(self.relu(self.deconv3(score)))
score = self.bn4(self.relu(self.deconv4(score)))
score = self.bn5(self.relu(self.deconv5(score)))
score = self.classifier(score)
return score
class FCN16s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5']
x4 = output['x4']
score = self.relu(self.deconv1(x5))
score = self.bn1(score + x4)
score = self.bn2(self.relu(self.deconv2(score)))
score = self.bn3(self.relu(self.deconv3(score)))
score = self.bn4(self.relu(self.deconv4(score)))
score = self.bn5(self.relu(self.deconv5(score)))
score = self.classifier(score)
return score
class FCN8s(nn.Module):
def __init__(self, cfg):
super().__init__()
self.n_class = cfg.MODEL_NUM_CLASSES
self.pretrained_net = VGGNet(requires_grad=True, remove_fc=True)
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, cfg.MODEL_NUM_CLASSES, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5']
x4 = output['x4']
x3 = output['x3']
score = self.relu(self.deconv1(x5))
score = self.bn1(score + x4)
score = self.relu(self.deconv2(score))
score = self.bn2(score + x3)
score = self.bn3(self.relu(self.deconv3(score)))
score = self.bn4(self.relu(self.deconv4(score)))
score = self.bn5(self.relu(self.deconv5(score)))
score = self.classifier(score)
return score
class FCNs(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5']
x4 = output['x4']
x3 = output['x3']
x2 = output['x2']
x1 = output['x1']
score = self.bn1(self.relu(self.deconv1(x5)))
score = score + x4
score = self.bn2(self.relu(self.deconv2(score)))
score = score + x3
score = self.bn3(self.relu(self.deconv3(score)))
score = score + x2
score = self.bn4(self.relu(self.deconv4(score)))
score = score + x1
score = self.bn5(self.relu(self.deconv5(score)))
score = self.classifier(score)
return score
class VGGNet(VGG):
def __init__(self, pretrained=False, model='vgg16', requires_grad=True, remove_fc=True, show_params=False):
super().__init__(make_layers(cfg[model]))
self.ranges = ranges[model]
if pretrained:
exec("self.load_state_dict(models.%s(pretrained=True).state_dict())" % model)
if not requires_grad:
for param in super().parameters():
param.requires_grad = False
if remove_fc:
del self.classifier
if show_params:
for name, param in self.named_parameters():
print(name, param.size())
def forward(self, x):
output = {}
for idx in range(len(self.ranges)):
for layer in range(self.ranges[idx][0], self.ranges[idx][1]):
x = self.features[layer](x)
output["x%d"%(idx+1)] = x
return output
ranges = {
'vgg11': ((0, 3), (3, 6), (6, 11), (11, 16), (16, 21)),
'vgg13': ((0, 5), (5, 10), (10, 15), (15, 20), (20, 25)),
'vgg16': ((0, 5), (5, 10), (10, 17), (17, 24), (24, 31)),
'vgg19': ((0, 5), (5, 10), (10, 19), (19, 28), (28, 37))
}
cfg = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
if __name__ == "__main__":
batch_size, n_class, h, w = 10, 20, 160, 160
vgg_model = VGGNet(requires_grad=True)
input = torch.autograd.Variable(torch.randn(batch_size, 3, 224, 224))
output = vgg_model(input)
assert output['x5'].size() == torch.Size([batch_size, 512, 7, 7])
fcn_model = FCN32s(pretrained_net=vgg_model, n_class=n_class)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
output = fcn_model(input)
assert output.size() == torch.Size([batch_size, n_class, h, w])
fcn_model = FCN16s(pretrained_net=vgg_model, n_class=n_class)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
output = fcn_model(input)
assert output.size() == torch.Size([batch_size, n_class, h, w])
fcn_model = FCN8s(pretrained_net=vgg_model, n_class=n_class)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
output = fcn_model(input)
assert output.size() == torch.Size([batch_size, n_class, h, w])
fcn_model = FCNs(pretrained_net=vgg_model, n_class=n_class)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
output = fcn_model(input)
assert output.size() == torch.Size([batch_size, n_class, h, w])
print("Pass size check")
fcn_model = FCNs(pretrained_net=vgg_model, n_class=n_class)
criterion = nn.BCELoss()
optimizer = optim.SGD(fcn_model.parameters(), lr=1e-3, momentum=0.9)
input = torch.autograd.Variable(torch.randn(batch_size, 3, h, w))
y = torch.autograd.Variable(torch.randn(batch_size, n_class, h, w), requires_grad=False)
for iter in range(10):
optimizer.zero_grad()
output = fcn_model(input)
output = nn.functional.sigmoid(output)
loss = criterion(output, y)
loss.backward()
print("iter{}, loss {}".format(iter, loss.data[0]))
optimizer.step()
| true | true |
1c2b4bf497975578da315d9f93461e4bcfe65e56 | 2,198 | py | Python | tests/python-opcua/examples/simple-client-server-xml/server.py | iit-danieli-joint-lab/opcua-modeling-tool | f8c3d940a61334b0e6deda9099844a6b429d7c08 | [
"MIT"
] | 32 | 2018-03-27T12:25:24.000Z | 2022-01-11T21:20:06.000Z | tests/python-opcua/examples/simple-client-server-xml/server.py | iit-danieli-joint-lab/opcua-modeling-tool | f8c3d940a61334b0e6deda9099844a6b429d7c08 | [
"MIT"
] | 42 | 2020-08-20T04:01:12.000Z | 2021-01-09T18:50:21.000Z | python-opcua/examples/simple-client-server-xml/server.py | ssriblo/ionic-smarthome-test-1 | 060bc247e0b8295d6cd869d90b364756515cfc19 | [
"MIT"
] | 12 | 2018-06-04T20:06:06.000Z | 2021-07-02T22:09:53.000Z | import os.path
try:
from IPython import embed
except ImportError:
import code
def embed():
vars = globals()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
from opcua import ua, uamethod, Server
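# @uamethod wraps a plain Python function so the server can call it as an OPC UA
# method: the decorator passes the parent node id and unpacks the variant arguments.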
@uamethod
def say_hello_xml(parent, happy):
print("Calling say_hello_xml")
if happy:
result = "I'm happy"
else:
result = "I'm not happy"
print(result)
return result
@uamethod
def say_hello(parent, happy):
if happy:
result = "I'm happy"
else:
result = "I'm not happy"
print(result)
return result
@uamethod
def say_hello_array(parent, happy):
if happy:
result = "I'm happy"
else:
result = "I'm not happy"
print(result)
return [result, "Actually I am"]
class HelloServer:
def __init__(self, endpoint, name, model_filepath):
self.server = Server()
# This need to be imported at the start or else it will overwrite the data
self.server.import_xml(model_filepath)
self.server.set_endpoint(endpoint)
self.server.set_server_name(name)
objects = self.server.get_objects_node()
freeopcua_namespace = self.server.get_namespace_index("urn:freeopcua:python:server")
hellower = objects.get_child("0:Hellower")
hellower_say_hello = hellower.get_child("0:SayHello")
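        # Bind the Python callable to the SayHello method node defined in the imported XML model.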
self.server.link_method(hellower_say_hello, say_hello_xml)
hellower.add_method(
freeopcua_namespace, "SayHello2", say_hello, [ua.VariantType.Boolean], [ua.VariantType.String])
hellower.add_method(
freeopcua_namespace, "SayHelloArray", say_hello_array, [ua.VariantType.Boolean], [ua.VariantType.String])
def __enter__(self):
self.server.start()
return self.server
def __exit__(self, exc_type, exc_val, exc_tb):
self.server.stop()
if __name__ == '__main__':
script_dir = os.path.dirname(__file__)
with HelloServer(
"opc.tcp://0.0.0.0:40840/freeopcua/server/",
"FreeOpcUa Example Server",
os.path.join(script_dir, "test_saying.xml")) as server:
embed()
| 25.55814 | 117 | 0.645132 | import os.path
try:
from IPython import embed
except ImportError:
import code
def embed():
vars = globals()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
from opcua import ua, uamethod, Server
@uamethod
def say_hello_xml(parent, happy):
print("Calling say_hello_xml")
if happy:
result = "I'm happy"
else:
result = "I'm not happy"
print(result)
return result
@uamethod
def say_hello(parent, happy):
if happy:
result = "I'm happy"
else:
result = "I'm not happy"
print(result)
return result
@uamethod
def say_hello_array(parent, happy):
if happy:
result = "I'm happy"
else:
result = "I'm not happy"
print(result)
return [result, "Actually I am"]
class HelloServer:
def __init__(self, endpoint, name, model_filepath):
self.server = Server()
self.server.import_xml(model_filepath)
self.server.set_endpoint(endpoint)
self.server.set_server_name(name)
objects = self.server.get_objects_node()
freeopcua_namespace = self.server.get_namespace_index("urn:freeopcua:python:server")
hellower = objects.get_child("0:Hellower")
hellower_say_hello = hellower.get_child("0:SayHello")
self.server.link_method(hellower_say_hello, say_hello_xml)
hellower.add_method(
freeopcua_namespace, "SayHello2", say_hello, [ua.VariantType.Boolean], [ua.VariantType.String])
hellower.add_method(
freeopcua_namespace, "SayHelloArray", say_hello_array, [ua.VariantType.Boolean], [ua.VariantType.String])
def __enter__(self):
self.server.start()
return self.server
def __exit__(self, exc_type, exc_val, exc_tb):
self.server.stop()
if __name__ == '__main__':
script_dir = os.path.dirname(__file__)
with HelloServer(
"opc.tcp://0.0.0.0:40840/freeopcua/server/",
"FreeOpcUa Example Server",
os.path.join(script_dir, "test_saying.xml")) as server:
embed()
| true | true |
1c2b4c8a02572640013e7d4e503ee3614d3ab2b7 | 598 | py | Python | codes/views.py | DanielArturoAlejoAlvarez/Cersei | 365cb4e554146143fb3521a09ebf9fadb127a564 | [
"MIT"
] | 5 | 2020-04-07T14:31:45.000Z | 2021-04-30T05:11:43.000Z | codes/views.py | DanielArturoAlejoAlvarez/Cersei | 365cb4e554146143fb3521a09ebf9fadb127a564 | [
"MIT"
] | null | null | null | codes/views.py | DanielArturoAlejoAlvarez/Cersei | 365cb4e554146143fb3521a09ebf9fadb127a564 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets
from .models import Language,Paradigm,Programmer
from .serializers import LanguageSerializer,ParadigmSerializer,ProgrammerSerializer
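# Each ModelViewSet below pairs a model's queryset with its serializer and exposes
# the standard list/retrieve/create/update/destroy API actions.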
class LanguageView(viewsets.ModelViewSet):
queryset=Language.objects.all()
serializer_class=LanguageSerializer
class ParadigmView(viewsets.ModelViewSet):
queryset=Paradigm.objects.all()
serializer_class=ParadigmSerializer
class ProgrammerView(viewsets.ModelViewSet):
queryset=Programmer.objects.all()
serializer_class=ProgrammerSerializer
| 31.473684 | 83 | 0.832776 | from django.shortcuts import render
from rest_framework import viewsets
from .models import Language,Paradigm,Programmer
from .serializers import LanguageSerializer,ParadigmSerializer,ProgrammerSerializer
class LanguageView(viewsets.ModelViewSet):
queryset=Language.objects.all()
serializer_class=LanguageSerializer
class ParadigmView(viewsets.ModelViewSet):
queryset=Paradigm.objects.all()
serializer_class=ParadigmSerializer
class ProgrammerView(viewsets.ModelViewSet):
queryset=Programmer.objects.all()
serializer_class=ProgrammerSerializer
| true | true |
1c2b4ca4ff9cd52813ee18f751ac9262164b8b7c | 825 | py | Python | test/post.py | 6923403/Python_Demo | 69ebc7fe5589b46a470c7d88507ce2c73d4c6678 | [
"MIT"
] | null | null | null | test/post.py | 6923403/Python_Demo | 69ebc7fe5589b46a470c7d88507ce2c73d4c6678 | [
"MIT"
] | null | null | null | test/post.py | 6923403/Python_Demo | 69ebc7fe5589b46a470c7d88507ce2c73d4c6678 | [
"MIT"
] | null | null | null | import requests
import json
def main():
host='http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
word = host
endpoint = "post"
url=''.join([host, endpoint])
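    # Form fields for the Youdao web translate endpoint; salt/sign/lts/bv look like
    # anti-crawler tokens captured from a browser session (assumption, not verified).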
m_data={
"i": "晚安",
"from": "AUTO",
"to": "AUTO",
"smartresult": "dict",
"client": "fanyideskweb",
"salt": "16027840699404",
"sign": "3049c9ec63fc27774b93f384a0497330",
"lts": "1602784069940",
"bv": "0c00cda0db2530a31944351caf80d8b0",
"doctype": "json",
"version": "2.1",
"keyfrom": "fanyi.web",
"action": "FY_BY_CLICKBUTTION"
}
response=requests.post(url,m_data)
    # Convert the JSON response string into a dictionary
content=json.loads(response.text)
# print(content['translateResult'][0][0]['tgt'])
print(content)
if __name__ == '__main__':
main()
| 24.264706 | 81 | 0.591515 | import requests
import json
def main():
host='http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
word = host
endpoint = "post"
url=''.join([host, endpoint])
m_data={
"i": "晚安",
"from": "AUTO",
"to": "AUTO",
"smartresult": "dict",
"client": "fanyideskweb",
"salt": "16027840699404",
"sign": "3049c9ec63fc27774b93f384a0497330",
"lts": "1602784069940",
"bv": "0c00cda0db2530a31944351caf80d8b0",
"doctype": "json",
"version": "2.1",
"keyfrom": "fanyi.web",
"action": "FY_BY_CLICKBUTTION"
}
response=requests.post(url,m_data)
content=json.loads(response.text)
print(content)
if __name__ == '__main__':
main()
| true | true |
1c2b4caaa1a2a9e62eca83baf2fcc132b0a26879 | 8,564 | py | Python | great_expectations/expectations/core/expect_column_values_to_be_unique.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | great_expectations/expectations/core/expect_column_values_to_be_unique.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | great_expectations/expectations/core/expect_column_values_to_be_unique.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
InvalidExpectationConfigurationError,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
try:
import sqlalchemy as sa # noqa: F401
except ImportError:
pass
class ExpectColumnValuesToBeUnique(ColumnMapExpectation):
"""Expect each column value to be unique.
This expectation detects duplicates. All duplicated values are counted as exceptions.
For example, `[1, 2, 3, 3, 3]` will return `[3, 3, 3]` in `result.exceptions_list`, with \
`unexpected_percent = 60.0`.
expect_column_values_to_be_unique is a \
:func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
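    # Row-level metric this expectation maps over; the execution engine evaluates
    # "column_values.unique" per row and "mostly" (below) sets the pass threshold.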
map_metric = "column_values.unique"
success_keys = ("mostly",)
default_kwarg_values = {
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"parse_strings_as_datetimes": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
args_keys = ("column",)
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
super().validate_configuration(configuration)
try:
assert (
"column" in configuration.kwargs
), "'column' parameter is required for column map expectations"
if "mostly" in configuration.kwargs:
mostly = configuration.kwargs["mostly"]
assert isinstance(
mostly, (int, float)
), "'mostly' parameter must be an integer or float"
assert 0 <= mostly <= 1, "'mostly' parameter must be between 0 and 1"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
@classmethod
def _atomic_prescriptive_template(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
params_with_json_schema = {
"column": {"schema": {"type": "string"}, "value": params.get("column")},
"mostly": {"schema": {"type": "number"}, "value": params.get("mostly")},
"mostly_pct": {
"schema": {"type": "string"},
"value": params.get("mostly_pct"),
},
"row_condition": {
"schema": {"type": "string"},
"value": params.get("row_condition"),
},
"condition_parser": {
"schema": {"type": "string"},
"value": params.get("condition_parser"),
},
}
if include_column_name:
template_str = "$column values must be unique"
else:
template_str = "values must be unique"
if params["mostly"] is not None and params["mostly"] < 1.0:
params_with_json_schema["mostly_pct"]["value"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(
params["row_condition"], with_schema=True
)
template_str = f"{conditional_template_str}, then {template_str}"
params_with_json_schema.update(conditional_params)
return (template_str, params_with_json_schema, styling)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
if include_column_name:
template_str = "$column values must be unique"
else:
template_str = "values must be unique"
if params["mostly"] is not None and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = f"{conditional_template_str}, then {template_str}"
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
| 38.232143 | 107 | 0.608711 | from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
InvalidExpectationConfigurationError,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
try:
import sqlalchemy as sa
except ImportError:
pass
class ExpectColumnValuesToBeUnique(ColumnMapExpectation):
library_metadata = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
map_metric = "column_values.unique"
success_keys = ("mostly",)
default_kwarg_values = {
"row_condition": None,
"condition_parser": None,
"mostly": 1,
"parse_strings_as_datetimes": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
args_keys = ("column",)
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
super().validate_configuration(configuration)
try:
assert (
"column" in configuration.kwargs
), "'column' parameter is required for column map expectations"
if "mostly" in configuration.kwargs:
mostly = configuration.kwargs["mostly"]
assert isinstance(
mostly, (int, float)
), "'mostly' parameter must be an integer or float"
assert 0 <= mostly <= 1, "'mostly' parameter must be between 0 and 1"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
@classmethod
def _atomic_prescriptive_template(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
params_with_json_schema = {
"column": {"schema": {"type": "string"}, "value": params.get("column")},
"mostly": {"schema": {"type": "number"}, "value": params.get("mostly")},
"mostly_pct": {
"schema": {"type": "string"},
"value": params.get("mostly_pct"),
},
"row_condition": {
"schema": {"type": "string"},
"value": params.get("row_condition"),
},
"condition_parser": {
"schema": {"type": "string"},
"value": params.get("condition_parser"),
},
}
if include_column_name:
template_str = "$column values must be unique"
else:
template_str = "values must be unique"
if params["mostly"] is not None and params["mostly"] < 1.0:
params_with_json_schema["mostly_pct"]["value"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(
params["row_condition"], with_schema=True
)
template_str = f"{conditional_template_str}, then {template_str}"
params_with_json_schema.update(conditional_params)
return (template_str, params_with_json_schema, styling)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
if include_column_name:
template_str = "$column values must be unique"
else:
template_str = "values must be unique"
if params["mostly"] is not None and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = f"{conditional_template_str}, then {template_str}"
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
| true | true |
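
A minimal usage sketch for the ExpectColumnValuesToBeUnique renderer shown in the row above (illustrative only, not part of the stored file; the column name and mostly value are assumptions chosen for the example):

from great_expectations.core.expectation_configuration import ExpectationConfiguration

# Hypothetical kwargs chosen for illustration only.
config = ExpectationConfiguration(
    expectation_type="expect_column_values_to_be_unique",
    kwargs={"column": "user_id", "mostly": 0.95},
)
# Rendered prescriptively, this configuration yields the string template
# "$column values must be unique, at least $mostly_pct % of the time."
# with params including {"column": "user_id", "mostly": 0.95, "mostly_pct": "95"}.
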
1c2b4d2407ea1b399ac8adc0f3b5894a3aaab7e3 | 542 | py | Python | multiples_of_x_and_y/calculator.py | corker/multiples_of_x_and_y | 38a9da13594a3de0a6b8f018193fde20ba38eb7d | [
"MIT"
] | null | null | null | multiples_of_x_and_y/calculator.py | corker/multiples_of_x_and_y | 38a9da13594a3de0a6b8f018193fde20ba38eb7d | [
"MIT"
] | null | null | null | multiples_of_x_and_y/calculator.py | corker/multiples_of_x_and_y | 38a9da13594a3de0a6b8f018193fde20ba38eb7d | [
"MIT"
] | null | null | null |
MIN_GOAL = 1
def calculate(x, y, goal):
assert x > 0
assert y > 0
assert goal >= MIN_GOAL
range_numbers = range(MIN_GOAL, goal)
condition = as_condition(x, y)
filtered_numbers = filter(condition, range_numbers)
return tuple(filtered_numbers)
def as_condition(x, y):
assert x > 0
assert y > 0
selector = lambda value: can_divide(x, value) | can_divide(y, value)
return selector
def can_divide(divider, dividend):
assert divider > 0
assert dividend > 0
return dividend % divider == 0
| 23.565217 | 72 | 0.667897 |
MIN_GOAL = 1
def calculate(x, y, goal):
assert x > 0
assert y > 0
assert goal >= MIN_GOAL
range_numbers = range(MIN_GOAL, goal)
condition = as_condition(x, y)
filtered_numbers = filter(condition, range_numbers)
return tuple(filtered_numbers)
def as_condition(x, y):
assert x > 0
assert y > 0
selector = lambda value: can_divide(x, value) | can_divide(y, value)
return selector
def can_divide(divider, dividend):
assert divider > 0
assert dividend > 0
return dividend % divider == 0
| true | true |
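
A short usage sketch for the calculator.py content above (illustrative only, not part of the stored file; the import path is an assumption that mirrors the repo layout):

from multiples_of_x_and_y.calculator import calculate

# Multiples of 3 or 5 below 10 are 3, 5, 6 and 9.
assert calculate(3, 5, 10) == (3, 5, 6, 9)
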
1c2b4debf7e4d8dde139c8cb3e4ed6b1436b41ad | 6,817 | py | Python | ch_05/src/classifier.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | [
"MIT"
] | 43 | 2021-06-03T18:39:09.000Z | 2022-03-29T20:32:13.000Z | ch_05/src/classifier.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | [
"MIT"
] | 9 | 2022-03-12T01:04:07.000Z | 2022-03-12T01:05:01.000Z | ch_05/src/classifier.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | [
"MIT"
] | 36 | 2021-06-19T07:14:09.000Z | 2022-03-12T22:17:09.000Z | """
Python 3 Object-Oriented Programming Case Study
Chapter 5, When to Use Object-Oriented Programming
"""
from __future__ import annotations
import base64
import csv
from enum import Enum, auto
from functools import wraps
from pathlib import Path
from typing import (
cast,
Optional,
Callable,
Any,
Type,
Set,
Mapping,
overload,
Iterable,
Union,
Iterator,
)
import werkzeug.security
from flask import Flask, current_app, jsonify, request, abort, g, Response
class Role(str, Enum):
UNDEFINED = ""
BOTANIST = "botanist"
RESEARCHER = "researcher"
class User:
"""
A user. Has a Role: Botanist or Researcher.
The password must be of the form: ``method$salt$hexdigest``.
For example: ``"md5$ZD8agylg$90c2494aa8a4965b20410e4cdb9e823d"``
"""
headers = ["username", "email", "real_name", "role", "password"]
def __init__(
self,
username: str,
email: str,
real_name: str,
role: Role,
password: Optional[str] = None,
) -> None:
self.username = username
self.email = email
self.real_name = real_name
self.role = role
self.password = password
@staticmethod
def from_dict(csv_row: dict[str, str]) -> "User":
return User(
username=csv_row["username"],
email=csv_row["email"],
real_name=csv_row["real_name"],
role=Role(csv_row["role"]),
password=csv_row["password"],
)
def __eq__(self, other: Any) -> bool:
other = cast(User, other)
return all(
[
self.username == other.username,
self.email == other.email,
self.real_name == other.real_name,
self.role == other.role,
]
)
def set_password(self, plain_text: str) -> None:
self.password = werkzeug.security.generate_password_hash(plain_text)
def is_valid_password(self, plain_text: str) -> bool:
return werkzeug.security.check_password_hash(
self.password or "md5$$", plain_text
)
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}("
f"username={self.username!r}, "
f"email={self.email!r}, "
f"real_name={self.real_name!r}, "
f"role={self.role.value!r}, "
f"password={self.password!r})"
)
def asdict(self) -> dict[str, Optional[str]]:
return {
"username": self.username,
"email": self.email,
"real_name": self.real_name,
"role": self.role.value,
"password": self.password,
}
class Users:
def __init__(self, init: Optional[dict[str, User]] = None) -> None:
self.users = init or {}
self.anonymous = User("", "", "", Role.UNDEFINED)
self.app: Optional[Flask] = None
def init_app(self, app: Flask) -> None:
self.app = app
self.app.config.setdefault("USER_FILE", Path("users.csv"))
def get_user(self, name: str, default: Optional[User] = None) -> User:
if not self.app:
raise RuntimeError("Users not bound to an app")
if not self.users:
# Load file when needed.
with self.app.config["USER_FILE"].open() as user_file:
row_iter = csv.DictReader(user_file)
user_iter = (User.from_dict(row) for row in row_iter if row)
self.users = {user.username: user for user in user_iter}
return self.users.get(name, default or self.anonymous)
def add_user(self, user: User) -> None:
if user.username in self.users:
raise ValueError("Duplicate Username")
self.users[user.username] = user
def save(self) -> None:
if not self.app:
raise RuntimeError("Users not bound to an app")
with self.app.config["USER_FILE"].open("w", newline="") as user_file:
writer = csv.DictWriter(user_file, User.headers)
writer.writeheader()
writer.writerows(u.asdict() for u in self.users.values())
def __len__(self) -> int:
return len(self.users)
def values(self) -> Iterator[User]:
return iter(self.users.values())
class NotAuthorized(Exception):
status_code = 401
def __init__(
self,
message: str,
status_code: Optional[int] = None,
payload: Optional[dict[str, str]] = None,
) -> None:
super().__init__(message)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def asdict(self) -> dict[str, Any]:
rv: dict[str, Any] = dict(self.payload or ())
rv["message"] = self.message
return rv
def authenticate(view_function: Callable[..., Response]) -> Callable[..., Response]:
@wraps(view_function)
def decorated_function(*args: str) -> Response:
auth_body = request.headers.get("Authorization", "").split(" ")
auth_type, credentials = auth_body if len(auth_body) == 2 else ("", ":")
username, _, password = (
base64.b64decode(credentials).decode("utf-8").partition(":")
)
g.user = users.get_user(username) # type: ignore[attr-defined]
conditions = [
auth_type.upper() == "BASIC",
g.user.is_valid_password(password), # type: ignore[attr-defined]
]
if not all(conditions):
raise NotAuthorized("Unknown User")
return view_function(*args)
return decorated_function
class Config:
USER_FILE = Path("data/users.csv")
class Demo(Config):
ENV = "development"
DEBUG = True
TESTING = True
app = Flask(__name__)
app.config.from_object(Demo) # os.environ["CLASSIFIER_CONFIG"]
users = Users()
users.init_app(app)
@app.errorhandler(NotAuthorized) # type: ignore[misc]
def handle_unauthorized(error: NotAuthorized) -> Response:
response = jsonify(error.asdict())
response.status_code = error.status_code
return response
@app.route("/health")
def user_list() -> Response:
# Be sure the users database gets loaded.
users.get_user("")
response = {"status": "OK", "user_count": len(users)}
if app.config["TESTING"]:
response["users"] = [u.asdict() for u in users.values()]
return jsonify(response)
@app.route("/whoami")
@authenticate
def who_am_i() -> Response:
app.logger.info(f"whoami with {request.headers}: User {g.user}") # type: ignore[attr-defined]
return jsonify(
{
"status": "OK",
"user": g.user.asdict(), # type: ignore[attr-defined]
}
)
if __name__ == "__main__":
app.run(ssl_context="adhoc")
| 28.885593 | 98 | 0.593663 | from __future__ import annotations
import base64
import csv
from enum import Enum, auto
from functools import wraps
from pathlib import Path
from typing import (
cast,
Optional,
Callable,
Any,
Type,
Set,
Mapping,
overload,
Iterable,
Union,
Iterator,
)
import werkzeug.security
from flask import Flask, current_app, jsonify, request, abort, g, Response
class Role(str, Enum):
UNDEFINED = ""
BOTANIST = "botanist"
RESEARCHER = "researcher"
class User:
headers = ["username", "email", "real_name", "role", "password"]
def __init__(
self,
username: str,
email: str,
real_name: str,
role: Role,
password: Optional[str] = None,
) -> None:
self.username = username
self.email = email
self.real_name = real_name
self.role = role
self.password = password
@staticmethod
def from_dict(csv_row: dict[str, str]) -> "User":
return User(
username=csv_row["username"],
email=csv_row["email"],
real_name=csv_row["real_name"],
role=Role(csv_row["role"]),
password=csv_row["password"],
)
def __eq__(self, other: Any) -> bool:
other = cast(User, other)
return all(
[
self.username == other.username,
self.email == other.email,
self.real_name == other.real_name,
self.role == other.role,
]
)
def set_password(self, plain_text: str) -> None:
self.password = werkzeug.security.generate_password_hash(plain_text)
def is_valid_password(self, plain_text: str) -> bool:
return werkzeug.security.check_password_hash(
self.password or "md5$$", plain_text
)
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}("
f"username={self.username!r}, "
f"email={self.email!r}, "
f"real_name={self.real_name!r}, "
f"role={self.role.value!r}, "
f"password={self.password!r})"
)
def asdict(self) -> dict[str, Optional[str]]:
return {
"username": self.username,
"email": self.email,
"real_name": self.real_name,
"role": self.role.value,
"password": self.password,
}
class Users:
def __init__(self, init: Optional[dict[str, User]] = None) -> None:
self.users = init or {}
self.anonymous = User("", "", "", Role.UNDEFINED)
self.app: Optional[Flask] = None
def init_app(self, app: Flask) -> None:
self.app = app
self.app.config.setdefault("USER_FILE", Path("users.csv"))
def get_user(self, name: str, default: Optional[User] = None) -> User:
if not self.app:
raise RuntimeError("Users not bound to an app")
if not self.users:
with self.app.config["USER_FILE"].open() as user_file:
row_iter = csv.DictReader(user_file)
user_iter = (User.from_dict(row) for row in row_iter if row)
self.users = {user.username: user for user in user_iter}
return self.users.get(name, default or self.anonymous)
def add_user(self, user: User) -> None:
if user.username in self.users:
raise ValueError("Duplicate Username")
self.users[user.username] = user
def save(self) -> None:
if not self.app:
raise RuntimeError("Users not bound to an app")
with self.app.config["USER_FILE"].open("w", newline="") as user_file:
writer = csv.DictWriter(user_file, User.headers)
writer.writeheader()
writer.writerows(u.asdict() for u in self.users.values())
def __len__(self) -> int:
return len(self.users)
def values(self) -> Iterator[User]:
return iter(self.users.values())
class NotAuthorized(Exception):
status_code = 401
def __init__(
self,
message: str,
status_code: Optional[int] = None,
payload: Optional[dict[str, str]] = None,
) -> None:
super().__init__(message)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def asdict(self) -> dict[str, Any]:
rv: dict[str, Any] = dict(self.payload or ())
rv["message"] = self.message
return rv
def authenticate(view_function: Callable[..., Response]) -> Callable[..., Response]:
@wraps(view_function)
def decorated_function(*args: str) -> Response:
auth_body = request.headers.get("Authorization", "").split(" ")
auth_type, credentials = auth_body if len(auth_body) == 2 else ("", ":")
username, _, password = (
base64.b64decode(credentials).decode("utf-8").partition(":")
)
g.user = users.get_user(username)
conditions = [
auth_type.upper() == "BASIC",
g.user.is_valid_password(password),
]
if not all(conditions):
raise NotAuthorized("Unknown User")
return view_function(*args)
return decorated_function
class Config:
USER_FILE = Path("data/users.csv")
class Demo(Config):
ENV = "development"
DEBUG = True
TESTING = True
app = Flask(__name__)
app.config.from_object(Demo)
users = Users()
users.init_app(app)
@app.errorhandler(NotAuthorized)
def handle_unauthorized(error: NotAuthorized) -> Response:
response = jsonify(error.asdict())
response.status_code = error.status_code
return response
@app.route("/health")
def user_list() -> Response:
users.get_user("")
response = {"status": "OK", "user_count": len(users)}
if app.config["TESTING"]:
response["users"] = [u.asdict() for u in users.values()]
return jsonify(response)
@app.route("/whoami")
@authenticate
def who_am_i() -> Response:
app.logger.info(f"whoami with {request.headers}: User {g.user}")
return jsonify(
{
"status": "OK",
"user": g.user.asdict(),
}
)
if __name__ == "__main__":
app.run(ssl_context="adhoc")
| true | true |
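
One way to exercise the Flask service in the classifier.py row above from a client (illustrative only, not part of the stored file; the host, port, and credentials are assumptions, and the user must already exist in data/users.csv):

import requests

# app.run(ssl_context="adhoc") serves HTTPS with a self-signed cert on Flask's default port,
# so certificate verification is skipped in this sketch.
BASE = "https://localhost:5000"

# Unauthenticated health check.
print(requests.get(f"{BASE}/health", verify=False).json())

# /whoami requires HTTP Basic credentials matching a row in the users file ("noriko"/"secret" are placeholders).
print(requests.get(f"{BASE}/whoami", auth=("noriko", "secret"), verify=False).json())
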
1c2b4eecc8e1717a45520c3b8840de0d0f2b3a1f | 41 | py | Python | pyml_ensemble/model/__init__.py | anthonymorast/pyml-ensemble | a52e454f4c8d92412b3ee66140f78d19da32b53c | [
"MIT"
] | null | null | null | pyml_ensemble/model/__init__.py | anthonymorast/pyml-ensemble | a52e454f4c8d92412b3ee66140f78d19da32b53c | [
"MIT"
] | 5 | 2020-02-13T03:55:53.000Z | 2021-02-12T17:53:15.000Z | pyml_ensemble/model/__init__.py | anthonymorast/pyml-ensemble | a52e454f4c8d92412b3ee66140f78d19da32b53c | [
"MIT"
] | null | null | null | from .model import *
from .tree import *
| 13.666667 | 20 | 0.707317 | from .model import *
from .tree import *
| true | true |